1
2 // Licensed to the .NET Foundation under one or more agreements.
3 // The .NET Foundation licenses this file to you under the MIT license.
4 // See the LICENSE file in the project root for more information.
5
6 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
8 XX                                                                           XX
9 XX                           Importer                                        XX
10 XX                                                                           XX
11 XX   Imports the given method and converts it to semantic trees              XX
12 XX                                                                           XX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
15 */
16
17 #include "jitpch.h"
18 #ifdef _MSC_VER
19 #pragma hdrstop
20 #endif
21
22 #include "corexcep.h"
23
24 #define Verify(cond, msg)                                                                                              \
25     do                                                                                                                 \
26     {                                                                                                                  \
27         if (!(cond))                                                                                                   \
28         {                                                                                                              \
29             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
30         }                                                                                                              \
31     } while (0)
32
33 #define VerifyOrReturn(cond, msg)                                                                                      \
34     do                                                                                                                 \
35     {                                                                                                                  \
36         if (!(cond))                                                                                                   \
37         {                                                                                                              \
38             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
39             return;                                                                                                    \
40         }                                                                                                              \
41     } while (0)
42
43 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
44     do                                                                                                                 \
45     {                                                                                                                  \
46         if (speculative)                                                                                               \
47         {                                                                                                              \
48             if (!(cond))                                                                                               \
49             {                                                                                                          \
50                 return false;                                                                                          \
51             }                                                                                                          \
52         }                                                                                                              \
53         else                                                                                                           \
54         {                                                                                                              \
55             if (!(cond))                                                                                               \
56             {                                                                                                          \
57                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
58                 return false;                                                                                          \
59             }                                                                                                          \
60         }                                                                                                              \
61     } while (0)
62
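// Illustrative usage sketch (hypothetical, for exposition): the 'speculative' flag in
// VerifyOrReturnSpeculative lets a caller probe a condition without raising a
// verification error. A checker built on these macros might look like:
//
//     bool Compiler::verIsByRefExample(const typeInfo& ti, bool speculative) // hypothetical helper
//     {
//         VerifyOrReturnSpeculative(ti.IsByRef(), "byref expected", speculative);
//         return true;
//     }
//
// With 'speculative == true' a failed check simply returns false; otherwise it first
// calls verRaiseVerifyExceptionIfNeeded and then returns false.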
63 /*****************************************************************************/
64
65 void Compiler::impInit()
66 {
67
68 #ifdef DEBUG
69     impTreeList        = nullptr;
70     impTreeLast        = nullptr;
71     impInlinedCodeSize = 0;
72 #endif
73 }
74
75 /*****************************************************************************
76  *
77  *  Pushes the given tree on the stack.
78  */
79
80 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
81 {
82     /* Check for overflow. If inlining, we may be using a bigger stack */
83
84     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
85         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
86     {
87         BADCODE("stack overflow");
88     }
89
90 #ifdef DEBUG
91     // If we are pushing a struct, make certain we know the precise type!
92     if (tree->TypeGet() == TYP_STRUCT)
93     {
94         assert(ti.IsType(TI_STRUCT));
95         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
96         assert(clsHnd != NO_CLASS_HANDLE);
97     }
98
99     if (tiVerificationNeeded && !ti.IsDead())
100     {
101         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
102
103         // The ti type is consistent with the tree type.
104         //
105
106         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
107         // In the verification type system, we always transform "native int" to "TI_INT".
108         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
109         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
110         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
111         // method used in the last disjunct allows exactly this mismatch.
112         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
113                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
114                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
115                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
116                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
117                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
118
119         // If it is a struct type, make certain we normalized the primitive types
120         assert(!ti.IsType(TI_STRUCT) ||
121                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
122     }
123
124 #if VERBOSE_VERIFY
125     if (VERBOSE && tiVerificationNeeded)
126     {
127         printf("\n");
128         printf(TI_DUMP_PADDING);
129         printf("About to push to stack: ");
130         ti.Dump();
131     }
132 #endif // VERBOSE_VERIFY
133
134 #endif // DEBUG
135
136     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
137     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
138
139     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
140     {
141         compLongUsed = true;
142     }
143     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
144     {
145         compFloatingPointUsed = true;
146     }
147 }
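// Illustrative call-site sketch (hypothetical, for exposition): importing "ldc.i4 42"
// would push an int constant together with its verification type roughly as
//
//     impPushOnStack(gtNewIconNode(42), typeInfo(TI_INT));
//
// while "ldnull" uses the dedicated helper below, which pushes a TYP_REF zero tagged TI_NULL.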
148
149 inline void Compiler::impPushNullObjRefOnStack()
150 {
151     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
152 }
153
154 // This method gets called when we run into unverifiable code
155 // (and we are verifying the method)
156
157 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
158                                                           DEBUGARG(unsigned line))
159 {
160     // Remember that the code is not verifiable
161     // Note that the method may yet pass canSkipMethodVerification(),
162     // and so the presence of unverifiable code may not be an issue.
163     tiIsVerifiableCode = FALSE;
164
165 #ifdef DEBUG
166     const char* tail = strrchr(file, '\\');
167     if (tail)
168     {
169         file = tail + 1;
170     }
171
172     if (JitConfig.JitBreakOnUnsafeCode())
173     {
174         assert(!"Unsafe code detected");
175     }
176 #endif
177
178     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
179             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
180
181     if (verNeedsVerification() || compIsForImportOnly())
182     {
183         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
184                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
185         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
186     }
187 }
188
189 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
190                                                                     DEBUGARG(unsigned line))
191 {
192     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
193             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
194
195 #ifdef DEBUG
196     //    BreakIfDebuggerPresent();
197     if (getBreakOnBadCode())
198     {
199         assert(!"Typechecking error");
200     }
201 #endif
202
203     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
204     UNREACHABLE();
205 }
206
207 // Helper function that tells us whether the IL instruction at the given code
208 // address consumes an address from the top of the stack. We use it to avoid
209 // marking locals as address-taken (lvAddrTaken) unnecessarily.
210 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
211 {
212     assert(!compIsForInlining());
213
214     OPCODE opcode;
215
216     opcode = (OPCODE)getU1LittleEndian(codeAddr);
217
218     switch (opcode)
219     {
220         // case CEE_LDFLDA: We're leaving this one out because if you have a sequence
221         // like
222         //
223         //          ldloca.0
224         //          ldflda whatever
225         //
226         // of a primitive-like struct, you end up after morphing with the address of a local
227         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
228         // for structs that contain other structs, which isn't a case we handle very
229         // well now for other reasons.
230
231         case CEE_LDFLD:
232         {
233             // We won't collapse small fields. This is probably not the right place to have this
234             // check, but we're only using the function for this purpose, and it is easy to factor
235             // it out if we need to do so.
236
237             CORINFO_RESOLVED_TOKEN resolvedToken;
238             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
239
240             CORINFO_CLASS_HANDLE clsHnd;
241             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
242
243             // Preserve 'small' int types
244             if (!varTypeIsSmall(lclTyp))
245             {
246                 lclTyp = genActualType(lclTyp);
247             }
248
249             if (varTypeIsSmall(lclTyp))
250             {
251                 return false;
252             }
253
254             return true;
255         }
256         default:
257             break;
258     }
259
260     return false;
261 }
262
263 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
264 {
265     pResolvedToken->tokenContext = impTokenLookupContextHandle;
266     pResolvedToken->tokenScope   = info.compScopeHnd;
267     pResolvedToken->token        = getU4LittleEndian(addr);
268     pResolvedToken->tokenType    = kind;
269
270     if (!tiVerificationNeeded)
271     {
272         info.compCompHnd->resolveToken(pResolvedToken);
273     }
274     else
275     {
276         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
277     }
278 }
279
280 /*****************************************************************************
281  *
282  *  Pop one tree from the stack.
283  */
284
285 StackEntry Compiler::impPopStack()
286 {
287     if (verCurrentState.esStackDepth == 0)
288     {
289         BADCODE("stack underflow");
290     }
291
292 #ifdef DEBUG
293 #if VERBOSE_VERIFY
294     if (VERBOSE && tiVerificationNeeded)
295     {
296         JITDUMP("\n");
297         printf(TI_DUMP_PADDING);
298         printf("About to pop from the stack: ");
299         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
300         ti.Dump();
301     }
302 #endif // VERBOSE_VERIFY
303 #endif // DEBUG
304
305     return verCurrentState.esStack[--verCurrentState.esStackDepth];
306 }
307
308 /*****************************************************************************
309  *
310  *  Peek at the n'th (0-based) tree from the top of the stack.
311  */
312
313 StackEntry& Compiler::impStackTop(unsigned n)
314 {
315     if (verCurrentState.esStackDepth <= n)
316     {
317         BADCODE("stack underflow");
318     }
319
320     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
321 }
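// Illustrative sketch (hypothetical, for exposition): with entries pushed in the order
// a, b, c (so c is the most recent), impStackTop() refers to c, impStackTop(1) to b,
// and impStackTop(2) to a. A binary-operator importer would typically peek at its
// operands as
//
//     GenTreePtr op2 = impStackTop(0).val; // right operand (top of stack)
//     GenTreePtr op1 = impStackTop(1).val; // left operand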
322
323 unsigned Compiler::impStackHeight()
324 {
325     return verCurrentState.esStackDepth;
326 }
327
328 /*****************************************************************************
329  *  Some of the trees are spilled specially. While unspilling them, or
330  *  making a copy, these need to be handled specially. The function
331  *  enumerates the operators possible after spilling.
332  */
333
334 #ifdef DEBUG // only used in asserts
335 static bool impValidSpilledStackEntry(GenTreePtr tree)
336 {
337     if (tree->gtOper == GT_LCL_VAR)
338     {
339         return true;
340     }
341
342     if (tree->OperIsConst())
343     {
344         return true;
345     }
346
347     return false;
348 }
349 #endif
350
351 /*****************************************************************************
352  *
353  *  The following logic is used to save/restore stack contents.
354  *  If 'copy' is true, then we make a copy of the trees on the stack. These
355  *  have to all be cloneable/spilled values.
356  */
357
358 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
359 {
360     savePtr->ssDepth = verCurrentState.esStackDepth;
361
362     if (verCurrentState.esStackDepth)
363     {
364         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
365         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
366
367         if (copy)
368         {
369             StackEntry* table = savePtr->ssTrees;
370
371             /* Make a fresh copy of all the stack entries */
372
373             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
374             {
375                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
376                 GenTreePtr tree   = verCurrentState.esStack[level].val;
377
378                 assert(impValidSpilledStackEntry(tree));
379
380                 switch (tree->gtOper)
381                 {
382                     case GT_CNS_INT:
383                     case GT_CNS_LNG:
384                     case GT_CNS_DBL:
385                     case GT_CNS_STR:
386                     case GT_LCL_VAR:
387                         table->val = gtCloneExpr(tree);
388                         break;
389
390                     default:
391                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
392                         break;
393                 }
394             }
395         }
396         else
397         {
398             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
399         }
400     }
401 }
402
403 void Compiler::impRestoreStackState(SavedStack* savePtr)
404 {
405     verCurrentState.esStackDepth = savePtr->ssDepth;
406
407     if (verCurrentState.esStackDepth)
408     {
409         memcpy(verCurrentState.esStack, savePtr->ssTrees,
410                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
411     }
412 }
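// Illustrative save/restore sketch (hypothetical, for exposition): code that needs to
// import a side path without disturbing the evaluation stack can bracket the work with
// these helpers, e.g.
//
//     SavedStack blockState;
//     impSaveStackState(&blockState, true); // 'true' => clone the stack entries
//     // ... work that may modify verCurrentState.esStack ...
//     impRestoreStackState(&blockState);
//
// Passing 'copy == false' only snapshots the StackEntry array itself, which is enough
// when the saved entries will not be mutated before the restore.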
413
414 /*****************************************************************************
415  *
416  *  Get the tree list started for a new basic block.
417  */
418 inline void Compiler::impBeginTreeList()
419 {
420     assert(impTreeList == nullptr && impTreeLast == nullptr);
421
422     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
423 }
424
425 /*****************************************************************************
426  *
427  *  Store the given start and end stmt in the given basic block. This is
428  *  mostly called by impEndTreeList(BasicBlock *block). It is called
429  *  directly only for handling CEE_LEAVEs out of finally-protected try's.
430  */
431
432 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
433 {
434     assert(firstStmt->gtOper == GT_STMT);
435     assert(lastStmt->gtOper == GT_STMT);
436
437     /* Make the list circular, so that we can easily walk it backwards */
438
439     firstStmt->gtPrev = lastStmt;
440
441     /* Store the tree list in the basic block */
442
443     block->bbTreeList = firstStmt;
444
445     /* The block should not already be marked as imported */
446     assert((block->bbFlags & BBF_IMPORTED) == 0);
447
448     block->bbFlags |= BBF_IMPORTED;
449 }
450
451 /*****************************************************************************
452  *
453  *  Store the current tree list in the given basic block.
454  */
455
456 inline void Compiler::impEndTreeList(BasicBlock* block)
457 {
458     assert(impTreeList->gtOper == GT_BEG_STMTS);
459
460     GenTreePtr firstTree = impTreeList->gtNext;
461
462     if (!firstTree)
463     {
464         /* The block should not already be marked as imported */
465         assert((block->bbFlags & BBF_IMPORTED) == 0);
466
467         // Empty block. Just mark it as imported
468         block->bbFlags |= BBF_IMPORTED;
469     }
470     else
471     {
472         // Ignore the GT_BEG_STMTS
473         assert(firstTree->gtPrev == impTreeList);
474
475         impEndTreeList(block, firstTree, impTreeLast);
476     }
477
478 #ifdef DEBUG
479     if (impLastILoffsStmt != nullptr)
480     {
481         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
482         impLastILoffsStmt                          = nullptr;
483     }
484
485     impTreeList = impTreeLast = nullptr;
486 #endif
487 }
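// Illustrative per-block pattern (hypothetical sketch, for exposition): the importer
// brackets each basic block's statements roughly as
//
//     impBeginTreeList();               // start a fresh GT_BEG_STMTS-anchored list
//     // ... impAppendStmt / impAppendTree calls while importing the block's IL ...
//     impEndTreeList(block);            // hand the list to 'block', mark BBF_IMPORTED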
488
489 /*****************************************************************************
490  *
491  *  Check that storing the given tree doesn't mess up the semantic order. Note
492  *  that this has only limited value as we can only check [0..chkLevel).
493  */
494
495 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
496 {
497 #ifndef DEBUG
498     return;
499 #else
500     assert(stmt->gtOper == GT_STMT);
501
502     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
503     {
504         chkLevel = verCurrentState.esStackDepth;
505     }
506
507     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
508     {
509         return;
510     }
511
512     GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
513
514     // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack
515
516     if (tree->gtFlags & GTF_CALL)
517     {
518         for (unsigned level = 0; level < chkLevel; level++)
519         {
520             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
521         }
522     }
523
524     if (tree->gtOper == GT_ASG)
525     {
526         // For an assignment to a local variable, all references of that
527         // variable have to be spilled. If it is aliased, all calls and
528         // indirect accesses have to be spilled
529
530         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
531         {
532             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
533             for (unsigned level = 0; level < chkLevel; level++)
534             {
535                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
536                 assert(!lvaTable[lclNum].lvAddrExposed ||
537                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
538             }
539         }
540
541         // If the access may be to global memory, all side effects have to be spilled.
542
543         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
544         {
545             for (unsigned level = 0; level < chkLevel; level++)
546             {
547                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
548             }
549         }
550     }
551 #endif
552 }
553
554 /*****************************************************************************
555  *
556  *  Append the given GT_STMT node to the current block's tree list.
557  *  [0..chkLevel) is the portion of the stack which we will check for
558  *    interference with stmt and spill if needed.
559  */
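// Illustrative note (hypothetical, for exposition): chkLevel at a call site is
// typically one of
//
//     impAppendStmt(stmt, (unsigned)CHECK_SPILL_ALL);  // check/spill against the whole stack
//     impAppendStmt(stmt, (unsigned)CHECK_SPILL_NONE); // caller guarantees no interference
//     impAppendStmt(stmt, curLevel);                   // check only stack entries [0..curLevel)
//
// where 'curLevel' stands for a depth the caller has already accounted for.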
560
561 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
562 {
563     assert(stmt->gtOper == GT_STMT);
564     noway_assert(impTreeLast != nullptr);
565
566     /* If the statement being appended has any side-effects, check the stack
567        to see if anything needs to be spilled to preserve correct ordering. */
568
569     GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
570     unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
571
572     // Assignments to (unaliased) locals don't count as a side-effect as
573     // we handle them specially using impSpillLclRefs(). Temp locals should
574     // be fine too.
575
576     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
577         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
578     {
579         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
580         assert(flags == (op2Flags | GTF_ASG));
581         flags = op2Flags;
582     }
583
584     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
585     {
586         chkLevel = verCurrentState.esStackDepth;
587     }
588
589     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
590     {
591         assert(chkLevel <= verCurrentState.esStackDepth);
592
593         if (flags)
594         {
595             // If there is a call, we have to spill global refs
596             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
597
598             if (expr->gtOper == GT_ASG)
599             {
600                 GenTree* lhs = expr->gtGetOp1();
601                 // If we are assigning to a global ref, we have to spill global refs on stack.
602                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
603                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
604                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
605                 if (!expr->OperIsBlkOp())
606                 {
607                     // If we are assigning to a global ref, we have to spill global refs on stack
608                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
609                     {
610                         spillGlobEffects = true;
611                     }
612                 }
613                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
614                          ((lhs->OperGet() == GT_LCL_VAR) &&
615                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
616                 {
617                     spillGlobEffects = true;
618                 }
619             }
620
621             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
622         }
623         else
624         {
625             impSpillSpecialSideEff();
626         }
627     }
628
629     impAppendStmtCheck(stmt, chkLevel);
630
631     /* Point 'prev' at the previous node, so that we can walk backwards */
632
633     stmt->gtPrev = impTreeLast;
634
635     /* Append the expression statement to the list */
636
637     impTreeLast->gtNext = stmt;
638     impTreeLast         = stmt;
639
640 #ifdef FEATURE_SIMD
641     impMarkContiguousSIMDFieldAssignments(stmt);
642 #endif
643
644     /* Once we set impCurStmtOffs in an appended tree, we are ready to
645        report the following offsets. So reset impCurStmtOffs */
646
647     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
648     {
649         impCurStmtOffsSet(BAD_IL_OFFSET);
650     }
651
652 #ifdef DEBUG
653     if (impLastILoffsStmt == nullptr)
654     {
655         impLastILoffsStmt = stmt;
656     }
657
658     if (verbose)
659     {
660         printf("\n\n");
661         gtDispTree(stmt);
662     }
663 #endif
664 }
665
666 /*****************************************************************************
667  *
668  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
669  */
670
671 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
672 {
673     assert(stmt->gtOper == GT_STMT);
674     assert(stmtBefore->gtOper == GT_STMT);
675
676     GenTreePtr stmtPrev = stmtBefore->gtPrev;
677     stmt->gtPrev        = stmtPrev;
678     stmt->gtNext        = stmtBefore;
679     stmtPrev->gtNext    = stmt;
680     stmtBefore->gtPrev  = stmt;
681 }
682
683 /*****************************************************************************
684  *
685  *  Append the given expression tree to the current block's tree list.
686  *  Return the newly created statement.
687  */
688
689 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
690 {
691     assert(tree);
692
693     /* Allocate an 'expression statement' node */
694
695     GenTreePtr expr = gtNewStmt(tree, offset);
696
697     /* Append the statement to the current block's stmt list */
698
699     impAppendStmt(expr, chkLevel);
700
701     return expr;
702 }
703
704 /*****************************************************************************
705  *
706  *  Insert the given expression tree before GT_STMT "stmtBefore"
707  */
708
709 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
710 {
711     assert(stmtBefore->gtOper == GT_STMT);
712
713     /* Allocate an 'expression statement' node */
714
715     GenTreePtr expr = gtNewStmt(tree, offset);
716
717     /* Append the statement to the current block's stmt list */
718
719     impInsertStmtBefore(expr, stmtBefore);
720 }
721
722 /*****************************************************************************
723  *
724  *  Append an assignment of the given value to a temp to the current tree list.
725  *  curLevel is the stack level for which the spill to the temp is being done.
726  */
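// Illustrative usage sketch (hypothetical, for exposition): a typical spill sequence
// grabs a temp, assigns the value to it, and then refers to the temp in later trees:
//
//     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("example spill temp"));
//     impAssignTempGen(tmpNum, value, (unsigned)CHECK_SPILL_ALL);
//     GenTreePtr use = gtNewLclvNode(tmpNum, genActualType(value->TypeGet()));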
727
728 void Compiler::impAssignTempGen(unsigned    tmp,
729                                 GenTreePtr  val,
730                                 unsigned    curLevel,
731                                 GenTreePtr* pAfterStmt, /* = NULL */
732                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
733                                 BasicBlock* block       /* = NULL */
734                                 )
735 {
736     GenTreePtr asg = gtNewTempAssign(tmp, val);
737
738     if (!asg->IsNothingNode())
739     {
740         if (pAfterStmt)
741         {
742             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
743             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
744         }
745         else
746         {
747             impAppendTree(asg, curLevel, impCurStmtOffs);
748         }
749     }
750 }
751
752 /*****************************************************************************
753  * same as above, but handle the valueclass case too
754  */
755
756 void Compiler::impAssignTempGen(unsigned             tmpNum,
757                                 GenTreePtr           val,
758                                 CORINFO_CLASS_HANDLE structType,
759                                 unsigned             curLevel,
760                                 GenTreePtr*          pAfterStmt, /* = NULL */
761                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
762                                 BasicBlock*          block       /* = NULL */
763                                 )
764 {
765     GenTreePtr asg;
766
767     if (varTypeIsStruct(val))
768     {
769         assert(tmpNum < lvaCount);
770         assert(structType != NO_CLASS_HANDLE);
771
772         // If the method is non-verifiable, the assert does not hold, so at least ignore it
773         // when verification is turned on, since any block that tries to use the temp
774         // would have failed verification.
775         var_types varType = lvaTable[tmpNum].lvType;
776         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
777         lvaSetStruct(tmpNum, structType, false);
778
779         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
780         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
781         // that has been passed in for the value being assigned to the temp, in which case we
782         // need to set 'val' to that same type.
783         // Note also that if we always normalized the types of any node that might be a struct
784         // type, this would not be necessary - but that requires additional JIT/EE interface
785         // calls that may not actually be required - e.g. if we only access a field of a struct.
786
787         val->gtType = lvaTable[tmpNum].lvType;
788
789         GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
790         asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
791     }
792     else
793     {
794         asg = gtNewTempAssign(tmpNum, val);
795     }
796
797     if (!asg->IsNothingNode())
798     {
799         if (pAfterStmt)
800         {
801             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
802             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
803         }
804         else
805         {
806             impAppendTree(asg, curLevel, impCurStmtOffs);
807         }
808     }
809 }
810
811 /*****************************************************************************
812  *
813  *  Pop the given number of values from the stack and return a list node with
814  *  their values.
815  *  The 'prefixTree' argument may optionally contain an argument
816  *  list that is prepended to the list returned from this function.
817  *
818  *  The notion of prepended is a bit misleading in that the list is backwards
819  *  from the way I would expect: The first element popped is at the end of
820  *  the returned list, and prefixTree is 'before' that, meaning closer to
821  *  the end of the list.  To get to prefixTree, you have to walk to the
822  *  end of the list.
823  *
824  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
825  *  such we reverse its meaning such that returnValue has a reversed
826  *  prefixTree at the head of the list.
827  */
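// Worked example (illustrative only): suppose the stack was built by pushing a then b
// (so b is on top), count == 2, and prefixTree == p1 -> p2.
//
//   ARG_ORDER_L2R: treeList starts as p1 -> p2, so after the two pops the result
//                  is a -> b -> p1 -> p2.
//   ARG_ORDER_R2L: treeList starts empty, the pops build a -> b, and the in-place
//                  reversal at the end prepends the reversed prefix, giving
//                  p2 -> p1 -> a -> b.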
828
829 GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
830 {
831     assert(sig == nullptr || count == sig->numArgs);
832
833     CORINFO_CLASS_HANDLE structType;
834     GenTreeArgList*      treeList;
835
836     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
837     {
838         treeList = nullptr;
839     }
840     else
841     { // ARG_ORDER_L2R
842         treeList = prefixTree;
843     }
844
845     while (count--)
846     {
847         StackEntry se   = impPopStack();
848         typeInfo   ti   = se.seTypeInfo;
849         GenTreePtr temp = se.val;
850
851         if (varTypeIsStruct(temp))
852         {
853             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
854             assert(ti.IsType(TI_STRUCT));
855             structType = ti.GetClassHandleForValueClass();
856 #ifdef DEBUG
857             if (verbose)
858             {
859                 printf("Calling impNormStructVal on:\n");
860                 gtDispTree(temp);
861             }
862 #endif
863             temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
864 #ifdef DEBUG
865             if (verbose)
866             {
867                 printf("resulting tree:\n");
868                 gtDispTree(temp);
869             }
870 #endif
871         }
872
873         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
874         treeList = gtNewListNode(temp, treeList);
875     }
876
877     if (sig != nullptr)
878     {
879         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
880             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
881         {
882             // Make sure that all valuetypes (including enums) that we push are loaded.
883             // This is to guarantee that if a GC is triggered from the prestub of this method,
884             // all valuetypes in the method signature are already loaded.
885             // We need to be able to find the size of the valuetypes, but we cannot
886             // do a class-load from within GC.
887             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
888         }
889
890         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
891         CORINFO_CLASS_HANDLE    argClass;
892         CORINFO_CLASS_HANDLE    argRealClass;
893         GenTreeArgList*         args;
894
895         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
896         {
897             PREFIX_ASSUME(args != nullptr);
898
899             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
900
901             // insert implied casts (from float to double or double to float)
902
903             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
904             {
905                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
906             }
907             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
908             {
909                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
910             }
911
912             // insert any widening or narrowing casts for backwards compatibility
913
914             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
915
916             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
917                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
918             {
919                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett
920                 // JIT, but it stopped working in Whidbey when we started passing simple valuetypes as their
921                 // underlying primitive types.
922                 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
923                 // details).
924                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
925                 {
926                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
927                 }
928
929                 // Make sure that all valuetypes (including enums) that we push are loaded.
930                 // This is to guarantee that if a GC is triggered from the prestub of this method,
931                 // all valuetypes in the method signature are already loaded.
932                 // We need to be able to find the size of the valuetypes, but we cannot
933                 // do a class-load from within GC.
934                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
935             }
936
937             argLst = info.compCompHnd->getArgNext(argLst);
938         }
939     }
940
941     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
942     {
943         // Prepend the prefixTree
944
945         // Simple in-place reversal to place treeList
946         // at the end of a reversed prefixTree
947         while (prefixTree != nullptr)
948         {
949             GenTreeArgList* next = prefixTree->Rest();
950             prefixTree->Rest()   = treeList;
951             treeList             = prefixTree;
952             prefixTree           = next;
953         }
954     }
955     return treeList;
956 }
957
958 /*****************************************************************************
959  *
960  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
961  *  The first "skipReverseCount" items are not reversed.
962  */
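// Worked example (illustrative only): if impPopList returns a -> b -> c -> d, then
//
//   skipReverseCount == 0 yields d -> c -> b -> a   (the whole list is reversed)
//   skipReverseCount == 1 yields a -> d -> c -> b   (the first node stays in place)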
963
964 GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
965
966 {
967     assert(skipReverseCount <= count);
968
969     GenTreeArgList* list = impPopList(count, sig);
970
971     // reverse the list
972     if (list == nullptr || skipReverseCount == count)
973     {
974         return list;
975     }
976
977     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
978     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
979
980     if (skipReverseCount == 0)
981     {
982         ptr = list;
983     }
984     else
985     {
986         lastSkipNode = list;
987         // Get to the first node that needs to be reversed
988         for (unsigned i = 0; i < skipReverseCount - 1; i++)
989         {
990             lastSkipNode = lastSkipNode->Rest();
991         }
992
993         PREFIX_ASSUME(lastSkipNode != nullptr);
994         ptr = lastSkipNode->Rest();
995     }
996
997     GenTreeArgList* reversedList = nullptr;
998
999     do
1000     {
1001         GenTreeArgList* tmp = ptr->Rest();
1002         ptr->Rest()         = reversedList;
1003         reversedList        = ptr;
1004         ptr                 = tmp;
1005     } while (ptr != nullptr);
1006
1007     if (skipReverseCount)
1008     {
1009         lastSkipNode->Rest() = reversedList;
1010         return list;
1011     }
1012     else
1013     {
1014         return reversedList;
1015     }
1016 }
1017
1018 /*****************************************************************************
1019    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1020    class of type 'structHnd'.  It returns the tree that should be appended to the
1021    statement list that represents the assignment.
1022    Temp assignments may be appended to impTreeList if spilling is necessary.
1023    curLevel is the stack level for which a spill may be being done.
1024  */
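// Illustrative call-site sketch (hypothetical, for exposition): importing a struct
// store might build the destination, form the assignment, and append it roughly as
//
//     GenTreePtr dest = gtNewObjNode(structHnd, destAddrTree);
//     GenTreePtr asg  = impAssignStruct(dest, srcValue, structHnd, (unsigned)CHECK_SPILL_ALL);
//     impAppendTree(asg, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
//
// where 'destAddrTree' and 'srcValue' stand in for trees produced elsewhere.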
1025
1026 GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
1027                                      GenTreePtr           src,
1028                                      CORINFO_CLASS_HANDLE structHnd,
1029                                      unsigned             curLevel,
1030                                      GenTreePtr*          pAfterStmt, /* = NULL */
1031                                      BasicBlock*          block       /* = NULL */
1032                                      )
1033 {
1034     assert(varTypeIsStruct(dest));
1035
1036     while (dest->gtOper == GT_COMMA)
1037     {
1038         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1039
1040         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1041         if (pAfterStmt)
1042         {
1043             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1044         }
1045         else
1046         {
1047             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1048         }
1049
1050         // set dest to the second thing
1051         dest = dest->gtOp.gtOp2;
1052     }
1053
1054     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1055            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1056
1057     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1058         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1059     {
1060         // Make this a NOP
1061         return gtNewNothingNode();
1062     }
1063
1064     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1065     // or re-creating a Blk node if it is.
1066     GenTreePtr destAddr;
1067
1068     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1069     {
1070         destAddr = dest->gtOp.gtOp1;
1071     }
1072     else
1073     {
1074         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1075     }
1076
1077     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1078 }
1079
1080 /*****************************************************************************/
1081
1082 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
1083                                         GenTreePtr           src,
1084                                         CORINFO_CLASS_HANDLE structHnd,
1085                                         unsigned             curLevel,
1086                                         GenTreePtr*          pAfterStmt, /* = NULL */
1087                                         BasicBlock*          block       /* = NULL */
1088                                         )
1089 {
1090     var_types  destType;
1091     GenTreePtr dest      = nullptr;
1092     unsigned   destFlags = 0;
1093
1094 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1095     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1096     // TODO-ARM-BUG: Does ARM need this?
1097     // TODO-ARM64-BUG: Does ARM64 need this?
1098     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1099            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1100            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1101            (src->TypeGet() != TYP_STRUCT &&
1102             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1103 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1104     assert(varTypeIsStruct(src));
1105
1106     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1107            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1108            src->gtOper == GT_COMMA ||
1109            (src->TypeGet() != TYP_STRUCT &&
1110             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1111 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1112     if (destAddr->OperGet() == GT_ADDR)
1113     {
1114         GenTree* destNode = destAddr->gtGetOp1();
1115         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1116         // will be morphed, don't insert an OBJ(ADDR).
1117         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1118 #ifndef LEGACY_BACKEND
1119             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1120 #endif // !LEGACY_BACKEND
1121                 )
1122         {
1123             dest = destNode;
1124         }
1125         destType = destNode->TypeGet();
1126     }
1127     else
1128     {
1129         destType = src->TypeGet();
1130     }
1131
1132     var_types asgType = src->TypeGet();
1133
1134     if (src->gtOper == GT_CALL)
1135     {
1136         if (src->AsCall()->TreatAsHasRetBufArg(this))
1137         {
1138             // Case of call returning a struct via hidden retbuf arg
1139
1140             // insert the return value buffer into the argument list as first byref parameter
1141             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1142
1143             // now returns void, not a struct
1144             src->gtType = TYP_VOID;
1145
1146             // return the morphed call node
1147             return src;
1148         }
1149         else
1150         {
1151             // Case of call returning a struct in one or more registers.
1152
1153             var_types returnType = (var_types)src->gtCall.gtReturnType;
1154
1155             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1156             src->gtType = genActualType(returnType);
1157
1158             // First we try to change this to "LclVar/LclFld = call"
1159             //
1160             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1161             {
1162                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1163                 // That is, the IR will be of the form lclVar = call for multi-reg return
1164                 //
1165                 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1166                 if (src->AsCall()->HasMultiRegRetVal())
1167                 {
1168                     // Mark the struct LclVar as used in a MultiReg return context
1169                     //  which currently makes it non promotable.
1170                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1171                     // handle multireg returns.
1172                     lcl->gtFlags |= GTF_DONT_CSE;
1173                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1174                 }
1175                 else // The call result is not a multireg return
1176                 {
1177                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1178                     lcl->ChangeOper(GT_LCL_FLD);
1179                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1180                 }
1181
1182                 lcl->gtType = src->gtType;
1183                 asgType     = src->gtType;
1184                 dest        = lcl;
1185
1186 #if defined(_TARGET_ARM_)
1187                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1188                 // but that method has not been updated to include ARM.
1189                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1190                 lcl->gtFlags |= GTF_DONT_CSE;
1191 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1192                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1193                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1194
1195                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1196                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1197                 // handle multireg returns.
1198                 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1199                 // non-multireg returns.
1200                 lcl->gtFlags |= GTF_DONT_CSE;
1201                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1202 #endif
1203             }
1204             else // we don't have a GT_ADDR of a GT_LCL_VAR
1205             {
1206                 // !!! The destination could be on stack. !!!
1207                 // This flag will let us choose the correct write barrier.
1208                 asgType   = returnType;
1209                 destFlags = GTF_IND_TGTANYWHERE;
1210             }
1211         }
1212     }
1213     else if (src->gtOper == GT_RET_EXPR)
1214     {
1215         GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1216         noway_assert(call->gtOper == GT_CALL);
1217
1218         if (call->HasRetBufArg())
1219         {
1220             // insert the return value buffer into the argument list as first byref parameter
1221             call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1222
1223             // now returns void, not a struct
1224             src->gtType  = TYP_VOID;
1225             call->gtType = TYP_VOID;
1226
1227             // We already have appended the write to 'dest' GT_CALL's args
1228             // So now we just return an empty node (pruning the GT_RET_EXPR)
1229             return src;
1230         }
1231         else
1232         {
1233             // Case of inline method returning a struct in one or more registers.
1234             //
1235             var_types returnType = (var_types)call->gtReturnType;
1236
1237             // We won't need a return buffer
1238             asgType      = returnType;
1239             src->gtType  = genActualType(returnType);
1240             call->gtType = src->gtType;
1241
1242             // If we've changed the type, and it no longer matches a local destination,
1243             // we must use an indirection.
1244             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1245             {
1246                 dest = nullptr;
1247             }
1248
1249             // !!! The destination could be on stack. !!!
1250             // This flag will let us choose the correct write barrier.
1251             destFlags = GTF_IND_TGTANYWHERE;
1252         }
1253     }
1254     else if (src->OperIsBlk())
1255     {
1256         asgType = impNormStructType(structHnd);
1257         if (src->gtOper == GT_OBJ)
1258         {
1259             assert(src->gtObj.gtClass == structHnd);
1260         }
1261     }
1262     else if (src->gtOper == GT_INDEX)
1263     {
1264         asgType = impNormStructType(structHnd);
1265         assert(src->gtIndex.gtStructElemClass == structHnd);
1266     }
1267     else if (src->gtOper == GT_MKREFANY)
1268     {
1269         // Since we are assigning the result of a GT_MKREFANY,
1270         // "destAddr" must point to a refany.
1271
1272         GenTreePtr destAddrClone;
1273         destAddr =
1274             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1275
1276         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1277         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1278         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1279         GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1280         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1281         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1282         GenTreePtr typeSlot =
1283             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1284
1285         // append the assign of the pointer value
1286         GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1287         if (pAfterStmt)
1288         {
1289             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1290         }
1291         else
1292         {
1293             impAppendTree(asg, curLevel, impCurStmtOffs);
1294         }
1295
1296         // return the assign of the type value, to be appended
1297         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1298     }
1299     else if (src->gtOper == GT_COMMA)
1300     {
1301         // The second thing is the struct or its address.
1302         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1303         if (pAfterStmt)
1304         {
1305             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1306         }
1307         else
1308         {
1309             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1310         }
1311
1312         // Evaluate the second thing using recursion.
1313         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1314     }
1315     else if (src->IsLocal())
1316     {
1317         asgType = src->TypeGet();
1318     }
1319     else if (asgType == TYP_STRUCT)
1320     {
1321         asgType     = impNormStructType(structHnd);
1322         src->gtType = asgType;
1323 #ifdef LEGACY_BACKEND
1324         if (asgType == TYP_STRUCT)
1325         {
1326             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1327             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1328         }
1329 #endif
1330     }
1331     if (dest == nullptr)
1332     {
1333         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1334         // if this is a known struct type.
1335         if (asgType == TYP_STRUCT)
1336         {
1337             dest = gtNewObjNode(structHnd, destAddr);
1338             gtSetObjGcInfo(dest->AsObj());
1339             // Although an obj as a call argument was always assumed to be a globRef
1340             // (which is itself overly conservative), that is not true of the operands
1341             // of a block assignment.
1342             dest->gtFlags &= ~GTF_GLOB_REF;
1343             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1344         }
1345         else if (varTypeIsStruct(asgType))
1346         {
1347             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1348         }
1349         else
1350         {
1351             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1352         }
1353     }
1354     else
1355     {
1356         dest->gtType = asgType;
1357     }
1358
1359     dest->gtFlags |= destFlags;
1360     destFlags = dest->gtFlags;
1361
1362     // return an assignment node, to be appended
1363     GenTree* asgNode = gtNewAssignNode(dest, src);
1364     gtBlockOpInit(asgNode, dest, src, false);
1365
1366     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1367     // of assignments.
1368     if ((destFlags & GTF_DONT_CSE) == 0)
1369     {
1370         dest->gtFlags &= ~(GTF_DONT_CSE);
1371     }
1372     return asgNode;
1373 }
1374
1375 /*****************************************************************************
1376    Given a struct value and the class handle for that structure, return
1377    the expression for the address of that structure value.
1378
1379    willDeref - whether the caller guarantees to dereference the returned pointer.
1380 */
1381
1382 GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
1383                                       CORINFO_CLASS_HANDLE structHnd,
1384                                       unsigned             curLevel,
1385                                       bool                 willDeref)
1386 {
1387     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1388
1389     var_types type = structVal->TypeGet();
1390
1391     genTreeOps oper = structVal->gtOper;
1392
1393     if (oper == GT_OBJ && willDeref)
1394     {
1395         assert(structVal->gtObj.gtClass == structHnd);
1396         return (structVal->gtObj.Addr());
1397     }
1398     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1399     {
1400         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1401
1402         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1403
1404         // The 'return value' is now the temp itself
1405
1406         type            = genActualType(lvaTable[tmpNum].TypeGet());
1407         GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1408         temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1409         return temp;
1410     }
1411     else if (oper == GT_COMMA)
1412     {
1413         assert(structVal->gtOp.gtOp2->gtType == type); // The second operand is the struct
1414
1415         GenTreePtr oldTreeLast = impTreeLast;
1416         structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1417         structVal->gtType      = TYP_BYREF;
1418
1419         if (oldTreeLast != impTreeLast)
1420         {
1421             // Some temp assignment statement was placed on the statement list
1422             // for Op2, but that would be out of order with op1, so we need to
1423             // spill op1 onto the statement list after whatever was last
1424             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1425             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1426             structVal->gtOp.gtOp1 = gtNewNothingNode();
1427         }
1428
1429         return (structVal);
1430     }
1431
1432     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1433 }
1434
1435 //------------------------------------------------------------------------
1436 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1437 //                    and optionally determine the GC layout of the struct.
1438 //
1439 // Arguments:
1440 //    structHnd       - The class handle for the struct type of interest.
1441 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1442 //                      into which the gcLayout will be written.
1443 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1444 //                      which will be set to the number of GC fields in the struct.
1445 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1446 //                      type, set to the SIMD base type
1447 //
1448 // Return Value:
1449 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1450 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1451 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1452 //
1453 // Assumptions:
1454 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1455 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1456 //
1457 // Notes:
1458 //    Normalizing the type involves examining the struct type to determine if it should
1459 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1460 //    for full enregistration, e.g. TYP_SIMD16.
1461
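// Illustrative sketch (not taken from a real call site): assuming 'vecHnd' is the class
// handle of a 16-byte SIMD struct such as System.Numerics.Vector4, a caller might do:
//
//     var_types vt = impNormStructType(vecHnd); // TYP_SIMD16 when FEATURE_SIMD is enabled,
//                                               // otherwise remains TYP_STRUCT
//
// For a struct containing GC references the type stays TYP_STRUCT, and the optional
// gcLayout / pNumGCVars outputs describe its GC fields.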
1462 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1463                                       BYTE*                gcLayout,
1464                                       unsigned*            pNumGCVars,
1465                                       var_types*           pSimdBaseType)
1466 {
1467     assert(structHnd != NO_CLASS_HANDLE);
1468
1469     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1470     var_types   structType  = TYP_STRUCT;
1471
1472     // On CoreCLR the check for GC pointers includes a "may", to account for the special
1473     // byref-like structs such as Span<T>. The "CONTAINS_STACK_PTR" flag is the relevant bit:
1474     // when it is set, the struct contains a ByRef that could be either a GC pointer or a
1475     // native pointer.
1476     const bool mayContainGCPtrs =
1477         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1478
1479 #ifdef FEATURE_SIMD
1480     // Check to see if this is a SIMD type.
1481     if (featureSIMD && !mayContainGCPtrs)
1482     {
1483         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1484
1485         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1486         {
1487             unsigned int sizeBytes;
1488             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1489             if (simdBaseType != TYP_UNKNOWN)
1490             {
1491                 assert(sizeBytes == originalSize);
1492                 structType = getSIMDTypeForSize(sizeBytes);
1493                 if (pSimdBaseType != nullptr)
1494                 {
1495                     *pSimdBaseType = simdBaseType;
1496                 }
1497                 // Also indicate that we use floating point registers.
1498                 compFloatingPointUsed = true;
1499             }
1500         }
1501     }
1502 #endif // FEATURE_SIMD
1503
1504     // Fetch GC layout info if requested
1505     if (gcLayout != nullptr)
1506     {
1507         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1508
1509         // Verify that the quick test up above via the class attributes gave a
1510         // safe view of the type's GCness.
1511         //
1512         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1513         // does not report any gc fields.
1514
1515         assert(mayContainGCPtrs || (numGCVars == 0));
1516
1517         if (pNumGCVars != nullptr)
1518         {
1519             *pNumGCVars = numGCVars;
1520         }
1521     }
1522     else
1523     {
1524         // Can't safely ask for number of GC pointers without also
1525         // asking for layout.
1526         assert(pNumGCVars == nullptr);
1527     }
1528
1529     return structType;
1530 }
1531
1532 //****************************************************************************
1533 //  Given a TYP_STRUCT value 'structVal', make sure it is 'canonical'; that is,
1534 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1535 //
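//  Illustrative sketch (an assumption, not a real call site): normalizing a call that
//  returns a struct introduces a temp and yields an OBJ over that temp's address:
//
//      GenTreePtr val = impNormStructVal(callTree, structHnd, (unsigned)CHECK_SPILL_ALL);
//      // val is now OBJ(ADDR(LCL_VAR tmp)), where tmp holds the call's return value
//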
1536 GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
1537                                       CORINFO_CLASS_HANDLE structHnd,
1538                                       unsigned             curLevel,
1539                                       bool                 forceNormalization /*=false*/)
1540 {
1541     assert(forceNormalization || varTypeIsStruct(structVal));
1542     assert(structHnd != NO_CLASS_HANDLE);
1543     var_types structType = structVal->TypeGet();
1544     bool      makeTemp   = false;
1545     if (structType == TYP_STRUCT)
1546     {
1547         structType = impNormStructType(structHnd);
1548     }
1549     bool                 alreadyNormalized = false;
1550     GenTreeLclVarCommon* structLcl         = nullptr;
1551
1552     genTreeOps oper = structVal->OperGet();
1553     switch (oper)
1554     {
1555         // GT_RETURN and GT_MKREFANY don't capture the handle.
1556         case GT_RETURN:
1557             break;
1558         case GT_MKREFANY:
1559             alreadyNormalized = true;
1560             break;
1561
1562         case GT_CALL:
1563             structVal->gtCall.gtRetClsHnd = structHnd;
1564             makeTemp                      = true;
1565             break;
1566
1567         case GT_RET_EXPR:
1568             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1569             makeTemp                         = true;
1570             break;
1571
1572         case GT_ARGPLACE:
1573             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1574             break;
1575
1576         case GT_INDEX:
1577             // This will be transformed to an OBJ later.
1578             alreadyNormalized                    = true;
1579             structVal->gtIndex.gtStructElemClass = structHnd;
1580             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1581             break;
1582
1583         case GT_FIELD:
1584             // Wrap it in a GT_OBJ.
1585             structVal->gtType = structType;
1586             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1587             break;
1588
1589         case GT_LCL_VAR:
1590         case GT_LCL_FLD:
1591             structLcl = structVal->AsLclVarCommon();
1592             // Wrap it in a GT_OBJ.
1593             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1594             __fallthrough;
1595
1596         case GT_OBJ:
1597         case GT_BLK:
1598         case GT_DYN_BLK:
1599         case GT_ASG:
1600             // These should already have the appropriate type.
1601             assert(structVal->gtType == structType);
1602             alreadyNormalized = true;
1603             break;
1604
1605         case GT_IND:
1606             assert(structVal->gtType == structType);
1607             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1608             alreadyNormalized = true;
1609             break;
1610
1611 #ifdef FEATURE_SIMD
1612         case GT_SIMD:
1613             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1614             break;
1615 #endif // FEATURE_SIMD
1616 #if FEATURE_HW_INTRINSICS
1617         case GT_HWIntrinsic:
1618             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1619             break;
1620 #endif
1621
1622         case GT_COMMA:
1623         {
1624             // The second operand could be a block node, a GT_FIELD, a GT_SIMD, or a GT_COMMA node.
1625             GenTree* blockNode = structVal->gtOp.gtOp2;
1626             assert(blockNode->gtType == structType);
1627
1628             // Is this GT_COMMA(op1, GT_COMMA())?
1629             GenTree* parent = structVal;
1630             if (blockNode->OperGet() == GT_COMMA)
1631             {
1632                 // Find the last node in the comma chain.
1633                 do
1634                 {
1635                     assert(blockNode->gtType == structType);
1636                     parent    = blockNode;
1637                     blockNode = blockNode->gtOp.gtOp2;
1638                 } while (blockNode->OperGet() == GT_COMMA);
1639             }
1640
1641             if (blockNode->OperGet() == GT_FIELD)
1642             {
1643                 // If we have a GT_FIELD then wrap it in a GT_OBJ.
1644                 blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
1645             }
1646
1647 #ifdef FEATURE_SIMD
1648             if (blockNode->OperGet() == GT_SIMD)
1649             {
1650                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1651                 alreadyNormalized  = true;
1652             }
1653             else
1654 #endif
1655 #if FEATURE_HW_INTRINSICS
1656                 if (blockNode->OperGet() == GT_HWIntrinsic && blockNode->AsHWIntrinsic()->isSIMD())
1657             {
1658                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1659                 alreadyNormalized  = true;
1660             }
1661             else
1662 #endif
1663             {
1664                 noway_assert(blockNode->OperIsBlk());
1665
1666                 // Sink the GT_COMMA below the blockNode addr.
1667                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1668                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1669                 //
1670                 // In the chained GT_COMMA case, we sink the last
1671                 // GT_COMMA below the blockNode addr.
1672                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1673                 assert(blockNodeAddr->gtType == TYP_BYREF);
1674                 GenTree* commaNode    = parent;
1675                 commaNode->gtType     = TYP_BYREF;
1676                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1677                 blockNode->gtOp.gtOp1 = commaNode;
1678                 if (parent == structVal)
1679                 {
1680                     structVal = blockNode;
1681                 }
1682                 alreadyNormalized = true;
1683             }
1684         }
1685         break;
1686
1687         default:
1688             noway_assert(!"Unexpected node in impNormStructVal()");
1689             break;
1690     }
1691     structVal->gtType  = structType;
1692     GenTree* structObj = structVal;
1693
1694     if (!alreadyNormalized || forceNormalization)
1695     {
1696         if (makeTemp)
1697         {
1698             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1699
1700             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1701
1702             // The structVal is now the temp itself
1703
1704             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1705             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1706             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1707         }
1708         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1709         {
1710             // Wrap it in a GT_OBJ
1711             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1712         }
1713     }
1714
1715     if (structLcl != nullptr)
1716     {
1717         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1718         // so we don't set GTF_EXCEPT here.
1719         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1720         {
1721             structObj->gtFlags &= ~GTF_GLOB_REF;
1722         }
1723     }
1724     else
1725     {
1726         // In general an OBJ is an indirection and could raise an exception.
1727         structObj->gtFlags |= GTF_EXCEPT;
1728     }
1729     return (structObj);
1730 }
1731
1732 /******************************************************************************/
1733 // Given a type token, generate code that will evaluate to the correct
1734 // handle representation of that token (type handle, field handle, or method handle)
1735 //
1736 // For most cases, the handle is determined at compile-time, and the code
1737 // generated is simply an embedded handle.
1738 //
1739 // Run-time lookup is required if the enclosing method is shared between instantiations
1740 // and the token refers to formal type parameters whose instantiation is not known
1741 // at compile-time.
1742 //
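// Illustrative examples (assumed typical IL, shown only to contrast the two cases):
//
//     ldtoken [mscorlib]System.String       // handle known at compile time: an embedded
//                                           // handle constant is generated
//     ldtoken !!T  (in shared generic code) // instantiation unknown at compile time: a
//                                           // runtime dictionary lookup tree is generated
//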
1743 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1744                                       BOOL*                   pRuntimeLookup /* = NULL */,
1745                                       BOOL                    mustRestoreHandle /* = FALSE */,
1746                                       BOOL                    importParent /* = FALSE */)
1747 {
1748     assert(!fgGlobalMorph);
1749
1750     CORINFO_GENERICHANDLE_RESULT embedInfo;
1751     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1752
1753     if (pRuntimeLookup)
1754     {
1755         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1756     }
1757
1758     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1759     {
1760         switch (embedInfo.handleType)
1761         {
1762             case CORINFO_HANDLETYPE_CLASS:
1763                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1764                 break;
1765
1766             case CORINFO_HANDLETYPE_METHOD:
1767                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1768                 break;
1769
1770             case CORINFO_HANDLETYPE_FIELD:
1771                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1772                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1773                 break;
1774
1775             default:
1776                 break;
1777         }
1778     }
1779
1780     // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
1781     GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1782                                       embedInfo.compileTimeHandle);
1783
1784     // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
1785     if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
1786     {
1787         result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
1788     }
1789
1790     return result;
1791 }
1792
1793 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1794                                      CORINFO_LOOKUP*         pLookup,
1795                                      unsigned                handleFlags,
1796                                      void*                   compileTimeHandle)
1797 {
1798     if (!pLookup->lookupKind.needsRuntimeLookup)
1799     {
1800         // No runtime lookup is required.
1801         // Access is direct or memory-indirect (of a fixed address) reference
1802         // Access is a direct or memory-indirect (of a fixed address) reference.
1803         CORINFO_GENERIC_HANDLE handle       = nullptr;
1804         void*                  pIndirection = nullptr;
1805         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1806
1807         if (pLookup->constLookup.accessType == IAT_VALUE)
1808         {
1809             handle = pLookup->constLookup.handle;
1810         }
1811         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1812         {
1813             pIndirection = pLookup->constLookup.addr;
1814         }
1815         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1816     }
1817     else if (compIsForInlining())
1818     {
1819         // Don't import runtime lookups when inlining
1820         // Inlining has to be aborted in such a case
1821         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1822         return nullptr;
1823     }
1824     else
1825     {
1826         // Need to use dictionary-based access, which depends on the typeContext
1827         // that is only available at runtime, not at compile-time.
1828
1829         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1830     }
1831 }
1832
1833 #ifdef FEATURE_READYTORUN_COMPILER
1834 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1835                                                unsigned              handleFlags,
1836                                                void*                 compileTimeHandle)
1837 {
1838     CORINFO_GENERIC_HANDLE handle       = nullptr;
1839     void*                  pIndirection = nullptr;
1840     assert(pLookup->accessType != IAT_PPVALUE);
1841
1842     if (pLookup->accessType == IAT_VALUE)
1843     {
1844         handle = pLookup->handle;
1845     }
1846     else if (pLookup->accessType == IAT_PVALUE)
1847     {
1848         pIndirection = pLookup->addr;
1849     }
1850     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1851 }
1852
1853 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1854     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1855     CorInfoHelpFunc         helper,
1856     var_types               type,
1857     GenTreeArgList*         args /* =NULL*/,
1858     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1859 {
1860     CORINFO_CONST_LOOKUP lookup;
1861     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1862     {
1863         return nullptr;
1864     }
1865
1866     GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
1867
1868     op1->setEntryPoint(lookup);
1869
1870     return op1;
1871 }
1872 #endif
1873
1874 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1875 {
1876     GenTreePtr op1 = nullptr;
1877
1878     switch (pCallInfo->kind)
1879     {
1880         case CORINFO_CALL:
1881             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1882
1883 #ifdef FEATURE_READYTORUN_COMPILER
1884             if (opts.IsReadyToRun())
1885             {
1886                 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1887             }
1888             else
1889             {
1890                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1891             }
1892 #endif
1893             break;
1894
1895         case CORINFO_CALL_CODE_POINTER:
1896             if (compIsForInlining())
1897             {
1898                 // Don't import runtime lookups when inlining
1899                 // Inlining has to be aborted in such a case
1900                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1901                 return nullptr;
1902             }
1903
1904             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1905             break;
1906
1907         default:
1908             noway_assert(!"unknown call kind");
1909             break;
1910     }
1911
1912     return op1;
1913 }
1914
1915 //------------------------------------------------------------------------
1916 // getRuntimeContextTree: find pointer to context for runtime lookup.
1917 //
1918 // Arguments:
1919 //    kind - lookup kind.
1920 //
1921 // Return Value:
1922 //    Return GenTree pointer to generic shared context.
1923 //
1924 // Notes:
1925 //    Reports the use of the generic context.
1926
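// For illustration: with CORINFO_LOOKUP_THISOBJ the context is the method table of 'this'
// (an indirection off the 'this' local); with CORINFO_LOOKUP_METHODPARAM or
// CORINFO_LOOKUP_CLASSPARAM it is the hidden instantiation argument (info.compTypeCtxtArg)
// passed to the shared method.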
1927 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1928 {
1929     GenTreePtr ctxTree = nullptr;
1930
1931     // Collectible types require that, for shared generic code, any use of the generic context
1932     // parameter be reported. (This is a conservative approach; in some cases, particularly when
1933     // the context parameter is 'this', we could avoid the eager reporting logic.)
1934     lvaGenericsContextUseCount++;
1935
1936     if (kind == CORINFO_LOOKUP_THISOBJ)
1937     {
1938         // this Object
1939         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1940
1941         // Vtable pointer of this object
1942         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1943         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1944         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1945     }
1946     else
1947     {
1948         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1949
1950         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor passed in as the last arg
1951     }
1952     return ctxTree;
1953 }
1954
1955 /*****************************************************************************/
1956 /* Import a dictionary lookup to access a handle in code shared between
1957    generic instantiations.
1958    The lookup depends on the typeContext which is only available at
1959    runtime, and not at compile-time.
1960    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1961    The cases are:
1962
1963    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1964       instantiation-specific handle, and the tokens to lookup the handle.
1965    2. pLookup->indirections != CORINFO_USEHELPER :
1966       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1967           to get the handle.
1968       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1969           If it is non-NULL, it is the handle required. Else, call a helper
1970           to lookup the handle.
1971  */
1972
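// A rough sketch of what case 2b produces, for illustration only (pseudo-code rather than
// the exact tree shape):
//
//     handle = *slotPtr;
//     result = (handle != nullptr) ? handle : HELPER_CALL(genericContext, signature);
//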
1973 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1974                                             CORINFO_LOOKUP*         pLookup,
1975                                             void*                   compileTimeHandle)
1976 {
1977
1978     // This method can only be called from the importer instance of the Compiler.
1979     // In other words, it cannot be called by the Compiler instance created for the inlinee.
1980     assert(!compIsForInlining());
1981
1982     GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1983
1984     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1985     // It's available only via the run-time helper function
1986     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1987     {
1988 #ifdef FEATURE_READYTORUN_COMPILER
1989         if (opts.IsReadyToRun())
1990         {
1991             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1992                                              gtNewArgList(ctxTree), &pLookup->lookupKind);
1993         }
1994 #endif
1995         GenTree* argNode =
1996             gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
1997         GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
1998
1999         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2000     }
2001
2002     // Slot pointer
2003     GenTreePtr slotPtrTree = ctxTree;
2004
2005     if (pRuntimeLookup->testForNull)
2006     {
2007         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2008                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2009     }
2010
2011     GenTreePtr indOffTree = nullptr;
2012
2013     // Apply repeated indirections
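    // When indirectFirstOffset / indirectSecondOffset is set for a level, the value read at
    // that level is not the next pointer itself: it is added back to the address it was read
    // from (a self-relative offset), which is why the pre-read pointer is cloned into
    // indOffTree below.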
2014     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2015     {
2016         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2017         {
2018             indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2019                                       nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
2020         }
2021
2022         if (i != 0)
2023         {
2024             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2025             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2026             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2027         }
2028
2029         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2030         {
2031             slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
2032         }
2033
2034         if (pRuntimeLookup->offsets[i] != 0)
2035         {
2036             slotPtrTree =
2037                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2038         }
2039     }
2040
2041     // No null test required
2042     if (!pRuntimeLookup->testForNull)
2043     {
2044         if (pRuntimeLookup->indirections == 0)
2045         {
2046             return slotPtrTree;
2047         }
2048
2049         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2050         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2051
2052         if (!pRuntimeLookup->testForFixup)
2053         {
2054             return slotPtrTree;
2055         }
2056
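        // Description of the fixup handling below: when testForFixup is set, the slot may
        // still hold a tagged fixup pointer. The low bit of the value is tested; if it is
        // set, the real value is loaded from (slot - 1) and stored back into the temp,
        // otherwise the temp already holds the final value.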
2057         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2058
2059         unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2060         impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2061
2062         GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2063         // downcast the pointer to a TYP_INT on 64-bit targets
2064         slot = impImplicitIorI4Cast(slot, TYP_INT);
2065         // Use a GT_AND to check for the lowest bit and indirect if it is set
2066         GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2067         GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2068         relop->gtFlags |= GTF_RELOP_QMARK;
2069
2070         // slot = GT_IND(slot - 1)
2071         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2072         GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2073         GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2074         indir->gtFlags |= GTF_IND_NONFAULTING;
2075         indir->gtFlags |= GTF_IND_INVARIANT;
2076
2077         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2078         GenTree* asg   = gtNewAssignNode(slot, indir);
2079         GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2080         GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2081         impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2082
2083         return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2084     }
2085
2086     assert(pRuntimeLookup->indirections != 0);
2087
2088     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2089
2090     // Extract the handle
2091     GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2092     handle->gtFlags |= GTF_IND_NONFAULTING;
2093
2094     GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2095                                          nullptr DEBUGARG("impRuntimeLookup typehandle"));
2096
2097     // Call to helper
2098     GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2099
2100     GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2101     GenTreePtr      helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2102
2103     // Check for null and possibly call helper
2104     GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2105     relop->gtFlags |= GTF_RELOP_QMARK;
2106
2107     GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2108                                                          gtNewNothingNode(), // do nothing if nonnull
2109                                                          helperCall);
2110
2111     GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2112
2113     unsigned tmp;
2114     if (handleCopy->IsLocal())
2115     {
2116         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2117     }
2118     else
2119     {
2120         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2121     }
2122
2123     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2124     return gtNewLclvNode(tmp, TYP_I_IMPL);
2125 }
2126
2127 /******************************************************************************
2128  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2129  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2130  *     else, grab a new temp.
2131  *  For structs (which can be pushed on the stack using obj, etc),
2132  *  special handling is needed
2133  */
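// A typical call, for illustration (this form appears at the call sites later in this
// file): spill stack entry 'level' into a fresh temp:
//
//     impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("some reason"));
//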
2134
2135 struct RecursiveGuard
2136 {
2137 public:
2138     RecursiveGuard()
2139     {
2140         m_pAddress = nullptr;
2141     }
2142
2143     ~RecursiveGuard()
2144     {
2145         if (m_pAddress)
2146         {
2147             *m_pAddress = false;
2148         }
2149     }
2150
2151     void Init(bool* pAddress, bool bInitialize)
2152     {
2153         assert(pAddress && *pAddress == false && "Recursive guard violation");
2154         m_pAddress = pAddress;
2155
2156         if (bInitialize)
2157         {
2158             *m_pAddress = true;
2159         }
2160     }
2161
2162 protected:
2163     bool* m_pAddress;
2164 };
2165
2166 bool Compiler::impSpillStackEntry(unsigned level,
2167                                   unsigned tnum
2168 #ifdef DEBUG
2169                                   ,
2170                                   bool        bAssertOnRecursion,
2171                                   const char* reason
2172 #endif
2173                                   )
2174 {
2175
2176 #ifdef DEBUG
2177     RecursiveGuard guard;
2178     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2179 #endif
2180
2181     GenTreePtr tree = verCurrentState.esStack[level].val;
2182
2183     /* Allocate a temp if we haven't been asked to use a particular one */
2184
2185     if (tiVerificationNeeded)
2186     {
2187         // Ignore bad temp requests (they will happen with bad code and will be
2188         // caught when importing the destblock)
2189         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2190         {
2191             return false;
2192         }
2193     }
2194     else
2195     {
2196         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2197         {
2198             return false;
2199         }
2200     }
2201
2202     bool isNewTemp = false;
2203
2204     if (tnum == BAD_VAR_NUM)
2205     {
2206         tnum      = lvaGrabTemp(true DEBUGARG(reason));
2207         isNewTemp = true;
2208     }
2209     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2210     {
2211         // if verification is needed and tnum's type is incompatible with
2212         // the type on the stack, we grab a new temp. This is safe since
2213         // we will throw a verification exception in the dest block.
2214
2215         var_types valTyp = tree->TypeGet();
2216         var_types dstTyp = lvaTable[tnum].TypeGet();
2217
2218         // if the two types are different, we return. This will only happen with bad code and will
2219         // be caught when importing the destblock. We still allow int/byref and float/double differences.
2220         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2221             !(
2222 #ifndef _TARGET_64BIT_
2223                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2224 #endif // !_TARGET_64BIT_
2225                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2226         {
2227             if (verNeedsVerification())
2228             {
2229                 return false;
2230             }
2231         }
2232     }
2233
2234     /* Assign the spilled entry to the temp */
2235     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2236
2237     // If temp is newly introduced and a ref type, grab what type info we can.
2238     if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2239     {
2240         CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2241         lvaSetClass(tnum, tree, stkHnd);
2242     }
2243
2244     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2245     var_types  type                    = genActualType(lvaTable[tnum].TypeGet());
2246     GenTreePtr temp                    = gtNewLclvNode(tnum, type);
2247     verCurrentState.esStack[level].val = temp;
2248
2249     return true;
2250 }
2251
2252 /*****************************************************************************
2253  *
2254  *  Ensure that the stack has only spilled values
2255  */
2256
2257 void Compiler::impSpillStackEnsure(bool spillLeaves)
2258 {
2259     assert(!spillLeaves || opts.compDbgCode);
2260
2261     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2262     {
2263         GenTreePtr tree = verCurrentState.esStack[level].val;
2264
2265         if (!spillLeaves && tree->OperIsLeaf())
2266         {
2267             continue;
2268         }
2269
2270         // Temps introduced by the importer itself don't need to be spilled
2271
2272         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2273
2274         if (isTempLcl)
2275         {
2276             continue;
2277         }
2278
2279         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2280     }
2281 }
2282
2283 void Compiler::impSpillEvalStack()
2284 {
2285     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2286     {
2287         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2288     }
2289 }
2290
2291 /*****************************************************************************
2292  *
2293  *  If the stack contains any trees with side effects in them, assign those
2294  *  trees to temps and append the assignments to the statement list.
2295  *  On return the stack is guaranteed to be empty.
2296  */
2297
2298 inline void Compiler::impEvalSideEffects()
2299 {
2300     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2301     verCurrentState.esStackDepth = 0;
2302 }
2303
2304 /*****************************************************************************
2305  *
2306  *  If the stack contains any trees with side effects in them, assign those
2307  *  trees to temps and replace them on the stack with refs to their temps.
2308  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2309  */
2310
2311 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2312 {
2313     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2314
2315     /* Before we make any appends to the tree list we must spill the
2316      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2317
2318     impSpillSpecialSideEff();
2319
2320     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2321     {
2322         chkLevel = verCurrentState.esStackDepth;
2323     }
2324
2325     assert(chkLevel <= verCurrentState.esStackDepth);
2326
2327     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2328
2329     for (unsigned i = 0; i < chkLevel; i++)
2330     {
2331         GenTreePtr tree = verCurrentState.esStack[i].val;
2332
2333         GenTreePtr lclVarTree;
2334
2335         if ((tree->gtFlags & spillFlags) != 0 ||
2336             (spillGlobEffects &&                        // Only consider the following when spillGlobEffects is true
2337              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2338              gtHasLocalsWithAddrOp(tree))) // Spill if we still see a GT_LCL_VAR that has the lvHasLdAddrOp or
2339                                            // lvAddrTaken flag set.
2340         {
2341             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2342         }
2343     }
2344 }
2345
2346 /*****************************************************************************
2347  *
2348  *  If the stack contains any trees with special side effects in them, assign
2349  *  those trees to temps and replace them on the stack with refs to their temps.
2350  */
2351
2352 inline void Compiler::impSpillSpecialSideEff()
2353 {
2354     // Only exception objects need to be carefully handled
2355
2356     if (!compCurBB->bbCatchTyp)
2357     {
2358         return;
2359     }
2360
2361     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2362     {
2363         GenTreePtr tree = verCurrentState.esStack[level].val;
2364         // If there is an exception object anywhere in the subtree, spill this stack entry.
2365         if (gtHasCatchArg(tree))
2366         {
2367             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2368         }
2369     }
2370 }
2371
2372 /*****************************************************************************
2373  *
2374  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2375  */
2376
2377 void Compiler::impSpillValueClasses()
2378 {
2379     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2380     {
2381         GenTreePtr tree = verCurrentState.esStack[level].val;
2382
2383         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2384         {
2385             // Tree walk was aborted, which means that we found a
2386             // value class on the stack.  Need to spill that
2387             // stack entry.
2388
2389             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2390         }
2391     }
2392 }
2393
2394 /*****************************************************************************
2395  *
2396  *  Callback that checks if a tree node is TYP_STRUCT
2397  */
2398
2399 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2400 {
2401     fgWalkResult walkResult = WALK_CONTINUE;
2402
2403     if ((*pTree)->gtType == TYP_STRUCT)
2404     {
2405         // Abort the walk and indicate that we found a value class
2406
2407         walkResult = WALK_ABORT;
2408     }
2409
2410     return walkResult;
2411 }
2412
2413 /*****************************************************************************
2414  *
2415  *  If the stack contains any trees with references to local #lclNum, assign
2416  *  those trees to temps and replace their place on the stack with refs to
2417  *  their temps.
2418  */
2419
2420 void Compiler::impSpillLclRefs(ssize_t lclNum)
2421 {
2422     /* Before we make any appends to the tree list we must spill the
2423      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2424
2425     impSpillSpecialSideEff();
2426
2427     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2428     {
2429         GenTreePtr tree = verCurrentState.esStack[level].val;
2430
2431         /* If the tree may throw an exception, and the block has a handler,
2432            then we need to spill assignments to the local if the local is
2433            live on entry to the handler.
2434            Just spill them all without considering liveness */
2435
2436         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2437
2438         /* Skip the tree if it doesn't have an affected reference,
2439            unless xcptnCaught */
2440
2441         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2442         {
2443             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2444         }
2445     }
2446 }
2447
2448 /*****************************************************************************
2449  *
2450  *  Push catch arg onto the stack.
2451  *  If there are jumps to the beginning of the handler, insert basic block
2452  *  and spill catch arg to a temp. Update the handler block if necessary.
2453  *
2454  *  Returns the basic block of the actual handler.
2455  */
2456
2457 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2458 {
2459     // Do not inject the basic block twice on reimport. This should be
2460     // hit only under JIT stress. See if the block is the one we injected.
2461     // Note that EH canonicalization can inject internal blocks here. We might
2462     // be able to re-use such a block (but we don't, right now).
2463     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2464         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2465     {
2466         GenTreePtr tree = hndBlk->bbTreeList;
2467
2468         if (tree != nullptr && tree->gtOper == GT_STMT)
2469         {
2470             tree = tree->gtStmt.gtStmtExpr;
2471             assert(tree != nullptr);
2472
2473             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2474                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2475             {
2476                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2477
2478                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2479
2480                 return hndBlk->bbNext;
2481             }
2482         }
2483
2484         // If we get here, it must have been some other kind of internal block. It's possible that
2485         // someone prepended something to our injected block, but that's unlikely.
2486     }
2487
2488     /* Push the exception address value on the stack */
2489     GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2490
2491     /* Mark the node as having a side-effect - i.e. cannot be
2492      * moved around since it is tied to a fixed location (EAX) */
2493     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2494
2495 #if defined(JIT32_GCENCODER)
2496     const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2497 #else
2498     const bool forceInsertNewBlock                                     = compStressCompile(STRESS_CATCH_ARG, 5);
2499 #endif // defined(JIT32_GCENCODER)
2500
2501     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2502     if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2503     {
2504         if (hndBlk->bbRefs == 1)
2505         {
2506             hndBlk->bbRefs++;
2507         }
2508
2509         /* Create extra basic block for the spill */
2510         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2511         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2512         newBlk->setBBWeight(hndBlk->bbWeight);
2513         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2514
2515         /* Account for the new link we are about to create */
2516         hndBlk->bbRefs++;
2517
2518         /* Spill into a temp */
2519         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2520         lvaTable[tempNum].lvType = TYP_REF;
2521         arg                      = gtNewTempAssign(tempNum, arg);
2522
2523         hndBlk->bbStkTempsIn = tempNum;
2524
2525         /* Report the debug info. impImportBlockCode won't treat
2526          * the actual handler as an exception block and thus won't do it for us. */
2527         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2528         {
2529             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2530             arg            = gtNewStmt(arg, impCurStmtOffs);
2531         }
2532
2533         fgInsertStmtAtEnd(newBlk, arg);
2534
2535         arg = gtNewLclvNode(tempNum, TYP_REF);
2536     }
2537
2538     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2539
2540     return hndBlk;
2541 }
2542
2543 /*****************************************************************************
2544  *
2545  *  Given a tree, clone it. *pClone is set to the cloned tree.
2546  *  Returns the original tree if the cloning was easy,
2547  *   else returns the temp to which the tree had to be spilled.
2548  *  If the tree has side-effects, it will be spilled to a temp.
2549  */
2550
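// A typical call, for illustration (mirroring the runtime-lookup code earlier in this
// file): clone 'ctxTree' so it can be used twice; if it has global effects, both uses
// become references to a temp instead:
//
//     slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
//                                nullptr DEBUGARG("impRuntimeLookup slot"));
//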
2551 GenTreePtr Compiler::impCloneExpr(GenTreePtr           tree,
2552                                   GenTreePtr*          pClone,
2553                                   CORINFO_CLASS_HANDLE structHnd,
2554                                   unsigned             curLevel,
2555                                   GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2556 {
2557     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2558     {
2559         GenTreePtr clone = gtClone(tree, true);
2560
2561         if (clone)
2562         {
2563             *pClone = clone;
2564             return tree;
2565         }
2566     }
2567
2568     /* Store the operand in a temp and return the temp */
2569
2570     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2571
2572     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2573     // return a struct type. It also may modify the struct type to a more
2574     // specialized type (e.g. a SIMD type).  So we will get the type from
2575     // the lclVar AFTER calling impAssignTempGen().
2576
2577     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2578     var_types type = genActualType(lvaTable[temp].TypeGet());
2579
2580     *pClone = gtNewLclvNode(temp, type);
2581     return gtNewLclvNode(temp, type);
2582 }
2583
2584 /*****************************************************************************
2585  * Remember the IL offset (including stack-empty info) for the trees we will
2586  * generate now.
2587  */
2588
2589 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2590 {
2591     if (compIsForInlining())
2592     {
2593         GenTreePtr callStmt = impInlineInfo->iciStmt;
2594         assert(callStmt->gtOper == GT_STMT);
2595         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2596     }
2597     else
2598     {
2599         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2600         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2601         impCurStmtOffs    = offs | stkBit;
2602     }
2603 }
2604
2605 /*****************************************************************************
2606  * Returns current IL offset with stack-empty and call-instruction info incorporated
2607  */
2608 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2609 {
2610     if (compIsForInlining())
2611     {
2612         return BAD_IL_OFFSET;
2613     }
2614     else
2615     {
2616         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2617         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2618         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2619         return offs | stkBit | callInstructionBit;
2620     }
2621 }
2622
2623 //------------------------------------------------------------------------
2624 // impCanSpillNow: check whether it is possible to spill all values from the eval stack to local variables.
2625 //
2626 // Arguments:
2627 //    prevOpcode - the last imported opcode
2628 //
2629 // Return Value:
2630 //    true if it is legal to spill now; false if this could be part of a sequence that we do not want to break up.
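// For illustration, the kind of IL sequence we want to keep intact (the typical pattern
// emitted for array initializers; assumed, not derived from this method):
//
//     newarr   <elemType>
//     dup
//     ldtoken  <field holding the initial data blob>
//     call     System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(...)
//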
2631 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2632 {
2633     // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence.
2634     // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed.
2635     return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
2636 }
2637
2638 /*****************************************************************************
2639  *
2640  *  Remember the instr offset for the statements
2641  *
2642  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2643  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2644  *  as some of the trees corresponding to code up to impCurOpcOffs might
2645  *  still be sitting on the stack.
2646  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2647  *  This should be called when an opcode finally/explicitly causes
2648  *  impAppendTree(tree) to be called (as opposed to being called because of
2649  *  a spill caused by the opcode)
2650  */
2651
2652 #ifdef DEBUG
2653
2654 void Compiler::impNoteLastILoffs()
2655 {
2656     if (impLastILoffsStmt == nullptr)
2657     {
2658         // We should have added a statement for the current basic block
2659         // Is this assert correct ?
2660
2661         assert(impTreeLast);
2662         assert(impTreeLast->gtOper == GT_STMT);
2663
2664         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2665     }
2666     else
2667     {
2668         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2669         impLastILoffsStmt                          = nullptr;
2670     }
2671 }
2672
2673 #endif // DEBUG
2674
2675 /*****************************************************************************
2676  * We don't create any GenTree (excluding spills) for a branch.
2677  * For debugging info, we need a placeholder so that we can note
2678  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2679  */
2680
2681 void Compiler::impNoteBranchOffs()
2682 {
2683     if (opts.compDbgCode)
2684     {
2685         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2686     }
2687 }
2688
2689 /*****************************************************************************
2690  * Locate the next stmt boundary for which we need to record info.
2691  * We will have to spill the stack at such boundaries if it is not
2692  * already empty.
2693  * Returns the next stmt boundary (after the start of the block)
2694  */
2695
2696 unsigned Compiler::impInitBlockLineInfo()
2697 {
2698     /* Assume the block does not correspond with any IL offset. This prevents
2699        us from reporting extra offsets. Extra mappings can cause confusing
2700        stepping, especially if the extra mapping is a jump-target, and the
2701        debugger does not ignore extra mappings, but instead rewinds to the
2702        nearest known offset */
2703
2704     impCurStmtOffsSet(BAD_IL_OFFSET);
2705
2706     if (compIsForInlining())
2707     {
2708         return ~0;
2709     }
2710
2711     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2712
2713     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2714     {
2715         impCurStmtOffsSet(blockOffs);
2716     }
2717
2718     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2719     {
2720         impCurStmtOffsSet(blockOffs);
2721     }
2722
2723     /* Always report IL offset 0 or some tests get confused.
2724        Probably a good idea anyway */
2725
2726     if (blockOffs == 0)
2727     {
2728         impCurStmtOffsSet(blockOffs);
2729     }
2730
2731     if (!info.compStmtOffsetsCount)
2732     {
2733         return ~0;
2734     }
2735
2736     /* Find the lowest explicit stmt boundary within the block */
2737
2738     /* Start looking at an entry that is based on our instr offset */
2739
2740     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2741
2742     if (index >= info.compStmtOffsetsCount)
2743     {
2744         index = info.compStmtOffsetsCount - 1;
2745     }
2746
2747     /* If we've guessed too far, back up */
2748
2749     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2750     {
2751         index--;
2752     }
2753
2754     /* If we guessed short, advance ahead */
2755
2756     while (info.compStmtOffsets[index] < blockOffs)
2757     {
2758         index++;
2759
2760         if (index == info.compStmtOffsetsCount)
2761         {
2762             return info.compStmtOffsetsCount;
2763         }
2764     }
2765
2766     assert(index < info.compStmtOffsetsCount);
2767
2768     if (info.compStmtOffsets[index] == blockOffs)
2769     {
2770         /* There is an explicit boundary for the start of this basic block.
2771            So we will start with bbCodeOffs. Else we will wait until we
2772            get to the next explicit boundary */
2773
2774         impCurStmtOffsSet(blockOffs);
2775
2776         index++;
2777     }
2778
2779     return index;
2780 }
2781
2782 /*****************************************************************************/
2783
2784 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2785 {
2786     switch (opcode)
2787     {
2788         case CEE_CALL:
2789         case CEE_CALLI:
2790         case CEE_CALLVIRT:
2791             return true;
2792
2793         default:
2794             return false;
2795     }
2796 }
2797
2798 /*****************************************************************************/
2799
2800 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2801 {
2802     switch (opcode)
2803     {
2804         case CEE_CALL:
2805         case CEE_CALLI:
2806         case CEE_CALLVIRT:
2807         case CEE_JMP:
2808         case CEE_NEWOBJ:
2809         case CEE_NEWARR:
2810             return true;
2811
2812         default:
2813             return false;
2814     }
2815 }
2816
2817 /*****************************************************************************/
2818
2819 // One might think it is worth caching these values, but results indicate
2820 // that it isn't.
2821 // In addition, caching them causes SuperPMI to be unable to completely
2822 // encapsulate an individual method context.
2823 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2824 {
2825     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2826     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2827     return refAnyClass;
2828 }
2829
2830 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2831 {
2832     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2833     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2834     return typeHandleClass;
2835 }
2836
2837 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2838 {
2839     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2840     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2841     return argIteratorClass;
2842 }
2843
2844 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2845 {
2846     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2847     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2848     return stringClass;
2849 }
2850
2851 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2852 {
2853     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2854     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2855     return objectClass;
2856 }
2857
2858 /*****************************************************************************
2859  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2860  *  set its type to TYP_BYREF when we create it. We only know whether it can
2861  *  be changed to TYP_I_IMPL at the point where we use it.
2862  */
2863
2864 /* static */
2865 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2866 {
2867     if (tree1->IsVarAddr())
2868     {
2869         tree1->gtType = TYP_I_IMPL;
2870     }
2871
2872     if (tree2 && tree2->IsVarAddr())
2873     {
2874         tree2->gtType = TYP_I_IMPL;
2875     }
2876 }
2877
2878 /*****************************************************************************
2879  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2880  *  to make that an explicit cast in our trees, so any implicit casts that
2881  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2882  *  turned into explicit casts here.
2883  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2884  */
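/* A minimal usage sketch (hypothetical caller and variable names, not from this file):
   when importing a store of a 32-bit value through a native-int destination on a
   64-bit target,

       value = impImplicitIorI4Cast(value, TYP_I_IMPL);

   wraps the TYP_INT value in an explicit GT_CAST to TYP_I_IMPL, so the width mismatch
   never survives into the trees. */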
2885
2886 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2887 {
2888     var_types currType   = genActualType(tree->gtType);
2889     var_types wantedType = genActualType(dstTyp);
2890
2891     if (wantedType != currType)
2892     {
2893         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2894         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2895         {
2896             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2897             {
2898                 tree->gtType = TYP_I_IMPL;
2899             }
2900         }
2901 #ifdef _TARGET_64BIT_
2902         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2903         {
2904             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2905             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2906         }
2907         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2908         {
2909             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2910             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2911         }
2912 #endif // _TARGET_64BIT_
2913     }
2914
2915     return tree;
2916 }
2917
2918 /*****************************************************************************
2919  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2920  *  but we want to make that an explicit cast in our trees, so any implicit casts
2921  *  that exist in the IL are turned into explicit casts here.
2922  */
2923
2924 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2925 {
2926 #ifndef LEGACY_BACKEND
2927     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2928     {
2929         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2930     }
2931 #endif // !LEGACY_BACKEND
2932
2933     return tree;
2934 }
2935
2936 //------------------------------------------------------------------------
2937 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2938 //    with a block copy of the array's static initialization data.
2939 //
2940 // Arguments:
2941 //    sig - The InitializeArray signature.
2942 //
2943 // Return Value:
2944 //    A pointer to the newly created block copy node if the replacement succeeds,
2945 //    or nullptr otherwise.
2946 //
2947 // Notes:
2948 //    The function recognizes the following IL pattern:
2949 //      ldc <length> or a list of ldc <lower bound>/<length>
2950 //      newarr or newobj
2951 //      dup
2952 //      ldtoken <field handle>
2953 //      call InitializeArray
2954 //    The lower bounds need not be constant except when the array rank is 1.
2955 //    The function recognizes all kinds of arrays thus enabling a small runtime
2956 //    such as CoreRT to skip providing an implementation for InitializeArray.
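//
//    As an illustration (a hedged example, not part of the original notes): a C# field
//    initializer such as
//
//        static readonly int[] Data = new int[] { 1, 2, 3, 4 };
//
//    is typically compiled into exactly this newarr/dup/ldtoken/call pattern, which this
//    function then turns into a single block copy from the static initialization data
//    into the array's data section.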
2957
2958 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2959 {
2960     assert(sig->numArgs == 2);
2961
2962     GenTreePtr fieldTokenNode = impStackTop(0).val;
2963     GenTreePtr arrayLocalNode = impStackTop(1).val;
2964
2965     //
2966     // Verify that the field token is known and valid.  Note that it's also
2967     // possible for the token to come from reflection, in which case we cannot do
2968     // the optimization and must therefore revert to calling the helper.  You can
2969     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2970     //
2971
2972     // Check to see if the ldtoken helper call is what we see here.
2973     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2974         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2975     {
2976         return nullptr;
2977     }
2978
2979     // Strip helper call away
2980     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2981
2982     if (fieldTokenNode->gtOper == GT_IND)
2983     {
2984         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2985     }
2986
2987     // Check for constant
2988     if (fieldTokenNode->gtOper != GT_CNS_INT)
2989     {
2990         return nullptr;
2991     }
2992
2993     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2994     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2995     {
2996         return nullptr;
2997     }
2998
2999     //
3000     // We need to get the number of elements in the array and the size of each element.
3001     // We verify that the newarr statement is exactly what we expect it to be.
3002     // If it's not, then we just return nullptr and don't optimize this call.
3003     //
3004
3005     //
3006     // It is possible that we don't have any statements in the block yet.
3007     //
3008     if (impTreeLast->gtOper != GT_STMT)
3009     {
3010         assert(impTreeLast->gtOper == GT_BEG_STMTS);
3011         return nullptr;
3012     }
3013
3014     //
3015     // We start by looking at the last statement, making sure it's an assignment, and
3016     // that the target of the assignment is the array passed to InitializeArray.
3017     //
3018     GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
3019     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
3020         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
3021         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
3022     {
3023         return nullptr;
3024     }
3025
3026     //
3027     // Make sure that the object being assigned is a helper call.
3028     //
3029
3030     GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
3031     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
3032     {
3033         return nullptr;
3034     }
3035
3036     //
3037     // Verify that it is one of the new array helpers.
3038     //
3039
3040     bool isMDArray = false;
3041
3042     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
3043         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
3044         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3045         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3046 #ifdef FEATURE_READYTORUN_COMPILER
3047         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3048         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3049 #endif
3050             )
3051     {
3052         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3053         {
3054             return nullptr;
3055         }
3056
3057         isMDArray = true;
3058     }
3059
3060     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3061
3062     //
3063     // Make sure we found a compile time handle to the array
3064     //
3065
3066     if (!arrayClsHnd)
3067     {
3068         return nullptr;
3069     }
3070
3071     unsigned rank = 0;
3072     S_UINT32 numElements;
3073
3074     if (isMDArray)
3075     {
3076         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3077
3078         if (rank == 0)
3079         {
3080             return nullptr;
3081         }
3082
3083         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3084         assert(tokenArg != nullptr);
3085         GenTreeArgList* numArgsArg = tokenArg->Rest();
3086         assert(numArgsArg != nullptr);
3087         GenTreeArgList* argsArg = numArgsArg->Rest();
3088         assert(argsArg != nullptr);
3089
3090         //
3091         // The number of arguments should be a constant between 1 and 64. The rank can't be 0,
3092         // so at least one length must be present, and the rank can't exceed 32, so there can
3093         // be at most 64 arguments - 32 lengths and 32 lower bounds.
3094         //
3095
3096         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3097             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3098         {
3099             return nullptr;
3100         }
3101
3102         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3103         bool     lowerBoundsSpecified;
3104
3105         if (numArgs == rank * 2)
3106         {
3107             lowerBoundsSpecified = true;
3108         }
3109         else if (numArgs == rank)
3110         {
3111             lowerBoundsSpecified = false;
3112
3113             //
3114             // If the rank is 1 and a lower bound isn't specified, then the runtime creates
3115             // an SDArray. Note that even if a lower bound is specified it can be 0, and then
3116             // we get an SDArray as well; see the for loop below.
3117             //
3118
3119             if (rank == 1)
3120             {
3121                 isMDArray = false;
3122             }
3123         }
3124         else
3125         {
3126             return nullptr;
3127         }
3128
3129         //
3130         // The rank is known to be at least 1 so we can start with numElements being 1
3131         // to avoid the need to special case the first dimension.
3132         //
3133
3134         numElements = S_UINT32(1);
3135
3136         struct Match
3137         {
3138             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3139             {
3140                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3141                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3142             }
3143
3144             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3145             {
3146                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3147                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3148                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3149             }
3150
3151             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3152             {
3153                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3154                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3155             }
3156
3157             static bool IsComma(GenTree* tree)
3158             {
3159                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3160             }
3161         };
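        // For reference, a sketch (hypothetical rank-2 "new T[2, 3]" with no explicit
        // lower bounds) of the argument tree that the Match helpers above are built to walk:
        //
        //   COMMA
        //     ASG(IND(ADD(ADDR(lvaNewObjArrayArgs), 0)), 2)    ; length of dimension 0
        //     COMMA
        //       ASG(IND(ADD(ADDR(lvaNewObjArrayArgs), 4)), 3)  ; length of dimension 1
        //       ADDR(lvaNewObjArrayArgs)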
3162
3163         unsigned argIndex = 0;
3164         GenTree* comma;
3165
3166         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3167         {
3168             if (lowerBoundsSpecified)
3169             {
3170                 //
3171                 // In general, lower bounds can be ignored because they're not needed to
3172                 // calculate the total number of elements. But for single-dimensional arrays
3173                 // we need to know if the lower bound is 0 because in this case the runtime
3174                 // creates an SDArray, and this affects the way the array data offset is calculated.
3175                 //
3176
3177                 if (rank == 1)
3178                 {
3179                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3180                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3181                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3182
3183                     if (lowerBoundNode->IsIntegralConst(0))
3184                     {
3185                         isMDArray = false;
3186                     }
3187                 }
3188
3189                 comma = comma->gtGetOp2();
3190                 argIndex++;
3191             }
3192
3193             GenTree* lengthNodeAssign = comma->gtGetOp1();
3194             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3195             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3196
3197             if (!lengthNode->IsCnsIntOrI())
3198             {
3199                 return nullptr;
3200             }
3201
3202             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3203             argIndex++;
3204         }
3205
3206         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3207
3208         if (argIndex != numArgs)
3209         {
3210             return nullptr;
3211         }
3212     }
3213     else
3214     {
3215         //
3216         // Make sure there are exactly two arguments:  the array class and
3217         // the number of elements.
3218         //
3219
3220         GenTreePtr arrayLengthNode;
3221
3222         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3223 #ifdef FEATURE_READYTORUN_COMPILER
3224         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3225         {
3226             // Array length is 1st argument for readytorun helper
3227             arrayLengthNode = args->Current();
3228         }
3229         else
3230 #endif
3231         {
3232             // Array length is 2nd argument for regular helper
3233             arrayLengthNode = args->Rest()->Current();
3234         }
3235
3236         //
3237         // Make sure that the number of elements looks valid.
3238         //
3239         if (arrayLengthNode->gtOper != GT_CNS_INT)
3240         {
3241             return nullptr;
3242         }
3243
3244         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3245
3246         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3247         {
3248             return nullptr;
3249         }
3250     }
3251
3252     CORINFO_CLASS_HANDLE elemClsHnd;
3253     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3254
3255     //
3256     // Note that genTypeSize will return zero for non-primitive types, which is exactly
3257     // what we want (size will then be 0, and we will catch this in the conditional below).
3258     // Note that we don't expect this to fail for valid binaries, so we assert in the
3259     // non-verification case (the verification case should not assert but rather correctly
3260     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3261     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3262     // why.
3263     //
3264
3265     S_UINT32 elemSize(genTypeSize(elementType));
3266     S_UINT32 size = elemSize * S_UINT32(numElements);
3267
3268     if (size.IsOverflow())
3269     {
3270         return nullptr;
3271     }
3272
3273     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3274     {
3275         assert(verNeedsVerification());
3276         return nullptr;
3277     }
3278
3279     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3280     if (!initData)
3281     {
3282         return nullptr;
3283     }
3284
3285     //
3286     // At this point we are ready to commit to implementing the InitializeArray
3287     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3288     // return the struct assignment node.
3289     //
3290
3291     impPopStack();
3292     impPopStack();
3293
3294     const unsigned blkSize = size.Value();
3295     unsigned       dataOffset;
3296
3297     if (isMDArray)
3298     {
3299         dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3300     }
3301     else
3302     {
3303         dataOffset = eeGetArrayDataOffset(elementType);
3304     }
3305
3306     GenTreePtr dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3307     GenTreePtr blk = gtNewBlockVal(dst, blkSize);
3308     GenTreePtr src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
3309
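    // At this point the replacement tree is, in sketch form, a block copy of blkSize
    // bytes from the static initialization data (initData) into the array's data
    // section at arrayLocalNode + dataOffset.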
3310     return gtNewBlkOpNode(blk,     // dst
3311                           src,     // src
3312                           blkSize, // size
3313                           false,   // volatile
3314                           true);   // copyBlock
3315 }
3316
3317 //------------------------------------------------------------------------
3318 // impIntrinsic: possibly expand intrinsic call into alternate IR sequence
3319 //
3320 // Arguments:
3321 //    newobjThis - for constructor calls, the tree for the newly allocated object
3322 //    clsHnd - handle for the intrinsic method's class
3323 //    method - handle for the intrinsic method
3324 //    sig    - signature of the intrinsic method
3325 //    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
3326 //    memberRef - the token for the intrinsic method
3327 //    readonlyCall - true if call has a readonly prefix
3328 //    tailCall - true if call is in tail position
3329 //    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
3330 //       if call is not constrained
3331 //    constraintCallThisTransform -- this transform to apply for a constrained call
3332 //    pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
3333 //       for "traditional" jit intrinsics
3334 //    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
3335 //       that is amenable to special downstream optimization opportunities
3336 //
3337 // Returns:
3338 //    IR tree to use in place of the call, or nullptr if the jit should treat
3339 //    the intrinsic call like a normal call.
3340 //
3341 //    pIntrinsicID set to non-illegal value if the call is recognized as a
3342 //    traditional jit intrinsic, even if the intrinsic is not expanded.
3343 //
3344 //    isSpecial set true if the expansion is subject to special
3345 //    optimizations later in the jit processing
3346 //
3347 // Notes:
3348 //    On success the IR tree may be a call to a different method or an inline
3349 //    sequence. If it is a call, then the intrinsic processing here is responsible
3350 //    for handling all the special cases, as upon return to impImportCall
3351 //    expanded intrinsics bypass most of the normal call processing.
3352 //
3353 //    Intrinsics are generally not recognized in minopts and debug codegen.
3354 //
3355 //    However, certain traditional intrinsics are identified as "must expand"
3356 //    if there is no fallback implementation to invoke; these must be handled
3357 //    in all codegen modes.
3358 //
3359 //    New style intrinsics (where the fallback implementation is in IL) are
3360 //    identified as "must expand" if they are invoked from within their
3361 //    own method bodies.
3362 //
3363
3364 GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
3365                                 CORINFO_CLASS_HANDLE    clsHnd,
3366                                 CORINFO_METHOD_HANDLE   method,
3367                                 CORINFO_SIG_INFO*       sig,
3368                                 unsigned                methodFlags,
3369                                 int                     memberRef,
3370                                 bool                    readonlyCall,
3371                                 bool                    tailCall,
3372                                 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
3373                                 CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
3374                                 CorInfoIntrinsics*      pIntrinsicID,
3375                                 bool*                   isSpecialIntrinsic)
3376 {
3377     assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
3378
3379     bool              mustExpand  = false;
3380     bool              isSpecial   = false;
3381     CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
3382     NamedIntrinsic    ni          = NI_Illegal;
3383
3384     if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
3385     {
3386         intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3387     }
3388
3389     if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
3390     {
3391         // Recursive calls to JIT intrinsics are must-expand by convention.
3392         mustExpand = mustExpand || gtIsRecursiveCall(method);
3393
3394         if (intrinsicID == CORINFO_INTRINSIC_Illegal)
3395         {
3396             ni = lookupNamedIntrinsic(method);
3397
3398 #if FEATURE_HW_INTRINSICS
3399 #ifdef _TARGET_XARCH_
3400             if (ni > NI_HW_INTRINSIC_START && ni < NI_HW_INTRINSIC_END)
3401             {
3402                 return impX86HWIntrinsic(ni, method, sig, mustExpand);
3403             }
3404 #endif // _TARGET_XARCH_
3405 #endif // FEATURE_HW_INTRINSICS
3406         }
3407     }
3408
3409     *pIntrinsicID = intrinsicID;
3410
3411 #ifndef _TARGET_ARM_
3412     genTreeOps interlockedOperator;
3413 #endif
3414
3415     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3416     {
3417         // must be done regardless of DbgCode and MinOpts
3418         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3419     }
3420 #ifdef _TARGET_64BIT_
3421     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3422     {
3423         // must be done regardless of DbgCode and MinOpts
3424         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3425     }
3426 #else
3427     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3428 #endif
3429
3430     GenTreePtr retNode = nullptr;
3431
3432     // Under debug and minopts, only expand what is required.
3433     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3434     {
3435         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3436         return retNode;
3437     }
3438
3439     var_types callType = JITtype2varType(sig->retType);
3440
3441     /* First do the intrinsics which are always smaller than a call */
3442
3443     switch (intrinsicID)
3444     {
3445         GenTreePtr op1, op2;
3446
3447         case CORINFO_INTRINSIC_Sin:
3448         case CORINFO_INTRINSIC_Cbrt:
3449         case CORINFO_INTRINSIC_Sqrt:
3450         case CORINFO_INTRINSIC_Abs:
3451         case CORINFO_INTRINSIC_Cos:
3452         case CORINFO_INTRINSIC_Round:
3453         case CORINFO_INTRINSIC_Cosh:
3454         case CORINFO_INTRINSIC_Sinh:
3455         case CORINFO_INTRINSIC_Tan:
3456         case CORINFO_INTRINSIC_Tanh:
3457         case CORINFO_INTRINSIC_Asin:
3458         case CORINFO_INTRINSIC_Asinh:
3459         case CORINFO_INTRINSIC_Acos:
3460         case CORINFO_INTRINSIC_Acosh:
3461         case CORINFO_INTRINSIC_Atan:
3462         case CORINFO_INTRINSIC_Atan2:
3463         case CORINFO_INTRINSIC_Atanh:
3464         case CORINFO_INTRINSIC_Log10:
3465         case CORINFO_INTRINSIC_Pow:
3466         case CORINFO_INTRINSIC_Exp:
3467         case CORINFO_INTRINSIC_Ceiling:
3468         case CORINFO_INTRINSIC_Floor:
3469             retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
3470             break;
3471
3472 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3473         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3474         case CORINFO_INTRINSIC_InterlockedAdd32:
3475             interlockedOperator = GT_LOCKADD;
3476             goto InterlockedBinOpCommon;
3477         case CORINFO_INTRINSIC_InterlockedXAdd32:
3478             interlockedOperator = GT_XADD;
3479             goto InterlockedBinOpCommon;
3480         case CORINFO_INTRINSIC_InterlockedXchg32:
3481             interlockedOperator = GT_XCHG;
3482             goto InterlockedBinOpCommon;
3483
3484 #ifdef _TARGET_64BIT_
3485         case CORINFO_INTRINSIC_InterlockedAdd64:
3486             interlockedOperator = GT_LOCKADD;
3487             goto InterlockedBinOpCommon;
3488         case CORINFO_INTRINSIC_InterlockedXAdd64:
3489             interlockedOperator = GT_XADD;
3490             goto InterlockedBinOpCommon;
3491         case CORINFO_INTRINSIC_InterlockedXchg64:
3492             interlockedOperator = GT_XCHG;
3493             goto InterlockedBinOpCommon;
3494 #endif // _TARGET_64BIT_
3495
3496         InterlockedBinOpCommon:
3497             assert(callType != TYP_STRUCT);
3498             assert(sig->numArgs == 2);
3499
3500             op2 = impPopStack().val;
3501             op1 = impPopStack().val;
3502
3503             // This creates:
3504             //   val
3505             // XAdd
3506             //   addr
3507             //     field (for example)
3508             //
3509             // In the case where the first argument is the address of a local, we might
3510             // want to make this *not* make the var address-taken -- but atomic instructions
3511             // on a local are probably pretty useless anyway, so we probably don't care.
3512
3513             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3514             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3515             retNode = op1;
3516             break;
3517 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3518
3519         case CORINFO_INTRINSIC_MemoryBarrier:
3520
3521             assert(sig->numArgs == 0);
3522
3523             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3524             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3525             retNode = op1;
3526             break;
3527
3528 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3529         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3530         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3531 #ifdef _TARGET_64BIT_
3532         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3533 #endif
3534         {
3535             assert(callType != TYP_STRUCT);
3536             assert(sig->numArgs == 3);
3537             GenTreePtr op3;
3538
3539             op3 = impPopStack().val; // comparand
3540             op2 = impPopStack().val; // value
3541             op1 = impPopStack().val; // location
3542
3543             GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3544
3545             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3546             retNode = node;
3547             break;
3548         }
3549 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3550
3551         case CORINFO_INTRINSIC_StringLength:
3552             op1 = impPopStack().val;
3553             if (!opts.MinOpts() && !opts.compDbgCode)
3554             {
3555                 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3556                 op1                   = arrLen;
3557             }
3558             else
3559             {
3560                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3561                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3562                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3563                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3564             }
3565
3566             // Getting the length of a null string should throw
3567             op1->gtFlags |= GTF_EXCEPT;
3568
3569             retNode = op1;
3570             break;
3571
3572         case CORINFO_INTRINSIC_StringGetChar:
3573             op2 = impPopStack().val;
3574             op1 = impPopStack().val;
3575             op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
3576             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3577             retNode = op1;
3578             break;
3579
3580         case CORINFO_INTRINSIC_InitializeArray:
3581             retNode = impInitializeArrayIntrinsic(sig);
3582             break;
3583
3584         case CORINFO_INTRINSIC_Array_Address:
3585         case CORINFO_INTRINSIC_Array_Get:
3586         case CORINFO_INTRINSIC_Array_Set:
3587             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3588             break;
3589
3590         case CORINFO_INTRINSIC_GetTypeFromHandle:
3591             op1 = impStackTop(0).val;
3592             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3593                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3594             {
3595                 op1 = impPopStack().val;
3596                 // Change call to return RuntimeType directly.
3597                 op1->gtType = TYP_REF;
3598                 retNode     = op1;
3599             }
3600             // Call the regular function.
3601             break;
3602
3603         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3604             op1 = impStackTop(0).val;
3605             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3606                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3607             {
3608                 // Old tree
3609                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3610                 //
3611                 // New tree
3612                 // TreeToGetNativeTypeHandle
3613
3614                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3615                 // to that helper.
3616
3617                 op1 = impPopStack().val;
3618
3619                 // Get native TypeHandle argument to old helper
3620                 op1 = op1->gtCall.gtCallArgs;
3621                 assert(op1->OperIsList());
3622                 assert(op1->gtOp.gtOp2 == nullptr);
3623                 op1     = op1->gtOp.gtOp1;
3624                 retNode = op1;
3625             }
3626             // Call the regular function.
3627             break;
3628
3629 #ifndef LEGACY_BACKEND
3630         case CORINFO_INTRINSIC_Object_GetType:
3631         {
3632             JITDUMP("\n impIntrinsic: call to Object.GetType\n");
3633             op1 = impStackTop(0).val;
3634
3635             // If we're calling GetType on a boxed value, just get the type directly.
3636             if (op1->IsBoxedValue())
3637             {
3638                 JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
3639
3640                 // Try and clean up the box. Obtain the handle we
3641                 // were going to pass to the newobj.
3642                 GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
3643
3644                 if (boxTypeHandle != nullptr)
3645                 {
3646                     // Note we don't need to play the TYP_STRUCT games here like
3647                     // we do for LDTOKEN, since the return value of this operator is Type,
3648                     // not RuntimeTypeHandle.
3649                     impPopStack();
3650                     GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
3651                     GenTree*        runtimeType =
3652                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3653                     retNode = runtimeType;
3654                 }
3655             }
3656
3657             // If we have a constrained callvirt with a "box this" transform
3658             // we know we have a value class and hence an exact type.
3659             //
3660             // If so, instead of boxing and then extracting the type, just
3661             // construct the type directly.
3662             if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
3663                 (constraintCallThisTransform == CORINFO_BOX_THIS))
3664             {
3665                 // Ensure this is one of the simple box cases (in particular, rule out nullables).
3666                 const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
3667                 const bool            isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
3668
3669                 if (isSafeToOptimize)
3670                 {
3671                     JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
3672                     impPopStack();
3673                     GenTree* typeHandleOp =
3674                         impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
3675                     GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
3676                     GenTree*        runtimeType =
3677                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3678                     retNode = runtimeType;
3679                 }
3680             }
3681
3682 #ifdef DEBUG
3683             if (retNode != nullptr)
3684             {
3685                 JITDUMP("Optimized result for call to GetType is\n");
3686                 if (verbose)
3687                 {
3688                     gtDispTree(retNode);
3689                 }
3690             }
3691 #endif
3692
3693             // Else expand as an intrinsic, unless the call is constrained,
3694             // in which case we defer expansion to allow impImportCall do the
3695             // special constraint processing.
3696             if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
3697             {
3698                 JITDUMP("Expanding as special intrinsic\n");
3699                 impPopStack();
3700                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3701
3702                 // Set the CALL flag to indicate that the operator is implemented by a call.
3703                 // Set also the EXCEPTION flag because the native implementation of
3704                 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3705                 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3706                 retNode = op1;
3707                 // Might be further optimizable, so arrange to leave a mark behind
3708                 isSpecial = true;
3709             }
3710
3711             if (retNode == nullptr)
3712             {
3713                 JITDUMP("Leaving as normal call\n");
3714                 // Might be further optimizable, so arrange to leave a mark behind
3715                 isSpecial = true;
3716             }
3717
3718             break;
3719         }
3720
3721 #endif
3722         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3723         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3724         // substitution.  The parameter byref will be assigned into the newly allocated object.
3725         case CORINFO_INTRINSIC_ByReference_Ctor:
3726         {
3727             // Remove call to constructor and directly assign the byref passed
3728             // to the call to the first slot of the ByReference struct.
3729             op1                                    = impPopStack().val;
3730             GenTreePtr           thisptr           = newobjThis;
3731             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3732             GenTreePtr           field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3733             GenTreePtr           assign            = gtNewAssignNode(field, op1);
3734             GenTreePtr           byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3735             assert(byReferenceStruct != nullptr);
3736             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3737             retNode = assign;
3738             break;
3739         }
3740         // Implement ptr value getter for ByReference struct.
3741         case CORINFO_INTRINSIC_ByReference_Value:
3742         {
3743             op1                         = impPopStack().val;
3744             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3745             GenTreePtr           field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3746             retNode                     = field;
3747             break;
3748         }
3749         case CORINFO_INTRINSIC_Span_GetItem:
3750         case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3751         {
3752             // Have the index and a stack pointer to the Span<T> on the stack. Expand to:
3753             //
3754             // For Span<T>
3755             //   Comma
3756             //     BoundsCheck(index, s->_length)
3757             //     s->_pointer + index * sizeof(T)
3758             //
3759             // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
3760             //
3761             // Signature should show one class type parameter, which
3762             // we need to examine.
3763             assert(sig->sigInst.classInstCount == 1);
3764             CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3765             const unsigned       elemSize    = info.compCompHnd->getClassSize(spanElemHnd);
3766             assert(elemSize > 0);
3767
3768             const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3769
3770             JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3771                     info.compCompHnd->getClassName(spanElemHnd), elemSize);
3772
3773             GenTreePtr index          = impPopStack().val;
3774             GenTreePtr ptrToSpan      = impPopStack().val;
3775             GenTreePtr indexClone     = nullptr;
3776             GenTreePtr ptrToSpanClone = nullptr;
3777
3778 #if defined(DEBUG)
3779             if (verbose)
3780             {
3781                 printf("with ptr-to-span\n");
3782                 gtDispTree(ptrToSpan);
3783                 printf("and index\n");
3784                 gtDispTree(index);
3785             }
3786 #endif // defined(DEBUG)
3787
3788             // We need to use both index and ptr-to-span twice, so clone or spill.
3789             index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3790                                  nullptr DEBUGARG("Span.get_Item index"));
3791             ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3792                                      nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3793
3794             // Bounds check
3795             CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
3796             const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3797             GenTreePtr           length       = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
3798             GenTreePtr           boundsCheck  = new (this, GT_ARR_BOUNDS_CHECK)
3799                 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3800
3801             // Element access
3802             GenTreePtr           indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3803             GenTreePtr           sizeofNode  = gtNewIconNode(elemSize);
3804             GenTreePtr           mulNode     = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3805             CORINFO_FIELD_HANDLE ptrHnd      = info.compCompHnd->getFieldInClass(clsHnd, 0);
3806             const unsigned       ptrOffset   = info.compCompHnd->getFieldOffset(ptrHnd);
3807             GenTreePtr           data        = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
3808             GenTreePtr           result      = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3809
3810             // Prepare result
3811             var_types resultType = JITtype2varType(sig->retType);
3812             assert(resultType == result->TypeGet());
3813             retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3814
3815             break;
3816         }
3817
3818         case CORINFO_INTRINSIC_GetRawHandle:
3819         {
3820             noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3821             CORINFO_RESOLVED_TOKEN resolvedToken;
3822             resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3823             resolvedToken.tokenScope   = info.compScopeHnd;
3824             resolvedToken.token        = memberRef;
3825             resolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
3826
3827             CORINFO_GENERICHANDLE_RESULT embedInfo;
3828             info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3829
3830             GenTreePtr rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3831                                                    embedInfo.compileTimeHandle);
3832             if (rawHandle == nullptr)
3833             {
3834                 return nullptr;
3835             }
3836
3837             noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3838
3839             unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3840             impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3841
3842             GenTreePtr lclVar     = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3843             GenTreePtr lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3844             var_types  resultType = JITtype2varType(sig->retType);
3845             retNode               = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3846
3847             break;
3848         }
3849
3850         case CORINFO_INTRINSIC_TypeEQ:
3851         case CORINFO_INTRINSIC_TypeNEQ:
3852         {
3853             JITDUMP("Importing Type.op_*Equality intrinsic\n");
3854             op1              = impStackTop(1).val;
3855             op2              = impStackTop(0).val;
3856             GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
3857             if (optTree != nullptr)
3858             {
3859                 // Success, clean up the evaluation stack.
3860                 impPopStack();
3861                 impPopStack();
3862
3863                 // See if we can optimize even further, to a handle compare.
3864                 optTree = gtFoldTypeCompare(optTree);
3865
3866                 // See if we can now fold a handle compare to a constant.
3867                 optTree = gtFoldExpr(optTree);
3868
3869                 retNode = optTree;
3870             }
3871             else
3872             {
3873                 // Retry optimizing these later
3874                 isSpecial = true;
3875             }
3876             break;
3877         }
3878
3879         case CORINFO_INTRINSIC_GetCurrentManagedThread:
3880         case CORINFO_INTRINSIC_GetManagedThreadId:
3881         {
3882             // Retry optimizing these during morph
3883             isSpecial = true;
3884             break;
3885         }
3886
3887         default:
3888             /* Unknown intrinsic */
3889             intrinsicID = CORINFO_INTRINSIC_Illegal;
3890             break;
3891     }
3892
3893     // Look for new-style jit intrinsics by name
3894     if (ni != NI_Illegal)
3895     {
3896         assert(retNode == nullptr);
3897         switch (ni)
3898         {
3899             case NI_System_Enum_HasFlag:
3900             {
3901                 GenTree* thisOp  = impStackTop(1).val;
3902                 GenTree* flagOp  = impStackTop(0).val;
3903                 GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
3904
3905                 if (optTree != nullptr)
3906                 {
3907                     // Optimization successful. Pop the stack for real.
3908                     impPopStack();
3909                     impPopStack();
3910                     retNode = optTree;
3911                 }
3912                 else
3913                 {
3914                     // Retry optimizing this during morph.
3915                     isSpecial = true;
3916                 }
3917
3918                 break;
3919             }
3920
3921             case NI_MathF_Round:
3922             case NI_Math_Round:
3923             {
3924                 // Math.Round and MathF.Round used to be a traditional JIT intrinsic. In order
3925                 // to simplify the transition, we will just treat it as if it was still the
3926                 // old intrinsic, CORINFO_INTRINSIC_Round. This should end up flowing properly
3927                 // everywhere else.
3928
3929                 retNode = impMathIntrinsic(method, sig, callType, CORINFO_INTRINSIC_Round, tailCall);
3930                 break;
3931             }
3932
3933             case NI_System_Collections_Generic_EqualityComparer_get_Default:
3934             {
3935                 // Flag for later handling during devirtualization.
3936                 isSpecial = true;
3937                 break;
3938             }
3939
3940             default:
3941                 break;
3942         }
3943     }
3944
3945     if (mustExpand)
3946     {
3947         if (retNode == nullptr)
3948         {
3949             NO_WAY("JIT must expand the intrinsic!");
3950         }
3951     }
3952
3953     // Optionally report if this intrinsic is special
3954     // (that is, potentially re-optimizable during morph).
3955     if (isSpecialIntrinsic != nullptr)
3956     {
3957         *isSpecialIntrinsic = isSpecial;
3958     }
3959
3960     return retNode;
3961 }
3962
3963 GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
3964                                     CORINFO_SIG_INFO*     sig,
3965                                     var_types             callType,
3966                                     CorInfoIntrinsics     intrinsicID,
3967                                     bool                  tailCall)
3968 {
3969     GenTree* op1;
3970     GenTree* op2;
3971
3972     assert(callType != TYP_STRUCT);
3973     assert((intrinsicID == CORINFO_INTRINSIC_Sin) || intrinsicID == CORINFO_INTRINSIC_Cbrt ||
3974            (intrinsicID == CORINFO_INTRINSIC_Sqrt) || (intrinsicID == CORINFO_INTRINSIC_Abs) ||
3975            (intrinsicID == CORINFO_INTRINSIC_Cos) || (intrinsicID == CORINFO_INTRINSIC_Round) ||
3976            (intrinsicID == CORINFO_INTRINSIC_Cosh) || (intrinsicID == CORINFO_INTRINSIC_Sinh) ||
3977            (intrinsicID == CORINFO_INTRINSIC_Tan) || (intrinsicID == CORINFO_INTRINSIC_Tanh) ||
3978            (intrinsicID == CORINFO_INTRINSIC_Asin) || (intrinsicID == CORINFO_INTRINSIC_Asinh) ||
3979            (intrinsicID == CORINFO_INTRINSIC_Acos) || (intrinsicID == CORINFO_INTRINSIC_Acosh) ||
3980            (intrinsicID == CORINFO_INTRINSIC_Atan) || (intrinsicID == CORINFO_INTRINSIC_Atan2) ||
3981            (intrinsicID == CORINFO_INTRINSIC_Atanh) || (intrinsicID == CORINFO_INTRINSIC_Log10) ||
3982            (intrinsicID == CORINFO_INTRINSIC_Pow) || (intrinsicID == CORINFO_INTRINSIC_Exp) ||
3983            (intrinsicID == CORINFO_INTRINSIC_Ceiling) || (intrinsicID == CORINFO_INTRINSIC_Floor));
3984
3985     op1 = nullptr;
3986
3987 #if defined(LEGACY_BACKEND)
3988     if (IsTargetIntrinsic(intrinsicID))
3989 #elif !defined(_TARGET_X86_)
3990     // Intrinsics that are not implemented directly by target instructions will
3991     // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3992     // don't do this optimization, because
3993     //  a) it would break backward compatibility on desktop .NET 4.6 / 4.6.1, and
3994     //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
3995     //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3996     if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3997 #else
3998     // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3999     // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
4000     // code generation for certain EH constructs.
4001     if (!IsIntrinsicImplementedByUserCall(intrinsicID))
4002 #endif
4003     {
4004         switch (sig->numArgs)
4005         {
4006             case 1:
4007                 op1 = impPopStack().val;
4008
4009 #if FEATURE_X87_DOUBLES
4010
4011                 // X87 stack doesn't differentiate between float/double
4012                 // so it doesn't need a cast, but everybody else does
4013                 // Just double check it is at least a FP type
4014                 noway_assert(varTypeIsFloating(op1));
4015
4016 #else // FEATURE_X87_DOUBLES
4017
4018                 if (op1->TypeGet() != callType)
4019                 {
4020                     op1 = gtNewCastNode(callType, op1, callType);
4021                 }
4022
4023 #endif // FEATURE_X87_DOUBLES
4024
4025                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
4026                 break;
4027
4028             case 2:
4029                 op2 = impPopStack().val;
4030                 op1 = impPopStack().val;
4031
4032 #if FEATURE_X87_DOUBLES
4033
4034                 // X87 stack doesn't differentiate between float/double
4035                 // so it doesn't need a cast, but everybody else does
4036                 // Just double check it is at least a FP type
4037                 noway_assert(varTypeIsFloating(op2));
4038                 noway_assert(varTypeIsFloating(op1));
4039
4040 #else // FEATURE_X87_DOUBLES
4041
4042                 if (op2->TypeGet() != callType)
4043                 {
4044                     op2 = gtNewCastNode(callType, op2, callType);
4045                 }
4046                 if (op1->TypeGet() != callType)
4047                 {
4048                     op1 = gtNewCastNode(callType, op1, callType);
4049                 }
4050
4051 #endif // FEATURE_X87_DOUBLES
4052
4053                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
4054                 break;
4055
4056             default:
4057                 NO_WAY("Unsupported number of args for Math Intrinsic");
4058         }
4059
4060 #ifndef LEGACY_BACKEND
4061         if (IsIntrinsicImplementedByUserCall(intrinsicID))
4062         {
4063             op1->gtFlags |= GTF_CALL;
4064         }
4065 #endif
4066     }
4067
4068     return op1;
4069 }
4070
4071 //------------------------------------------------------------------------
4072 // lookupNamedIntrinsic: map method to jit named intrinsic value
4073 //
4074 // Arguments:
4075 //    method -- method handle for method
4076 //
4077 // Return Value:
4078 //    Id for the named intrinsic, or Illegal if none.
4079 //
4080 // Notes:
4081 //    method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
4082 //    otherwise it is not a named jit intrinsic.
4083 //
4084
4085 NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
4086 {
4087     NamedIntrinsic result = NI_Illegal;
4088
4089     const char* className     = nullptr;
4090     const char* namespaceName = nullptr;
4091     const char* methodName    = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
4092
4093     if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
4094     {
4095         return result;
4096     }
4097
4098     if (strcmp(namespaceName, "System") == 0)
4099     {
4100         if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
4101         {
4102             result = NI_System_Enum_HasFlag;
4103         }
4104         else if ((strcmp(className, "MathF") == 0) && (strcmp(methodName, "Round") == 0))
4105         {
4106             result = NI_MathF_Round;
4107         }
4108         else if ((strcmp(className, "Math") == 0) && (strcmp(methodName, "Round") == 0))
4109         {
4110             result = NI_Math_Round;
4111         }
4112     }
4113     else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
4114     {
4115         if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
4116         {
4117             result = NI_System_Collections_Generic_EqualityComparer_get_Default;
4118         }
4119     }
4120
4121 #if FEATURE_HW_INTRINSICS && defined(_TARGET_XARCH_)
4122     if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.X86") == 0)
4123     {
4124         InstructionSet isa = lookupHWIntrinsicISA(className);
4125         result             = lookupHWIntrinsic(methodName, isa);
4126     }
4127 #endif // FEATURE_HW_INTRINSICS
4128     return result;
4129 }
4130
4131 /*****************************************************************************/
4132
4133 GenTreePtr Compiler::impArrayAccessIntrinsic(
4134     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
4135 {
4136     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
4137        the following, as it generates fatter code.
4138     */
4139
4140     if (compCodeOpt() == SMALL_CODE)
4141     {
4142         return nullptr;
4143     }
4144
4145     /* These intrinsics generate fatter (but faster) code and are only
4146        done if we don't need SMALL_CODE */
4147
4148     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
4149
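    // For example (illustrative): for T[,]::Get(int, int), sig->numArgs is 2 and rank is 2;
    // for T[,]::Set(int, int, T), sig->numArgs is 3 but rank is still 2, since the last
    // argument is the value being stored rather than an index.
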
4150     // The rank 1 case is special because it has to handle two array formats;
4151     // we simply don't handle that case here.
4152     if (rank > GT_ARR_MAX_RANK || rank <= 1)
4153     {
4154         return nullptr;
4155     }
4156
4157     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
4158     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
4159
4160     // For the ref case, we will only be able to inline if the types match
4161     // (the verifier checks for this; we don't care about the nonverified case)
4162     // and the type is final (so we don't need to do the cast).
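    // Illustrative example: for a String[] the element class (String) is sealed, so no store-time
    // type check is needed and the expansion can proceed; for an Object[] the element class is not
    // final, so we return nullptr below and fall back to the ordinary call.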
4163     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
4164     {
4165         // Get the call site signature
4166         CORINFO_SIG_INFO LocalSig;
4167         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
4168         assert(LocalSig.hasThis());
4169
4170         CORINFO_CLASS_HANDLE actualElemClsHnd;
4171
4172         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4173         {
4174             // Fetch the last argument, the one that indicates the type we are setting.
4175             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
4176             for (unsigned r = 0; r < rank; r++)
4177             {
4178                 argType = info.compCompHnd->getArgNext(argType);
4179             }
4180
4181             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
4182             actualElemClsHnd = argInfo.GetClassHandle();
4183         }
4184         else
4185         {
4186             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
4187
4188             // Fetch the return type
4189             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
4190             assert(retInfo.IsByRef());
4191             actualElemClsHnd = retInfo.GetClassHandle();
4192         }
4193
4194         // if it's not final, we can't do the optimization
4195         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
4196         {
4197             return nullptr;
4198         }
4199     }
4200
4201     unsigned arrayElemSize;
4202     if (elemType == TYP_STRUCT)
4203     {
4204         assert(arrElemClsHnd);
4205
4206         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
4207     }
4208     else
4209     {
4210         arrayElemSize = genTypeSize(elemType);
4211     }
4212
4213     if ((unsigned char)arrayElemSize != arrayElemSize)
4214     {
4215         // arrayElemSize would be truncated as an unsigned char.
4216         // This means the array element is too large. Don't do the optimization.
4217         return nullptr;
4218     }
4219
4220     GenTreePtr val = nullptr;
4221
4222     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4223     {
4224         // Assignment of a struct is more work, and there are more gets than sets.
4225         if (elemType == TYP_STRUCT)
4226         {
4227             return nullptr;
4228         }
4229
4230         val = impPopStack().val;
4231         assert(genActualType(elemType) == genActualType(val->gtType) ||
4232                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
4233                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
4234                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
4235     }
4236
4237     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
4238
4239     GenTreePtr inds[GT_ARR_MAX_RANK];
4240     for (unsigned k = rank; k > 0; k--)
4241     {
4242         inds[k - 1] = impPopStack().val;
4243     }
4244
4245     GenTreePtr arr = impPopStack().val;
4246     assert(arr->gtType == TYP_REF);
4247
4248     GenTreePtr arrElem =
4249         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
4250                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
4251
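    // Roughly (illustrative), for a rank-2 Get this yields GT_IND(GT_ARR_ELEM(arr, i0, i1)); for
    // Address the GT_ARR_ELEM byref is returned directly, and for Set the indirection becomes the
    // destination of an assignment from 'val', as built below.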
4252     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
4253     {
4254         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
4255     }
4256
4257     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4258     {
4259         assert(val != nullptr);
4260         return gtNewAssignNode(arrElem, val);
4261     }
4262     else
4263     {
4264         return arrElem;
4265     }
4266 }
4267
4268 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
4269 {
4270     unsigned i;
4271
4272     // do some basic checks first
4273     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
4274     {
4275         return FALSE;
4276     }
4277
4278     if (verCurrentState.esStackDepth > 0)
4279     {
4280         // merge stack types
4281         StackEntry* parentStack = block->bbStackOnEntry();
4282         StackEntry* childStack  = verCurrentState.esStack;
4283
4284         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
4285         {
4286             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
4287             {
4288                 return FALSE;
4289             }
4290         }
4291     }
4292
4293     // merge initialization status of this ptr
4294
4295     if (verTrackObjCtorInitState)
4296     {
4297         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
4298         assert(verCurrentState.thisInitialized != TIS_Bottom);
4299
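        // Illustrative merge behavior (matching the code below): if the current state is TIS_Init
        // but the successor's recorded entry state is TIS_Uninit (or vice versa), the successor is
        // widened to TIS_Top, meaning 'this' may arrive either initialized or not.
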
4300         // If the successor block's thisInit state is unknown, copy it from the current state.
4301         if (block->bbThisOnEntry() == TIS_Bottom)
4302         {
4303             *changed = true;
4304             verSetThisInit(block, verCurrentState.thisInitialized);
4305         }
4306         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
4307         {
4308             if (block->bbThisOnEntry() != TIS_Top)
4309             {
4310                 *changed = true;
4311                 verSetThisInit(block, TIS_Top);
4312
4313                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
4314                 {
4315                     // The block is bad. Control can flow through the block to any handler that catches the
4316                     // verification exception, but the importer ignores bad blocks and therefore won't model
4317                     // this flow in the normal way. To complete the merge into the bad block, the new state
4318                     // needs to be manually pushed to the handlers that may be reached after the verification
4319                     // exception occurs.
4320                     //
4321                     // Usually, the new state was already propagated to the relevant handlers while processing
4322                     // the predecessors of the bad block. The exception is when the bad block is at the start
4323                     // of a try region, meaning it is protected by additional handlers that do not protect its
4324                     // predecessors.
4325                     //
4326                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
4327                     {
4328                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
4329                         // recursive calls back into this code path (if successors of the current bad block are
4330                         // also bad blocks).
4331                         //
4332                         ThisInitState origTIS           = verCurrentState.thisInitialized;
4333                         verCurrentState.thisInitialized = TIS_Top;
4334                         impVerifyEHBlock(block, true);
4335                         verCurrentState.thisInitialized = origTIS;
4336                     }
4337                 }
4338             }
4339         }
4340     }
4341     else
4342     {
4343         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4344     }
4345
4346     return TRUE;
4347 }
4348
4349 /*****************************************************************************
4350  * 'logMsg' is true if a log message needs to be logged. false if the caller has
4351  *   already logged it (presumably in a more detailed fashion than done here)
4352  * 'bVerificationException' is true for a verification exception, false for a
4353  *   "call unauthorized by host" exception.
4354  */
4355
4356 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4357 {
4358     block->bbJumpKind = BBJ_THROW;
4359     block->bbFlags |= BBF_FAILED_VERIFICATION;
4360
4361     impCurStmtOffsSet(block->bbCodeOffs);
4362
4363 #ifdef DEBUG
4364     // we need this since BeginTreeList asserts otherwise
4365     impTreeList = impTreeLast = nullptr;
4366     block->bbFlags &= ~BBF_IMPORTED;
4367
4368     if (logMsg)
4369     {
4370         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4371                 block->bbCodeOffs, block->bbCodeOffsEnd));
4372         if (verbose)
4373         {
4374             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4375         }
4376     }
4377
4378     if (JitConfig.DebugBreakOnVerificationFailure())
4379     {
4380         DebugBreak();
4381     }
4382 #endif
4383
4384     impBeginTreeList();
4385
4386     // if the stack is non-empty evaluate all the side-effects
4387     if (verCurrentState.esStackDepth > 0)
4388     {
4389         impEvalSideEffects();
4390     }
4391     assert(verCurrentState.esStackDepth == 0);
4392
4393     GenTreePtr op1 =
4394         gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4395     // verCurrentState.esStackDepth = 0;
4396     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4397
4398     // The inliner is not able to handle methods that require a throw block, so
4399     // make sure this method never gets inlined.
4400     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4401 }
4402
4403 /*****************************************************************************
4404  *
4405  */
4406 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4407
4408 {
4409     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4410     // slightly different mechanism in which it calls the JIT to perform IL verification:
4411     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4412     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4413     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4414     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
4415     // up the exception; instead it embeds a throw inside the offending basic block and lets the
4416     // jitted method fail at run time.
4417     //
4418     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4419     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
4420     // just try to find out whether to fail this method before even actually jitting it.  So, in case
4421     // but will just try to find out whether to fail this method before even actually jitting it.  So, when
4422     // we detect these two conditions, instead of generating a throw statement inside the offending
4423     // basic block, we immediately fail the JIT and notify the VM so that the IsVerifiable() predicate
4424     // returns false, making RyuJIT behave the same way JIT64 does.
4425     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4426     // RyuJIT for the time being until we completely replace JIT64.
4427     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4428
4429     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4430     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
4431     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4432     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4433     // be turned off during importation).
4434     CLANG_FORMAT_COMMENT_ANCHOR;
4435
4436 #ifdef _TARGET_64BIT_
4437
4438 #ifdef DEBUG
4439     bool canSkipVerificationResult =
4440         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4441     assert(tiVerificationNeeded || canSkipVerificationResult);
4442 #endif // DEBUG
4443
4444     // Add the non-verifiable flag to the compiler
4445     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4446     {
4447         tiIsVerifiableCode = FALSE;
4448     }
4449 #endif //_TARGET_64BIT_
4450     verResetCurrentState(block, &verCurrentState);
4451     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4452
4453 #ifdef DEBUG
4454     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4455 #endif                   // DEBUG
4456 }
4457
4458 /******************************************************************************/
4459 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4460 {
4461     assert(ciType < CORINFO_TYPE_COUNT);
4462
4463     typeInfo tiResult;
4464     switch (ciType)
4465     {
4466         case CORINFO_TYPE_STRING:
4467         case CORINFO_TYPE_CLASS:
4468             tiResult = verMakeTypeInfo(clsHnd);
4469             if (!tiResult.IsType(TI_REF))
4470             { // type must be consistent with element type
4471                 return typeInfo();
4472             }
4473             break;
4474
4475 #ifdef _TARGET_64BIT_
4476         case CORINFO_TYPE_NATIVEINT:
4477         case CORINFO_TYPE_NATIVEUINT:
4478             if (clsHnd)
4479             {
4480                 // If we have more precise information, use it
4481                 return verMakeTypeInfo(clsHnd);
4482             }
4483             else
4484             {
4485                 return typeInfo::nativeInt();
4486             }
4487             break;
4488 #endif // _TARGET_64BIT_
4489
4490         case CORINFO_TYPE_VALUECLASS:
4491         case CORINFO_TYPE_REFANY:
4492             tiResult = verMakeTypeInfo(clsHnd);
4493             // type must be consistent with element type;
4494             if (!tiResult.IsValueClass())
4495             {
4496                 return typeInfo();
4497             }
4498             break;
4499         case CORINFO_TYPE_VAR:
4500             return verMakeTypeInfo(clsHnd);
4501
4502         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4503         case CORINFO_TYPE_VOID:
4504             return typeInfo();
4505             break;
4506
4507         case CORINFO_TYPE_BYREF:
4508         {
4509             CORINFO_CLASS_HANDLE childClassHandle;
4510             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4511             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4512         }
4513         break;
4514
4515         default:
4516             if (clsHnd)
4517             { // If we have more precise information, use it
4518                 return typeInfo(TI_STRUCT, clsHnd);
4519             }
4520             else
4521             {
4522                 return typeInfo(JITtype2tiType(ciType));
4523             }
4524     }
4525     return tiResult;
4526 }
4527
4528 /******************************************************************************/
4529
4530 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4531 {
4532     if (clsHnd == nullptr)
4533     {
4534         return typeInfo();
4535     }
4536
4537     // Byrefs should only occur in method and local signatures, which are accessed
4538     // using ICorClassInfo and ICorClassInfo.getChildType.
4539     // So findClass() and getClassAttribs() should not be called for byrefs
4540
4541     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4542     {
4543         assert(!"Did findClass() return a Byref?");
4544         return typeInfo();
4545     }
4546
4547     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4548
4549     if (attribs & CORINFO_FLG_VALUECLASS)
4550     {
4551         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4552
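        // Illustrative note: for a primitive value class such as System.Int32,
        // getTypeForPrimitiveValueClass returns CORINFO_TYPE_INT, so the handle normalizes to
        // typeInfo(TI_INT) below rather than to a TI_STRUCT.
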
4553         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4554         // not occur here, so we may want to change this to an assert instead.
4555         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4556         {
4557             return typeInfo();
4558         }
4559
4560 #ifdef _TARGET_64BIT_
4561         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4562         {
4563             return typeInfo::nativeInt();
4564         }
4565 #endif // _TARGET_64BIT_
4566
4567         if (t != CORINFO_TYPE_UNDEF)
4568         {
4569             return (typeInfo(JITtype2tiType(t)));
4570         }
4571         else if (bashStructToRef)
4572         {
4573             return (typeInfo(TI_REF, clsHnd));
4574         }
4575         else
4576         {
4577             return (typeInfo(TI_STRUCT, clsHnd));
4578         }
4579     }
4580     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4581     {
4582         // See comment in _typeInfo.h for why we do it this way.
4583         return (typeInfo(TI_REF, clsHnd, true));
4584     }
4585     else
4586     {
4587         return (typeInfo(TI_REF, clsHnd));
4588     }
4589 }
4590
4591 /******************************************************************************/
4592 BOOL Compiler::verIsSDArray(typeInfo ti)
4593 {
4594     if (ti.IsNullObjRef())
4595     { // nulls are SD arrays
4596         return TRUE;
4597     }
4598
4599     if (!ti.IsType(TI_REF))
4600     {
4601         return FALSE;
4602     }
4603
4604     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4605     {
4606         return FALSE;
4607     }
4608     return TRUE;
4609 }
4610
4611 /******************************************************************************/
4612 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4613 /* Returns an error type if anything goes wrong */
4614
4615 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4616 {
4617     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4618
4619     if (!verIsSDArray(arrayObjectType))
4620     {
4621         return typeInfo();
4622     }
4623
4624     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4625     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4626
4627     return verMakeTypeInfo(ciType, childClassHandle);
4628 }
4629
4630 /*****************************************************************************
4631  */
4632 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4633 {
4634     CORINFO_CLASS_HANDLE classHandle;
4635     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4636
4637     var_types type = JITtype2varType(ciType);
4638     if (varTypeIsGC(type))
4639     {
4640         // For efficiency, getArgType only returns something in classHandle for
4641         // value types.  For other types that have additional type info, you
4642         // have to call back explicitly.
4643         classHandle = info.compCompHnd->getArgClass(sig, args);
4644         if (!classHandle)
4645         {
4646             NO_WAY("Could not figure out Class specified in argument or local signature");
4647         }
4648     }
4649
4650     return verMakeTypeInfo(ciType, classHandle);
4651 }
4652
4653 /*****************************************************************************/
4654
4655 // This does the expensive check to figure out whether the method
4656 // needs to be verified. It is called only when we fail verification,
4657 // just before throwing the verification exception.
4658
4659 BOOL Compiler::verNeedsVerification()
4660 {
4661     // If we have previously determined that verification is NOT needed
4662     // (for example in Compiler::compCompile), that means verification is really not needed.
4663     // Return the same decision we made before.
4664     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4665
4666     if (!tiVerificationNeeded)
4667     {
4668         return tiVerificationNeeded;
4669     }
4670
4671     assert(tiVerificationNeeded);
4672
4673     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4674     // obtain the answer.
4675     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4676         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4677
4678     // canSkipVerification will return one of the following three values:
4679     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4680     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4681     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4682     //     but need to insert a callout to the VM to ask during runtime
4683     //     whether to skip verification or not.
4684
4685     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4686     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4687     {
4688         tiRuntimeCalloutNeeded = true;
4689     }
4690
4691     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4692     {
4693         // Dev10 706080 - Testers don't like the assert, so just silence it
4694         // by not using the macros that invoke debugAssert.
4695         badCode();
4696     }
4697
4698     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4699     // The following line means we will NOT do jit time verification if canSkipVerification
4700     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4701     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4702     return tiVerificationNeeded;
4703 }
4704
4705 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4706 {
4707     if (ti.IsByRef())
4708     {
4709         return TRUE;
4710     }
4711     if (!ti.IsType(TI_STRUCT))
4712     {
4713         return FALSE;
4714     }
4715     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4716 }
4717
4718 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4719 {
4720     if (ti.IsPermanentHomeByRef())
4721     {
4722         return TRUE;
4723     }
4724     else
4725     {
4726         return FALSE;
4727     }
4728 }
4729
4730 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4731 {
4732     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4733             || ti.IsUnboxedGenericTypeVar() ||
4734             (ti.IsType(TI_STRUCT) &&
4735              // exclude byreflike structs
4736              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4737 }
4738
4739 // Is it a boxed value type?
4740 bool Compiler::verIsBoxedValueType(typeInfo ti)
4741 {
4742     if (ti.GetType() == TI_REF)
4743     {
4744         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4745         return !!eeIsValueClass(clsHnd);
4746     }
4747     else
4748     {
4749         return false;
4750     }
4751 }
4752
4753 /*****************************************************************************
4754  *
4755  *  Check if a TailCall is legal.
4756  */
4757
4758 bool Compiler::verCheckTailCallConstraint(
4759     OPCODE                  opcode,
4760     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4761     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4762     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4763                                                        // return false to the caller.
4764                                                        // If false, it will throw.
4765     )
4766 {
4767     DWORD            mflags;
4768     CORINFO_SIG_INFO sig;
4769     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4770                                    // this counter is used to keep track of how many items have been
4771                                    // virtually popped
4772
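    // Illustrative example: for a 'tail. call' to 'instance int C::M(int, int)', sig.numArgs is 2
    // and the implicit 'this' adds one more, so popCount ends up as 3 and the final check requires
    // the evaluation stack to hold exactly those 3 items.
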
4773     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4774     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4775     unsigned              methodClassFlgs = 0;
4776
4777     assert(impOpcodeIsCallOpcode(opcode));
4778
4779     if (compIsForInlining())
4780     {
4781         return false;
4782     }
4783
4784     // for calli, VerifyOrReturn that this is not a virtual method
4785     if (opcode == CEE_CALLI)
4786     {
4787         /* Get the call sig */
4788         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4789
4790         // We don't know the target method, so we have to infer the flags, or
4791         // assume the worst-case.
4792         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4793     }
4794     else
4795     {
4796         methodHnd = pResolvedToken->hMethod;
4797
4798         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4799
4800         // When verifying generic code we pair the method handle with its
4801         // owning class to get the exact method signature.
4802         methodClassHnd = pResolvedToken->hClass;
4803         assert(methodClassHnd);
4804
4805         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4806
4807         // opcode specific check
4808         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4809     }
4810
4811     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4812     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4813
4814     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4815     {
4816         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4817     }
4818
4819     // check compatibility of the arguments
4820     unsigned int argCount;
4821     argCount = sig.numArgs;
4822     CORINFO_ARG_LIST_HANDLE args;
4823     args = sig.args;
4824     while (argCount--)
4825     {
4826         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4827
4828         // check that the argument is not a byref for tailcalls
4829         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4830
4831         // For unsafe code, we might have parameters containing pointer to the stack location.
4832         // Disallow the tailcall for this kind.
4833         CORINFO_CLASS_HANDLE classHandle;
4834         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4835         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4836
4837         args = info.compCompHnd->getArgNext(args);
4838     }
4839
4840     // update popCount
4841     popCount += sig.numArgs;
4842
4843     // check for 'this', which is present on non-static methods not called via NEWOBJ
4844     if (!(mflags & CORINFO_FLG_STATIC))
4845     {
4846         // Always update the popCount.
4847         // This is crucial for the stack calculation to be correct.
4848         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4849         popCount++;
4850
4851         if (opcode == CEE_CALLI)
4852         {
4853             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4854             // on the stack.
4855             if (tiThis.IsValueClass())
4856             {
4857                 tiThis.MakeByRef();
4858             }
4859             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4860         }
4861         else
4862         {
4863             // Check type compatibility of the this argument
4864             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4865             if (tiDeclaredThis.IsValueClass())
4866             {
4867                 tiDeclaredThis.MakeByRef();
4868             }
4869
4870             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4871         }
4872     }
4873
4874     // Tail calls on constrained calls should be illegal too:
4875     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4876     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4877
4878     // Get the exact view of the signature for an array method
4879     if (sig.retType != CORINFO_TYPE_VOID)
4880     {
4881         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4882         {
4883             assert(opcode != CEE_CALLI);
4884             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4885         }
4886     }
4887
4888     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4889     typeInfo tiCallerRetType =
4890         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4891
4892     // void return type gets morphed into the error type, so we have to treat it specially here
4893     if (sig.retType == CORINFO_TYPE_VOID)
4894     {
4895         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4896                                   speculative);
4897     }
4898     else
4899     {
4900         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4901                                                    NormaliseForStack(tiCallerRetType), true),
4902                                   "tailcall return mismatch", speculative);
4903     }
4904
4905     // for tailcall, stack must be empty
4906     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4907
4908     return true; // Yes, tailcall is legal
4909 }
4910
4911 /*****************************************************************************
4912  *
4913  *  Checks the IL verification rules for the call
4914  */
4915
4916 void Compiler::verVerifyCall(OPCODE                  opcode,
4917                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4918                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4919                              bool                    tailCall,
4920                              bool                    readonlyCall,
4921                              const BYTE*             delegateCreateStart,
4922                              const BYTE*             codeAddr,
4923                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4924 {
4925     DWORD             mflags;
4926     CORINFO_SIG_INFO* sig      = nullptr;
4927     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4928                                     // this counter is used to keep track of how many items have been
4929                                     // virtually popped
4930
4931     // for calli, VerifyOrReturn that this is not a virtual method
4932     if (opcode == CEE_CALLI)
4933     {
4934         Verify(false, "Calli not verifiable");
4935         return;
4936     }
4937
4938     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4939     mflags = callInfo->verMethodFlags;
4940
4941     sig = &callInfo->verSig;
4942
4943     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4944     {
4945         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4946     }
4947
4948     // opcode specific check
4949     unsigned methodClassFlgs = callInfo->classFlags;
4950     switch (opcode)
4951     {
4952         case CEE_CALLVIRT:
4953             // cannot do callvirt on valuetypes
4954             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4955             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4956             break;
4957
4958         case CEE_NEWOBJ:
4959         {
4960             assert(!tailCall); // Importer should not allow this
4961             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4962                            "newobj must be on instance");
4963
4964             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4965             {
4966                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4967                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4968                 typeInfo tiDeclaredFtn =
4969                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4970                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4971
4972                 assert(popCount == 0);
4973                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4974                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4975
4976                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4977                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4978                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4979                                "delegate object type mismatch");
4980
4981                 CORINFO_CLASS_HANDLE objTypeHandle =
4982                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4983
4984                 // the method signature must be compatible with the delegate's invoke method
4985
4986                 // check that for virtual functions, the type of the object used to get the
4987                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4988                 // since this is a bit of work to determine in general, we pattern match stylized
4989                 // code sequences
4990
4991                 // the delegate creation code check, which used to be done later, is now done here
4992                 // so we can read delegateMethodRef directly
4993                 // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
4994                 // we then use it in our call to isCompatibleDelegate().
4995
4996                 mdMemberRef delegateMethodRef = mdMemberRefNil;
4997                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4998                                "must create delegates with certain IL");
4999
5000                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
5001                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
5002                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
5003                 delegateResolvedToken.token        = delegateMethodRef;
5004                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
5005                 info.compCompHnd->resolveToken(&delegateResolvedToken);
5006
5007                 CORINFO_CALL_INFO delegateCallInfo;
5008                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
5009                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
5010
5011                 BOOL isOpenDelegate = FALSE;
5012                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
5013                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
5014                                                                       &isOpenDelegate),
5015                                "function incompatible with delegate");
5016
5017                 // check the constraints on the target method
5018                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
5019                                "delegate target has unsatisfied class constraints");
5020                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
5021                                                                             tiActualFtn.GetMethod()),
5022                                "delegate target has unsatisfied method constraints");
5023
5024                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
5025                 // for additional verification rules for delegates
5026                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
5027                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
5028                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5029                 {
5030
5031                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
5032 #ifdef DEBUG
5033                         && StrictCheckForNonVirtualCallToVirtualMethod()
5034 #endif
5035                             )
5036                     {
5037                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5038                         {
5039                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
5040                                                verIsBoxedValueType(tiActualObj),
5041                                            "The 'this' parameter to the call must be either the calling method's "
5042                                            "'this' parameter or "
5043                                            "a boxed value type.");
5044                         }
5045                     }
5046                 }
5047
5048                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
5049                 {
5050                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
5051
5052                     Verify(targetIsStatic || !isOpenDelegate,
5053                            "Unverifiable creation of an open instance delegate for a protected member.");
5054
5055                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
5056                                                                 ? info.compClassHnd
5057                                                                 : tiActualObj.GetClassHandleForObjRef();
5058
5059                     // In the case of protected methods, it is a requirement that the 'this'
5060                     // pointer be a subclass of the current context.  Perform this check.
5061                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5062                            "Accessing protected method through wrong type.");
5063                 }
5064                 goto DONE_ARGS;
5065             }
5066         }
5067         // fall thru to default checks
5068         default:
5069             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
5070     }
5071     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
5072                    "can only newobj a delegate constructor");
5073
5074     // check compatibility of the arguments
5075     unsigned int argCount;
5076     argCount = sig->numArgs;
5077     CORINFO_ARG_LIST_HANDLE args;
5078     args = sig->args;
5079     while (argCount--)
5080     {
5081         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
5082
5083         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
5084         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
5085
5086         args = info.compCompHnd->getArgNext(args);
5087     }
5088
5089 DONE_ARGS:
5090
5091     // update popCount
5092     popCount += sig->numArgs;
5093
5094     // check for 'this', which is present on non-static methods not called via NEWOBJ
5095     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
5096     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
5097     {
5098         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5099         popCount++;
5100
5101         // If it is null, we assume we can access it (since it will AV shortly)
5102         // If it is anything but a reference class, there is no hierarchy, so
5103         // again, we don't need the precise instance class to compute 'protected' access
5104         if (tiThis.IsType(TI_REF))
5105         {
5106             instanceClassHnd = tiThis.GetClassHandleForObjRef();
5107         }
5108
5109         // Check type compatibility of the this argument
5110         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
5111         if (tiDeclaredThis.IsValueClass())
5112         {
5113             tiDeclaredThis.MakeByRef();
5114         }
5115
5116         // If this is a call to the base class .ctor, set thisPtr Init for
5117         // this block.
5118         if (mflags & CORINFO_FLG_CONSTRUCTOR)
5119         {
5120             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
5121                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
5122             {
5123                 assert(verCurrentState.thisInitialized !=
5124                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
5125                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
5126                                "Call to base class constructor when 'this' is possibly initialized");
5127                 // Otherwise, 'this' is now initialized.
5128                 verCurrentState.thisInitialized = TIS_Init;
5129                 tiThis.SetInitialisedObjRef();
5130             }
5131             else
5132             {
5133                 // We allow direct calls to value type constructors
5134                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
5135                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
5136                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
5137                                "Bad call to a constructor");
5138             }
5139         }
5140
5141         if (pConstrainedResolvedToken != nullptr)
5142         {
5143             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
5144
5145             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
5146
5147             // We just dereference this and test for equality
5148             tiThis.DereferenceByRef();
5149             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
5150                            "this type mismatch with constrained type operand");
5151
5152             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
5153             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
5154         }
5155
5156         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
5157         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
5158         {
5159             tiDeclaredThis.SetIsReadonlyByRef();
5160         }
5161
5162         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
5163
5164         if (tiThis.IsByRef())
5165         {
5166             // Find the actual type where the method exists (as opposed to what is declared
5167             // in the metadata). This is to prevent passing a byref as the "this" argument
5168             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
5169
5170             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
5171             VerifyOrReturn(eeIsValueClass(actualClassHnd),
5172                            "Call to base type of valuetype (which is never a valuetype)");
5173         }
5174
5175         // Rules for non-virtual call to a non-final virtual method:
5176
5177         // Define:
5178         // The "this" pointer is considered to be "possibly written" if
5179         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
5180         //   (or)
5181         //   2. It has been stored to (STARG.0) anywhere in the method.
5182
5183         // A non-virtual call to a non-final virtual method is only allowed if
5184         //   1. The this pointer passed to the callee is an instance of a boxed value type.
5185         //   (or)
5186         //   2. The this pointer passed to the callee is the current method's this pointer.
5187         //      (and) The current method's this pointer is not "possibly written".
5188
5189         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
5190         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
5191         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
5192         // harder and more error prone.
5193
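        // Illustrative example (C#): in 'class Derived : Base { public override void M() { base.M(); } }',
        // the 'base.M()' call is a non-virtual CEE_CALL to a virtual method; it is accepted below only
        // because the 'this' being passed is the caller's own, never-written 'this' parameter (or a
        // boxed value type).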
5194         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
5195 #ifdef DEBUG
5196             && StrictCheckForNonVirtualCallToVirtualMethod()
5197 #endif
5198                 )
5199         {
5200             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5201             {
5202                 VerifyOrReturn(
5203                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
5204                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
5205                     "a boxed value type.");
5206             }
5207         }
5208     }
5209
5210     // check any constraints on the callee's class and type parameters
5211     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
5212                    "method has unsatisfied class constraints");
5213     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
5214                    "method has unsatisfied method constraints");
5215
5216     if (mflags & CORINFO_FLG_PROTECTED)
5217     {
5218         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5219                        "Can't access protected method");
5220     }
5221
5222     // Get the exact view of the signature for an array method
5223     if (sig->retType != CORINFO_TYPE_VOID)
5224     {
5225         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
5226     }
5227
5228     // "readonly." prefixed calls only allowed for the Address operation on arrays.
5229     // The methods supported by array types are under the control of the EE
5230     // so we can trust that only the Address operation returns a byref.
5231     if (readonlyCall)
5232     {
5233         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
5234         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
5235                        "unexpected use of readonly prefix");
5236     }
5237
5238     // Verify the tailcall
5239     if (tailCall)
5240     {
5241         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
5242     }
5243 }
5244
5245 /*****************************************************************************
5246  *  Checks that a delegate creation is done using the following pattern:
5247  *     dup
5248  *     ldvirtftn targetMemberRef
5249  *  OR
5250  *     ldftn targetMemberRef
5251  *
5252  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
5253  *  not in this basic block)
5254  *
5255  *  targetMemberRef is read from the code sequence.
5256  *  targetMemberRef is validated iff verificationNeeded.
5257  */
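//
// For example (illustrative IL for a closed instance delegate over a virtual method):
//     ldloc.0                                      // delegate target object
//     dup
//     ldvirtftn  instance void C::M()
//     newobj     instance void D::.ctor(object, native int)
// Here 'delegateCreateStart' points at the 'dup', and targetMemberRef is the token of C::M read
// from the ldvirtftn operand (at offset 3, past the one-byte dup and two-byte ldvirtftn opcodes).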
5258
5259 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
5260                                         const BYTE*  codeAddr,
5261                                         mdMemberRef& targetMemberRef)
5262 {
5263     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5264     {
5265         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
5266         return TRUE;
5267     }
5268     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
5269     {
5270         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
5271         return TRUE;
5272     }
5273
5274     return FALSE;
5275 }
5276
5277 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
5278 {
5279     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
5280     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
5281     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
5282     if (!tiCompatibleWith(value, normPtrVal, true))
5283     {
5284         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
5285         compUnsafeCastUsed = true;
5286     }
5287     return ptrVal;
5288 }
5289
5290 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
5291 {
5292     assert(!instrType.IsStruct());
5293
5294     typeInfo ptrVal;
5295     if (ptr.IsByRef())
5296     {
5297         ptrVal = DereferenceByRef(ptr);
5298         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
5299         {
5300             Verify(false, "bad pointer");
5301             compUnsafeCastUsed = true;
5302         }
5303         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
5304         {
5305             Verify(false, "pointer not consistent with instr");
5306             compUnsafeCastUsed = true;
5307         }
5308     }
5309     else
5310     {
5311         Verify(false, "pointer not byref");
5312         compUnsafeCastUsed = true;
5313     }
5314
5315     return ptrVal;
5316 }
5317
5318 // Verify that the field is used properly.  'tiThis' is NULL for statics,
5319 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
5320 // ld*flda or a st*fld.
5321 // 'enclosingClass' is given if we are accessing a field in some specific type.
5322
5323 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
5324                               const CORINFO_FIELD_INFO& fieldInfo,
5325                               const typeInfo*           tiThis,
5326                               BOOL                      mutator,
5327                               BOOL                      allowPlainStructAsThis)
5328 {
5329     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5330     unsigned             fieldFlags     = fieldInfo.fieldFlags;
5331     CORINFO_CLASS_HANDLE instanceClass =
5332         info.compClassHnd; // for statics, we imagine the instance is the current class.
5333
5334     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5335     if (mutator)
5336     {
5337         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA-based static");
5338         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5339         {
5340             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5341                        info.compIsStatic == isStaticField,
5342                    "bad use of initonly field (set or address taken)");
5343         }
5344     }
5345
5346     if (tiThis == nullptr)
5347     {
5348         Verify(isStaticField, "used static opcode with non-static field");
5349     }
5350     else
5351     {
5352         typeInfo tThis = *tiThis;
5353
5354         if (allowPlainStructAsThis && tThis.IsValueClass())
5355         {
5356             tThis.MakeByRef();
5357         }
5358
5359         // If it is null, we assume we can access it (since it will AV shortly)
5360         // If it is anything but a reference class, there is no hierarchy, so
5361         // again, we don't need the precise instance class to compute 'protected' access
5362         if (tiThis->IsType(TI_REF))
5363         {
5364             instanceClass = tiThis->GetClassHandleForObjRef();
5365         }
5366
5367         // Note that even if the field is static, we require that the this pointer
5368         // satisfy the same constraints as a non-static field.  This happens to
5369         // be simpler and seems reasonable
5370         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5371         if (tiDeclaredThis.IsValueClass())
5372         {
5373             tiDeclaredThis.MakeByRef();
5374
5375             // we allow a read-only tThis on any field access (even stores!), because if the
5376             // class implementor wants to prohibit stores he should make the field private.
5377             // we do this by setting the read-only bit on the type we compare tThis to.
5378             tiDeclaredThis.SetIsReadonlyByRef();
5379         }
5380         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5381         {
5382             // Any field access is legal on "uninitialized" this pointers.
5383             // The easiest way to implement this is to simply set the
5384             // initialized bit for the duration of the type check on the
5385             // field access only.  It does not change the state of the "this"
5386             // for the function as a whole. Note that the "tThis" is a copy
5387             // of the original "this" type (*tiThis) passed in.
5388             tThis.SetInitialisedObjRef();
5389         }
5390
5391         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5392     }
5393
5394     // Presently the JIT does not check that we don't store or take the address of init-only fields
5395     // since we cannot guarantee their immutability and it is not a security issue.
5396
5397     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5398     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5399                    "field has unsatisfied class constraints");
5400     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5401     {
5402         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5403                "Accessing protected method through wrong type.");
5404     }
5405 }
5406
5407 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5408 {
5409     if (tiOp1.IsNumberType())
5410     {
5411 #ifdef _TARGET_64BIT_
5412         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5413 #else  // !_TARGET_64BIT_
5414         // [10/17/2013] Consider changing this: putting on my verification lawyer hat,
5415         // this is non-conforming to the ECMA spec: types don't have to be equivalent,
5416         // merely compatible, since we can coalesce native int with int32 (see section III.1.5).
5417         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5418 #endif // !_TARGET_64BIT_
5419     }
5420     else if (tiOp1.IsObjRef())
5421     {
5422         switch (opcode)
5423         {
5424             case CEE_BEQ_S:
5425             case CEE_BEQ:
5426             case CEE_BNE_UN_S:
5427             case CEE_BNE_UN:
5428             case CEE_CEQ:
5429             case CEE_CGT_UN:
5430                 break;
5431             default:
5432                 Verify(FALSE, "Cond not allowed on object types");
5433         }
5434         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5435     }
5436     else if (tiOp1.IsByRef())
5437     {
5438         Verify(tiOp2.IsByRef(), "Cond type mismatch");
5439     }
5440     else
5441     {
5442         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5443     }
5444 }
5445
5446 void Compiler::verVerifyThisPtrInitialised()
5447 {
5448     if (verTrackObjCtorInitState)
5449     {
5450         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5451     }
5452 }
5453
5454 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5455 {
5456     // Either target == context, in which case we are calling an alternate .ctor,
5457     // or target is the immediate parent of context.
5458
5459     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
5460 }
5461
5462 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr              thisPtr,
5463                                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
5464                                         CORINFO_CALL_INFO*      pCallInfo)
5465 {
5466     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5467     {
5468         NO_WAY("Virtual call to a function added via EnC is not supported");
5469     }
5470
5471     // CoreRT generic virtual method
5472     if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5473     {
5474         GenTreePtr runtimeMethodHandle = nullptr;
5475         if (pCallInfo->exactContextNeedsRuntimeLookup)
5476         {
5477             runtimeMethodHandle =
5478                 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5479         }
5480         else
5481         {
5482             runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5483         }
5484         return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5485                                    gtNewArgList(thisPtr, runtimeMethodHandle));
5486     }
5487
5488 #ifdef FEATURE_READYTORUN_COMPILER
5489     if (opts.IsReadyToRun())
5490     {
5491         if (!pCallInfo->exactContextNeedsRuntimeLookup)
5492         {
5493             GenTreeCall* call =
5494                 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5495
5496             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5497
5498             return call;
5499         }
5500
5501         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5502         if (IsTargetAbi(CORINFO_CORERT_ABI))
5503         {
5504             GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5505
5506             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5507                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5508         }
5509     }
5510 #endif
5511
5512     // Get the exact descriptor for the static callsite
5513     GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5514     if (exactTypeDesc == nullptr)
5515     { // compDonotInline()
5516         return nullptr;
5517     }
5518
5519     GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5520     if (exactMethodDesc == nullptr)
5521     { // compDonotInline()
5522         return nullptr;
5523     }
5524
5525     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5526
5527     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5528
5529     helpArgs = gtNewListNode(thisPtr, helpArgs);
5530
5531     // Call helper function.  This gets the target address of the final destination callsite.
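    //
    // Roughly, the call produced here is CORINFO_HELP_VIRTUAL_FUNC_PTR(thisPtr, exactTypeDesc, exactMethodDesc).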
5532
5533     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5534 }
5535
5536 //------------------------------------------------------------------------
5537 // impImportAndPushBox: build and import a value-type box
5538 //
5539 // Arguments:
5540 //   pResolvedToken - resolved token from the box operation
5541 //
5542 // Return Value:
5543 //   None.
5544 //
5545 // Side Effects:
5546 //   The value to be boxed is popped from the stack, and a tree for
5547 //   the boxed value is pushed. This method may create upstream
5548 //   statements, spill side effecting trees, and create new temps.
5549 //
5550 //   If importing an inlinee, we may also discover the inline must
5551 //   fail. If so there is no new value pushed on the stack. Callers
5552 //   should use compDonotInline() after calling this method to see if
5553 //   ongoing importation should be aborted.
5554 //
5555 // Notes:
5556 //   Boxing of ref classes results in the same value as the value on
5557 //   the top of the stack, so is handled inline in impImportBlockCode
5558 //   for the CEE_BOX case. Only value or primitive type boxes make it
5559 //   here.
5560 //
5561 //   Boxing for nullable types is done via a helper call; boxing
5562 //   of other value types is expanded inline or handled via helper
5563 //   call, depending on the jit's codegen mode.
5564 //
5565 //   When the jit is operating in size and time constrained modes,
5566 //   using a helper call here can save jit time and code size. But it
5567 //   also may inhibit cleanup optimizations that could have had an
5568 //   even greater effect on code size and jit time. An optimal
5569 //   strategy may need to peek ahead and see if it is easy to tell how
5570 //   the box is being used. For now, we defer.
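//
//   As a rough illustration (assuming an int32 is being boxed), the inline
//   expansion below conceptually produces
//
//       boxTemp = allocate(<int32 class>);
//       *(boxTemp + TARGET_POINTER_SIZE) = <value>;
//       <push boxTemp>
//
//   while the helper expansion is a single call of the form
//
//       <push> CORINFO_HELP_BOX(<class handle>, &<value>)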
5571
5572 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5573 {
5574     // Spill any special side effects
5575     impSpillSpecialSideEff();
5576
5577     // Get the expression to box from the stack.
5578     GenTreePtr           op1       = nullptr;
5579     GenTreePtr           op2       = nullptr;
5580     StackEntry           se        = impPopStack();
5581     CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
5582     GenTreePtr           exprToBox = se.val;
5583
5584     // Look at what helper we should use.
5585     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5586
5587     // Determine what expansion to prefer.
5588     //
5589     // In size/time/debuggable constrained modes, the helper call
5590     // expansion for box is generally smaller and is preferred, unless
5591     // the value to box is a struct that comes from a call. In that
5592     // case the call can construct its return value directly into the
5593     // box payload, saving possibly some up-front zeroing.
5594     //
5595     // Currently primitive type boxes always get inline expanded. We may
5596     // want to do the same for small structs if they don't come from
5597     // calls and don't have GC pointers, since explicitly copying such
5598     // structs is cheap.
5599     JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
5600     bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
5601     bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
5602     bool expandInline    = canExpandInline && !optForSize;
5603
5604     if (expandInline)
5605     {
5606         JITDUMP(" inline allocate/copy sequence\n");
5607
5608         // We are doing 'normal' boxing. This means that we can inline the box operation:
5609         // Box(expr) gets morphed into
5610         // temp = new(clsHnd)
5611         // cpobj(temp+4, expr, clsHnd)
5612         // push temp
5613         // The code paths below differ slightly for structs and primitives because
5614         // "cpobj" differs in these cases.  In one case you get
5615         //    impAssignStructPtr(temp+4, expr, clsHnd)
5616         // and in the other you get
5617         //    *(temp+4) = expr
5618
5619         if (opts.MinOpts() || opts.compDbgCode)
5620         {
5621             // For minopts/debug code, try and minimize the total number
5622             // of box temps by reusing an existing temp when possible.
5623             if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5624             {
5625                 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
5626             }
5627         }
5628         else
5629         {
5630             // When optimizing, use a new temp for each box operation
5631             // since we then know the exact class of the box temp.
5632             impBoxTemp                  = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
5633             lvaTable[impBoxTemp].lvType = TYP_REF;
5634             const bool isExact          = true;
5635             lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
5636         }
5637
5638         // The box temp needs to stay in use until this box expression is appended to
5639         // some other node.  We approximate this by keeping it alive until
5640         // the opcode stack becomes empty.
5641         impBoxTempInUse = true;
5642
5643 #ifdef FEATURE_READYTORUN_COMPILER
5644         bool usingReadyToRunHelper = false;
5645
5646         if (opts.IsReadyToRun())
5647         {
5648             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5649             usingReadyToRunHelper = (op1 != nullptr);
5650         }
5651
5652         if (!usingReadyToRunHelper)
5653 #endif
5654         {
5655             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5656             // and the newfast call with a single call to a dynamic R2R cell that will:
5657             //      1) Load the context
5658             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5659             //      3) Allocate and return the new object for boxing
5660             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5661
5662             // Ensure that the value class is restored
5663             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5664             if (op2 == nullptr)
5665             {
5666                 // We must be backing out of an inline.
5667                 assert(compDonotInline());
5668                 return;
5669             }
5670
5671             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
5672                                     pResolvedToken->hClass, TYP_REF, op2);
5673         }
5674
5675         /* Remember that this basic block contains 'new' of an object, and so does this method */
5676         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5677         optMethodFlags |= OMF_HAS_NEWOBJ;
5678
5679         GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5680
5681         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5682
5683         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5684         op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
5685         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5686
5687         if (varTypeIsStruct(exprToBox))
5688         {
5689             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5690             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5691         }
5692         else
5693         {
5694             var_types lclTyp = exprToBox->TypeGet();
5695             if (lclTyp == TYP_BYREF)
5696             {
5697                 lclTyp = TYP_I_IMPL;
5698             }
5699             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5700             if (impIsPrimitive(jitType))
5701             {
5702                 lclTyp = JITtype2varType(jitType);
5703             }
5704             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5705                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5706             var_types srcTyp = exprToBox->TypeGet();
5707             var_types dstTyp = lclTyp;
5708
5709             if (srcTyp != dstTyp)
5710             {
5711                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5712                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5713                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5714             }
5715             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5716         }
5717
5718         // Spill eval stack to flush out any pending side effects.
5719         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
5720
5721         // Set up this copy as a second assignment.
5722         GenTreePtr copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5723
5724         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5725
5726         // Record that this is a "box" node and keep track of the matching parts.
5727         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
5728
5729         // If it is a value class, mark the "box" node.  We can use this information
5730         // to optimise several cases:
5731         //    "box(x) == null" --> false
5732         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5733         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5734
5735         op1->gtFlags |= GTF_BOX_VALUE;
5736         assert(op1->IsBoxedValue());
5737         assert(asg->gtOper == GT_ASG);
5738     }
5739     else
5740     {
5741         // Don't optimize, just call the helper and be done with it.
5742         JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
5743         assert(operCls != nullptr);
5744
5745         // Ensure that the value class is restored
5746         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5747         if (op2 == nullptr)
5748         {
5749             // We must be backing out of an inline.
5750             assert(compDonotInline());
5751             return;
5752         }
5753
5754         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5755         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, args);
5756     }
5757
5758     /* Push the result back on the stack, */
5759     /* even if clsHnd is a value class we want the TI_REF */
5760     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5761     impPushOnStack(op1, tiRetVal);
5762 }
5763
5764 //------------------------------------------------------------------------
5765 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5766 //
5767 // Arguments:
5768 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5769 //                     by a call to CEEInfo::resolveToken().
5770 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5771 //                by a call to CEEInfo::getCallInfo().
5772 //
5773 // Assumptions:
5774 //    The multi-dimensional array constructor arguments (array dimensions) are
5775 //    pushed on the IL stack on entry to this method.
5776 //
5777 // Notes:
5778 //    Multi-dimensional array constructors are imported as calls to a JIT
5779 //    helper, not as regular calls.
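//
//    For example, IL such as "newobj instance void int32[,]::.ctor(int32, int32)"
//    reaches this method with the two dimension values on the stack and is turned
//    into a call to one of the CORINFO_HELP_NEW_MDARR* helpers below.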
5780
5781 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5782 {
5783     GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5784     if (classHandle == nullptr)
5785     { // compDonotInline()
5786         return;
5787     }
5788
5789     assert(pCallInfo->sig.numArgs);
5790
5791     GenTreePtr      node;
5792     GenTreeArgList* args;
5793
5794     //
5795     // There are two different JIT helpers that can be used to allocate
5796     // multi-dimensional arrays:
5797     //
5798     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5799     //      This variant is deprecated. It should be eventually removed.
5800     //
5801     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5802     //      pointer to block of int32s. This variant is more portable.
5803     //
5804     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5805     // unconditionally would require ReadyToRun version bump.
5806     //
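    // Conceptually, the two helpers are called as:
    //
    //    CORINFO_HELP_NEW_MDARR(classHandle, numArgs, dim0, dim1, ...)       (varargs)
    //    CORINFO_HELP_NEW_MDARR_NONVARARG(classHandle, numArgs, &dimsBlock)  (pointer to int32 block)
    //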
5807     CLANG_FORMAT_COMMENT_ANCHOR;
5808
5809     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5810     {
5811
5812         // Reuse the temp used to pass the array dimensions to avoid bloating
5813         // the stack frame in case there are multiple calls to multi-dim array
5814         // constructors within a single method.
5815         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5816         {
5817             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5818             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5819             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5820         }
5821
5822         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5823         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5824         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5825             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5826
5827         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5828         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5829         // to one allocation at a time.
5830         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5831
5832         //
5833         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5834         //  - Array class handle
5835         //  - Number of dimension arguments
5836         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5837         //
5838
5839         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5840         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5841
5842         // Pop dimension arguments from the stack one at a time and store it
5843         // into lvaNewObjArrayArgs temp.
5844         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5845         {
5846             GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5847
5848             GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5849             dest            = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5850             dest            = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5851                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5852             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5853
5854             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5855         }
5856
5857         args = gtNewArgList(node);
5858
5859         // pass number of arguments to the helper
5860         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5861
5862         args = gtNewListNode(classHandle, args);
5863
5864         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
5865     }
5866     else
5867     {
5868         //
5869         // The varargs helper needs the type and method handles as the last
5870         // and last-1 params (this is a cdecl call, so args will be
5871         // pushed in reverse order on the CPU stack).
5872         //
5873
5874         args = gtNewArgList(classHandle);
5875
5876         // pass number of arguments to the helper
5877         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5878
5879         unsigned argFlags = 0;
5880         args              = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
5881
5882         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
5883
5884         // varargs, so we pop the arguments
5885         node->gtFlags |= GTF_CALL_POP_ARGS;
5886
5887 #ifdef DEBUG
5888         // At the present time we don't track Caller pop arguments
5889         // that have GC references in them
5890         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5891         {
5892             assert(temp->Current()->gtType != TYP_REF);
5893         }
5894 #endif
5895     }
5896
5897     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5898     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5899
5900     // Remember that this basic block contains 'new' of a md array
5901     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5902
5903     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5904 }
5905
5906 GenTreePtr Compiler::impTransformThis(GenTreePtr              thisPtr,
5907                                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5908                                       CORINFO_THIS_TRANSFORM  transform)
5909 {
5910     switch (transform)
5911     {
5912         case CORINFO_DEREF_THIS:
5913         {
5914             GenTreePtr obj = thisPtr;
5915
5916             // This does a LDIND on the obj, which should be a byref pointing to a ref.
5917             impBashVarAddrsToI(obj);
5918             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5919             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5920
5921             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5922             // ldind could point anywhere, e.g. a boxed class static int
5923             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5924
5925             return obj;
5926         }
5927
5928         case CORINFO_BOX_THIS:
5929         {
5930             // Constraint calls where there might be no
5931             // unboxed entry point require us to implement the call via helper.
5932             // These only occur when a possible target of the call
5933             // may have inherited an implementation of an interface
5934             // method from System.Object or System.ValueType.  The EE does not provide us with
5935             // "unboxed" versions of these methods.
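            //
            // For example, "constrained. <some value type> callvirt Object::ToString()" where the
            // value type does not provide its own ToString ends up here: the byref 'this' is
            // dereferenced, boxed via impImportAndPushBox below, and the boxed object is then
            // used as 'this' for the virtual call.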
5936
5937             GenTreePtr obj = thisPtr;
5938
5939             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5940             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5941             obj->gtFlags |= GTF_EXCEPT;
5942
5943             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5944             var_types   objType = JITtype2varType(jitTyp);
5945             if (impIsPrimitive(jitTyp))
5946             {
5947                 if (obj->OperIsBlk())
5948                 {
5949                     obj->ChangeOperUnchecked(GT_IND);
5950
5951                     // Obj could point anywhere, e.g. a boxed class static int
5952                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5953                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5954                 }
5955
5956                 obj->gtType = JITtype2varType(jitTyp);
5957                 assert(varTypeIsArithmetic(obj->gtType));
5958             }
5959
5960             // This pushes on the dereferenced byref
5961             // This is then used immediately to box.
5962             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5963
5964             // This pops off the byref-to-a-value-type remaining on the stack and
5965             // replaces it with a boxed object.
5966             // This is then used as the object to the virtual call immediately below.
5967             impImportAndPushBox(pConstrainedResolvedToken);
5968             if (compDonotInline())
5969             {
5970                 return nullptr;
5971             }
5972
5973             obj = impPopStack().val;
5974             return obj;
5975         }
5976         case CORINFO_NO_THIS_TRANSFORM:
5977         default:
5978             return thisPtr;
5979     }
5980 }
5981
5982 //------------------------------------------------------------------------
5983 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5984 //
5985 // Return Value:
5986 //    true if PInvoke inlining should be enabled in the current method, false otherwise
5987 //
5988 // Notes:
5989 //    Checks a number of ambient conditions where we could inline a pinvoke but choose not to
5990
5991 bool Compiler::impCanPInvokeInline()
5992 {
5993     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5994            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5995         ;
5996 }
5997
5998 //------------------------------------------------------------------------
5999 // impCanPInvokeInlineCallSite: basic legality checks using information
6000 // from a call to see if the call qualifies as an inline pinvoke.
6001 //
6002 // Arguments:
6003 //    block      - block containing the call, or for inlinees, block
6004 //                 containing the call being inlined
6005 //
6006 // Return Value:
6007 //    true if this call can legally qualify as an inline pinvoke, false otherwise
6008 //
6009 // Notes:
6010 //    For runtimes that support exception handling interop there are
6011 //    restrictions on using inline pinvoke in handler regions.
6012 //
6013 //    * We have to disable pinvoke inlining inside of filters because
6014 //    in case the main execution (i.e. in the try block) is inside
6015 //    unmanaged code, we cannot reuse the inlined stub (we still need
6016 //    the original state until we are in the catch handler)
6017 //
6018 //    * We disable pinvoke inlining inside handlers since the GSCookie
6019 //    is in the inlined Frame (see
6020 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
6021 //    this would not protect framelets/return-address of handlers.
6022 //
6023 //    These restrictions are currently also in place for CoreCLR but
6024 //    can be relaxed when coreclr/#8459 is addressed.
6025
6026 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
6027 {
6028     if (block->hasHndIndex())
6029     {
6030         return false;
6031     }
6032
6033     // The remaining limitations do not apply to CoreRT
6034     if (IsTargetAbi(CORINFO_CORERT_ABI))
6035     {
6036         return true;
6037     }
6038
6039 #ifdef _TARGET_AMD64_
6040     // On x64, we disable pinvoke inlining inside of try regions.
6041     // Here is the comment from JIT64 explaining why:
6042     //
6043     //   [VSWhidbey: 611015] - because the jitted code links in the
6044     //   Frame (instead of the stub) we rely on the Frame not being
6045     //   'active' until inside the stub.  This normally happens by the
6046     //   stub setting the return address pointer in the Frame object
6047     //   inside the stub.  On a normal return, the return address
6048     //   pointer is zeroed out so the Frame can be safely re-used, but
6049     //   if an exception occurs, nobody zeros out the return address
6050     //   pointer.  Thus if we re-used the Frame object, it would go
6051     //   'active' as soon as we link it into the Frame chain.
6052     //
6053     //   Technically we only need to disable PInvoke inlining if we're
6054     //   in a handler or if we're in a try body with a catch or
6055     //   filter/except where other non-handler code in this method
6056     //   might run and try to re-use the dirty Frame object.
6057     //
6058     //   A desktop test case where this seems to matter is
6059     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
6060     if (block->hasTryIndex())
6061     {
6062         return false;
6063     }
6064 #endif // _TARGET_AMD64_
6065
6066     return true;
6067 }
6068
6069 //------------------------------------------------------------------------
6070 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
6071 // whether it can be expressed as an inline pinvoke.
6072 //
6073 // Arguments:
6074 //    call       - tree for the call
6075 //    methHnd    - handle for the method being called (may be null)
6076 //    sig        - signature of the method being called
6077 //    mflags     - method flags for the method being called
6078 //    block      - block containing the call, or for inlinees, block
6079 //                 containing the call being inlined
6080 //
6081 // Notes:
6082 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
6083 //
6084 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
6085 //   call passes a combination of legality and profitability checks.
6086 //
6087 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
6088
6089 void Compiler::impCheckForPInvokeCall(
6090     GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
6091 {
6092     CorInfoUnmanagedCallConv unmanagedCallConv;
6093
6094     // If VM flagged it as Pinvoke, flag the call node accordingly
6095     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
6096     {
6097         call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
6098     }
6099
6100     if (methHnd)
6101     {
6102         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
6103         {
6104             return;
6105         }
6106
6107         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
6108     }
6109     else
6110     {
6111         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
6112         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
6113         {
6114             // Used by the IL Stubs.
6115             callConv = CORINFO_CALLCONV_C;
6116         }
6117         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
6118         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
6119         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
6120         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
6121
6122         assert(!call->gtCallCookie);
6123     }
6124
6125     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
6126         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
6127     {
6128         return;
6129     }
6130     optNativeCallCount++;
6131
6132     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
6133     {
6134         // PInvoke CALLI in IL stubs must be inlined
6135     }
6136     else
6137     {
6138         // Check legality
6139         if (!impCanPInvokeInlineCallSite(block))
6140         {
6141             return;
6142         }
6143
6144         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
6145         // profitability checks
6146         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
6147         {
6148             if (!impCanPInvokeInline())
6149             {
6150                 return;
6151             }
6152
6153             // Size-speed tradeoff: don't use inline pinvoke at rarely
6154             // executed call sites.  The non-inline version is more
6155             // compact.
6156             if (block->isRunRarely())
6157             {
6158                 return;
6159             }
6160         }
6161
6162         // The expensive check should be last
6163         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
6164         {
6165             return;
6166         }
6167     }
6168
6169     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
6170
6171     call->gtFlags |= GTF_CALL_UNMANAGED;
6172     info.compCallUnmanaged++;
6173
6174     // AMD64 convention is same for native and managed
6175     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
6176     {
6177         call->gtFlags |= GTF_CALL_POP_ARGS;
6178     }
6179
6180     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
6181     {
6182         call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
6183     }
6184 }
6185
6186 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
6187 {
6188     var_types callRetTyp = JITtype2varType(sig->retType);
6189
6190     /* The function pointer is on top of the stack - It may be a
6191      * complex expression. As it is evaluated after the args,
6192      * it may cause registered args to be spilled. Simply spill it.
6193      */
6194
6195     // Ignore this trivial case.
6196     if (impStackTop().val->gtOper != GT_LCL_VAR)
6197     {
6198         impSpillStackEntry(verCurrentState.esStackDepth - 1,
6199                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
6200     }
6201
6202     /* Get the function pointer */
6203
6204     GenTreePtr fptr = impPopStack().val;
6205
6206     // The function pointer is typically sized to match the target pointer size
6207     // However, stubgen IL optimization can change LDC.I8 to LDC.I4
6208     // See ILCodeStream::LowerOpcode
6209     assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
6210
6211 #ifdef DEBUG
6212     // This temporary must never be converted to a double in stress mode,
6213     // because that can introduce a call to the cast helper after the
6214     // arguments have already been evaluated.
6215
6216     if (fptr->OperGet() == GT_LCL_VAR)
6217     {
6218         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
6219     }
6220 #endif
6221
6222     /* Create the call node */
6223
6224     GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6225
6226     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6227
6228     return call;
6229 }
6230
6231 /*****************************************************************************/
6232
6233 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
6234 {
6235     assert(call->gtFlags & GTF_CALL_UNMANAGED);
6236
6237     /* Since we push the arguments in reverse order (i.e. right -> left)
6238      * spill any side effects from the stack
6239      *
6240      * OBS: If there is only one side effect we do not need to spill it
6241      *      thus we have to spill all side-effects except last one
6242      */
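    // Illustration: if the popped argument range is [a0, a1, a2] and only a0 and a2
    // carry GTF_SIDE_EFFECT, the loop below spills a0 but leaves a2 (the last side
    // effect) on the stack.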
6243
6244     unsigned lastLevelWithSideEffects = UINT_MAX;
6245
6246     unsigned argsToReverse = sig->numArgs;
6247
6248     // For "thiscall", the first argument goes in a register. Since its
6249     // order does not need to be changed, we do not need to spill it
6250
6251     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6252     {
6253         assert(argsToReverse);
6254         argsToReverse--;
6255     }
6256
6257 #ifndef _TARGET_X86_
6258     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
6259     argsToReverse = 0;
6260 #endif
6261
6262     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
6263     {
6264         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
6265         {
6266             assert(lastLevelWithSideEffects == UINT_MAX);
6267
6268             impSpillStackEntry(level,
6269                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
6270         }
6271         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
6272         {
6273             if (lastLevelWithSideEffects != UINT_MAX)
6274             {
6275                 /* We had a previous side effect - must spill it */
6276                 impSpillStackEntry(lastLevelWithSideEffects,
6277                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
6278
6279                 /* Record the level for the current side effect in case we will spill it */
6280                 lastLevelWithSideEffects = level;
6281             }
6282             else
6283             {
6284                 /* This is the first side effect encountered - record its level */
6285
6286                 lastLevelWithSideEffects = level;
6287             }
6288         }
6289     }
6290
6291     /* The argument list is now "clean" - no out-of-order side effects
6292      * Pop the argument list in reverse order */
6293
6294     GenTreePtr args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
6295
6296     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6297     {
6298         GenTreePtr thisPtr = args->Current();
6299         impBashVarAddrsToI(thisPtr);
6300         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
6301     }
6302
6303     if (args)
6304     {
6305         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6306     }
6307 }
6308
6309 //------------------------------------------------------------------------
6310 // impInitClass: Build a node to initialize the class before accessing the
6311 //               field if necessary
6312 //
6313 // Arguments:
6314 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6315 //                     by a call to CEEInfo::resolveToken().
6316 //
6317 // Return Value: If needed, a pointer to the node that will perform the class
6318 //               initialization.  Otherwise, nullptr.
6319 //
6320
6321 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6322 {
6323     CorInfoInitClassResult initClassResult =
6324         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
6325
6326     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6327     {
6328         return nullptr;
6329     }
6330     BOOL runtimeLookup;
6331
6332     GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6333
6334     if (node == nullptr)
6335     {
6336         assert(compDonotInline());
6337         return nullptr;
6338     }
6339
6340     if (runtimeLookup)
6341     {
6342         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6343     }
6344     else
6345     {
6346         // Call the shared non-GC static helper, as it's the fastest
6347         node = fgGetSharedCCtor(pResolvedToken->hClass);
6348     }
6349
6350     return node;
6351 }
6352
6353 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6354 {
6355     GenTreePtr op1 = nullptr;
6356
6357     switch (lclTyp)
6358     {
6359         int     ival;
6360         __int64 lval;
6361         double  dval;
6362
6363         case TYP_BOOL:
6364             ival = *((bool*)fldAddr);
6365             goto IVAL_COMMON;
6366
6367         case TYP_BYTE:
6368             ival = *((signed char*)fldAddr);
6369             goto IVAL_COMMON;
6370
6371         case TYP_UBYTE:
6372             ival = *((unsigned char*)fldAddr);
6373             goto IVAL_COMMON;
6374
6375         case TYP_SHORT:
6376             ival = *((short*)fldAddr);
6377             goto IVAL_COMMON;
6378
6379         case TYP_USHORT:
6380             ival = *((unsigned short*)fldAddr);
6381             goto IVAL_COMMON;
6382
6383         case TYP_UINT:
6384         case TYP_INT:
6385             ival = *((int*)fldAddr);
6386         IVAL_COMMON:
6387             op1 = gtNewIconNode(ival);
6388             break;
6389
6390         case TYP_LONG:
6391         case TYP_ULONG:
6392             lval = *((__int64*)fldAddr);
6393             op1  = gtNewLconNode(lval);
6394             break;
6395
6396         case TYP_FLOAT:
6397             dval = *((float*)fldAddr);
6398             op1  = gtNewDconNode(dval);
6399 #if !FEATURE_X87_DOUBLES
6400             // The X87 stack doesn't differentiate between float/double,
6401             // so R4 is treated as R8 there; everybody else keeps the distinction.
6402             op1->gtType = TYP_FLOAT;
6403 #endif // FEATURE_X87_DOUBLES
6404             break;
6405
6406         case TYP_DOUBLE:
6407             dval = *((double*)fldAddr);
6408             op1  = gtNewDconNode(dval);
6409             break;
6410
6411         default:
6412             assert(!"Unexpected lclTyp");
6413             break;
6414     }
6415
6416     return op1;
6417 }
6418
6419 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6420                                                 CORINFO_ACCESS_FLAGS    access,
6421                                                 CORINFO_FIELD_INFO*     pFieldInfo,
6422                                                 var_types               lclTyp)
6423 {
6424     GenTreePtr op1;
6425
6426     switch (pFieldInfo->fieldAccessor)
6427     {
6428         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6429         {
6430             assert(!compIsForInlining());
6431
6432             // We first call a special helper to get the statics base pointer
6433             op1 = impParentClassTokenToHandle(pResolvedToken);
6434
6435             // compIsForInlining() is false, so we should never get NULL here
6436             assert(op1 != nullptr);
6437
6438             var_types type = TYP_BYREF;
6439
6440             switch (pFieldInfo->helper)
6441             {
6442                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6443                     type = TYP_I_IMPL;
6444                     break;
6445                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6446                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6447                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6448                     break;
6449                 default:
6450                     assert(!"unknown generic statics helper");
6451                     break;
6452             }
6453
6454             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6455
6456             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6457             op1              = gtNewOperNode(GT_ADD, type, op1,
6458                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6459         }
6460         break;
6461
6462         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6463         {
6464 #ifdef FEATURE_READYTORUN_COMPILER
6465             if (opts.IsReadyToRun())
6466             {
6467                 unsigned callFlags = 0;
6468
6469                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6470                 {
6471                     callFlags |= GTF_CALL_HOISTABLE;
6472                 }
6473
6474                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6475                 op1->gtFlags |= callFlags;
6476
6477                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6478             }
6479             else
6480 #endif
6481             {
6482                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6483             }
6484
6485             {
6486                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6487                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6488                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6489             }
6490             break;
6491         }
6492
6493         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6494         {
6495 #ifdef FEATURE_READYTORUN_COMPILER
6496             noway_assert(opts.IsReadyToRun());
6497             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6498             assert(kind.needsRuntimeLookup);
6499
6500             GenTreePtr      ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6501             GenTreeArgList* args    = gtNewArgList(ctxTree);
6502
6503             unsigned callFlags = 0;
6504
6505             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6506             {
6507                 callFlags |= GTF_CALL_HOISTABLE;
6508             }
6509             var_types type = TYP_BYREF;
6510             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6511             op1->gtFlags |= callFlags;
6512
6513             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6514             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6515             op1              = gtNewOperNode(GT_ADD, type, op1,
6516                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6517 #else
6518             unreached();
6519 #endif // FEATURE_READYTORUN_COMPILER
6520         }
6521         break;
6522
6523         default:
6524         {
6525             if (!(access & CORINFO_ACCESS_ADDRESS))
6526             {
6527                 // In the future, it may be better to just create the right tree here instead of folding it later.
6528                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6529
6530                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6531                 {
6532                     op1->gtFlags |= GTF_FLD_INITCLASS;
6533                 }
6534
6535                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6536                 {
6537                     op1->gtType = TYP_REF; // points at boxed object
6538                     FieldSeqNode* firstElemFldSeq =
6539                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6540                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6541                                         new (this, GT_CNS_INT)
6542                                             GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
6543
6544                     if (varTypeIsStruct(lclTyp))
6545                     {
6546                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
6547                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
6548                     }
6549                     else
6550                     {
6551                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6552                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6553                     }
6554                 }
6555
6556                 return op1;
6557             }
6558             else
6559             {
6560                 void** pFldAddr = nullptr;
6561                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6562
6563                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6564
6565                 /* Create the data member node */
6566                 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
6567                                           fldSeq);
6568
6569                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6570                 {
6571                     op1->gtFlags |= GTF_ICON_INITCLASS;
6572                 }
6573
6574                 if (pFldAddr != nullptr)
6575                 {
6576                     // There are two cases here: either the static is RVA-based,
6577                     // in which case the type of the FIELD node is not a GC type
6578                     // and the handle to the RVA is a TYP_I_IMPL, or the FIELD node is
6579                     // a GC type and the handle to it is a TYP_BYREF into the GC heap
6580                     // (because handles to statics now go into the large object heap).
6581
6582                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6583                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6584                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6585                 }
6586             }
6587             break;
6588         }
6589     }
6590
6591     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6592     {
6593         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6594
6595         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6596
6597         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6598                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
6599     }
6600
6601     if (!(access & CORINFO_ACCESS_ADDRESS))
6602     {
6603         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6604         op1->gtFlags |= GTF_GLOB_REF;
6605     }
6606
6607     return op1;
6608 }
6609
6610 // In general try to call this before most of the verification work.  Most people expect the access
6611 // exceptions before the verification exceptions.  If you do this after, that usually doesn't happen.  It turns
6612 // out that if you can't access something, we also think that you're unverifiable for other reasons.
6613 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6614 {
6615     if (result != CORINFO_ACCESS_ALLOWED)
6616     {
6617         impHandleAccessAllowedInternal(result, helperCall);
6618     }
6619 }
6620
6621 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6622 {
6623     switch (result)
6624     {
6625         case CORINFO_ACCESS_ALLOWED:
6626             break;
6627         case CORINFO_ACCESS_ILLEGAL:
6628             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6629             // method is verifiable.  Otherwise, delay the exception to runtime.
6630             if (compIsForImportOnly())
6631             {
6632                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6633             }
6634             else
6635             {
6636                 impInsertHelperCall(helperCall);
6637             }
6638             break;
6639         case CORINFO_ACCESS_RUNTIME_CHECK:
6640             impInsertHelperCall(helperCall);
6641             break;
6642     }
6643 }
6644
6645 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6646 {
6647     // Construct the argument list
6648     GenTreeArgList* args = nullptr;
6649     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6650     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6651     {
6652         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6653         GenTreePtr                currentArg = nullptr;
6654         switch (helperArg.argType)
6655         {
6656             case CORINFO_HELPER_ARG_TYPE_Field:
6657                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6658                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6659                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6660                 break;
6661             case CORINFO_HELPER_ARG_TYPE_Method:
6662                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6663                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6664                 break;
6665             case CORINFO_HELPER_ARG_TYPE_Class:
6666                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6667                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6668                 break;
6669             case CORINFO_HELPER_ARG_TYPE_Module:
6670                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6671                 break;
6672             case CORINFO_HELPER_ARG_TYPE_Const:
6673                 currentArg = gtNewIconNode(helperArg.constant);
6674                 break;
6675             default:
6676                 NO_WAY("Illegal helper arg type");
6677         }
6678         args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6679     }
6680
6681     /* TODO-Review:
6682      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6683      * Also, consider sticking this in the first basic block.
6684      */
6685     GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
6686     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6687 }
6688
6689 // Checks whether the return types of caller and callee are compatible
6690 // so that the callee can be tail called. Note that here we don't check
6691 // compatibility in the IL Verifier sense, but only that the return type
6692 // sizes are equal and the values get returned in the same return register.
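//
// For example, a caller and callee both returning TYP_INT are trivially compatible;
// on AMD64/ARM64 a TYP_VOID caller is also accepted, to support the Jit64
// "tail.call; pop; ret" pattern handled below.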
6693 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6694                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6695                                             var_types            calleeRetType,
6696                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6697 {
6698     // Note that we can not relax this condition with genActualType() as the
6699     // calling convention dictates that the caller of a function with a small
6700     // typed return value is responsible for normalizing the return val.
6701     if (callerRetType == calleeRetType)
6702     {
6703         return true;
6704     }
6705
6706     // If the class handles are the same and not null, the return types are compatible.
6707     if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
6708     {
6709         return true;
6710     }
6711
6712 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6713     // Jit64 compat:
6714     if (callerRetType == TYP_VOID)
6715     {
6716         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6717         //     tail.call
6718         //     pop
6719         //     ret
6720         //
6721         // Note that the above IL pattern is not valid as per IL verification rules.
6722         // Therefore, only full trust code can take advantage of this pattern.
6723         return true;
6724     }
6725
6726     // These checks return true if the return value type sizes are the same and
6727     // get returned in the same return register, i.e. the caller doesn't need to normalize the
6728     // return value. Some of the tail calls permitted by the checks below would have
6729     // been rejected by IL Verifier before we reached here.  Therefore, only full
6730     // trust code can make those tail calls.
6731     unsigned callerRetTypeSize = 0;
6732     unsigned calleeRetTypeSize = 0;
6733     bool     isCallerRetTypMBEnreg =
6734         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6735     bool isCalleeRetTypMBEnreg =
6736         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6737
6738     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6739     {
6740         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6741     }
6742 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6743
6744     return false;
6745 }
6746
6747 // For prefixFlags
6748 enum
6749 {
6750     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6751     PREFIX_TAILCALL_IMPLICIT =
6752         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6753     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6754     PREFIX_VOLATILE    = 0x00000100,
6755     PREFIX_UNALIGNED   = 0x00001000,
6756     PREFIX_CONSTRAINED = 0x00010000,
6757     PREFIX_READONLY    = 0x00100000
6758 };
6759
6760 /********************************************************************************
6761  *
6762  * Returns true if the current opcode and the opcodes following it correspond
6763  * to a supported tail call IL pattern.
6764  *
6765  */
6766 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6767                                       OPCODE      curOpcode,
6768                                       const BYTE* codeAddrOfNextOpcode,
6769                                       const BYTE* codeEnd,
6770                                       bool        isRecursive,
6771                                       bool*       isCallPopAndRet /* = nullptr */)
6772 {
6773     // Bail out if the current opcode is not a call.
6774     if (!impOpcodeIsCallOpcode(curOpcode))
6775     {
6776         return false;
6777     }
6778
6779 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6780     // If shared ret tail opt is not enabled, we will enable
6781     // it for recursive methods.
6782     if (isRecursive)
6783 #endif
6784     {
6785         // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
6786         // part of the sequence. Make sure we don't go past the end of the IL, however.
6787         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6788     }
6789
6790     // Bail out if there is no next opcode after call
6791     if (codeAddrOfNextOpcode >= codeEnd)
6792     {
6793         return false;
6794     }
6795
6796     // Scan the opcodes to look for the following IL patterns if either
6797     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6798     //  ii) if tail prefixed, IL verification is not needed for the method.
6799     //
6800     // Only in the above two cases can we allow the tail call patterns below,
6801     // which violate the ECMA spec.
6802     //
6803     // Pattern1:
6804     //       call
6805     //       nop*
6806     //       ret
6807     //
6808     // Pattern2:
6809     //       call
6810     //       nop*
6811     //       pop
6812     //       nop*
6813     //       ret
6814     int    cntPop = 0;
6815     OPCODE nextOpcode;
6816
6817 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6818     do
6819     {
6820         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6821         codeAddrOfNextOpcode += sizeof(__int8);
6822     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6823              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6824              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6825                                                                                          // one pop seen so far.
6826 #else
6827     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6828 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6829
6830     if (isCallPopAndRet)
6831     {
6832         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6833         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6834     }
6835
6836 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6837     // Jit64 Compat:
6838     // Tail call IL pattern could be either of the following
6839     // 1) call/callvirt/calli + ret
6840     // 2) call/callvirt/calli + pop + ret in a method returning void.
6841     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6842 #else
6843     return (nextOpcode == CEE_RET) && (cntPop == 0);
6844 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6845 }
6846
6847 /*****************************************************************************
6848  *
6849  * Determine whether the call could be converted to an implicit tail call
6850  *
6851  */
6852 bool Compiler::impIsImplicitTailCallCandidate(
6853     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6854 {
6855
6856 #if FEATURE_TAILCALL_OPT
6857     if (!opts.compTailCallOpt)
6858     {
6859         return false;
6860     }
6861
6862     if (opts.compDbgCode || opts.MinOpts())
6863     {
6864         return false;
6865     }
6866
6867     // must not be tail prefixed
6868     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6869     {
6870         return false;
6871     }
6872
6873 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6874     // The block containing the call must be marked as BBJ_RETURN.
6875     // We allow shared ret tail call optimization on recursive calls even under
6876     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6877     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6878         return false;
6879 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6880
6881     // must be call+ret or call+pop+ret
6882     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6883     {
6884         return false;
6885     }
6886
6887     return true;
6888 #else
6889     return false;
6890 #endif // FEATURE_TAILCALL_OPT
6891 }
6892
6893 //------------------------------------------------------------------------
6894 // impImportCall: import a call-inspiring opcode
6895 //
6896 // Arguments:
6897 //    opcode                    - opcode that inspires the call
6898 //    pResolvedToken            - resolved token for the call target
6899 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6900 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6901 //    prefixFlags               - IL prefix flags for the call
6902 //    callInfo                  - EE supplied info for the call
6903 //    rawILOffset               - IL offset of the opcode
6904 //
6905 // Returns:
6906 //    Type of the call's return value.
6907 //    If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
6908 //    However we can't assert for this here yet because there are cases we miss. See issue #13272.
6909 //
6910 //
6911 // Notes:
6912 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6913 //
6914 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6915 //    uninitialized object.
6916
6917 #ifdef _PREFAST_
6918 #pragma warning(push)
6919 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6920 #endif
6921
6922 var_types Compiler::impImportCall(OPCODE                  opcode,
6923                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6924                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6925                                   GenTreePtr              newobjThis,
6926                                   int                     prefixFlags,
6927                                   CORINFO_CALL_INFO*      callInfo,
6928                                   IL_OFFSET               rawILOffset)
6929 {
6930     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6931
6932     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6933     var_types              callRetTyp                     = TYP_COUNT;
6934     CORINFO_SIG_INFO*      sig                            = nullptr;
6935     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6936     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6937     unsigned               clsFlags                       = 0;
6938     unsigned               mflags                         = 0;
6939     unsigned               argFlags                       = 0;
6940     GenTreePtr             call                           = nullptr;
6941     GenTreeArgList*        args                           = nullptr;
6942     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6943     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6944     bool                   exactContextNeedsRuntimeLookup = false;
6945     bool                   canTailCall                    = true;
6946     const char*            szCanTailCallFailReason        = nullptr;
6947     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6948     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6949
6950     CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
6951
6952     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6953     // do that before tailcalls, but that is probably not the intended
6954     // semantic. So just disallow tailcalls from synchronized methods.
6955     // Also, popping arguments in a varargs function is more work and NYI.
6956     // If we have a security object, we have to keep our frame around for callers
6957     // to see any imperative security.
6958     if (info.compFlags & CORINFO_FLG_SYNCH)
6959     {
6960         canTailCall             = false;
6961         szCanTailCallFailReason = "Caller is synchronized";
6962     }
6963 #if !FEATURE_FIXED_OUT_ARGS
6964     else if (info.compIsVarArgs)
6965     {
6966         canTailCall             = false;
6967         szCanTailCallFailReason = "Caller is varargs";
6968     }
6969 #endif // FEATURE_FIXED_OUT_ARGS
6970     else if (opts.compNeedSecurityCheck)
6971     {
6972         canTailCall             = false;
6973         szCanTailCallFailReason = "Caller requires a security check.";
6974     }
6975
6976     // We only need to cast the return value of pinvoke inlined calls that return small types
6977
6978     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6979     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6980     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6981     // the time being that the callee might be compiled by the other JIT and thus the return
6982     // value will need to be widened by us (or not widened at all...)
6983
6984     // ReadyToRun code sticks with the default calling convention, which does not widen small return types.
6985
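    // For example (illustrative): if a callee compiled by the other JIT returns a 'byte', the upper
    // bits of the return register may be left undefined, so the caller must insert a cast after the
    // call to normalize (widen) the small-typed return value before using it.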
6986     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6987     bool bIntrinsicImported = false;
6988
6989     CORINFO_SIG_INFO calliSig;
6990     GenTreeArgList*  extraArg = nullptr;
6991
6992     /*-------------------------------------------------------------------------
6993      * First create the call node
6994      */
6995
6996     if (opcode == CEE_CALLI)
6997     {
6998         /* Get the call site sig */
6999         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
7000
7001         callRetTyp = JITtype2varType(calliSig.retType);
7002
7003         call = impImportIndirectCall(&calliSig, ilOffset);
7004
7005         // We don't know the target method, so we have to infer the flags, or
7006         // assume the worst-case.
7007         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
7008
7009 #ifdef DEBUG
7010         if (verbose)
7011         {
7012             unsigned structSize =
7013                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
7014             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7015                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7016         }
7017 #endif
7018         // This should be checked in impImportBlockCode.
7019         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
7020
7021         sig = &calliSig;
7022
7023 #ifdef DEBUG
7024         // We cannot lazily obtain the signature of a CALLI call because it has no method
7025         // handle that we can use, so we need to save its full call signature here.
7026         assert(call->gtCall.callSig == nullptr);
7027         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7028         *call->gtCall.callSig = calliSig;
7029 #endif // DEBUG
7030
7031         if (IsTargetAbi(CORINFO_CORERT_ABI))
7032         {
7033             bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
7034                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
7035                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
7036                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
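            // Under the CoreRT ABI a managed calli target may (roughly speaking) be a "fat"
            // function pointer that also carries a generic instantiation argument, so record
            // the call as a fat-pointer candidate and let a later phase expand the check.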
7037             if (managedCall)
7038             {
7039                 addFatPointerCandidate(call->AsCall());
7040             }
7041         }
7042     }
7043     else // (opcode != CEE_CALLI)
7044     {
7045         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
7046
7047         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
7048         // supply the instantiation parameters necessary to make direct calls to underlying
7049         // shared generic code, rather than calling through instantiating stubs.  If the
7050         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
7051         // must indeed pass an instantiation parameter.
7052
7053         methHnd = callInfo->hMethod;
7054
7055         sig        = &(callInfo->sig);
7056         callRetTyp = JITtype2varType(sig->retType);
7057
7058         mflags = callInfo->methodFlags;
7059
7060 #ifdef DEBUG
7061         if (verbose)
7062         {
7063             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
7064             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7065                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7066         }
7067 #endif
7068         if (compIsForInlining())
7069         {
7070             /* Does this call site have security boundary restrictions? */
7071
7072             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
7073             {
7074                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
7075                 return TYP_UNDEF;
7076             }
7077
7078             /* Does the inlinee need a security check token on the frame */
7079
7080             if (mflags & CORINFO_FLG_SECURITYCHECK)
7081             {
7082                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7083                 return TYP_UNDEF;
7084             }
7085
7086             /* Does the inlinee use StackCrawlMark */
7087
7088             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
7089             {
7090                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
7091                 return TYP_UNDEF;
7092             }
7093
7094             /* For now ignore delegate invoke */
7095
7096             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7097             {
7098                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
7099                 return TYP_UNDEF;
7100             }
7101
7102             /* For now ignore varargs */
7103             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7104             {
7105                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
7106                 return TYP_UNDEF;
7107             }
7108
7109             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7110             {
7111                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
7112                 return TYP_UNDEF;
7113             }
7114
7115             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
7116             {
7117                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
7118                 return TYP_UNDEF;
7119             }
7120         }
7121
7122         clsHnd = pResolvedToken->hClass;
7123
7124         clsFlags = callInfo->classFlags;
7125
7126 #ifdef DEBUG
7127         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
7128
7129         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
7130         // These should be in mscorlib.h, and available through a JIT/EE interface call.
7131         const char* modName;
7132         const char* className;
7133         const char* methodName;
7134         if ((className = eeGetClassName(clsHnd)) != nullptr &&
7135             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
7136             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
7137         {
7138             return impImportJitTestLabelMark(sig->numArgs);
7139         }
7140 #endif // DEBUG
7141
7142         // <NICE> Factor this into getCallInfo </NICE>
7143         bool isSpecialIntrinsic = false;
7144         if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
7145         {
7146             const bool isTail = canTailCall && (tailCall != 0);
7147
7148             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, readonlyCall, isTail,
7149                                 pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID, &isSpecialIntrinsic);
7150
7151             if (compDonotInline())
7152             {
7153                 return TYP_UNDEF;
7154             }
7155
7156             if (call != nullptr)
7157             {
7158                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
7159                        (clsFlags & CORINFO_FLG_FINAL));
7160
7161 #ifdef FEATURE_READYTORUN_COMPILER
7162                 if (call->OperGet() == GT_INTRINSIC)
7163                 {
7164                     if (opts.IsReadyToRun())
7165                     {
7166                         noway_assert(callInfo->kind == CORINFO_CALL);
7167                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
7168                     }
7169                     else
7170                     {
7171                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
7172                     }
7173                 }
7174 #endif
7175
7176                 bIntrinsicImported = true;
7177                 goto DONE_CALL;
7178             }
7179         }
7180
7181 #ifdef FEATURE_SIMD
7182         if (featureSIMD)
7183         {
7184             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
7185             if (call != nullptr)
7186             {
7187                 bIntrinsicImported = true;
7188                 goto DONE_CALL;
7189             }
7190         }
7191 #endif // FEATURE_SIMD
7192
7193         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
7194         {
7195             NO_WAY("Virtual call to a function added via EnC is not supported");
7196         }
7197
7198         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
7199             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7200             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
7201         {
7202             BADCODE("Bad calling convention");
7203         }
7204
7205         //-------------------------------------------------------------------------
7206         //  Construct the call node
7207         //
7208         // Work out what sort of call we're making.
7209         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
7210
7211         constraintCallThisTransform    = callInfo->thisTransform;
7212         exactContextHnd                = callInfo->contextHandle;
7213         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
7214
7215         // A recursive call is treated as a loop back to the beginning of the method.
7216         if (gtIsRecursiveCall(methHnd))
7217         {
7218 #ifdef DEBUG
7219             if (verbose)
7220             {
7221                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
7222                         fgFirstBB->bbNum, compCurBB->bbNum);
7223             }
7224 #endif
7225             fgMarkBackwardJump(fgFirstBB, compCurBB);
7226         }
7227
7228         switch (callInfo->kind)
7229         {
7230
7231             case CORINFO_VIRTUALCALL_STUB:
7232             {
7233                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7234                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7235                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
7236                 {
7237
7238                     if (compIsForInlining())
7239                     {
7240                         // Don't import runtime lookups when inlining
7241                         // Inlining has to be aborted in such a case
7242                         /* XXX Fri 3/20/2009
7243                          * By the way, this would never succeed.  If the handle lookup is into the generic
7244                          * dictionary for a candidate, you'll generate different dictionary offsets and the
7245                          * inlined code will crash.
7246                          *
7247                          * To anyone reviewing this code, when could this ever succeed in the future?  It'll
7248                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
7249                          * failing here.
7250                          */
7251                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
7252                         return TYP_UNDEF;
7253                     }
7254
7255                     GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
7256                     assert(!compDonotInline());
7257
7258                     // This is the rough code to set up an indirect stub call
7259                     assert(stubAddr != nullptr);
7260
7261                     // The stubAddr may be a
7262                     // complex expression. As it is evaluated after the args,
7263                     // it may cause registered args to be spilled. Simply spill it.
7264
7265                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
7266                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
7267                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7268
7269                     // Create the actual call node
7270
7271                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7272                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7273
7274                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
7275
7276                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
7277                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7278
7279 #ifdef _TARGET_X86_
7280                     // No tailcalls allowed for these yet...
7281                     canTailCall             = false;
7282                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
7283 #endif
7284                 }
7285                 else
7286                 {
7287                     // OK, the stub is available at compile time.
7288
7289                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7290                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
7291                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7292                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
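                    // IAT_PVALUE indicates the stub address must be loaded through an indirection
                    // cell, so mark the call accordingly for downstream code generation.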
7293                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
7294                     {
7295                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
7296                     }
7297                 }
7298
7299 #ifdef FEATURE_READYTORUN_COMPILER
7300                 if (opts.IsReadyToRun())
7301                 {
7302                     // Null check is sometimes needed for ready to run to handle
7303                     // non-virtual <-> virtual changes between versions
7304                     if (callInfo->nullInstanceCheck)
7305                     {
7306                         call->gtFlags |= GTF_CALL_NULLCHECK;
7307                     }
7308                 }
7309 #endif
7310
7311                 break;
7312             }
7313
7314             case CORINFO_VIRTUALCALL_VTABLE:
7315             {
7316                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7317                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7318                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7319                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
7320                 break;
7321             }
7322
7323             case CORINFO_VIRTUALCALL_LDVIRTFTN:
7324             {
7325                 if (compIsForInlining())
7326                 {
7327                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
7328                     return TYP_UNDEF;
7329                 }
7330
7331                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7332                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7333                 // OK, we've been told to call via LDVIRTFTN, so just
7334                 // take the call now....
7335
7336                 args = impPopList(sig->numArgs, sig);
7337
7338                 GenTreePtr thisPtr = impPopStack().val;
7339                 thisPtr            = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7340                 assert(thisPtr != nullptr);
7341
7342                 // Clone the (possibly transformed) "this" pointer
7343                 GenTreePtr thisPtrCopy;
7344                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7345                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
7346
7347                 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7348                 assert(fptr != nullptr);
7349
7350                 thisPtr = nullptr; // can't reuse it
7351
7352                 // Now make an indirect call through the function pointer
7353
7354                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7355                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7356                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7357
7358                 // Create the actual call node
7359
7360                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7361                 call->gtCall.gtCallObjp = thisPtrCopy;
7362                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7363
7364                 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7365                 {
7366                     // CoreRT generic virtual method: need to handle potential fat function pointers
7367                     addFatPointerCandidate(call->AsCall());
7368                 }
7369 #ifdef FEATURE_READYTORUN_COMPILER
7370                 if (opts.IsReadyToRun())
7371                 {
7372                     // Null check is needed for ready to run to handle
7373                     // non-virtual <-> virtual changes between versions
7374                     call->gtFlags |= GTF_CALL_NULLCHECK;
7375                 }
7376 #endif
7377
7378                 // Since we are jumping over some code, check that it's OK to skip that code
7379                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7380                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7381                 goto DONE;
7382             }
7383
7384             case CORINFO_CALL:
7385             {
7386                 // This is for a non-virtual, non-interface etc. call
7387                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7388
7389                 // We remove the nullcheck for the GetType call intrinsic.
7390                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7391                 // and intrinsics.
7392                 if (callInfo->nullInstanceCheck &&
7393                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7394                 {
7395                     call->gtFlags |= GTF_CALL_NULLCHECK;
7396                 }
7397
7398 #ifdef FEATURE_READYTORUN_COMPILER
7399                 if (opts.IsReadyToRun())
7400                 {
7401                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7402                 }
7403 #endif
7404                 break;
7405             }
7406
7407             case CORINFO_CALL_CODE_POINTER:
7408             {
7409                 // The EE has asked us to call by computing a code pointer and then doing an
7410                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
7411
7412                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7413                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7414
7415                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7416                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7417
7418                 GenTreePtr fptr =
7419                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7420
7421                 if (compDonotInline())
7422                 {
7423                     return TYP_UNDEF;
7424                 }
7425
7426                 // Now make an indirect call through the function pointer
7427
7428                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7429                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7430                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7431
7432                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7433                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7434                 if (callInfo->nullInstanceCheck)
7435                 {
7436                     call->gtFlags |= GTF_CALL_NULLCHECK;
7437                 }
7438
7439                 break;
7440             }
7441
7442             default:
7443                 assert(!"unknown call kind");
7444                 break;
7445         }
7446
7447         //-------------------------------------------------------------------------
7448         // Set more flags
7449
7450         PREFIX_ASSUME(call != nullptr);
7451
7452         if (mflags & CORINFO_FLG_NOGCCHECK)
7453         {
7454             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7455         }
7456
7457         // Mark call if it's one of the ones we will maybe treat as an intrinsic
7458         if (isSpecialIntrinsic)
7459         {
7460             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7461         }
7462     }
7463     assert(sig);
7464     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7465
7466     /* Some sanity checks */
7467
7468     // CALL_VIRT and NEWOBJ must have a THIS pointer
7469     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7470     // static bit and hasThis are negations of one another
7471     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7472     assert(call != nullptr);
7473
7474     /*-------------------------------------------------------------------------
7475      * Check special-cases etc
7476      */
7477
7478     /* Special case - Check if it is a call to Delegate.Invoke(). */
7479
7480     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7481     {
7482         assert(!compIsForInlining());
7483         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7484         assert(mflags & CORINFO_FLG_FINAL);
7485
7486         /* Set the delegate flag */
7487         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7488
7489         if (callInfo->secureDelegateInvoke)
7490         {
7491             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7492         }
7493
7494         if (opcode == CEE_CALLVIRT)
7495         {
7496             assert(mflags & CORINFO_FLG_FINAL);
7497
7498             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7499             assert(call->gtFlags & GTF_CALL_NULLCHECK);
7500             call->gtFlags &= ~GTF_CALL_NULLCHECK;
7501         }
7502     }
7503
7504     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7505     actualMethodRetTypeSigClass = sig->retTypeSigClass;
7506     if (varTypeIsStruct(callRetTyp))
7507     {
7508         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
7509         call->gtType = callRetTyp;
7510     }
7511
7512 #if !FEATURE_VARARG
7513     /* Check for varargs */
7514     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7515         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7516     {
7517         BADCODE("Varargs not supported.");
7518     }
7519 #endif // !FEATURE_VARARG
7520
7521 #ifdef UNIX_X86_ABI
7522     if (call->gtCall.callSig == nullptr)
7523     {
7524         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7525         *call->gtCall.callSig = *sig;
7526     }
7527 #endif // UNIX_X86_ABI
7528
7529     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7530         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7531     {
7532         assert(!compIsForInlining());
7533
7534         /* Set the right flags */
7535
7536         call->gtFlags |= GTF_CALL_POP_ARGS;
7537         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7538
7539         /* Can't allow tailcall for varargs as it is caller-pop. The caller
7540            will be expecting to pop a certain number of arguments, but if we
7541            tailcall to a function with a different number of arguments, we
7542            are hosed. There are ways around this (caller remembers esp value,
7543            varargs is not caller-pop, etc), but not worth it. */
7544         CLANG_FORMAT_COMMENT_ANCHOR;
7545
7546 #ifdef _TARGET_X86_
7547         if (canTailCall)
7548         {
7549             canTailCall             = false;
7550             szCanTailCallFailReason = "Callee is varargs";
7551         }
7552 #endif
7553
7554         /* Get the total number of arguments - this is already correct
7555          * for CALLI - for methods we have to get it from the call site */
7556
7557         if (opcode != CEE_CALLI)
7558         {
7559 #ifdef DEBUG
7560             unsigned numArgsDef = sig->numArgs;
7561 #endif
7562             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7563
7564 #ifdef DEBUG
7565             // We cannot lazily obtain the signature of a vararg call because using its method
7566             // handle will give us only the declared argument list, not the full argument list.
7567             assert(call->gtCall.callSig == nullptr);
7568             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7569             *call->gtCall.callSig = *sig;
7570 #endif
7571
7572             // For vararg calls we must be sure to load the return type of the
7573             // method actually being called, as well as the return types
7574             // specified in the vararg signature. With type equivalency, these types
7575             // may not be the same.
7576             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7577             {
7578                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7579                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7580                     sig->retType != CORINFO_TYPE_VAR)
7581                 {
7582                     // Make sure that all valuetypes (including enums) that we push are loaded.
7583                     // This is to guarantee that if a GC is triggered from the prestub of this method,
7584                     // all valuetypes in the method signature are already loaded.
7585                     // We need to be able to find the size of the valuetypes, but we cannot
7586                     // do a class-load from within GC.
7587                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7588                 }
7589             }
7590
7591             assert(numArgsDef <= sig->numArgs);
7592         }
7593
7594         /* We will have "cookie" as the last argument but we cannot push
7595          * it on the operand stack because we may overflow, so we append it
7596          * to the arg list after we pop the other arguments */
7597     }
7598
7599     if (mflags & CORINFO_FLG_SECURITYCHECK)
7600     {
7601         assert(!compIsForInlining());
7602
7603         // Need security prolog/epilog callouts when there is
7604         // imperative security in the method. This is to give security a
7605         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7606
7607         if (compIsForInlining())
7608         {
7609             // Cannot handle this if the method being imported is itself an inlinee,
7610             // because an inlinee method does not have its own frame.
7611
7612             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7613             return TYP_UNDEF;
7614         }
7615         else
7616         {
7617             tiSecurityCalloutNeeded = true;
7618
7619             // If the current method calls a method which needs a security check,
7620             // (i.e. the method being compiled has imperative security)
7621             // we need to reserve a slot for the security object in
7622             // the current method's stack frame
7623             opts.compNeedSecurityCheck = true;
7624         }
7625     }
7626
7627     //--------------------------- Inline NDirect ------------------------------
7628
7629     // For inline cases we technically should look at both the current
7630     // block and the call site block (or just the latter if we've
7631     // fused the EH trees). However the block-related checks pertain to
7632     // EH and we currently won't inline a method with EH. So for
7633     // inlinees, just checking the call site block is sufficient.
7634     {
7635         // New lexical block here to avoid compilation errors because of GOTOs.
7636         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7637         impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
7638     }
7639
7640     if (call->gtFlags & GTF_CALL_UNMANAGED)
7641     {
7642         // We set up the unmanaged call by linking the frame, disabling GC, etc
7643         // This needs to be cleaned up on return
7644         if (canTailCall)
7645         {
7646             canTailCall             = false;
7647             szCanTailCallFailReason = "Callee is native";
7648         }
7649
7650         checkForSmallType = true;
7651
7652         impPopArgsForUnmanagedCall(call, sig);
7653
7654         goto DONE;
7655     }
7656     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7657                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7658                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7659                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7660     {
7661         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7662         {
7663             // Normally this only happens with inlining.
7664             // However, a generic method (or type) being NGENd into another module
7665             // can run into this issue as well.  There's not an easy fall-back for NGEN
7666             // so instead we fall back to JIT.
7667             if (compIsForInlining())
7668             {
7669                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7670             }
7671             else
7672             {
7673                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7674             }
7675
7676             return TYP_UNDEF;
7677         }
7678
7679         GenTreePtr cookie = eeGetPInvokeCookie(sig);
7680
7681         // This cookie is required to be either a simple GT_CNS_INT or
7682         // an indirection of a GT_CNS_INT
7683         //
7684         GenTreePtr cookieConst = cookie;
7685         if (cookie->gtOper == GT_IND)
7686         {
7687             cookieConst = cookie->gtOp.gtOp1;
7688         }
7689         assert(cookieConst->gtOper == GT_CNS_INT);
7690
7691         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7692         // we won't allow this tree to participate in any CSE logic
7693         //
7694         cookie->gtFlags |= GTF_DONT_CSE;
7695         cookieConst->gtFlags |= GTF_DONT_CSE;
7696
7697         call->gtCall.gtCallCookie = cookie;
7698
7699         if (canTailCall)
7700         {
7701             canTailCall             = false;
7702             szCanTailCallFailReason = "PInvoke calli";
7703         }
7704     }
7705
7706     /*-------------------------------------------------------------------------
7707      * Create the argument list
7708      */
7709
7710     //-------------------------------------------------------------------------
7711     // Special case - for varargs we have an implicit last argument
7712
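    // The extra "cookie" argument (a handle describing the call-site signature) lets the callee and
    // the runtime walk the variable argument list; as noted earlier, it is appended directly to the
    // arg list rather than pushed on the IL stack.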
7713     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7714     {
7715         assert(!compIsForInlining());
7716
7717         void *varCookie, *pVarCookie;
7718         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7719         {
7720             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7721             return TYP_UNDEF;
7722         }
7723
7724         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7725         assert((!varCookie) != (!pVarCookie));
7726         GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
7727
7728         assert(extraArg == nullptr);
7729         extraArg = gtNewArgList(cookie);
7730     }
7731
7732     //-------------------------------------------------------------------------
7733     // Extra arg for shared generic code and array methods
7734     //
7735     // Extra argument containing instantiation information is passed in the
7736     // following circumstances:
7737     // (a) To the "Address" method on array classes; the extra parameter is
7738     //     the array's type handle (a TypeDesc)
7739     // (b) To shared-code instance methods in generic structs; the extra parameter
7740     //     is the struct's type handle (a vtable ptr)
7741     // (c) To shared-code per-instantiation non-generic static methods in generic
7742     //     classes and structs; the extra parameter is the type handle
7743     // (d) To shared-code generic methods; the extra parameter is an
7744     //     exact-instantiation MethodDesc
7745     //
7746     // We also set the exact type context associated with the call so we can
7747     // inline the call correctly later on.
7748
7749     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7750     {
7751         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7752         if (clsHnd == nullptr)
7753         {
7754             NO_WAY("CALLI on parameterized type");
7755         }
7756
7757         assert(opcode != CEE_CALLI);
7758
7759         GenTreePtr instParam;
7760         BOOL       runtimeLookup;
7761
7762         // Instantiated generic method
7763         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7764         {
7765             CORINFO_METHOD_HANDLE exactMethodHandle =
7766                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7767
7768             if (!exactContextNeedsRuntimeLookup)
7769             {
7770 #ifdef FEATURE_READYTORUN_COMPILER
7771                 if (opts.IsReadyToRun())
7772                 {
7773                     instParam =
7774                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7775                     if (instParam == nullptr)
7776                     {
7777                         assert(compDonotInline());
7778                         return TYP_UNDEF;
7779                     }
7780                 }
7781                 else
7782 #endif
7783                 {
7784                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7785                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7786                 }
7787             }
7788             else
7789             {
7790                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7791                 if (instParam == nullptr)
7792                 {
7793                     assert(compDonotInline());
7794                     return TYP_UNDEF;
7795                 }
7796             }
7797         }
7798
7799         // otherwise must be an instance method in a generic struct,
7800         // a static method in a generic type, or a runtime-generated array method
7801         else
7802         {
7803             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7804             CORINFO_CLASS_HANDLE exactClassHandle =
7805                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7806
7807             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7808             {
7809                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7810                 return TYP_UNDEF;
7811             }
7812
7813             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7814             {
7815                 // We indicate "readonly" to the Address operation by using a null
7816                 // instParam.
7817                 instParam = gtNewIconNode(0, TYP_REF);
7818             }
7819             else if (!exactContextNeedsRuntimeLookup)
7820             {
7821 #ifdef FEATURE_READYTORUN_COMPILER
7822                 if (opts.IsReadyToRun())
7823                 {
7824                     instParam =
7825                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7826                     if (instParam == nullptr)
7827                     {
7828                         assert(compDonotInline());
7829                         return TYP_UNDEF;
7830                     }
7831                 }
7832                 else
7833 #endif
7834                 {
7835                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7836                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7837                 }
7838             }
7839             else
7840             {
7841                 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
7842                 // with which the call was constrained. We embed pConstrainedResolvedToken as the extra argument
7843                 // because pResolvedToken is an interface method and interface types make a poor generic context.
7844                 if (pConstrainedResolvedToken)
7845                 {
7846                     instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
7847                                                  FALSE /* importParent */);
7848                 }
7849                 else
7850                 {
7851                     instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7852                 }
7853
7854                 if (instParam == nullptr)
7855                 {
7856                     assert(compDonotInline());
7857                     return TYP_UNDEF;
7858                 }
7859             }
7860         }
7861
7862         assert(extraArg == nullptr);
7863         extraArg = gtNewArgList(instParam);
7864     }
7865
7866     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7867     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7868     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7869     // exactContextHnd is not currently required when inlining shared generic code into shared
7870     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7871     // (e.g. anything marked needsRuntimeLookup)
7872     if (exactContextNeedsRuntimeLookup)
7873     {
7874         exactContextHnd = nullptr;
7875     }
7876
7877     if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
7878     {
7879         // Only verifiable cases are supported.
7880         // dup; ldvirtftn; newobj; or ldftn; newobj.
7881         // IL test could contain unverifiable sequence, in this case optimization should not be done.
7882         if (impStackHeight() > 0)
7883         {
7884             typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
7885             if (delegateTypeInfo.IsToken())
7886             {
7887                 ldftnToken = delegateTypeInfo.GetToken();
7888             }
7889         }
7890     }
7891
7892     //-------------------------------------------------------------------------
7893     // The main group of arguments
7894
7895     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
7896
7897     if (args)
7898     {
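        // Propagate any global side-effect flags from the argument trees up to the call node.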
7899         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7900     }
7901
7902     //-------------------------------------------------------------------------
7903     // The "this" pointer
7904
7905     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7906     {
7907         GenTreePtr obj;
7908
7909         if (opcode == CEE_NEWOBJ)
7910         {
7911             obj = newobjThis;
7912         }
7913         else
7914         {
7915             obj = impPopStack().val;
7916             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7917             if (compDonotInline())
7918             {
7919                 return TYP_UNDEF;
7920             }
7921         }
7922
7923         // Store the "this" value in the call
7924         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7925         call->gtCall.gtCallObjp = obj;
7926
7927         // Is this a virtual or interface call?
7928         if (call->gtCall.IsVirtual())
7929         {
7930             // only true object pointers can be virtual
7931             assert(obj->gtType == TYP_REF);
7932
7933             // See if we can devirtualize.
7934             impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
7935                                 &exactContextHnd);
7936         }
7937
7938         if (impIsThis(obj))
7939         {
7940             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7941         }
7942     }
7943
7944     //-------------------------------------------------------------------------
7945     // The "this" pointer for "newobj"
7946
7947     if (opcode == CEE_NEWOBJ)
7948     {
7949         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7950         {
7951             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7952             // This is a 'new' of a variable-sized object, where
7953             // the constructor is to return the object.  In this case
7954             // the constructor claims to return VOID but we know it
7955             // actually returns the new object.
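            // (Illustrative: System.String is the classic example of such a class; its "constructor"
            // really behaves like a static method that returns the newly allocated string.)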
7956             assert(callRetTyp == TYP_VOID);
7957             callRetTyp   = TYP_REF;
7958             call->gtType = TYP_REF;
7959             impSpillSpecialSideEff();
7960
7961             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7962         }
7963         else
7964         {
7965             if (clsFlags & CORINFO_FLG_DELEGATE)
7966             {
7967                 // The new inliner morphs it in impImportCall.
7968                 // This will allow us to inline the call to the delegate constructor.
7969                 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
7970             }
7971
7972             if (!bIntrinsicImported)
7973             {
7974
7975 #if defined(DEBUG) || defined(INLINE_DATA)
7976
7977                 // Keep track of the raw IL offset of the call
7978                 call->gtCall.gtRawILOffset = rawILOffset;
7979
7980 #endif // defined(DEBUG) || defined(INLINE_DATA)
7981
7982                 // Is it an inline candidate?
7983                 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7984             }
7985
7986             // append the call node.
7987             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7988
7989             // Now push the value of the 'new' onto the stack
7990
7991             // This is a 'new' of a non-variable sized object.
7992             // Append the new node (op1) to the statement list,
7993             // and then push the local holding the value of this
7994             // new instruction on the stack.
7995
7996             if (clsFlags & CORINFO_FLG_VALUECLASS)
7997             {
7998                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7999
8000                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
8001                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
8002             }
8003             else
8004             {
8005                 if (newobjThis->gtOper == GT_COMMA)
8006                 {
8007                     // In coreclr the callout can be inserted even if verification is disabled
8008                     // so we cannot rely on tiVerificationNeeded alone
8009
8010                     // We must have inserted the callout. Get the real newobj.
8011                     newobjThis = newobjThis->gtOp.gtOp2;
8012                 }
8013
8014                 assert(newobjThis->gtOper == GT_LCL_VAR);
8015                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
8016             }
8017         }
8018         return callRetTyp;
8019     }
8020
8021 DONE:
8022
8023     if (tailCall)
8024     {
8025         // This check cannot be performed for implicit tail calls because
8026         // impIsImplicitTailCallCandidate() does not check whether return
8027         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
8028         // As a result, in a case like the following we may find that the type
8029         // stack is non-empty when Callee() is considered for implicit
8030         // tail calling.
8031         //      int Caller(..) { .... void Callee(); ret val; ... }
8032         //
8033         // Note that we cannot check return type compatibility before impImportCall()
8034         // as we don't have the required info, or we would need to duplicate some of
8035         // the logic of impImportCall().
8036         //
8037         // For implicit tail calls, we perform this check after the return types are
8038         // known to be compatible.
8039         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
8040         {
8041             BADCODE("Stack should be empty after tailcall");
8042         }
8043
8044         // Note that we cannot relax this condition with genActualType(), as
8045         // the calling convention dictates that the caller of a function with
8046         // a small-typed return value is responsible for normalizing the return value.
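        // Illustrative sketch (added for exposition; Caller/Callee below are hypothetical):
        //      short Callee(..) { ... }
        //      int   Caller(..) { ... return Callee(..); }
        // The caller must widen/normalize the small-typed result itself, so even though the
        // stack shapes match, the impTailCallRetTypeCompatible() check below treats these
        // return types as not tail-call compatible.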
8047
8048         if (canTailCall &&
8049             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
8050                                           callInfo->sig.retTypeClass))
8051         {
8052             canTailCall             = false;
8053             szCanTailCallFailReason = "Return types are not tail call compatible";
8054         }
8055
8056         // Stack empty check for implicit tail calls.
8057         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
8058         {
8059 #ifdef _TARGET_AMD64_
8060             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
8061             // in JIT64, not an InvalidProgramException.
8062             Verify(false, "Stack should be empty after tailcall");
8063 #else  // !_TARGET_AMD64_
8064             BADCODE("Stack should be empty after tailcall");
8065 #endif // !_TARGET_AMD64_
8066         }
8067
8068         // assert(compCurBB is not a catch, finally or filter block);
8069         // assert(compCurBB is not a try block protected by a finally block);
8070
8071         // Check for permission to tailcall
8072         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
8073
8074         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
8075
8076         if (canTailCall)
8077         {
8078             // True virtual or indirect calls shouldn't pass in a callee handle.
8079             CORINFO_METHOD_HANDLE exactCalleeHnd =
8080                 ((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
8081             GenTreePtr thisArg = call->gtCall.gtCallObjp;
8082
8083             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
8084             {
8085                 canTailCall = true;
8086                 if (explicitTailCall)
8087                 {
8088                     // In the case of an explicit tail call, mark the call so that it is
8089                     // not considered for inlining.
8090                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
8091 #ifdef DEBUG
8092                     if (verbose)
8093                     {
8094                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
8095                         printTreeID(call);
8096                         printf("\n");
8097                     }
8098 #endif
8099                 }
8100                 else
8101                 {
8102 #if FEATURE_TAILCALL_OPT
8103                     // Must be an implicit tail call.
8104                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
8105
8106                     // It is possible that a call node is both an inline candidate and marked
8107                     // for opportunistic tail calling.  Inlining happens before morphing of
8108                     // trees.  If inlining of an inline candidate gets aborted for whatever
8109                     // reason, the call will survive to the morphing stage, at which point it
8110                     // will be transformed into a tail call after performing additional checks.
8111
8112                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
8113 #ifdef DEBUG
8114                     if (verbose)
8115                     {
8116                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
8117                         printTreeID(call);
8118                         printf("\n");
8119                     }
8120 #endif
8121
8122 #else //! FEATURE_TAILCALL_OPT
8123                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
8124
8125 #endif // FEATURE_TAILCALL_OPT
8126                 }
8127
8128                 // we can't report success just yet...
8129             }
8130             else
8131             {
8132                 canTailCall = false;
8133 // canTailCall reported its reasons already
8134 #ifdef DEBUG
8135                 if (verbose)
8136                 {
8137                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
8138                     printTreeID(call);
8139                     printf("\n");
8140                 }
8141 #endif
8142             }
8143         }
8144         else
8145         {
8146             // If this assert fires it means that canTailCall was set to false without setting a reason!
8147             assert(szCanTailCallFailReason != nullptr);
8148
8149 #ifdef DEBUG
8150             if (verbose)
8151             {
8152                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
8153                 printTreeID(call);
8154                 printf(": %s\n", szCanTailCallFailReason);
8155             }
8156 #endif
8157             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
8158                                                      szCanTailCallFailReason);
8159         }
8160     }
8161
8162     // Note: we assume that small return types are already normalized by the managed callee
8163     // or by the pinvoke stub for calls to unmanaged code.
8164
8165     if (!bIntrinsicImported)
8166     {
8167         //
8168         // Things that need to be checked when bIntrinsicImported is false.
8169         //
8170
8171         assert(call->gtOper == GT_CALL);
8172         assert(sig != nullptr);
8173
8174         // Tail calls require us to save the call site's sig info so we can obtain an argument
8175         // copying thunk from the EE later on.
8176         if (call->gtCall.callSig == nullptr)
8177         {
8178             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8179             *call->gtCall.callSig = *sig;
8180         }
8181
8182         if (compIsForInlining() && opcode == CEE_CALLVIRT)
8183         {
8184             GenTreePtr callObj = call->gtCall.gtCallObjp;
8185             assert(callObj != nullptr);
8186
8187             if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
8188                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
8189                                                                    impInlineInfo->inlArgInfo))
8190             {
8191                 impInlineInfo->thisDereferencedFirst = true;
8192             }
8193         }
8194
8195 #if defined(DEBUG) || defined(INLINE_DATA)
8196
8197         // Keep track of the raw IL offset of the call
8198         call->gtCall.gtRawILOffset = rawILOffset;
8199
8200 #endif // defined(DEBUG) || defined(INLINE_DATA)
8201
8202         // Is it an inline candidate?
8203         impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8204     }
8205
8206 DONE_CALL:
8207     // Push or append the result of the call
8208     if (callRetTyp == TYP_VOID)
8209     {
8210         if (opcode == CEE_NEWOBJ)
8211         {
8212             // we actually did push something, so don't spill the thing we just pushed.
8213             assert(verCurrentState.esStackDepth > 0);
8214             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
8215         }
8216         else
8217         {
8218             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8219         }
8220     }
8221     else
8222     {
8223         impSpillSpecialSideEff();
8224
8225         if (clsFlags & CORINFO_FLG_ARRAY)
8226         {
8227             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8228         }
8229
8230         // Find the return type used for verification by interpreting the method signature.
8231         // NB: we are clobbering the already established sig.
8232         if (tiVerificationNeeded)
8233         {
8234             // Actually, we never get the sig for the original method.
8235             sig = &(callInfo->verSig);
8236         }
8237
8238         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
8239         tiRetVal.NormaliseForStack();
8240
8241         // The CEE_READONLY prefix modifies the verification semantics of an Address
8242         // operation on an array type.
8243         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
8244         {
8245             tiRetVal.SetIsReadonlyByRef();
8246         }
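        // Illustrative sketch (added for exposition): the case above arises for IL such as
        //     readonly. call instance int32& int32[0...,0...]::Address(int32, int32)
        // where the multi-dimensional array's Address() helper returns a byref that the
        // verifier must treat as read-only.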
8247
8248         if (tiVerificationNeeded)
8249         {
8250             // We assume all calls return permanent home byrefs. If they
8251             // didn't they wouldn't be verifiable. This is also covering
8252             // the Address() helper for multidimensional arrays.
8253             if (tiRetVal.IsByRef())
8254             {
8255                 tiRetVal.SetIsPermanentHomeByRef();
8256             }
8257         }
8258
8259         if (call->IsCall())
8260         {
8261             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
8262
8263             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
8264             if (varTypeIsStruct(callRetTyp))
8265             {
8266                 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
8267             }
8268
8269             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
8270             {
8271                 assert(opts.OptEnabled(CLFLG_INLINING));
8272                 assert(!fatPointerCandidate); // We should not try to inline calli.
8273
8274                 // Make the call its own tree (spill the stack if needed).
8275                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8276
8277                 // TODO: Still using the widened type.
8278                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
8279             }
8280             else
8281             {
8282                 if (fatPointerCandidate)
8283                 {
8284                     // fatPointer candidates should be in statements of the form call() or var = call().
8285                     // Such a form lets us find statements with fat calls without walking whole trees,
8286                     // and avoids problems with cutting trees apart.
8287                     assert(!bIntrinsicImported);
8288                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
8289                     if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
8290                     {
8291                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
8292                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
8293                         varDsc->lvVerTypeInfo = tiRetVal;
8294                         impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
8295                         // impAssignTempGen can change src arg list and return type for call that returns struct.
8296                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
8297                         call           = gtNewLclvNode(calliSlot, type);
8298                     }
8299                 }
8300
8301                 // For non-candidates we must also spill, since we
8302                 // might have locals live on the eval stack that this
8303                 // call can modify.
8304                 //
8305                 // Suppress this for certain well-known call targets
8306                 // that we know won't modify locals, e.g. calls that are
8307                 // recognized in gtCanOptimizeTypeEquality. Otherwise
8308                 // we may break key fragile pattern matches later on.
8309                 bool spillStack = true;
8310                 if (call->IsCall())
8311                 {
8312                     GenTreeCall* callNode = call->AsCall();
8313                     if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
8314                     {
8315                         spillStack = false;
8316                     }
8317                     else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8318                     {
8319                         spillStack = false;
8320                     }
8321                 }
8322
8323                 if (spillStack)
8324                 {
8325                     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8326                 }
8327             }
8328         }
8329
8330         if (!bIntrinsicImported)
8331         {
8332             //-------------------------------------------------------------------------
8333             //
8334             /* If the call is of a small type and the callee is managed, the callee will normalize the result
8335                 before returning.
8336                 However, we need to normalize small type values returned by unmanaged
8337                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8338                 if we use the shorter inlined pinvoke stub. */
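            // Illustrative sketch (added for exposition; GetFlag and "native" are hypothetical):
            //     [DllImport("native")] static extern byte GetFlag();
            // With the inlined pinvoke stub, the upper bits of the register holding the returned
            // byte are not guaranteed to be normalized, so the cast below produces the equivalent
            // of (int)(byte)call, giving a well-defined widened value.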
8339
8340             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8341             {
8342                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
8343             }
8344         }
8345
8346         impPushOnStack(call, tiRetVal);
8347     }
8348
8349     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
8350     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8351     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8352     //  callInfoCache.uncacheCallInfo();
8353
8354     return callRetTyp;
8355 }
8356 #ifdef _PREFAST_
8357 #pragma warning(pop)
8358 #endif
8359
8360 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8361 {
8362     CorInfoType corType = methInfo->args.retType;
8363
8364     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8365     {
8366         // We have some kind of STRUCT being returned
8367
8368         structPassingKind howToReturnStruct = SPK_Unknown;
8369
8370         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8371
8372         if (howToReturnStruct == SPK_ByReference)
8373         {
8374             return true;
8375         }
8376     }
8377
8378     return false;
8379 }
8380
8381 #ifdef DEBUG
8382 //
8383 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8384 {
8385     TestLabelAndNum tlAndN;
8386     if (numArgs == 2)
8387     {
8388         tlAndN.m_num  = 0;
8389         StackEntry se = impPopStack();
8390         assert(se.seTypeInfo.GetType() == TI_INT);
8391         GenTreePtr val = se.val;
8392         assert(val->IsCnsIntOrI());
8393         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8394     }
8395     else if (numArgs == 3)
8396     {
8397         StackEntry se = impPopStack();
8398         assert(se.seTypeInfo.GetType() == TI_INT);
8399         GenTreePtr val = se.val;
8400         assert(val->IsCnsIntOrI());
8401         tlAndN.m_num = val->AsIntConCommon()->IconValue();
8402         se           = impPopStack();
8403         assert(se.seTypeInfo.GetType() == TI_INT);
8404         val = se.val;
8405         assert(val->IsCnsIntOrI());
8406         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8407     }
8408     else
8409     {
8410         assert(false);
8411     }
8412
8413     StackEntry expSe = impPopStack();
8414     GenTreePtr node  = expSe.val;
8415
8416     // There are a small number of special cases, where we actually put the annotation on a subnode.
8417     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8418     {
8419         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8420         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8421         // offset within the static field block whose address is returned by the helper call.
8422         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
8423         GenTreePtr helperCall = nullptr;
8424         assert(node->OperGet() == GT_IND);
8425         tlAndN.m_num -= 100;
8426         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8427         GetNodeTestData()->Remove(node);
8428     }
8429     else
8430     {
8431         GetNodeTestData()->Set(node, tlAndN);
8432     }
8433
8434     impPushOnStack(node, expSe.seTypeInfo);
8435     return node->TypeGet();
8436 }
8437 #endif // DEBUG
8438
8439 //-----------------------------------------------------------------------------------
8440 //  impFixupCallStructReturn: For a call node that returns a struct type either
8441 //  adjust the return type to an enregisterable type, or set the flag to indicate
8442 //  struct return via retbuf arg.
8443 //
8444 //  Arguments:
8445 //    call       -  GT_CALL GenTree node
8446 //    retClsHnd  -  Class handle of return type of the call
8447 //
8448 //  Return Value:
8449 //    Returns new GenTree node after fixing struct return of call node
8450 //
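//  Illustrative sketch (added for exposition; 'Point' is a hypothetical type):
//    On Windows x64, a call returning
//        struct Point { int x; int y; };   // 8 bytes
//    is re-typed here to an enregisterable type returned in RAX, while a call returning a
//    struct that does not fit the ABI's register-return rules keeps TYP_STRUCT and has
//    GTF_CALL_M_RETBUFFARG set, i.e. it is returned via a hidden return buffer argument.
//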
8451 GenTreePtr Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8452 {
8453     if (!varTypeIsStruct(call))
8454     {
8455         return call;
8456     }
8457
8458     call->gtRetClsHnd = retClsHnd;
8459
8460 #if FEATURE_MULTIREG_RET
8461     // Initialize Return type descriptor of call node
8462     ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8463     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8464 #endif // FEATURE_MULTIREG_RET
8465
8466 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8467
8468     // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
8469     assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8470
8471     // The return type will remain as the incoming struct type unless normalized to a
8472     // single eightbyte return type below.
8473     call->gtReturnType = call->gtType;
8474
8475     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8476     if (retRegCount != 0)
8477     {
8478         if (retRegCount == 1)
8479         {
8480             // struct returned in a single register
8481             call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8482         }
8483         else
8484         {
8485             // must be a struct returned in two registers
8486             assert(retRegCount == 2);
8487
8488             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8489             {
8490                 // Force a call returning multi-reg struct to be always of the IR form
8491                 //   tmp = call
8492                 //
8493                 // No need to assign a multi-reg struct to a local var if:
8494                 //  - It is a tail call or
8495                 //  - The call is marked for in-lining later
8496                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8497             }
8498         }
8499     }
8500     else
8501     {
8502         // struct not returned in registers, i.e. returned via a hidden retbuf arg.
8503         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8504     }
8505
8506 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8507
8508     // Check for TYP_STRUCT type that wraps a primitive type
8509     // Such structs are returned using a single register
8510     // and we change the return type on those calls here.
8511     //
8512     structPassingKind howToReturnStruct;
8513     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8514
8515     if (howToReturnStruct == SPK_ByReference)
8516     {
8517         assert(returnType == TYP_UNKNOWN);
8518         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8519     }
8520     else
8521     {
8522         assert(returnType != TYP_UNKNOWN);
8523         call->gtReturnType = returnType;
8524
8525         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
8526         if ((returnType == TYP_LONG) && (compLongUsed == false))
8527         {
8528             compLongUsed = true;
8529         }
8530         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
8531         {
8532             compFloatingPointUsed = true;
8533         }
8534
8535 #if FEATURE_MULTIREG_RET
8536         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8537         assert(retRegCount != 0);
8538
8539         if (retRegCount >= 2)
8540         {
8541             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8542             {
8543                 // Force a call returning multi-reg struct to be always of the IR form
8544                 //   tmp = call
8545                 //
8546                 // No need to assign a multi-reg struct to a local var if:
8547                 //  - It is a tail call or
8548                 //  - The call is marked for in-lining later
8549                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8550             }
8551         }
8552 #endif // FEATURE_MULTIREG_RET
8553     }
8554
8555 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8556
8557     return call;
8558 }
8559
8560 /*****************************************************************************
8561    For struct return values, re-type the operand in the case where the ABI
8562    does not use a struct return buffer.
8563    Note that this method is only called for !_TARGET_X86_.
8564  */
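/* Illustrative sketch (added for exposition; 'Handle' is a hypothetical type):
       struct Handle { void* p; };        // pointer-sized, returned in a register, no retbuf
   A 'return h;' whose operand was imported as GT_RETURN(GT_OBJ(GT_ADDR(lclVar))) has that
   operand rewritten below into a GT_LCL_FLD (or GT_IND) typed as info.compRetNativeType,
   so the value can be returned directly in a register. */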
8565
8566 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
8567 {
8568     assert(varTypeIsStruct(info.compRetType));
8569     assert(info.compRetBuffArg == BAD_VAR_NUM);
8570
8571 #if defined(_TARGET_XARCH_)
8572
8573 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8574     // No VarArgs for CoreCLR on x64 Unix
8575     assert(!info.compIsVarArgs);
8576
8577     // Is method returning a multi-reg struct?
8578     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8579     {
8580         // In case of multi-reg struct return, we force IR to be one of the following:
8581         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
8582         // lclvar or call, it is assigned to a temp to create: tmp = op and GT_RETURN(tmp).
8583
8584         if (op->gtOper == GT_LCL_VAR)
8585         {
8586             // Make sure that this struct stays in memory and doesn't get promoted.
8587             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
8588             lvaTable[lclNum].lvIsMultiRegRet = true;
8589
8590             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8591             op->gtFlags |= GTF_DONT_CSE;
8592
8593             return op;
8594         }
8595
8596         if (op->gtOper == GT_CALL)
8597         {
8598             return op;
8599         }
8600
8601         return impAssignMultiRegTypeToVar(op, retClsHnd);
8602     }
8603 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8604     assert(info.compRetNativeType != TYP_STRUCT);
8605 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8606
8607 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8608
8609     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8610     {
8611         if (op->gtOper == GT_LCL_VAR)
8612         {
8613             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8614             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8615             // Make sure this struct type stays as struct so that we can return it as an HFA
8616             lvaTable[lclNum].lvIsMultiRegRet = true;
8617
8618             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8619             op->gtFlags |= GTF_DONT_CSE;
8620
8621             return op;
8622         }
8623
8624         if (op->gtOper == GT_CALL)
8625         {
8626             if (op->gtCall.IsVarargs())
8627             {
8628                 // We cannot tail call because control needs to return to fixup the calling
8629                 // convention for result return.
8630                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8631                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8632             }
8633             else
8634             {
8635                 return op;
8636             }
8637         }
8638         return impAssignMultiRegTypeToVar(op, retClsHnd);
8639     }
8640
8641 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8642
8643     // Is method returning a multi-reg struct?
8644     if (IsMultiRegReturnedType(retClsHnd))
8645     {
8646         if (op->gtOper == GT_LCL_VAR)
8647         {
8648             // This LCL_VAR stays as a TYP_STRUCT
8649             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8650
8651             // Make sure this struct type is not struct promoted
8652             lvaTable[lclNum].lvIsMultiRegRet = true;
8653
8654             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8655             op->gtFlags |= GTF_DONT_CSE;
8656
8657             return op;
8658         }
8659
8660         if (op->gtOper == GT_CALL)
8661         {
8662             if (op->gtCall.IsVarargs())
8663             {
8664                 // We cannot tail call because control needs to return to fixup the calling
8665                 // convention for result return.
8666                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8667                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8668             }
8669             else
8670             {
8671                 return op;
8672             }
8673         }
8674         return impAssignMultiRegTypeToVar(op, retClsHnd);
8675     }
8676
8677 #endif // FEATURE_MULTIREG_RET && (_TARGET_ARM_ || _TARGET_ARM64_)
8678
8679 REDO_RETURN_NODE:
8680     // Adjust the type away from struct to an integral type,
8681     // with no normalization.
8682     if (op->gtOper == GT_LCL_VAR)
8683     {
8684         op->ChangeOper(GT_LCL_FLD);
8685     }
8686     else if (op->gtOper == GT_OBJ)
8687     {
8688         GenTreePtr op1 = op->AsObj()->Addr();
8689
8690         // We will fold away OBJ/ADDR
8691         // except for OBJ/ADDR/INDEX
8692         //     as the array type influences the array element's offset
8693         //     Later in this method we change op->gtType to info.compRetNativeType
8694         //     This is not correct when op is a GT_INDEX as the starting offset
8695         //     for the array elements 'elemOffs' is different for an array of
8696         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8697         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8698         //
8699         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8700         {
8701             // Change '*(&X)' to 'X' and see if we can do better
8702             op = op1->gtOp.gtOp1;
8703             goto REDO_RETURN_NODE;
8704         }
8705         op->gtObj.gtClass = NO_CLASS_HANDLE;
8706         op->ChangeOperUnchecked(GT_IND);
8707         op->gtFlags |= GTF_IND_TGTANYWHERE;
8708     }
8709     else if (op->gtOper == GT_CALL)
8710     {
8711         if (op->AsCall()->TreatAsHasRetBufArg(this))
8712         {
8713             // This must be one of those 'special' helpers that don't
8714             // really have a return buffer, but instead use it as a way
8715             // to keep the trees cleaner with fewer address-taken temps.
8716             //
8717             // Well now we have to materialize the return buffer as
8718             // an address-taken temp. Then we can return the temp.
8719             //
8720             // NOTE: this code assumes that since the call directly
8721             // feeds the return, then the call must be returning the
8722             // same structure/class/type.
8723             //
8724             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8725
8726             // No need to spill anything as we're about to return.
8727             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8728
8729             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8730             // jump directly to a GT_LCL_FLD.
8731             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8732             op->ChangeOper(GT_LCL_FLD);
8733         }
8734         else
8735         {
8736             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8737
8738             // Don't change the gtType of the node just yet, it will get changed later.
8739             return op;
8740         }
8741     }
8742     else if (op->gtOper == GT_COMMA)
8743     {
8744         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8745     }
8746
8747     op->gtType = info.compRetNativeType;
8748
8749     return op;
8750 }
8751
8752 /*****************************************************************************
8753    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8754    finally-protected try. We find the finally blocks protecting the current
8755    offset (in order) by walking over the complete exception table and
8756    finding enclosing clauses. This assumes that the table is sorted.
8757    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8758
8759    If we are leaving a catch handler, we need to attach the
8760    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8761
8762    After this function, the BBJ_LEAVE block has been converted to a different type.
8763  */
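/* Illustrative sketch (added for exposition; F1/F2 and label L are hypothetical):

       try {
           try {
               ...
               leave L;              // IL 'leave' exiting both finally-protected regions
           } finally { F1(); }
       } finally { F2(); }
     L: ...

   The single BBJ_LEAVE block is expanded into a chain
       BBJ_CALLFINALLY (calls F1) -> BBJ_CALLFINALLY (calls F2) -> BBJ_ALWAYS -> L
   and, if the leave also exits catch handlers, the pending CORINFO_HELP_ENDCATCH calls are
   appended to the appropriate blocks in the chain.
 */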
8764
8765 #if !FEATURE_EH_FUNCLETS
8766
8767 void Compiler::impImportLeave(BasicBlock* block)
8768 {
8769 #ifdef DEBUG
8770     if (verbose)
8771     {
8772         printf("\nBefore import CEE_LEAVE:\n");
8773         fgDispBasicBlocks();
8774         fgDispHandlerTab();
8775     }
8776 #endif // DEBUG
8777
8778     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8779     unsigned    blkAddr         = block->bbCodeOffs;
8780     BasicBlock* leaveTarget     = block->bbJumpDest;
8781     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8782
8783     // LEAVE clears the stack, so spill side effects and set the stack depth to 0
8784
8785     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8786     verCurrentState.esStackDepth = 0;
8787
8788     assert(block->bbJumpKind == BBJ_LEAVE);
8789     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8790
8791     BasicBlock* step         = DUMMY_INIT(NULL);
8792     unsigned    encFinallies = 0; // Number of enclosing finallies.
8793     GenTreePtr  endCatches   = NULL;
8794     GenTreePtr  endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8795
8796     unsigned  XTnum;
8797     EHblkDsc* HBtab;
8798
8799     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8800     {
8801         // Grab the handler offsets
8802
8803         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8804         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8805         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8806         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8807
8808         /* Is this a catch-handler we are CEE_LEAVEing out of?
8809          * If so, we need to call CORINFO_HELP_ENDCATCH.
8810          */
8811
8812         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8813         {
8814             // Can't CEE_LEAVE out of a finally/fault handler
8815             if (HBtab->HasFinallyOrFaultHandler())
8816                 BADCODE("leave out of fault/finally block");
8817
8818             // Create the call to CORINFO_HELP_ENDCATCH
8819             GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8820
8821             // Make a list of all the currently pending endCatches
8822             if (endCatches)
8823                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8824             else
8825                 endCatches = endCatch;
8826
8827 #ifdef DEBUG
8828             if (verbose)
8829             {
8830                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8831                        "CORINFO_HELP_ENDCATCH\n",
8832                        block->bbNum, XTnum);
8833             }
8834 #endif
8835         }
8836         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8837                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8838         {
8839             /* This is a finally-protected try we are jumping out of */
8840
8841             /* If there are any pending endCatches, and we have already
8842                jumped out of a finally-protected try, then the endCatches
8843                have to be put in a block in an outer try for async
8844                exceptions to work correctly.
8845                Else, just append to the original block */
8846
8847             BasicBlock* callBlock;
8848
8849             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8850
8851             if (encFinallies == 0)
8852             {
8853                 assert(step == DUMMY_INIT(NULL));
8854                 callBlock             = block;
8855                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8856
8857                 if (endCatches)
8858                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8859
8860 #ifdef DEBUG
8861                 if (verbose)
8862                 {
8863                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8864                            "block %s\n",
8865                            callBlock->dspToString());
8866                 }
8867 #endif
8868             }
8869             else
8870             {
8871                 assert(step != DUMMY_INIT(NULL));
8872
8873                 /* Calling the finally block */
8874                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8875                 assert(step->bbJumpKind == BBJ_ALWAYS);
8876                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8877                                               // finally in the chain)
8878                 step->bbJumpDest->bbRefs++;
8879
8880                 /* The new block will inherit this block's weight */
8881                 callBlock->setBBWeight(block->bbWeight);
8882                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8883
8884 #ifdef DEBUG
8885                 if (verbose)
8886                 {
8887                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
8888                            callBlock->dspToString());
8889                 }
8890 #endif
8891
8892                 GenTreePtr lastStmt;
8893
8894                 if (endCatches)
8895                 {
8896                     lastStmt         = gtNewStmt(endCatches);
8897                     endLFin->gtNext  = lastStmt;
8898                     lastStmt->gtPrev = endLFin;
8899                 }
8900                 else
8901                 {
8902                     lastStmt = endLFin;
8903                 }
8904
8905                 // note that this sets BBF_IMPORTED on the block
8906                 impEndTreeList(callBlock, endLFin, lastStmt);
8907             }
8908
8909             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8910             /* The new block will inherit this block's weight */
8911             step->setBBWeight(block->bbWeight);
8912             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8913
8914 #ifdef DEBUG
8915             if (verbose)
8916             {
8917                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
8918                        step->dspToString());
8919             }
8920 #endif
8921
8922             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8923             assert(finallyNesting <= compHndBBtabCount);
8924
8925             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8926             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8927             endLFin               = gtNewStmt(endLFin);
8928             endCatches            = NULL;
8929
8930             encFinallies++;
8931
8932             invalidatePreds = true;
8933         }
8934     }
8935
8936     /* Append any remaining endCatches, if any */
8937
8938     assert(!encFinallies == !endLFin);
8939
8940     if (encFinallies == 0)
8941     {
8942         assert(step == DUMMY_INIT(NULL));
8943         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8944
8945         if (endCatches)
8946             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8947
8948 #ifdef DEBUG
8949         if (verbose)
8950         {
8951             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8952                    "block %s\n",
8953                    block->dspToString());
8954         }
8955 #endif
8956     }
8957     else
8958     {
8959         // If leaveTarget is the start of another try block, we want to make sure that
8960         // we do not insert finalStep into that try block. Hence, we find the enclosing
8961         // try block.
8962         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8963
8964         // Insert a new BB either in the try region indicated by tryIndex or
8965         // the handler region indicated by leaveTarget->bbHndIndex,
8966         // depending on which is the inner region.
8967         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8968         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8969         step->bbJumpDest = finalStep;
8970
8971         /* The new block will inherit this block's weight */
8972         finalStep->setBBWeight(block->bbWeight);
8973         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8974
8975 #ifdef DEBUG
8976         if (verbose)
8977         {
8978             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
8979                    finalStep->dspToString());
8980         }
8981 #endif
8982
8983         GenTreePtr lastStmt;
8984
8985         if (endCatches)
8986         {
8987             lastStmt         = gtNewStmt(endCatches);
8988             endLFin->gtNext  = lastStmt;
8989             lastStmt->gtPrev = endLFin;
8990         }
8991         else
8992         {
8993             lastStmt = endLFin;
8994         }
8995
8996         impEndTreeList(finalStep, endLFin, lastStmt);
8997
8998         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8999
9000         // Queue up the jump target for importing
9001
9002         impImportBlockPending(leaveTarget);
9003
9004         invalidatePreds = true;
9005     }
9006
9007     if (invalidatePreds && fgComputePredsDone)
9008     {
9009         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9010         fgRemovePreds();
9011     }
9012
9013 #ifdef DEBUG
9014     fgVerifyHandlerTab();
9015
9016     if (verbose)
9017     {
9018         printf("\nAfter import CEE_LEAVE:\n");
9019         fgDispBasicBlocks();
9020         fgDispHandlerTab();
9021     }
9022 #endif // DEBUG
9023 }
9024
9025 #else // FEATURE_EH_FUNCLETS
9026
9027 void Compiler::impImportLeave(BasicBlock* block)
9028 {
9029 #ifdef DEBUG
9030     if (verbose)
9031     {
9032         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
9033         fgDispBasicBlocks();
9034         fgDispHandlerTab();
9035     }
9036 #endif // DEBUG
9037
9038     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9039     unsigned    blkAddr         = block->bbCodeOffs;
9040     BasicBlock* leaveTarget     = block->bbJumpDest;
9041     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
9042
9043     // LEAVE clears the stack, so spill side effects and set the stack depth to 0
9044
9045     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9046     verCurrentState.esStackDepth = 0;
9047
9048     assert(block->bbJumpKind == BBJ_LEAVE);
9049     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
9050
9051     BasicBlock* step = nullptr;
9052
9053     enum StepType
9054     {
9055         // No step type; step == NULL.
9056         ST_None,
9057
9058         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
9059         // That is, is step->bbJumpDest where a finally will return to?
9060         ST_FinallyReturn,
9061
9062         // The step block is a catch return.
9063         ST_Catch,
9064
9065         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
9066         ST_Try
9067     };
9068     StepType stepType = ST_None;
9069
9070     unsigned  XTnum;
9071     EHblkDsc* HBtab;
9072
9073     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9074     {
9075         // Grab the handler offsets
9076
9077         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9078         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9079         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9080         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9081
9082         /* Is this a catch-handler we are CEE_LEAVEing out of?
9083          */
9084
9085         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9086         {
9087             // Can't CEE_LEAVE out of a finally/fault handler
9088             if (HBtab->HasFinallyOrFaultHandler())
9089             {
9090                 BADCODE("leave out of fault/finally block");
9091             }
9092
9093             /* We are jumping out of a catch */
9094
9095             if (step == nullptr)
9096             {
9097                 step             = block;
9098                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
9099                 stepType         = ST_Catch;
9100
9101 #ifdef DEBUG
9102                 if (verbose)
9103                 {
9104                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
9105                            "block\n",
9106                            XTnum, step->bbNum);
9107                 }
9108 #endif
9109             }
9110             else
9111             {
9112                 BasicBlock* exitBlock;
9113
9114                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
9115                  * scope */
9116                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
9117
9118                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9119                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
9120                                               // exit) returns to this block
9121                 step->bbJumpDest->bbRefs++;
9122
9123 #if defined(_TARGET_ARM_)
9124                 if (stepType == ST_FinallyReturn)
9125                 {
9126                     assert(step->bbJumpKind == BBJ_ALWAYS);
9127                     // Mark the target of a finally return
9128                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9129                 }
9130 #endif // defined(_TARGET_ARM_)
9131
9132                 /* The new block will inherit this block's weight */
9133                 exitBlock->setBBWeight(block->bbWeight);
9134                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9135
9136                 /* This exit block is the new step */
9137                 step     = exitBlock;
9138                 stepType = ST_Catch;
9139
9140                 invalidatePreds = true;
9141
9142 #ifdef DEBUG
9143                 if (verbose)
9144                 {
9145                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
9146                            exitBlock->bbNum);
9147                 }
9148 #endif
9149             }
9150         }
9151         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9152                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9153         {
9154             /* We are jumping out of a finally-protected try */
9155
9156             BasicBlock* callBlock;
9157
9158             if (step == nullptr)
9159             {
9160 #if FEATURE_EH_CALLFINALLY_THUNKS
9161
9162                 // Put the call to the finally in the enclosing region.
9163                 unsigned callFinallyTryIndex =
9164                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9165                 unsigned callFinallyHndIndex =
9166                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9167                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
9168
9169                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
9170                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
9171                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
9172                 // next block, and flow optimizations will remove it.
9173                 block->bbJumpKind = BBJ_ALWAYS;
9174                 block->bbJumpDest = callBlock;
9175                 block->bbJumpDest->bbRefs++;
9176
9177                 /* The new block will inherit this block's weight */
9178                 callBlock->setBBWeight(block->bbWeight);
9179                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9180
9181 #ifdef DEBUG
9182                 if (verbose)
9183                 {
9184                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9185                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
9186                            XTnum, block->bbNum, callBlock->bbNum);
9187                 }
9188 #endif
9189
9190 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9191
9192                 callBlock             = block;
9193                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9194
9195 #ifdef DEBUG
9196                 if (verbose)
9197                 {
9198                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9199                            "BBJ_CALLFINALLY block\n",
9200                            XTnum, callBlock->bbNum);
9201                 }
9202 #endif
9203
9204 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9205             }
9206             else
9207             {
9208                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
9209                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
9210                 // a 'finally'), or the step block is the return from a catch.
9211                 //
9212                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
9213                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
9214                 // automatically re-raise the exception, using the return address of the catch (that is, the target
9215                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
9216                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
9217                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
9218                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
9219                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
9220                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
9221                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
9222                 // stack walks.)
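                // Illustrative sketch (added for exposition) of the shape built below on those platforms:
                //     BBJ_EHCATCHRET -> step2 (BBJ_ALWAYS, inside the finally-protected 'try')
                //                    -> BBJ_CALLFINALLY (the call-to-finally thunk)
                // rather than having the catch return target the BBJ_CALLFINALLY directly.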
9223
9224                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9225
9226 #if FEATURE_EH_CALLFINALLY_THUNKS
9227                 if (step->bbJumpKind == BBJ_EHCATCHRET)
9228                 {
9229                     // Need to create another step block in the 'try' region that will actually branch to the
9230                     // call-to-finally thunk.
9231                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9232                     step->bbJumpDest  = step2;
9233                     step->bbJumpDest->bbRefs++;
9234                     step2->setBBWeight(block->bbWeight);
9235                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9236
9237 #ifdef DEBUG
9238                     if (verbose)
9239                     {
9240                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
9241                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
9242                                XTnum, step->bbNum, step2->bbNum);
9243                     }
9244 #endif
9245
9246                     step = step2;
9247                     assert(stepType == ST_Catch); // Leave it as catch type for now.
9248                 }
9249 #endif // FEATURE_EH_CALLFINALLY_THUNKS
9250
9251 #if FEATURE_EH_CALLFINALLY_THUNKS
9252                 unsigned callFinallyTryIndex =
9253                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9254                 unsigned callFinallyHndIndex =
9255                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9256 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
9257                 unsigned callFinallyTryIndex = XTnum + 1;
9258                 unsigned callFinallyHndIndex = 0; // don't care
9259 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9260
9261                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
9262                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9263                                               // finally in the chain)
9264                 step->bbJumpDest->bbRefs++;
9265
9266 #if defined(_TARGET_ARM_)
9267                 if (stepType == ST_FinallyReturn)
9268                 {
9269                     assert(step->bbJumpKind == BBJ_ALWAYS);
9270                     // Mark the target of a finally return
9271                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9272                 }
9273 #endif // defined(_TARGET_ARM_)
9274
9275                 /* The new block will inherit this block's weight */
9276                 callBlock->setBBWeight(block->bbWeight);
9277                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9278
9279 #ifdef DEBUG
9280                 if (verbose)
9281                 {
9282                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
9283                            "BB%02u\n",
9284                            XTnum, callBlock->bbNum);
9285                 }
9286 #endif
9287             }
9288
9289             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9290             stepType = ST_FinallyReturn;
9291
9292             /* The new block will inherit this block's weight */
9293             step->setBBWeight(block->bbWeight);
9294             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9295
9296 #ifdef DEBUG
9297             if (verbose)
9298             {
9299                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9300                        "block BB%02u\n",
9301                        XTnum, step->bbNum);
9302             }
9303 #endif
9304
9305             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9306
9307             invalidatePreds = true;
9308         }
9309         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9310                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9311         {
9312             // We are jumping out of a catch-protected try.
9313             //
9314             // If we are returning from a call to a finally, then we must have a step block within a try
9315             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
9316             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9317             // and invoke the appropriate catch.
9318             //
9319             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9320             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9321             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9322             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9323             // address of the catch return as the new exception address. That is, the re-raised exception appears to
9324             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9325             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
9326             // For example:
9327             //
9328             // try {
9329             //    try {
9330             //       // something here raises ThreadAbortException
9331             //       LEAVE LABEL_1; // no need to stop at LABEL_2
9332             //    } catch (Exception) {
9333             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9334             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9335             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9336             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9337             //       // need to do this transformation if the current EH block is a try/catch that catches
9338             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
9339             //       // information, so currently we do it for all catch types.
9340             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
9341             //    }
9342             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9343             // } catch (ThreadAbortException) {
9344             // }
9345             // LABEL_1:
9346             //
9347             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
9348             // compiler.
9349
9350             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9351             {
9352                 BasicBlock* catchStep;
9353
9354                 assert(step);
9355
9356                 if (stepType == ST_FinallyReturn)
9357                 {
9358                     assert(step->bbJumpKind == BBJ_ALWAYS);
9359                 }
9360                 else
9361                 {
9362                     assert(stepType == ST_Catch);
9363                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
9364                 }
9365
9366                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9367                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9368                 step->bbJumpDest = catchStep;
9369                 step->bbJumpDest->bbRefs++;
9370
9371 #if defined(_TARGET_ARM_)
9372                 if (stepType == ST_FinallyReturn)
9373                 {
9374                     // Mark the target of a finally return
9375                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9376                 }
9377 #endif // defined(_TARGET_ARM_)
9378
9379                 /* The new block will inherit this block's weight */
9380                 catchStep->setBBWeight(block->bbWeight);
9381                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9382
9383 #ifdef DEBUG
9384                 if (verbose)
9385                 {
9386                     if (stepType == ST_FinallyReturn)
9387                     {
9388                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9389                                "BBJ_ALWAYS block BB%02u\n",
9390                                XTnum, catchStep->bbNum);
9391                     }
9392                     else
9393                     {
9394                         assert(stepType == ST_Catch);
9395                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9396                                "BBJ_ALWAYS block BB%02u\n",
9397                                XTnum, catchStep->bbNum);
9398                     }
9399                 }
9400 #endif // DEBUG
9401
9402                 /* This block is the new step */
9403                 step     = catchStep;
9404                 stepType = ST_Try;
9405
9406                 invalidatePreds = true;
9407             }
9408         }
9409     }
9410
9411     if (step == nullptr)
9412     {
9413         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9414
9415 #ifdef DEBUG
9416         if (verbose)
9417         {
9418             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9419                    "block BB%02u to BBJ_ALWAYS\n",
9420                    block->bbNum);
9421         }
9422 #endif
9423     }
9424     else
9425     {
9426         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9427
9428 #if defined(_TARGET_ARM_)
9429         if (stepType == ST_FinallyReturn)
9430         {
9431             assert(step->bbJumpKind == BBJ_ALWAYS);
9432             // Mark the target of a finally return
9433             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9434         }
9435 #endif // defined(_TARGET_ARM_)
9436
9437 #ifdef DEBUG
9438         if (verbose)
9439         {
9440             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
9441         }
9442 #endif
9443
9444         // Queue up the jump target for importing
9445
9446         impImportBlockPending(leaveTarget);
9447     }
9448
9449     if (invalidatePreds && fgComputePredsDone)
9450     {
9451         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9452         fgRemovePreds();
9453     }
9454
9455 #ifdef DEBUG
9456     fgVerifyHandlerTab();
9457
9458     if (verbose)
9459     {
9460         printf("\nAfter import CEE_LEAVE:\n");
9461         fgDispBasicBlocks();
9462         fgDispHandlerTab();
9463     }
9464 #endif // DEBUG
9465 }
9466
9467 #endif // FEATURE_EH_FUNCLETS
9468
9469 /*****************************************************************************/
9470 // This is called when reimporting a leave block. It resets the JumpKind,
9471 // JumpDest, and bbNext to the original values
9472
9473 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9474 {
9475 #if FEATURE_EH_FUNCLETS
9476     // With EH funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
9477     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY.  If for some reason we reimport B0,
9478     // it is reset (in this routine) to end with BBJ_LEAVE, and further down, when B0 is reimported, we create another
9479     // BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the only predecessor
9480     // are also considered orphans and attempted to be deleted.
9481     //
9482     //  try  {
9483     //     ....
9484     //     try
9485     //     {
9486     //         ....
9487     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
9488     //     } finally { }
9489     //  } finally { }
9490     //  OUTSIDE:
9491     //
9492     // In the above nested try-finally example, we create a step block (call it Bstep) which in turn branches to a block
9493     // where a finally would branch to (and such block is marked as finally target).  Block B1 branches to step block.
9494     // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed.  To
9495     // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
9496     // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
9497     // will be treated as pair and handled correctly.
9498     if (block->bbJumpKind == BBJ_CALLFINALLY)
9499     {
9500         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
9501         dupBlock->bbFlags    = block->bbFlags;
9502         dupBlock->bbJumpDest = block->bbJumpDest;
9503         dupBlock->copyEHRegion(block);
9504         dupBlock->bbCatchTyp = block->bbCatchTyp;
9505
9506         // Mark this block as:
9507         //  a) not referenced by any other block, to make sure that it gets deleted
9508         //  b) weight zero
9509         //  c) already imported (to prevent it from being imported again)
9510         //  d) internal
9511         //  e) rarely run
9512         dupBlock->bbRefs   = 0;
9513         dupBlock->bbWeight = 0;
9514         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
9515
9516         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
9517         // will be next to each other.
9518         fgInsertBBafter(block, dupBlock);
9519
9520 #ifdef DEBUG
9521         if (verbose)
9522         {
9523             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
9524         }
9525 #endif
9526     }
9527 #endif // FEATURE_EH_FUNCLETS
9528
9529     block->bbJumpKind = BBJ_LEAVE;
9530     fgInitBBLookup();
9531     block->bbJumpDest = fgLookupBB(jmpAddr);
9532
9533     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
9534     // the BBJ_ALWAYS block will be unreachable, and will be removed later. The
9535     // reason we don't want to remove the block at this point is that if we call
9536     // fgInitBBLookup() again we will do it wrong, as the BBJ_ALWAYS block won't be
9537     // added and the linked-list length will be different from fgBBcount.
9538 }
9539
9540 /*****************************************************************************/
9541 // Get the first non-prefix opcode. Used for verification of valid combinations
9542 // of prefixes and actual opcodes.
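// For example, given the IL sequence "volatile. unaligned. 1 ldind.i4", this returns CEE_LDIND_I4;
// if nothing but prefixes remains before codeEndp, it returns CEE_ILLEGAL.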
9543
9544 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9545 {
9546     while (codeAddr < codeEndp)
9547     {
9548         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9549         codeAddr += sizeof(__int8);
9550
9551         if (opcode == CEE_PREFIX1)
9552         {
9553             if (codeAddr >= codeEndp)
9554             {
9555                 break;
9556             }
9557             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9558             codeAddr += sizeof(__int8);
9559         }
9560
9561         switch (opcode)
9562         {
9563             case CEE_UNALIGNED:
9564             case CEE_VOLATILE:
9565             case CEE_TAILCALL:
9566             case CEE_CONSTRAINED:
9567             case CEE_READONLY:
9568                 break;
9569             default:
9570                 return opcode;
9571         }
9572
9573         codeAddr += opcodeSizes[opcode];
9574     }
9575
9576     return CEE_ILLEGAL;
9577 }
9578
9579 /*****************************************************************************/
9580 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
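// For example, "volatile. ldsfld" is accepted (when volatilePrefix is true), whereas a volatile. or
// unaligned. prefix applied to an opcode such as ldloc.0 results in BADCODE.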
9581
9582 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9583 {
9584     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9585
9586     if (!(
9587             // The opcodes of all the ldind and stind variants happen to be contiguous, except for stind.i.
9588             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9589             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9590             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9591             // The volatile. prefix is also allowed with ldsfld and stsfld
9592             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9593     {
9594         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9595     }
9596 }
9597
9598 /*****************************************************************************/
9599
9600 #ifdef DEBUG
9601
9602 #undef RETURN // undef contracts RETURN macro
9603
9604 enum controlFlow_t
9605 {
9606     NEXT,
9607     CALL,
9608     RETURN,
9609     THROW,
9610     BRANCH,
9611     COND_BRANCH,
9612     BREAK,
9613     PHI,
9614     META,
9615 };
9616
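// Debug-only table giving the control flow kind of each IL opcode, generated from the "flow" column
// of opcode.def via the OPDEF macro below.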
9617 const static controlFlow_t controlFlow[] = {
9618 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9619 #include "opcode.def"
9620 #undef OPDEF
9621 };
9622
9623 #endif // DEBUG
9624
9625 /*****************************************************************************
9626  *  Determine the result type of an arithmetic operation
9627  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
9628  */
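// A rough summary of the byref cases handled below:
//   byref - byref          => native int
//   byref +/- [native] int => byref
//   [native] int - byref   => native int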
9629 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9630 {
9631     var_types  type = TYP_UNDEF;
9632     GenTreePtr op1 = *pOp1, op2 = *pOp2;
9633
9634     // Arithmetic operations are generally only allowed with
9635     // primitive types, but certain operations are allowed
9636     // with byrefs
9637
9638     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9639     {
9640         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9641         {
9642             // byref1-byref2 => gives a native int
9643             type = TYP_I_IMPL;
9644         }
9645         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9646         {
9647             // [native] int - byref => gives a native int
9648
9649             //
9650             // The reason is that it is possible, in managed C++,
9651             // to have a tree like this:
9652             //
9653             //              -
9654             //             / \
9655             //            /   \
9656             //           /     \
9657             //          /       \
9658             // const(h) int     addr byref
9659             //
9660             // <BUGNUM> VSW 318822 </BUGNUM>
9661             //
9662             // So here we decide to make the resulting type to be a native int.
9663             CLANG_FORMAT_COMMENT_ANCHOR;
9664
9665 #ifdef _TARGET_64BIT_
9666             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9667             {
9668                 // insert an explicit upcast
9669                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9670             }
9671 #endif // _TARGET_64BIT_
9672
9673             type = TYP_I_IMPL;
9674         }
9675         else
9676         {
9677             // byref - [native] int => gives a byref
9678             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9679
9680 #ifdef _TARGET_64BIT_
9681             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9682             {
9683                 // insert an explicit upcast
9684                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9685             }
9686 #endif // _TARGET_64BIT_
9687
9688             type = TYP_BYREF;
9689         }
9690     }
9691     else if ((oper == GT_ADD) &&
9692              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9693     {
9694         // byref + [native] int => gives a byref
9695         // (or)
9696         // [native] int + byref => gives a byref
9697
9698         // only one can be a byref : byref op byref not allowed
9699         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9700         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9701
9702 #ifdef _TARGET_64BIT_
9703         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9704         {
9705             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9706             {
9707                 // insert an explicit upcast
9708                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9709             }
9710         }
9711         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9712         {
9713             // insert an explicit upcast
9714             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9715         }
9716 #endif // _TARGET_64BIT_
9717
9718         type = TYP_BYREF;
9719     }
9720 #ifdef _TARGET_64BIT_
9721     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9722     {
9723         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9724
9725         // int + long => gives long
9726         // long + int => gives long
9727         // We get here because in the IL this operand isn't an Int64, it's just a native int (IntPtr)
9728
9729         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9730         {
9731             // insert an explicit upcast
9732             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9733         }
9734         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9735         {
9736             // insert an explicit upcast
9737             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9738         }
9739
9740         type = TYP_I_IMPL;
9741     }
9742 #else  // 32-bit TARGET
9743     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9744     {
9745         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9746
9747         // int + long => gives long
9748         // long + int => gives long
9749
9750         type = TYP_LONG;
9751     }
9752 #endif // _TARGET_64BIT_
9753     else
9754     {
9755         // int + int => gives an int
9756         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9757
9758         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9759                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9760
9761         type = genActualType(op1->gtType);
9762
9763 #if FEATURE_X87_DOUBLES
9764
9765         // For x87, since we only have 1 size of registers, prefer double
9766         // For everybody else, be more precise
9767         if (type == TYP_FLOAT)
9768             type = TYP_DOUBLE;
9769
9770 #else // !FEATURE_X87_DOUBLES
9771
9772         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9773         // Otherwise, turn floats into doubles
9774         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9775         {
9776             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9777             type = TYP_DOUBLE;
9778         }
9779
9780 #endif // FEATURE_X87_DOUBLES
9781     }
9782
9783 #if FEATURE_X87_DOUBLES
9784     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9785 #else  // FEATURE_X87_DOUBLES
9786     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9787 #endif // FEATURE_X87_DOUBLES
9788
9789     return type;
9790 }
9791
9792 //------------------------------------------------------------------------
9793 // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
9794 //
9795 // Arguments:
9796 //   op1 - value to cast
9797 //   pResolvedToken - resolved token for type to cast to
9798 //   isCastClass - true if this is a castclass, false if isinst
9799 //
9800 // Return Value:
9801 //   tree representing optimized cast, or null if no optimization possible
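9802 //
9803 // Notes:
//   For example, if the runtime reports that the cast must succeed, the cast folds to op1 itself;
//   if it must fail, the source type is exact, and this is an isinst, it folds to a null constant
//   (and an upstream box feeding op1 can be removed as well). Otherwise a runtime test remains necessary.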
9802
9803 GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
9804 {
9805     assert(op1->TypeGet() == TYP_REF);
9806
9807     // Don't optimize for minopts or debug codegen.
9808     if (opts.compDbgCode || opts.MinOpts())
9809     {
9810         return nullptr;
9811     }
9812
9813     // See what we know about the type of the object being cast.
9814     bool                 isExact   = false;
9815     bool                 isNonNull = false;
9816     CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
9817     GenTree*             optResult = nullptr;
9818
9819     if (fromClass != nullptr)
9820     {
9821         CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
9822         JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
9823                 isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
9824                 info.compCompHnd->getClassName(toClass));
9825
9826         // Perhaps we know if the cast will succeed or fail.
9827         TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
9828
9829         if (castResult == TypeCompareState::Must)
9830         {
9831             // Cast will succeed, result is simply op1.
9832             JITDUMP("Cast will succeed, optimizing to simply return input\n");
9833             return op1;
9834         }
9835         else if (castResult == TypeCompareState::MustNot)
9836         {
9837             // See if we can sharpen exactness by looking for final classes
9838             if (!isExact)
9839             {
9840                 DWORD flags     = info.compCompHnd->getClassAttribs(fromClass);
9841                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL |
9842                                   CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
9843                 isExact = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9844             }
9845
9846             // Cast to exact type will fail. Handle case where we have
9847             // an exact type (that is, fromClass is not a subtype)
9848             // and we're not going to throw on failure.
9849             if (isExact && !isCastClass)
9850             {
9851                 JITDUMP("Cast will fail, optimizing to return null\n");
9852                 GenTree* result = gtNewIconNode(0, TYP_REF);
9853
9854                 // If the cast was fed by a box, we can remove that too.
9855                 if (op1->IsBoxedValue())
9856                 {
9857                     JITDUMP("Also removing upstream box\n");
9858                     gtTryRemoveBoxUpstreamEffects(op1);
9859                 }
9860
9861                 return result;
9862             }
9863             else if (isExact)
9864             {
9865                 JITDUMP("Not optimizing failing castclass (yet)\n");
9866             }
9867             else
9868             {
9869                 JITDUMP("Can't optimize since fromClass is inexact\n");
9870             }
9871         }
9872         else
9873         {
9874             JITDUMP("Result of cast unknown, must generate runtime test\n");
9875         }
9876     }
9877     else
9878     {
9879         JITDUMP("\nCan't optimize since fromClass is unknown\n");
9880     }
9881
9882     return nullptr;
9883 }
9884
9885 //------------------------------------------------------------------------
9886 // impCastClassOrIsInstToTree: build and import castclass/isinst
9887 //
9888 // Arguments:
9889 //   op1 - value to cast
9890 //   op2 - type handle for type to cast to
9891 //   pResolvedToken - resolved token from the cast operation
9892 //   isCastClass - true if this is castclass, false means isinst
9893 //
9894 // Return Value:
9895 //   Tree representing the cast
9896 //
9897 // Notes:
9898 //   May expand into a series of runtime checks or a helper call.
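//
//   When expanded inline, the result is roughly (a sketch of the qmark trees built below):
//
//     tmp = op1;
//     result = (tmp == null) ? tmp
//            : (methodTable(tmp) == op2) ? tmp
//            : (isCastClass ? CORINFO_HELP_CHKCASTCLASS_SPECIAL(op2, tmp) : null);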
9899
9900 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr              op1,
9901                                                 GenTreePtr              op2,
9902                                                 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9903                                                 bool                    isCastClass)
9904 {
9905     assert(op1->TypeGet() == TYP_REF);
9906
9907     // Optimistically assume the jit should expand this as an inline test
9908     bool shouldExpandInline = true;
9909
9910     // Profitability check.
9911     //
9912     // Don't bother with inline expansion when jit is trying to
9913     // generate code quickly, or the cast is in code that won't run very
9914     // often, or the method already is pretty big.
9915     if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
9916     {
9917         // not worth the code expansion if jitting fast or in a rarely run block
9918         shouldExpandInline = false;
9919     }
9920     else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9921     {
9922         // not worth creating an untracked local variable
9923         shouldExpandInline = false;
9924     }
9925
9926     // Pessimistically assume the jit cannot expand this as an inline test
9927     bool                  canExpandInline = false;
9928     const CorInfoHelpFunc helper          = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9929
9930     // Legality check.
9931     //
9932     // Not all castclass/isinst operations can be inline expanded.
9933     // Check legality only if an inline expansion is desirable.
9934     if (shouldExpandInline)
9935     {
9936         if (isCastClass)
9937         {
9938             // Jit can only inline expand the normal CHKCASTCLASS helper.
9939             canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9940         }
9941         else
9942         {
9943             if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9944             {
9945                 // Check the class attributes.
9946                 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9947
9948                 // If the class is final and is not marshal byref or
9949                 // contextful, the jit can expand the IsInst check inline.
9950                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
9951                 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9952             }
9953         }
9954     }
9955
9956     const bool expandInline = canExpandInline && shouldExpandInline;
9957
9958     if (!expandInline)
9959     {
9960         JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
9961                 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
9962
9963         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9964         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9965         //
9966         op2->gtFlags |= GTF_DONT_CSE;
9967
9968         return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
9969     }
9970
9971     JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
9972
9973     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9974
9975     GenTreePtr temp;
9976     GenTreePtr condMT;
9977     //
9978     // expand the methodtable match:
9979     //
9980     //  condMT ==>   GT_NE
9981     //               /    \
9982     //           GT_IND   op2 (typically CNS_INT)
9983     //              |
9984     //           op1Copy
9985     //
9986
9987     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9988     //
9989     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9990     //
9991     // op1 is now known to be a non-complex tree
9992     // thus we can use gtClone(op1) from now on
9993     //
9994
9995     GenTreePtr op2Var = op2;
9996     if (isCastClass)
9997     {
9998         op2Var                                                  = fgInsertCommaFormTemp(&op2);
9999         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
10000     }
10001     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
10002     temp->gtFlags |= GTF_EXCEPT;
10003     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
10004
10005     GenTreePtr condNull;
10006     //
10007     // expand the null check:
10008     //
10009     //  condNull ==>   GT_EQ
10010     //                 /    \
10011     //             op1Copy CNS_INT
10012     //                      null
10013     //
10014     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
10015
10016     //
10017     // expand the true and false trees for the condMT
10018     //
10019     GenTreePtr condFalse = gtClone(op1);
10020     GenTreePtr condTrue;
10021     if (isCastClass)
10022     {
10023         //
10024         // use the special helper that skips the cases checked by our inlined cast
10025         //
10026         const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
10027
10028         condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
10029     }
10030     else
10031     {
10032         condTrue = gtNewIconNode(0, TYP_REF);
10033     }
10034
10035 #define USE_QMARK_TREES
10036
10037 #ifdef USE_QMARK_TREES
10038     GenTreePtr qmarkMT;
10039     //
10040     // Generate first QMARK - COLON tree
10041     //
10042     //  qmarkMT ==>   GT_QMARK
10043     //                 /     \
10044     //            condMT   GT_COLON
10045     //                      /     \
10046     //                condFalse  condTrue
10047     //
10048     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
10049     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
10050     condMT->gtFlags |= GTF_RELOP_QMARK;
10051
10052     GenTreePtr qmarkNull;
10053     //
10054     // Generate second QMARK - COLON tree
10055     //
10056     //  qmarkNull ==>  GT_QMARK
10057     //                 /     \
10058     //           condNull  GT_COLON
10059     //                      /     \
10060     //                qmarkMT   op1Copy
10061     //
10062     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
10063     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
10064     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
10065     condNull->gtFlags |= GTF_RELOP_QMARK;
10066
10067     // Make QMark node a top level node by spilling it.
10068     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
10069     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
10070
10071     // TODO: Is it possible op1 has a better type?
10072     lvaSetClass(tmp, pResolvedToken->hClass);
10073     return gtNewLclvNode(tmp, TYP_REF);
10074 #endif
10075 }
10076
10077 #ifndef DEBUG
10078 #define assertImp(cond) ((void)0)
10079 #else
10080 #define assertImp(cond)                                                                                                \
10081     do                                                                                                                 \
10082     {                                                                                                                  \
10083         if (!(cond))                                                                                                   \
10084         {                                                                                                              \
10085             const int cchAssertImpBuf = 600;                                                                           \
10086             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
10087             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
10088                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
10089                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
10090                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
10091             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
10092         }                                                                                                              \
10093     } while (0)
10094 #endif // DEBUG
10095
10096 #ifdef _PREFAST_
10097 #pragma warning(push)
10098 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
10099 #endif
10100 /*****************************************************************************
10101  *  Import the instr for the given basic block
10102  */
10103 void Compiler::impImportBlockCode(BasicBlock* block)
10104 {
10105 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
10106
10107 #ifdef DEBUG
10108
10109     if (verbose)
10110     {
10111         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
10112     }
10113 #endif
10114
10115     unsigned  nxtStmtIndex = impInitBlockLineInfo();
10116     IL_OFFSET nxtStmtOffs;
10117
10118     GenTreePtr                   arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
10119     CorInfoHelpFunc              helper;
10120     CorInfoIsAccessAllowedResult accessAllowedResult;
10121     CORINFO_HELPER_DESC          calloutHelper;
10122     const BYTE*                  lastLoadToken = nullptr;
10123
10124     // reject cyclic constraints
10125     if (tiVerificationNeeded)
10126     {
10127         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
10128         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
10129     }
10130
10131     /* Get the tree list started */
10132
10133     impBeginTreeList();
10134
10135     /* Walk the opcodes that comprise the basic block */
10136
10137     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
10138     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
10139
10140     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
10141     IL_OFFSET lastSpillOffs = opcodeOffs;
10142
10143     signed jmpDist;
10144
10145     /* remember the start of the delegate creation sequence (used for verification) */
10146     const BYTE* delegateCreateStart = nullptr;
10147
10148     int  prefixFlags = 0;
10149     bool explicitTailCall, constraintCall, readonlyCall;
10150
10151     typeInfo tiRetVal;
10152
10153     unsigned numArgs = info.compArgsCount;
10154
10155     /* Now process all the opcodes in the block */
10156
10157     var_types callTyp    = TYP_COUNT;
10158     OPCODE    prevOpcode = CEE_ILLEGAL;
10159
10160     if (block->bbCatchTyp)
10161     {
10162         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
10163         {
10164             impCurStmtOffsSet(block->bbCodeOffs);
10165         }
10166
10167         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
10168         // to a temp. This is a trade-off for code simplicity
10169         impSpillSpecialSideEff();
10170     }
10171
10172     while (codeAddr < codeEndp)
10173     {
10174         bool                   usingReadyToRunHelper = false;
10175         CORINFO_RESOLVED_TOKEN resolvedToken;
10176         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
10177         CORINFO_CALL_INFO      callInfo;
10178         CORINFO_FIELD_INFO     fieldInfo;
10179
10180         tiRetVal = typeInfo(); // Default type info
10181
10182         //---------------------------------------------------------------------
10183
10184         /* We need to restrict the max tree depth as many of the Compiler
10185            functions are recursive. We do this by spilling the stack */
10186
10187         if (verCurrentState.esStackDepth)
10188         {
10189             /* Has it been a while since we last saw a non-empty stack (which
10190                guarantees that the tree depth isn't accumulating)? */
10191
10192             if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
10193             {
10194                 impSpillStackEnsure();
10195                 lastSpillOffs = opcodeOffs;
10196             }
10197         }
10198         else
10199         {
10200             lastSpillOffs   = opcodeOffs;
10201             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
10202         }
10203
10204         /* Compute the current instr offset */
10205
10206         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10207
10208 #ifndef DEBUG
10209         if (opts.compDbgInfo)
10210 #endif
10211         {
10212             if (!compIsForInlining())
10213             {
10214                 nxtStmtOffs =
10215                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
10216
10217                 /* Have we reached the next stmt boundary ? */
10218
10219                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
10220                 {
10221                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
10222
10223                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
10224                     {
10225                         /* We need to provide accurate IP-mapping at this point.
10226                            So spill anything on the stack so that it will form
10227                            gtStmts with the correct stmt offset noted */
10228
10229                         impSpillStackEnsure(true);
10230                     }
10231
10232                     // Has impCurStmtOffs been reported in any tree?
10233
10234                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
10235                     {
10236                         GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
10237                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10238
10239                         assert(impCurStmtOffs == BAD_IL_OFFSET);
10240                     }
10241
10242                     if (impCurStmtOffs == BAD_IL_OFFSET)
10243                     {
10244                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
10245                            If opcodeOffs has gone past nxtStmtIndex, catch up */
10246
10247                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
10248                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
10249                         {
10250                             nxtStmtIndex++;
10251                         }
10252
10253                         /* Go to the new stmt */
10254
10255                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
10256
10257                         /* Update the stmt boundary index */
10258
10259                         nxtStmtIndex++;
10260                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
10261
10262                         /* Are there any more line# entries after this one? */
10263
10264                         if (nxtStmtIndex < info.compStmtOffsetsCount)
10265                         {
10266                             /* Remember where the next line# starts */
10267
10268                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
10269                         }
10270                         else
10271                         {
10272                             /* No more line# entries */
10273
10274                             nxtStmtOffs = BAD_IL_OFFSET;
10275                         }
10276                     }
10277                 }
10278                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
10279                          (verCurrentState.esStackDepth == 0))
10280                 {
10281                     /* At stack-empty locations, we have already added the tree to
10282                        the stmt list with the last offset. We just need to update
10283                        impCurStmtOffs
10284                      */
10285
10286                     impCurStmtOffsSet(opcodeOffs);
10287                 }
10288                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
10289                          impOpcodeIsCallSiteBoundary(prevOpcode))
10290                 {
10291                     /* Make sure we have a type cached */
10292                     assert(callTyp != TYP_COUNT);
10293
10294                     if (callTyp == TYP_VOID)
10295                     {
10296                         impCurStmtOffsSet(opcodeOffs);
10297                     }
10298                     else if (opts.compDbgCode)
10299                     {
10300                         impSpillStackEnsure(true);
10301                         impCurStmtOffsSet(opcodeOffs);
10302                     }
10303                 }
10304                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
10305                 {
10306                     if (opts.compDbgCode)
10307                     {
10308                         impSpillStackEnsure(true);
10309                     }
10310
10311                     impCurStmtOffsSet(opcodeOffs);
10312                 }
10313
10314                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
10315                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
10316             }
10317         }
10318
10319         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
10320         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
10321         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
10322
10323         var_types       lclTyp, ovflType = TYP_UNKNOWN;
10324         GenTreePtr      op1           = DUMMY_INIT(NULL);
10325         GenTreePtr      op2           = DUMMY_INIT(NULL);
10326         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
10327         GenTreePtr      newObjThisPtr = DUMMY_INIT(NULL);
10328         bool            uns           = DUMMY_INIT(false);
10329         bool            isLocal       = false;
10330
10331         /* Get the next opcode and the size of its parameters */
10332
10333         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10334         codeAddr += sizeof(__int8);
10335
10336 #ifdef DEBUG
10337         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10338         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
10339 #endif
10340
10341     DECODE_OPCODE:
10342
10343         // Return if any previous code has caused inline to fail.
10344         if (compDonotInline())
10345         {
10346             return;
10347         }
10348
10349         /* Get the size of additional parameters */
10350
10351         signed int sz = opcodeSizes[opcode];
10352
10353 #ifdef DEBUG
10354         clsHnd  = NO_CLASS_HANDLE;
10355         lclTyp  = TYP_COUNT;
10356         callTyp = TYP_COUNT;
10357
10358         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10359         impCurOpcName = opcodeNames[opcode];
10360
10361         if (verbose && (opcode != CEE_PREFIX1))
10362         {
10363             printf("%s", impCurOpcName);
10364         }
10365
10366         /* Use assertImp() to display the opcode */
10367
10368         op1 = op2 = nullptr;
10369 #endif
10370
10371         /* See what kind of an opcode we have, then */
10372
10373         unsigned mflags   = 0;
10374         unsigned clsFlags = 0;
10375
10376         switch (opcode)
10377         {
10378             unsigned  lclNum;
10379             var_types type;
10380
10381             GenTreePtr op3;
10382             genTreeOps oper;
10383             unsigned   size;
10384
10385             int val;
10386
10387             CORINFO_SIG_INFO     sig;
10388             IL_OFFSET            jmpAddr;
10389             bool                 ovfl, unordered, callNode;
10390             bool                 ldstruct;
10391             CORINFO_CLASS_HANDLE tokenType;
10392
10393             union {
10394                 int     intVal;
10395                 float   fltVal;
10396                 __int64 lngVal;
10397                 double  dblVal;
10398             } cval;
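            // cval holds the decoded immediate operand of the current opcode (e.g., for the ldc.* cases below).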
10399
10400             case CEE_PREFIX1:
10401                 opcode     = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10402                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10403                 codeAddr += sizeof(__int8);
10404                 goto DECODE_OPCODE;
10405
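            // SPILL_APPEND appends 'op1' to the statement list using CHECK_SPILL_ALL, while APPEND uses
            // CHECK_SPILL_NONE; both then fall through to DONE_APPEND.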
10406             SPILL_APPEND:
10407
10408                 // We need to call impSpillLclRefs() for a struct type lclVar.
10409                 // This is done for non-block assignments in the handling of stloc.
10410                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10411                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10412                 {
10413                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10414                 }
10415
10416                 /* Append 'op1' to the list of statements */
10417                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10418                 goto DONE_APPEND;
10419
10420             APPEND:
10421
10422                 /* Append 'op1' to the list of statements */
10423
10424                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10425                 goto DONE_APPEND;
10426
10427             DONE_APPEND:
10428
10429 #ifdef DEBUG
10430                 // Remember at which BC offset the tree was finished
10431                 impNoteLastILoffs();
10432 #endif
10433                 break;
10434
10435             case CEE_LDNULL:
10436                 impPushNullObjRefOnStack();
10437                 break;
10438
10439             case CEE_LDC_I4_M1:
10440             case CEE_LDC_I4_0:
10441             case CEE_LDC_I4_1:
10442             case CEE_LDC_I4_2:
10443             case CEE_LDC_I4_3:
10444             case CEE_LDC_I4_4:
10445             case CEE_LDC_I4_5:
10446             case CEE_LDC_I4_6:
10447             case CEE_LDC_I4_7:
10448             case CEE_LDC_I4_8:
10449                 cval.intVal = (opcode - CEE_LDC_I4_0);
10450                 assert(-1 <= cval.intVal && cval.intVal <= 8);
10451                 goto PUSH_I4CON;
10452
10453             case CEE_LDC_I4_S:
10454                 cval.intVal = getI1LittleEndian(codeAddr);
10455                 goto PUSH_I4CON;
10456             case CEE_LDC_I4:
10457                 cval.intVal = getI4LittleEndian(codeAddr);
10458                 goto PUSH_I4CON;
10459             PUSH_I4CON:
10460                 JITDUMP(" %d", cval.intVal);
10461                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10462                 break;
10463
10464             case CEE_LDC_I8:
10465                 cval.lngVal = getI8LittleEndian(codeAddr);
10466                 JITDUMP(" 0x%016llx", cval.lngVal);
10467                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10468                 break;
10469
10470             case CEE_LDC_R8:
10471                 cval.dblVal = getR8LittleEndian(codeAddr);
10472                 JITDUMP(" %#.17g", cval.dblVal);
10473                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10474                 break;
10475
10476             case CEE_LDC_R4:
10477                 cval.dblVal = getR4LittleEndian(codeAddr);
10478                 JITDUMP(" %#.17g", cval.dblVal);
10479                 {
10480                     GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
10481 #if !FEATURE_X87_DOUBLES
10482                     // X87 stack doesn't differentiate between float/double
10483                     // so R4 is treated as R8, but everybody else does
10484                     cnsOp->gtType = TYP_FLOAT;
10485 #endif // FEATURE_X87_DOUBLES
10486                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10487                 }
10488                 break;
10489
10490             case CEE_LDSTR:
10491
10492                 if (compIsForInlining())
10493                 {
10494                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10495                     {
10496                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
10497                         return;
10498                     }
10499                 }
10500
10501                 val = getU4LittleEndian(codeAddr);
10502                 JITDUMP(" %08X", val);
10503                 if (tiVerificationNeeded)
10504                 {
10505                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
10506                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
10507                 }
10508                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
10509
10510                 break;
10511
10512             case CEE_LDARG:
10513                 lclNum = getU2LittleEndian(codeAddr);
10514                 JITDUMP(" %u", lclNum);
10515                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10516                 break;
10517
10518             case CEE_LDARG_S:
10519                 lclNum = getU1LittleEndian(codeAddr);
10520                 JITDUMP(" %u", lclNum);
10521                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10522                 break;
10523
10524             case CEE_LDARG_0:
10525             case CEE_LDARG_1:
10526             case CEE_LDARG_2:
10527             case CEE_LDARG_3:
10528                 lclNum = (opcode - CEE_LDARG_0);
10529                 assert(lclNum >= 0 && lclNum < 4);
10530                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10531                 break;
10532
10533             case CEE_LDLOC:
10534                 lclNum = getU2LittleEndian(codeAddr);
10535                 JITDUMP(" %u", lclNum);
10536                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10537                 break;
10538
10539             case CEE_LDLOC_S:
10540                 lclNum = getU1LittleEndian(codeAddr);
10541                 JITDUMP(" %u", lclNum);
10542                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10543                 break;
10544
10545             case CEE_LDLOC_0:
10546             case CEE_LDLOC_1:
10547             case CEE_LDLOC_2:
10548             case CEE_LDLOC_3:
10549                 lclNum = (opcode - CEE_LDLOC_0);
10550                 assert(lclNum >= 0 && lclNum < 4);
10551                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10552                 break;
10553
10554             case CEE_STARG:
10555                 lclNum = getU2LittleEndian(codeAddr);
10556                 goto STARG;
10557
10558             case CEE_STARG_S:
10559                 lclNum = getU1LittleEndian(codeAddr);
10560             STARG:
10561                 JITDUMP(" %u", lclNum);
10562
10563                 if (tiVerificationNeeded)
10564                 {
10565                     Verify(lclNum < info.compILargsCount, "bad arg num");
10566                 }
10567
10568                 if (compIsForInlining())
10569                 {
10570                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10571                     noway_assert(op1->gtOper == GT_LCL_VAR);
10572                     lclNum = op1->AsLclVar()->gtLclNum;
10573
10574                     goto VAR_ST_VALID;
10575                 }
10576
10577                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10578                 assertImp(lclNum < numArgs);
10579
10580                 if (lclNum == info.compThisArg)
10581                 {
10582                     lclNum = lvaArg0Var;
10583                 }
10584
10585                 // We should have seen this arg write in the prescan
10586                 assert(lvaTable[lclNum].lvHasILStoreOp);
10587
10588                 if (tiVerificationNeeded)
10589                 {
10590                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
10591                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
10592                            "type mismatch");
10593
10594                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10595                     {
10596                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
10597                     }
10598                 }
10599
10600                 goto VAR_ST;
10601
10602             case CEE_STLOC:
10603                 lclNum  = getU2LittleEndian(codeAddr);
10604                 isLocal = true;
10605                 JITDUMP(" %u", lclNum);
10606                 goto LOC_ST;
10607
10608             case CEE_STLOC_S:
10609                 lclNum  = getU1LittleEndian(codeAddr);
10610                 isLocal = true;
10611                 JITDUMP(" %u", lclNum);
10612                 goto LOC_ST;
10613
10614             case CEE_STLOC_0:
10615             case CEE_STLOC_1:
10616             case CEE_STLOC_2:
10617             case CEE_STLOC_3:
10618                 isLocal = true;
10619                 lclNum  = (opcode - CEE_STLOC_0);
10620                 assert(lclNum >= 0 && lclNum < 4);
10621
10622             LOC_ST:
10623                 if (tiVerificationNeeded)
10624                 {
10625                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10626                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
10627                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
10628                            "type mismatch");
10629                 }
10630
10631                 if (compIsForInlining())
10632                 {
10633                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10634
10635                     /* Have we allocated a temp for this local? */
10636
10637                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
10638
10639                     goto _PopValue;
10640                 }
10641
10642                 lclNum += numArgs;
10643
10644             VAR_ST:
10645
10646                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10647                 {
10648                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10649                     BADCODE("Bad IL");
10650                 }
10651
10652             VAR_ST_VALID:
10653
10654                 /* if it is a struct assignment, make certain we don't overflow the buffer */
10655                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10656
10657                 if (lvaTable[lclNum].lvNormalizeOnLoad())
10658                 {
10659                     lclTyp = lvaGetRealType(lclNum);
10660                 }
10661                 else
10662                 {
10663                     lclTyp = lvaGetActualType(lclNum);
10664                 }
10665
10666             _PopValue:
10667                 /* Pop the value being assigned */
10668
10669                 {
10670                     StackEntry se = impPopStack();
10671                     clsHnd        = se.seTypeInfo.GetClassHandle();
10672                     op1           = se.val;
10673                     tiRetVal      = se.seTypeInfo;
10674                 }
10675
10676 #ifdef FEATURE_SIMD
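                      // If the value on the stack is still typed as a bare TYP_STRUCT but the local
                      // is a SIMD type, retype the value so the store below is treated as a SIMD
                      // assignment.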
10677                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10678                 {
10679                     assert(op1->TypeGet() == TYP_STRUCT);
10680                     op1->gtType = lclTyp;
10681                 }
10682 #endif // FEATURE_SIMD
10683
10684                 op1 = impImplicitIorI4Cast(op1, lclTyp);
10685
10686 #ifdef _TARGET_64BIT_
10687                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10688                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10689                 {
10690                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10691                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
10692                 }
10693 #endif // _TARGET_64BIT_
10694
10695                 // We had better assign it a value of the correct type
10696                 assertImp(
10697                     genActualType(lclTyp) == genActualType(op1->gtType) ||
10698                     (genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr()) ||
10699                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10700                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10701                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10702                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10703
10704                 /* If op1 is "&var" then its type is the transient "*" and it can
10705                    be used either as TYP_BYREF or TYP_I_IMPL */
10706
10707                 if (op1->IsVarAddr())
10708                 {
10709                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10710
10711                     /* When "&var" is created, we assume it is a byref. If it is
10712                        being assigned to a TYP_I_IMPL var, change the type to
10713                        prevent unnecessary GC info */
10714
10715                     if (genActualType(lclTyp) == TYP_I_IMPL)
10716                     {
10717                         op1->gtType = TYP_I_IMPL;
10718                     }
10719                 }
10720
10721                 // If this is a local and the local is a ref type, see
10722                 // if we can improve type information based on the
10723                 // value being assigned.
10724                 if (isLocal && (lclTyp == TYP_REF))
10725                 {
10726                     // We should have seen a stloc in our IL prescan.
10727                     assert(lvaTable[lclNum].lvHasILStoreOp);
10728
10729                     const bool isSingleILStoreLocal =
10730                         !lvaTable[lclNum].lvHasMultipleILStoreOp && !lvaTable[lclNum].lvHasLdAddrOp;
10731
10732                     // Conservative check that there is just one
10733                     // definition that reaches this store.
10734                     const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
10735
10736                     if (isSingleILStoreLocal && hasSingleReachingDef)
10737                     {
10738                         lvaUpdateClass(lclNum, op1, clsHnd);
10739                     }
10740                 }
10741
10742                 /* Filter out simple assignments to itself */
10743
10744                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10745                 {
10746                     if (opts.compDbgCode)
10747                     {
10748                         op1 = gtNewNothingNode();
10749                         goto SPILL_APPEND;
10750                     }
10751                     else
10752                     {
10753                         break;
10754                     }
10755                 }
10756
10757                 /* Create the assignment node */
10758
10759                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10760
10761                 /* If the local is aliased or pinned, we need to spill calls and
10762                    indirections from the stack. */
10763
10764                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
10765                     (verCurrentState.esStackDepth > 0))
10766                 {
10767                     impSpillSideEffects(false,
10768                                         (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
10769                 }
10770
10771                 /* Spill any refs to the local from the stack */
10772
10773                 impSpillLclRefs(lclNum);
10774
10775 #if !FEATURE_X87_DOUBLES
10776                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10777                 // We insert a cast to the dest 'op2' type
10778                 //
10779                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10780                     varTypeIsFloating(op2->gtType))
10781                 {
10782                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10783                 }
10784 #endif // !FEATURE_X87_DOUBLES
10785
10786                 if (varTypeIsStruct(lclTyp))
10787                 {
10788                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10789                 }
10790                 else
10791                 {
10792                     // The code generator generates GC tracking information
10793                     // based on the RHS of the assignment.  Later the LHS (which is
10794                     // a BYREF) gets used and the emitter checks that that variable
10795                     // is being tracked.  It is not (since the RHS was an int and did
10796                     // not need tracking).  To keep this assert happy, we change the RHS
10797                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10798                     {
10799                         op1->gtType = TYP_BYREF;
10800                     }
10801                     op1 = gtNewAssignNode(op2, op1);
10802                 }
10803
10804                 goto SPILL_APPEND;
10805
10806             case CEE_LDLOCA:
10807                 lclNum = getU2LittleEndian(codeAddr);
10808                 goto LDLOCA;
10809
10810             case CEE_LDLOCA_S:
10811                 lclNum = getU1LittleEndian(codeAddr);
10812             LDLOCA:
10813                 JITDUMP(" %u", lclNum);
10814                 if (tiVerificationNeeded)
10815                 {
10816                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10817                     Verify(info.compInitMem, "initLocals not set");
10818                 }
10819
10820                 if (compIsForInlining())
10821                 {
10822                     // Get the local type
10823                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10824
10825                     /* Have we allocated a temp for this local? */
10826
10827                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10828
10829                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10830
10831                     goto _PUSH_ADRVAR;
10832                 }
10833
10834                 lclNum += numArgs;
10835                 assertImp(lclNum < info.compLocalsCount);
10836                 goto ADRVAR;
10837
10838             case CEE_LDARGA:
10839                 lclNum = getU2LittleEndian(codeAddr);
10840                 goto LDARGA;
10841
10842             case CEE_LDARGA_S:
10843                 lclNum = getU1LittleEndian(codeAddr);
10844             LDARGA:
10845                 JITDUMP(" %u", lclNum);
10846                 Verify(lclNum < info.compILargsCount, "bad arg num");
10847
10848                 if (compIsForInlining())
10849                 {
10850                     // In IL, LDARGA(_S) is typically used to load a byref managed pointer to a
10851                     // struct argument, followed by a ldfld to load the field.
10852
10853                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10854                     if (op1->gtOper != GT_LCL_VAR)
10855                     {
10856                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10857                         return;
10858                     }
10859
10860                     assert(op1->gtOper == GT_LCL_VAR);
10861
10862                     goto _PUSH_ADRVAR;
10863                 }
10864
10865                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10866                 assertImp(lclNum < numArgs);
10867
10868                 if (lclNum == info.compThisArg)
10869                 {
10870                     lclNum = lvaArg0Var;
10871                 }
10872
10873                 goto ADRVAR;
10874
10875             ADRVAR:
10876
10877                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10878
10879             _PUSH_ADRVAR:
10880                 assert(op1->gtOper == GT_LCL_VAR);
10881
10882                 /* Note that this is supposed to create the transient type "*"
10883                    which may be used as a TYP_I_IMPL. However we catch places
10884                    where it is used as a TYP_I_IMPL and change the node if needed.
10885                    Thus we are pessimistic and may report byrefs in the GC info
10886                    where it was not absolutely needed, but it is safer this way.
10887                  */
10888                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10889
10890                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10891                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10892
10893                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10894                 if (tiVerificationNeeded)
10895                 {
10896                     // Don't allow taking address of uninit this ptr.
10897                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10898                     {
10899                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10900                     }
10901
10902                     if (!tiRetVal.IsByRef())
10903                     {
10904                         tiRetVal.MakeByRef();
10905                     }
10906                     else
10907                     {
10908                         Verify(false, "byref to byref");
10909                     }
10910                 }
10911
10912                 impPushOnStack(op1, tiRetVal);
10913                 break;
10914
10915             case CEE_ARGLIST:
10916
10917                 if (!info.compIsVarArgs)
10918                 {
10919                     BADCODE("arglist in non-vararg method");
10920                 }
10921
10922                 if (tiVerificationNeeded)
10923                 {
10924                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10925                 }
10926                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10927
10928                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10929                    adjusted the arg count because this is like fetching the last param */
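                      // The handle we push is just the address of that hidden varargs cookie
                      // argument; ArgIterator then uses it to walk the variable arguments.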
10930                 assertImp(0 < numArgs);
10931                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10932                 lclNum = lvaVarargsHandleArg;
10933                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10934                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10935                 impPushOnStack(op1, tiRetVal);
10936                 break;
10937
10938             case CEE_ENDFINALLY:
10939
10940                 if (compIsForInlining())
10941                 {
10942                     assert(!"Shouldn't have exception handlers in the inliner!");
10943                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10944                     return;
10945                 }
10946
10947                 if (verCurrentState.esStackDepth > 0)
10948                 {
10949                     impEvalSideEffects();
10950                 }
10951
10952                 if (info.compXcptnsCount == 0)
10953                 {
10954                     BADCODE("endfinally outside finally");
10955                 }
10956
10957                 assert(verCurrentState.esStackDepth == 0);
10958
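                      // 'endfinally' is modeled as a GT_RETFILT with no operand: a return from the
                      // finally region that produces no value ('endfilter' below reuses the same
                      // oper with the filter's result as its operand).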
10959                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10960                 goto APPEND;
10961
10962             case CEE_ENDFILTER:
10963
10964                 if (compIsForInlining())
10965                 {
10966                     assert(!"Shouldn't have exception handlers in the inliner!");
10967                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10968                     return;
10969                 }
10970
10971                 block->bbSetRunRarely(); // filters are rare
10972
10973                 if (info.compXcptnsCount == 0)
10974                 {
10975                     BADCODE("endfilter outside filter");
10976                 }
10977
10978                 if (tiVerificationNeeded)
10979                 {
10980                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10981                 }
10982
10983                 op1 = impPopStack().val;
10984                 assertImp(op1->gtType == TYP_INT);
10985                 if (!bbInFilterILRange(block))
10986                 {
10987                     BADCODE("EndFilter outside a filter handler");
10988                 }
10989
10990                 /* Mark current bb as end of filter */
10991
10992                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10993                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10994
10995                 /* Mark catch handler as successor */
10996
10997                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10998                 if (verCurrentState.esStackDepth != 0)
10999                 {
11000                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
11001                                                 DEBUGARG(__LINE__));
11002                 }
11003                 goto APPEND;
11004
11005             case CEE_RET:
11006                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
11007             RET:
11008                 if (!impReturnInstruction(block, prefixFlags, opcode))
11009                 {
11010                     return; // abort
11011                 }
11012                 else
11013                 {
11014                     break;
11015                 }
11016
11017             case CEE_JMP:
11018
11019                 assert(!compIsForInlining());
11020
11021                 if (tiVerificationNeeded)
11022                 {
11023                     Verify(false, "Invalid opcode: CEE_JMP");
11024                 }
11025
11026                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
11027                 {
11028                     /* CEE_JMP does not make sense in some "protected" regions. */
11029
11030                     BADCODE("Jmp not allowed in protected region");
11031                 }
11032
11033                 if (verCurrentState.esStackDepth != 0)
11034                 {
11035                     BADCODE("Stack must be empty after CEE_JMPs");
11036                 }
11037
11038                 _impResolveToken(CORINFO_TOKENKIND_Method);
11039
11040                 JITDUMP(" %08X", resolvedToken.token);
11041
11042                 /* The signature of the target has to be identical to ours.
11043                    At least check that argCnt and returnType match */
11044
11045                 eeGetMethodSig(resolvedToken.hMethod, &sig);
11046                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
11047                     sig.retType != info.compMethodInfo->args.retType ||
11048                     sig.callConv != info.compMethodInfo->args.callConv)
11049                 {
11050                     BADCODE("Incompatible target for CEE_JMPs");
11051                 }
11052
11053                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
11054
11055                 /* Mark the basic block as being a JUMP instead of RETURN */
11056
11057                 block->bbFlags |= BBF_HAS_JMP;
11058
11059                 /* Set this flag to make sure register arguments have a location assigned
11060                  * even if we don't use them inside the method */
11061
11062                 compJmpOpUsed = true;
11063
11064                 fgNoStructPromotion = true;
11065
11066                 goto APPEND;
11067
11068             case CEE_LDELEMA:
11069                 assertImp(sz == sizeof(unsigned));
11070
11071                 _impResolveToken(CORINFO_TOKENKIND_Class);
11072
11073                 JITDUMP(" %08X", resolvedToken.token);
11074
11075                 ldelemClsHnd = resolvedToken.hClass;
11076
11077                 if (tiVerificationNeeded)
11078                 {
11079                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11080                     typeInfo tiIndex = impStackTop().seTypeInfo;
11081
11082                     // As per ECMA, the 'index' specified can be either int32 or native int.
11083                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11084
11085                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
11086                     Verify(tiArray.IsNullObjRef() ||
11087                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
11088                            "bad array");
11089
11090                     tiRetVal = arrayElemType;
11091                     tiRetVal.MakeByRef();
11092                     if (prefixFlags & PREFIX_READONLY)
11093                     {
11094                         tiRetVal.SetIsReadonlyByRef();
11095                     }
11096
11097                     // an array interior pointer is always in the heap
11098                     tiRetVal.SetIsPermanentHomeByRef();
11099                 }
11100
11101                 // If it's a value class array we just do a simple address-of
11102                 if (eeIsValueClass(ldelemClsHnd))
11103                 {
11104                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
11105                     if (cit == CORINFO_TYPE_UNDEF)
11106                     {
11107                         lclTyp = TYP_STRUCT;
11108                     }
11109                     else
11110                     {
11111                         lclTyp = JITtype2varType(cit);
11112                     }
11113                     goto ARR_LD_POST_VERIFY;
11114                 }
11115
11116                 // Similarly, if it's a readonly access, we can do a simple address-of
11117                 // without doing a runtime type-check
11118                 if (prefixFlags & PREFIX_READONLY)
11119                 {
11120                     lclTyp = TYP_REF;
11121                     goto ARR_LD_POST_VERIFY;
11122                 }
11123
11124                 // Otherwise we need the full helper function with run-time type check
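                      // Because arrays of reference types are covariant (an object[] reference may
                      // actually refer to a string[]), a writable interior pointer to a ref-type
                      // element must come from CORINFO_HELP_LDELEMA_REF, which checks the exact
                      // element type at run time and throws ArrayTypeMismatchException on mismatch.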
11125                 op1 = impTokenToHandle(&resolvedToken);
11126                 if (op1 == nullptr)
11127                 { // compDonotInline()
11128                     return;
11129                 }
11130
11131                 args = gtNewArgList(op1);                      // Type
11132                 args = gtNewListNode(impPopStack().val, args); // index
11133                 args = gtNewListNode(impPopStack().val, args); // array
11134                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
11135
11136                 impPushOnStack(op1, tiRetVal);
11137                 break;
11138
11139             // ldelem for reference and value types
11140             case CEE_LDELEM:
11141                 assertImp(sz == sizeof(unsigned));
11142
11143                 _impResolveToken(CORINFO_TOKENKIND_Class);
11144
11145                 JITDUMP(" %08X", resolvedToken.token);
11146
11147                 ldelemClsHnd = resolvedToken.hClass;
11148
11149                 if (tiVerificationNeeded)
11150                 {
11151                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11152                     typeInfo tiIndex = impStackTop().seTypeInfo;
11153
11154                     // As per ECMA, the 'index' specified can be either int32 or native int.
11155                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11156                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
11157
11158                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
11159                            "type of array incompatible with type operand");
11160                     tiRetVal.NormaliseForStack();
11161                 }
11162
11163                 // If it's a reference type or generic variable type
11164                 // then just generate code as though it's a ldelem.ref instruction
11165                 if (!eeIsValueClass(ldelemClsHnd))
11166                 {
11167                     lclTyp = TYP_REF;
11168                     opcode = CEE_LDELEM_REF;
11169                 }
11170                 else
11171                 {
11172                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
11173                     lclTyp             = JITtype2varType(jitTyp);
11174                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
11175                     tiRetVal.NormaliseForStack();
11176                 }
11177                 goto ARR_LD_POST_VERIFY;
11178
11179             case CEE_LDELEM_I1:
11180                 lclTyp = TYP_BYTE;
11181                 goto ARR_LD;
11182             case CEE_LDELEM_I2:
11183                 lclTyp = TYP_SHORT;
11184                 goto ARR_LD;
11185             case CEE_LDELEM_I:
11186                 lclTyp = TYP_I_IMPL;
11187                 goto ARR_LD;
11188
11189             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
11190             // and treating it as TYP_INT avoids other asserts.
11191             case CEE_LDELEM_U4:
11192                 lclTyp = TYP_INT;
11193                 goto ARR_LD;
11194
11195             case CEE_LDELEM_I4:
11196                 lclTyp = TYP_INT;
11197                 goto ARR_LD;
11198             case CEE_LDELEM_I8:
11199                 lclTyp = TYP_LONG;
11200                 goto ARR_LD;
11201             case CEE_LDELEM_REF:
11202                 lclTyp = TYP_REF;
11203                 goto ARR_LD;
11204             case CEE_LDELEM_R4:
11205                 lclTyp = TYP_FLOAT;
11206                 goto ARR_LD;
11207             case CEE_LDELEM_R8:
11208                 lclTyp = TYP_DOUBLE;
11209                 goto ARR_LD;
11210             case CEE_LDELEM_U1:
11211                 lclTyp = TYP_UBYTE;
11212                 goto ARR_LD;
11213             case CEE_LDELEM_U2:
11214                 lclTyp = TYP_USHORT;
11215                 goto ARR_LD;
11216
11217             ARR_LD:
11218
11219                 if (tiVerificationNeeded)
11220                 {
11221                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11222                     typeInfo tiIndex = impStackTop().seTypeInfo;
11223
11224                     // As per ECMA, the 'index' specified can be either int32 or native int.
11225                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11226                     if (tiArray.IsNullObjRef())
11227                     {
11228                         if (lclTyp == TYP_REF)
11229                         { // we will say a deref of a null array yields a null ref
11230                             tiRetVal = typeInfo(TI_NULL);
11231                         }
11232                         else
11233                         {
11234                             tiRetVal = typeInfo(lclTyp);
11235                         }
11236                     }
11237                     else
11238                     {
11239                         tiRetVal             = verGetArrayElemType(tiArray);
11240                         typeInfo arrayElemTi = typeInfo(lclTyp);
11241 #ifdef _TARGET_64BIT_
11242                         if (opcode == CEE_LDELEM_I)
11243                         {
11244                             arrayElemTi = typeInfo::nativeInt();
11245                         }
11246
11247                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
11248                         {
11249                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
11250                         }
11251                         else
11252 #endif // _TARGET_64BIT_
11253                         {
11254                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
11255                         }
11256                     }
11257                     tiRetVal.NormaliseForStack();
11258                 }
11259             ARR_LD_POST_VERIFY:
11260
11261                 /* Pull the index value and array address */
11262                 op2 = impPopStack().val;
11263                 op1 = impPopStack().val;
11264                 assertImp(op1->gtType == TYP_REF);
11265
11266                 /* Check for null pointer - in the inliner case we simply abort */
11267
11268                 if (compIsForInlining())
11269                 {
11270                     if (op1->gtOper == GT_CNS_INT)
11271                     {
11272                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
11273                         return;
11274                     }
11275                 }
11276
11277                 op1 = impCheckForNullPointer(op1);
11278
11279                 /* Mark the block as containing an index expression */
11280
11281                 if (op1->gtOper == GT_LCL_VAR)
11282                 {
11283                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
11284                     {
11285                         block->bbFlags |= BBF_HAS_IDX_LEN;
11286                         optMethodFlags |= OMF_HAS_ARRAYREF;
11287                     }
11288                 }
11289
11290                 /* Create the index node and push it on the stack */
11291
11292                 op1 = gtNewIndexRef(lclTyp, op1, op2);
11293
11294                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
11295
11296                 if ((opcode == CEE_LDELEMA) || ldstruct ||
11297                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
11298                 {
11299                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
11300
11301                     // remember the element size
11302                     if (lclTyp == TYP_REF)
11303                     {
11304                         op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
11305                     }
11306                     else
11307                     {
11308                         // If ldElemClass is precisely a primitive type, use that; otherwise, preserve the struct type.
11309                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
11310                         {
11311                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
11312                         }
11313                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
11314                         if (lclTyp == TYP_STRUCT)
11315                         {
11316                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
11317                             op1->gtIndex.gtIndElemSize = size;
11318                             op1->gtType                = lclTyp;
11319                         }
11320                     }
11321
11322                     if ((opcode == CEE_LDELEMA) || ldstruct)
11323                     {
11324                         // wrap it in a &
11325                         lclTyp = TYP_BYREF;
11326
11327                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
11328                     }
11329                     else
11330                     {
11331                         assert(lclTyp != TYP_STRUCT);
11332                     }
11333                 }
11334
11335                 if (ldstruct)
11336                 {
11337                     // Create an OBJ for the result
11338                     op1 = gtNewObjNode(ldelemClsHnd, op1);
11339                     op1->gtFlags |= GTF_EXCEPT;
11340                 }
11341                 impPushOnStack(op1, tiRetVal);
11342                 break;
11343
11344             // stelem for reference and value types
11345             case CEE_STELEM:
11346
11347                 assertImp(sz == sizeof(unsigned));
11348
11349                 _impResolveToken(CORINFO_TOKENKIND_Class);
11350
11351                 JITDUMP(" %08X", resolvedToken.token);
11352
11353                 stelemClsHnd = resolvedToken.hClass;
11354
11355                 if (tiVerificationNeeded)
11356                 {
11357                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11358                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11359                     typeInfo tiValue = impStackTop().seTypeInfo;
11360
11361                     // As per ECMA, the 'index' specified can be either int32 or native int.
11362                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11363                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11364
11365                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11366                            "type operand incompatible with array element type");
11367                     arrayElem.NormaliseForStack();
11368                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11369                 }
11370
11371                 // If it's a reference type just behave as though it's a stelem.ref instruction
11372                 if (!eeIsValueClass(stelemClsHnd))
11373                 {
11374                     goto STELEM_REF_POST_VERIFY;
11375                 }
11376
11377                 // Otherwise extract the type
11378                 {
11379                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11380                     lclTyp             = JITtype2varType(jitTyp);
11381                     goto ARR_ST_POST_VERIFY;
11382                 }
11383
11384             case CEE_STELEM_REF:
11385
11386                 if (tiVerificationNeeded)
11387                 {
11388                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11389                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11390                     typeInfo tiValue = impStackTop().seTypeInfo;
11391
11392                     // As per ECMA, the 'index' specified can be either int32 or native int.
11393                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11394                     Verify(tiValue.IsObjRef(), "bad value");
11395
11396                     // We only check that it is an object reference; the helper does additional checks
11397                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11398                 }
11399
11400             STELEM_REF_POST_VERIFY:
11401
11402                 arrayNodeTo      = impStackTop(2).val;
11403                 arrayNodeToIndex = impStackTop(1).val;
11404                 arrayNodeFrom    = impStackTop().val;
11405
11406                 //
11407                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11408                 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
11409                 //
11410
11411                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
11412                 // This does not need CORINFO_HELP_ARRADDR_ST
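                      // A value just loaded from this same array is necessarily an instance of the
                      // array's run-time element type, so storing it back can never fail the
                      // covariant store check (the lvAddrExposed test guards that the local still
                      // refers to the same array).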
11413                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11414                     arrayNodeTo->gtOper == GT_LCL_VAR &&
11415                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11416                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11417                 {
11418                     JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
11419                     lclTyp = TYP_REF;
11420                     goto ARR_ST_POST_VERIFY;
11421                 }
11422
11423                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11424                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11425                 {
11426                     JITDUMP("\nstelem of null: skipping covariant store check\n");
11427                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11428                     lclTyp = TYP_REF;
11429                     goto ARR_ST_POST_VERIFY;
11430                 }
11431
11432                 /* Call a helper function to do the assignment */
11433                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11434
11435                 goto SPILL_APPEND;
11436
11437             case CEE_STELEM_I1:
11438                 lclTyp = TYP_BYTE;
11439                 goto ARR_ST;
11440             case CEE_STELEM_I2:
11441                 lclTyp = TYP_SHORT;
11442                 goto ARR_ST;
11443             case CEE_STELEM_I:
11444                 lclTyp = TYP_I_IMPL;
11445                 goto ARR_ST;
11446             case CEE_STELEM_I4:
11447                 lclTyp = TYP_INT;
11448                 goto ARR_ST;
11449             case CEE_STELEM_I8:
11450                 lclTyp = TYP_LONG;
11451                 goto ARR_ST;
11452             case CEE_STELEM_R4:
11453                 lclTyp = TYP_FLOAT;
11454                 goto ARR_ST;
11455             case CEE_STELEM_R8:
11456                 lclTyp = TYP_DOUBLE;
11457                 goto ARR_ST;
11458
11459             ARR_ST:
11460
11461                 if (tiVerificationNeeded)
11462                 {
11463                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11464                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11465                     typeInfo tiValue = impStackTop().seTypeInfo;
11466
11467                     // As per ECMA, the 'index' specified can be either int32 or native int.
11468                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11469                     typeInfo arrayElem = typeInfo(lclTyp);
11470 #ifdef _TARGET_64BIT_
11471                     if (opcode == CEE_STELEM_I)
11472                     {
11473                         arrayElem = typeInfo::nativeInt();
11474                     }
11475 #endif // _TARGET_64BIT_
11476                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11477                            "bad array");
11478
11479                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11480                            "bad value");
11481                 }
11482
11483             ARR_ST_POST_VERIFY:
11484                 /* The strict order of evaluation is LHS-operands, RHS-operands,
11485                    range-check, and then assignment. However, codegen currently
11486                    does the range-check before evaluating the RHS-operands. So to
11487                    maintain strict ordering, we spill the stack. */
11488
11489                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11490                 {
11491                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11492                                                    "Strict ordering of exceptions for Array store"));
11493                 }
11494
11495                 /* Pull the new value from the stack */
11496                 op2 = impPopStack().val;
11497
11498                 /* Pull the index value */
11499                 op1 = impPopStack().val;
11500
11501                 /* Pull the array address */
11502                 op3 = impPopStack().val;
11503
11504                 assertImp(op3->gtType == TYP_REF);
11505                 if (op2->IsVarAddr())
11506                 {
11507                     op2->gtType = TYP_I_IMPL;
11508                 }
11509
11510                 op3 = impCheckForNullPointer(op3);
11511
11512                 // Mark the block as containing an index expression
11513
11514                 if (op3->gtOper == GT_LCL_VAR)
11515                 {
11516                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
11517                     {
11518                         block->bbFlags |= BBF_HAS_IDX_LEN;
11519                         optMethodFlags |= OMF_HAS_ARRAYREF;
11520                     }
11521                 }
11522
11523                 /* Create the index node */
11524
11525                 op1 = gtNewIndexRef(lclTyp, op3, op1);
11526
11527                 /* Create the assignment node and append it */
11528
11529                 if (lclTyp == TYP_STRUCT)
11530                 {
11531                     assert(stelemClsHnd != DUMMY_INIT(NULL));
11532
11533                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
11534                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
11535                 }
11536                 if (varTypeIsStruct(op1))
11537                 {
11538                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
11539                 }
11540                 else
11541                 {
11542                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
11543                     op1 = gtNewAssignNode(op1, op2);
11544                 }
11545
11546                 /* Mark the expression as containing an assignment */
11547
11548                 op1->gtFlags |= GTF_ASG;
11549
11550                 goto SPILL_APPEND;
11551
11552             case CEE_ADD:
11553                 oper = GT_ADD;
11554                 goto MATH_OP2;
11555
11556             case CEE_ADD_OVF:
11557                 uns = false;
11558                 goto ADD_OVF;
11559             case CEE_ADD_OVF_UN:
11560                 uns = true;
11561                 goto ADD_OVF;
11562
11563             ADD_OVF:
11564                 ovfl     = true;
11565                 callNode = false;
11566                 oper     = GT_ADD;
11567                 goto MATH_OP2_FLAGS;
11568
11569             case CEE_SUB:
11570                 oper = GT_SUB;
11571                 goto MATH_OP2;
11572
11573             case CEE_SUB_OVF:
11574                 uns = false;
11575                 goto SUB_OVF;
11576             case CEE_SUB_OVF_UN:
11577                 uns = true;
11578                 goto SUB_OVF;
11579
11580             SUB_OVF:
11581                 ovfl     = true;
11582                 callNode = false;
11583                 oper     = GT_SUB;
11584                 goto MATH_OP2_FLAGS;
11585
11586             case CEE_MUL:
11587                 oper = GT_MUL;
11588                 goto MATH_MAYBE_CALL_NO_OVF;
11589
11590             case CEE_MUL_OVF:
11591                 uns = false;
11592                 goto MUL_OVF;
11593             case CEE_MUL_OVF_UN:
11594                 uns = true;
11595                 goto MUL_OVF;
11596
11597             MUL_OVF:
11598                 ovfl = true;
11599                 oper = GT_MUL;
11600                 goto MATH_MAYBE_CALL_OVF;
11601
11602             // Other binary math operations
11603
11604             case CEE_DIV:
11605                 oper = GT_DIV;
11606                 goto MATH_MAYBE_CALL_NO_OVF;
11607
11608             case CEE_DIV_UN:
11609                 oper = GT_UDIV;
11610                 goto MATH_MAYBE_CALL_NO_OVF;
11611
11612             case CEE_REM:
11613                 oper = GT_MOD;
11614                 goto MATH_MAYBE_CALL_NO_OVF;
11615
11616             case CEE_REM_UN:
11617                 oper = GT_UMOD;
11618                 goto MATH_MAYBE_CALL_NO_OVF;
11619
11620             MATH_MAYBE_CALL_NO_OVF:
11621                 ovfl = false;
11622             MATH_MAYBE_CALL_OVF:
11623                 // Morpher has some complex logic about when to turn different
11624                 // typed nodes on different platforms into helper calls. We
11625                 // need to either duplicate that logic here, or just
11626                 // pessimistically make all the nodes large enough to become
11627                 // call nodes.  Since call nodes aren't that much larger and
11628                 // these opcodes are infrequent enough that we chose the latter.
11629                 callNode = true;
11630                 goto MATH_OP2_FLAGS;
11631
11632             case CEE_AND:
11633                 oper = GT_AND;
11634                 goto MATH_OP2;
11635             case CEE_OR:
11636                 oper = GT_OR;
11637                 goto MATH_OP2;
11638             case CEE_XOR:
11639                 oper = GT_XOR;
11640                 goto MATH_OP2;
11641
11642             MATH_OP2: // For default values of 'ovfl' and 'callNode'
11643
11644                 ovfl     = false;
11645                 callNode = false;
11646
11647             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11648
11649                 /* Pull two values and push back the result */
11650
11651                 if (tiVerificationNeeded)
11652                 {
11653                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11654                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11655
11656                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11657                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11658                     {
11659                         Verify(tiOp1.IsNumberType(), "not number");
11660                     }
11661                     else
11662                     {
11663                         Verify(tiOp1.IsIntegerType(), "not integer");
11664                     }
11665
11666                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11667
11668                     tiRetVal = tiOp1;
11669
11670 #ifdef _TARGET_64BIT_
11671                     if (tiOp2.IsNativeIntType())
11672                     {
11673                         tiRetVal = tiOp2;
11674                     }
11675 #endif // _TARGET_64BIT_
11676                 }
11677
11678                 op2 = impPopStack().val;
11679                 op1 = impPopStack().val;
11680
11681 #if !CPU_HAS_FP_SUPPORT
11682                 if (varTypeIsFloating(op1->gtType))
11683                 {
11684                     callNode = true;
11685                 }
11686 #endif
11687                 /* Can't do arithmetic with references */
11688                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11689
11690                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if it's a true byref, only
11691                 // if it is in the stack)
11692                 impBashVarAddrsToI(op1, op2);
11693
11694                 type = impGetByRefResultType(oper, uns, &op1, &op2);
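                      // impGetByRefResultType picks the result type when byrefs or native ints are
                      // involved: byref +/- native int stays a byref, while byref - byref yields a
                      // native int.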
11695
11696                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11697
11698                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11699
11700                 if (op2->gtOper == GT_CNS_INT)
11701                 {
11702                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11703                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11704
11705                     {
11706                         impPushOnStack(op1, tiRetVal);
11707                         break;
11708                     }
11709                 }
11710
11711 #if !FEATURE_X87_DOUBLES
11712                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11713                 //
11714                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11715                 {
11716                     if (op1->TypeGet() != type)
11717                     {
11718                         // We insert a cast of op1 to 'type'
11719                         op1 = gtNewCastNode(type, op1, type);
11720                     }
11721                     if (op2->TypeGet() != type)
11722                     {
11723                         // We insert a cast of op2 to 'type'
11724                         op2 = gtNewCastNode(type, op2, type);
11725                     }
11726                 }
11727 #endif // !FEATURE_X87_DOUBLES
11728
11729 #if SMALL_TREE_NODES
11730                 if (callNode)
11731                 {
11732                     /* These operators can later be transformed into 'GT_CALL' */
11733
11734                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11735 #ifndef _TARGET_ARM_
11736                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11737                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11738                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11739                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11740 #endif
11741                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11742                     // that we'll need to transform into a general large node, but rather specifically
11743                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11744                     // and a CALL is no longer the largest.
11745                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11746                     // than an "if".
11747                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11748                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11749                 }
11750                 else
11751 #endif // SMALL_TREE_NODES
11752                 {
11753                     op1 = gtNewOperNode(oper, type, op1, op2);
11754                 }
11755
11756                 /* Special case: integer/long division may throw an exception */
11757
11758                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
11759                 {
11760                     op1->gtFlags |= GTF_EXCEPT;
11761                 }
11762
11763                 if (ovfl)
11764                 {
11765                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11766                     if (ovflType != TYP_UNKNOWN)
11767                     {
11768                         op1->gtType = ovflType;
11769                     }
11770                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11771                     if (uns)
11772                     {
11773                         op1->gtFlags |= GTF_UNSIGNED;
11774                     }
11775                 }
11776
11777                 impPushOnStack(op1, tiRetVal);
11778                 break;
11779
11780             case CEE_SHL:
11781                 oper = GT_LSH;
11782                 goto CEE_SH_OP2;
11783
11784             case CEE_SHR:
11785                 oper = GT_RSH;
11786                 goto CEE_SH_OP2;
11787             case CEE_SHR_UN:
11788                 oper = GT_RSZ;
11789                 goto CEE_SH_OP2;
11790
11791             CEE_SH_OP2:
11792                 if (tiVerificationNeeded)
11793                 {
11794                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11795                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11796                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11797                     tiRetVal = tiVal;
11798                 }
11799                 op2 = impPopStack().val;
11800                 op1 = impPopStack().val; // operand to be shifted
11801                 impBashVarAddrsToI(op1, op2);
11802
11803                 type = genActualType(op1->TypeGet());
11804                 op1  = gtNewOperNode(oper, type, op1, op2);
11805
11806                 impPushOnStack(op1, tiRetVal);
11807                 break;
11808
11809             case CEE_NOT:
11810                 if (tiVerificationNeeded)
11811                 {
11812                     tiRetVal = impStackTop().seTypeInfo;
11813                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11814                 }
11815
11816                 op1 = impPopStack().val;
11817                 impBashVarAddrsToI(op1, nullptr);
11818                 type = genActualType(op1->TypeGet());
11819                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11820                 break;
11821
11822             case CEE_CKFINITE:
11823                 if (tiVerificationNeeded)
11824                 {
11825                     tiRetVal = impStackTop().seTypeInfo;
11826                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11827                 }
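                      // 'ckfinite' leaves the value on the stack but throws ArithmeticException if
                      // it is a NaN or an infinity, so the GT_CKFINITE node is marked as throwing.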
11828                 op1  = impPopStack().val;
11829                 type = op1->TypeGet();
11830                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11831                 op1->gtFlags |= GTF_EXCEPT;
11832
11833                 impPushOnStack(op1, tiRetVal);
11834                 break;
11835
11836             case CEE_LEAVE:
11837
11838                 val     = getI4LittleEndian(codeAddr); // jump distance
11839                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11840                 goto LEAVE;
11841
11842             case CEE_LEAVE_S:
11843                 val     = getI1LittleEndian(codeAddr); // jump distance
11844                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11845
11846             LEAVE:
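                      // 'leave' exits one or more protected regions: impImportLeave (below) arranges
                      // for any finally handlers between this block and the branch target to run
                      // before control reaches the target.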
11847
11848                 if (compIsForInlining())
11849                 {
11850                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11851                     return;
11852                 }
11853
11854                 JITDUMP(" %04X", jmpAddr);
11855                 if (block->bbJumpKind != BBJ_LEAVE)
11856                 {
11857                     impResetLeaveBlock(block, jmpAddr);
11858                 }
11859
11860                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11861                 impImportLeave(block);
11862                 impNoteBranchOffs();
11863
11864                 break;
11865
11866             case CEE_BR:
11867             case CEE_BR_S:
11868                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11869
11870                 if (compIsForInlining() && jmpDist == 0)
11871                 {
11872                     break; /* NOP */
11873                 }
11874
11875                 impNoteBranchOffs();
11876                 break;
11877
11878             case CEE_BRTRUE:
11879             case CEE_BRTRUE_S:
11880             case CEE_BRFALSE:
11881             case CEE_BRFALSE_S:
11882
11883                 /* Pop the comparand (now there's a neat term) from the stack */
11884                 if (tiVerificationNeeded)
11885                 {
11886                     typeInfo& tiVal = impStackTop().seTypeInfo;
11887                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11888                            "bad value");
11889                 }
11890
11891                 op1  = impPopStack().val;
11892                 type = op1->TypeGet();
11893
11894                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11895                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11896                 {
11897                     block->bbJumpKind = BBJ_NONE;
11898
11899                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11900                     {
11901                         op1 = gtUnusedValNode(op1);
11902                         goto SPILL_APPEND;
11903                     }
11904                     else
11905                     {
11906                         break;
11907                     }
11908                 }
11909
11910                 if (op1->OperIsCompare())
11911                 {
11912                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11913                     {
11914                         // Flip the sense of the compare
11915
11916                         op1 = gtReverseCond(op1);
11917                     }
11918                 }
11919                 else
11920                 {
11921                     /* We'll compare against an equally-sized integer 0 */
11922                     /* For small types, we always compare against int   */
11923                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11924
11925                     /* Create the comparison operator and try to fold it */
11926
11927                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11928                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11929                 }
11930
11931             // fall through
11932
11933             COND_JUMP:
11934
11935                 /* Fold comparison if we can */
11936
11937                 op1 = gtFoldExpr(op1);
11938
11939                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11940                 /* Don't make any blocks unreachable in import only mode */
11941
11942                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11943                 {
11944                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11945                        unreachable under compDbgCode */
11946                     assert(!opts.compDbgCode);
11947
11948                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11949                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11950                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11951                                                                          // block for the second time
11952
11953                     block->bbJumpKind = foldedJumpKind;
11954 #ifdef DEBUG
11955                     if (verbose)
11956                     {
11957                         if (op1->gtIntCon.gtIconVal)
11958                         {
11959                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11960                                    block->bbJumpDest->bbNum);
11961                         }
11962                         else
11963                         {
11964                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11965                         }
11966                     }
11967 #endif
11968                     break;
11969                 }
11970
11971                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11972
11973                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11974                    in impImportBlock(block). For correct line numbers, spill stack. */
11975
11976                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11977                 {
11978                     impSpillStackEnsure(true);
11979                 }
11980
11981                 goto SPILL_APPEND;
11982
11983             case CEE_CEQ:
11984                 oper = GT_EQ;
11985                 uns  = false;
11986                 goto CMP_2_OPs;
11987             case CEE_CGT_UN:
11988                 oper = GT_GT;
11989                 uns  = true;
11990                 goto CMP_2_OPs;
11991             case CEE_CGT:
11992                 oper = GT_GT;
11993                 uns  = false;
11994                 goto CMP_2_OPs;
11995             case CEE_CLT_UN:
11996                 oper = GT_LT;
11997                 uns  = true;
11998                 goto CMP_2_OPs;
11999             case CEE_CLT:
12000                 oper = GT_LT;
12001                 uns  = false;
12002                 goto CMP_2_OPs;
12003
12004             CMP_2_OPs:
12005                 if (tiVerificationNeeded)
12006                 {
12007                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12008                     tiRetVal = typeInfo(TI_INT);
12009                 }
12010
12011                 op2 = impPopStack().val;
12012                 op1 = impPopStack().val;
12013
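                      // On 64-bit targets the IL stack may mix 32-bit int and native int operands;
                      // widen the 32-bit side to native int (zero-extended for the unsigned
                      // opcodes) so both operands of the relop have the same actual type.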
12014 #ifdef _TARGET_64BIT_
12015                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
12016                 {
12017                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12018                 }
12019                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
12020                 {
12021                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12022                 }
12023 #endif // _TARGET_64BIT_
12024
12025                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12026                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12027                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12028
12029                 /* Create the comparison node */
12030
12031                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12032
12033                 /* TODO: setting both flags when only one is appropriate */
12034                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
12035                 {
12036                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
12037                 }
12038
12039                 impPushOnStack(op1, tiRetVal);
12040                 break;
12041
12042             case CEE_BEQ_S:
12043             case CEE_BEQ:
12044                 oper = GT_EQ;
12045                 goto CMP_2_OPs_AND_BR;
12046
12047             case CEE_BGE_S:
12048             case CEE_BGE:
12049                 oper = GT_GE;
12050                 goto CMP_2_OPs_AND_BR;
12051
12052             case CEE_BGE_UN_S:
12053             case CEE_BGE_UN:
12054                 oper = GT_GE;
12055                 goto CMP_2_OPs_AND_BR_UN;
12056
12057             case CEE_BGT_S:
12058             case CEE_BGT:
12059                 oper = GT_GT;
12060                 goto CMP_2_OPs_AND_BR;
12061
12062             case CEE_BGT_UN_S:
12063             case CEE_BGT_UN:
12064                 oper = GT_GT;
12065                 goto CMP_2_OPs_AND_BR_UN;
12066
12067             case CEE_BLE_S:
12068             case CEE_BLE:
12069                 oper = GT_LE;
12070                 goto CMP_2_OPs_AND_BR;
12071
12072             case CEE_BLE_UN_S:
12073             case CEE_BLE_UN:
12074                 oper = GT_LE;
12075                 goto CMP_2_OPs_AND_BR_UN;
12076
12077             case CEE_BLT_S:
12078             case CEE_BLT:
12079                 oper = GT_LT;
12080                 goto CMP_2_OPs_AND_BR;
12081
12082             case CEE_BLT_UN_S:
12083             case CEE_BLT_UN:
12084                 oper = GT_LT;
12085                 goto CMP_2_OPs_AND_BR_UN;
12086
12087             case CEE_BNE_UN_S:
12088             case CEE_BNE_UN:
12089                 oper = GT_NE;
12090                 goto CMP_2_OPs_AND_BR_UN;
12091
12092             CMP_2_OPs_AND_BR_UN:
12093                 uns       = true;
12094                 unordered = true;
12095                 goto CMP_2_OPs_AND_BR_ALL;
12096             CMP_2_OPs_AND_BR:
12097                 uns       = false;
12098                 unordered = false;
12099                 goto CMP_2_OPs_AND_BR_ALL;
12100             CMP_2_OPs_AND_BR_ALL:
12101
12102                 if (tiVerificationNeeded)
12103                 {
12104                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12105                 }
12106
12107                 /* Pull two values */
12108                 op2 = impPopStack().val;
12109                 op1 = impPopStack().val;
12110
12111 #ifdef _TARGET_64BIT_
12112                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
12113                 {
12114                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12115                 }
12116                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
12117                 {
12118                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12119                 }
12120 #endif // _TARGET_64BIT_
12121
12122                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12123                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12124                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12125
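                      // A conditional branch whose target is the fall-through block is a no-op:
                      // turn the block into a plain fall-through and keep the popped operands
                      // only if they have side effects that still need to be evaluated.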
12126                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12127                 {
12128                     block->bbJumpKind = BBJ_NONE;
12129
12130                     if (op1->gtFlags & GTF_GLOB_EFFECT)
12131                     {
12132                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12133                                                        "Branch to next Optimization, op1 side effect"));
12134                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12135                     }
12136                     if (op2->gtFlags & GTF_GLOB_EFFECT)
12137                     {
12138                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12139                                                        "Branch to next Optimization, op2 side effect"));
12140                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12141                     }
12142
12143 #ifdef DEBUG
12144                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
12145                     {
12146                         impNoteLastILoffs();
12147                     }
12148 #endif
12149                     break;
12150                 }
12151 #if !FEATURE_X87_DOUBLES
12152                 // We can generate a compare of differently sized floating point op1 and op2;
12153                 // we insert a cast to make their types match.
12154                 //
12155                 if (varTypeIsFloating(op1->TypeGet()))
12156                 {
12157                     if (op1->TypeGet() != op2->TypeGet())
12158                     {
12159                         assert(varTypeIsFloating(op2->TypeGet()));
12160
12161                         // Say op1=double, op2=float. To avoid loss of precision
12162                         // while comparing, op2 is converted to double and a double
12163                         // comparison is done.
12164                         if (op1->TypeGet() == TYP_DOUBLE)
12165                         {
12166                             // We insert a cast of op2 to TYP_DOUBLE
12167                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
12168                         }
12169                         else if (op2->TypeGet() == TYP_DOUBLE)
12170                         {
12171                             // We insert a cast of op1 to TYP_DOUBLE
12172                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
12173                         }
12174                     }
12175                 }
12176 #endif // !FEATURE_X87_DOUBLES
12177
12178                 /* Create and append the operator */
12179
12180                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12181
12182                 if (uns)
12183                 {
12184                     op1->gtFlags |= GTF_UNSIGNED;
12185                 }
12186
12187                 if (unordered)
12188                 {
12189                     op1->gtFlags |= GTF_RELOP_NAN_UN;
12190                 }
12191
12192                 goto COND_JUMP;
12193
12194             case CEE_SWITCH:
12195                 assert(!compIsForInlining());
12196
12197                 if (tiVerificationNeeded)
12198                 {
12199                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
12200                 }
12201                 /* Pop the switch value off the stack */
12202                 op1 = impPopStack().val;
12203                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
12204
12205                 /* We can create a switch node */
12206
12207                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
12208
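                      // In IL the switch opcode is followed by a 4-byte case count N and then N
                      // 4-byte branch offsets. The branch targets themselves were recorded on the
                      // block when the basic blocks were created, so we only skip the table here.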
12209                 val = (int)getU4LittleEndian(codeAddr);
12210                 codeAddr += 4 + val * 4; // skip over the switch-table
12211
12212                 goto SPILL_APPEND;
12213
12214             /************************** Casting OPCODES ***************************/
12215
12216             case CEE_CONV_OVF_I1:
12217                 lclTyp = TYP_BYTE;
12218                 goto CONV_OVF;
12219             case CEE_CONV_OVF_I2:
12220                 lclTyp = TYP_SHORT;
12221                 goto CONV_OVF;
12222             case CEE_CONV_OVF_I:
12223                 lclTyp = TYP_I_IMPL;
12224                 goto CONV_OVF;
12225             case CEE_CONV_OVF_I4:
12226                 lclTyp = TYP_INT;
12227                 goto CONV_OVF;
12228             case CEE_CONV_OVF_I8:
12229                 lclTyp = TYP_LONG;
12230                 goto CONV_OVF;
12231
12232             case CEE_CONV_OVF_U1:
12233                 lclTyp = TYP_UBYTE;
12234                 goto CONV_OVF;
12235             case CEE_CONV_OVF_U2:
12236                 lclTyp = TYP_USHORT;
12237                 goto CONV_OVF;
12238             case CEE_CONV_OVF_U:
12239                 lclTyp = TYP_U_IMPL;
12240                 goto CONV_OVF;
12241             case CEE_CONV_OVF_U4:
12242                 lclTyp = TYP_UINT;
12243                 goto CONV_OVF;
12244             case CEE_CONV_OVF_U8:
12245                 lclTyp = TYP_ULONG;
12246                 goto CONV_OVF;
12247
12248             case CEE_CONV_OVF_I1_UN:
12249                 lclTyp = TYP_BYTE;
12250                 goto CONV_OVF_UN;
12251             case CEE_CONV_OVF_I2_UN:
12252                 lclTyp = TYP_SHORT;
12253                 goto CONV_OVF_UN;
12254             case CEE_CONV_OVF_I_UN:
12255                 lclTyp = TYP_I_IMPL;
12256                 goto CONV_OVF_UN;
12257             case CEE_CONV_OVF_I4_UN:
12258                 lclTyp = TYP_INT;
12259                 goto CONV_OVF_UN;
12260             case CEE_CONV_OVF_I8_UN:
12261                 lclTyp = TYP_LONG;
12262                 goto CONV_OVF_UN;
12263
12264             case CEE_CONV_OVF_U1_UN:
12265                 lclTyp = TYP_UBYTE;
12266                 goto CONV_OVF_UN;
12267             case CEE_CONV_OVF_U2_UN:
12268                 lclTyp = TYP_USHORT;
12269                 goto CONV_OVF_UN;
12270             case CEE_CONV_OVF_U_UN:
12271                 lclTyp = TYP_U_IMPL;
12272                 goto CONV_OVF_UN;
12273             case CEE_CONV_OVF_U4_UN:
12274                 lclTyp = TYP_UINT;
12275                 goto CONV_OVF_UN;
12276             case CEE_CONV_OVF_U8_UN:
12277                 lclTyp = TYP_ULONG;
12278                 goto CONV_OVF_UN;
12279
12280             CONV_OVF_UN:
12281                 uns = true;
12282                 goto CONV_OVF_COMMON;
12283             CONV_OVF:
12284                 uns = false;
12285                 goto CONV_OVF_COMMON;
12286
12287             CONV_OVF_COMMON:
12288                 ovfl = true;
12289                 goto _CONV;
12290
12291             case CEE_CONV_I1:
12292                 lclTyp = TYP_BYTE;
12293                 goto CONV;
12294             case CEE_CONV_I2:
12295                 lclTyp = TYP_SHORT;
12296                 goto CONV;
12297             case CEE_CONV_I:
12298                 lclTyp = TYP_I_IMPL;
12299                 goto CONV;
12300             case CEE_CONV_I4:
12301                 lclTyp = TYP_INT;
12302                 goto CONV;
12303             case CEE_CONV_I8:
12304                 lclTyp = TYP_LONG;
12305                 goto CONV;
12306
12307             case CEE_CONV_U1:
12308                 lclTyp = TYP_UBYTE;
12309                 goto CONV;
12310             case CEE_CONV_U2:
12311                 lclTyp = TYP_USHORT;
12312                 goto CONV;
12313 #if (REGSIZE_BYTES == 8)
12314             case CEE_CONV_U:
12315                 lclTyp = TYP_U_IMPL;
12316                 goto CONV_UN;
12317 #else
12318             case CEE_CONV_U:
12319                 lclTyp = TYP_U_IMPL;
12320                 goto CONV;
12321 #endif
12322             case CEE_CONV_U4:
12323                 lclTyp = TYP_UINT;
12324                 goto CONV;
12325             case CEE_CONV_U8:
12326                 lclTyp = TYP_ULONG;
12327                 goto CONV_UN;
12328
12329             case CEE_CONV_R4:
12330                 lclTyp = TYP_FLOAT;
12331                 goto CONV;
12332             case CEE_CONV_R8:
12333                 lclTyp = TYP_DOUBLE;
12334                 goto CONV;
12335
12336             case CEE_CONV_R_UN:
12337                 lclTyp = TYP_DOUBLE;
12338                 goto CONV_UN;
12339
12340             CONV_UN:
12341                 uns  = true;
12342                 ovfl = false;
12343                 goto _CONV;
12344
12345             CONV:
12346                 uns  = false;
12347                 ovfl = false;
12348                 goto _CONV;
12349
12350             _CONV:
12351                 // just check that we have a number on the stack
12352                 if (tiVerificationNeeded)
12353                 {
12354                     const typeInfo& tiVal = impStackTop().seTypeInfo;
12355                     Verify(tiVal.IsNumberType(), "bad arg");
12356
12357 #ifdef _TARGET_64BIT_
12358                     bool isNative = false;
12359
12360                     switch (opcode)
12361                     {
12362                         case CEE_CONV_OVF_I:
12363                         case CEE_CONV_OVF_I_UN:
12364                         case CEE_CONV_I:
12365                         case CEE_CONV_OVF_U:
12366                         case CEE_CONV_OVF_U_UN:
12367                         case CEE_CONV_U:
12368                             isNative = true;
12369                         default:
12370                             // leave 'isNative' = false;
12371                             break;
12372                     }
12373                     if (isNative)
12374                     {
12375                         tiRetVal = typeInfo::nativeInt();
12376                     }
12377                     else
12378 #endif // _TARGET_64BIT_
12379                     {
12380                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12381                     }
12382                 }
12383
12384                 // Only conversions from FLOAT or DOUBLE to an integer type,
12385                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to helper calls.
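                      // 'callNode' records whether this cast is expected to be morphed into a
                      // helper call later; if so, a large (call-sized) cast node is allocated
                      // below so that it can be changed into a call in place.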
12386
12387                 if (varTypeIsFloating(lclTyp))
12388                 {
12389                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12390 #ifdef _TARGET_64BIT_
12391                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12392                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
12393                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12394                                // and generate SSE2 code instead of going through helper calls.
12395                                || (impStackTop().val->TypeGet() == TYP_BYREF)
12396 #endif
12397                         ;
12398                 }
12399                 else
12400                 {
12401                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12402                 }
12403
12404                 // At this point uns, ovfl, and callNode are all set
12405
12406                 op1 = impPopStack().val;
12407                 impBashVarAddrsToI(op1);
12408
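                      // Peephole: a cast to a small type right after an AND with a constant mask
                      // is often redundant. For example, "(x & 0x7F)" followed by conv.i1 is
                      // already in signed-byte range, so the cast is dropped; "(x & 0xFF)"
                      // followed by conv.i1 keeps the (sign-extending) cast but drops the
                      // now-redundant mask.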
12409                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12410                 {
12411                     op2 = op1->gtOp.gtOp2;
12412
12413                     if (op2->gtOper == GT_CNS_INT)
12414                     {
12415                         ssize_t ival = op2->gtIntCon.gtIconVal;
12416                         ssize_t mask, umask;
12417
12418                         switch (lclTyp)
12419                         {
12420                             case TYP_BYTE:
12421                             case TYP_UBYTE:
12422                                 mask  = 0x00FF;
12423                                 umask = 0x007F;
12424                                 break;
12425                             case TYP_USHORT:
12426                             case TYP_SHORT:
12427                                 mask  = 0xFFFF;
12428                                 umask = 0x7FFF;
12429                                 break;
12430
12431                             default:
12432                                 assert(!"unexpected type");
12433                                 return;
12434                         }
12435
12436                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12437                         {
12438                             /* Toss the cast, it's a waste of time */
12439
12440                             impPushOnStack(op1, tiRetVal);
12441                             break;
12442                         }
12443                         else if (ival == mask)
12444                         {
12445                             /* Toss the masking, it's a waste of time, since
12446                                we sign-extend from the small value anyway */
12447
12448                             op1 = op1->gtOp.gtOp1;
12449                         }
12450                     }
12451                 }
12452
12453                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
12454                     since the result of a cast to one of the 'small' integer
12455                     types is an integer.
12456                  */
12457
12458                 type = genActualType(lclTyp);
12459
12460 #if SMALL_TREE_NODES
12461                 if (callNode)
12462                 {
12463                     op1 = gtNewCastNodeL(type, op1, lclTyp);
12464                 }
12465                 else
12466 #endif // SMALL_TREE_NODES
12467                 {
12468                     op1 = gtNewCastNode(type, op1, lclTyp);
12469                 }
12470
12471                 if (ovfl)
12472                 {
12473                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12474                 }
12475                 if (uns)
12476                 {
12477                     op1->gtFlags |= GTF_UNSIGNED;
12478                 }
12479                 impPushOnStack(op1, tiRetVal);
12480                 break;
12481
12482             case CEE_NEG:
12483                 if (tiVerificationNeeded)
12484                 {
12485                     tiRetVal = impStackTop().seTypeInfo;
12486                     Verify(tiRetVal.IsNumberType(), "Bad arg");
12487                 }
12488
12489                 op1 = impPopStack().val;
12490                 impBashVarAddrsToI(op1, nullptr);
12491                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12492                 break;
12493
12494             case CEE_POP:
12495             {
12496                 /* Pull the top value from the stack */
12497
12498                 StackEntry se = impPopStack();
12499                 clsHnd        = se.seTypeInfo.GetClassHandle();
12500                 op1           = se.val;
12501
12502                 /* Get hold of the type of the value being duplicated */
12503
12504                 lclTyp = genActualType(op1->gtType);
12505
12506                 /* Does the value have any side effects? */
12507
12508                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
12509                 {
12510                     // Since we are throwing away the value, just normalize
12511                     // it to its address.  This is more efficient.
12512
12513                     if (varTypeIsStruct(op1))
12514                     {
12515 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
12516                         // Non-calls, such as obj or ret_expr, have to go through this.
12517                         // Calls with large struct return value have to go through this.
12518                         // Helper calls with small struct return value also have to go
12519                         // through this since they do not follow Unix calling convention.
12520                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
12521                             op1->AsCall()->gtCallType == CT_HELPER)
12522 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
12523                         {
12524                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
12525                         }
12526                     }
12527
12528                     // If op1 is a non-overflow cast, throw it away since it is useless.
12529                     // Another reason for throwing away the useless cast is in the context of
12530                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
12531                     // The cast gets added as part of importing GT_CALL, which gets in the way
12532                     // of fgMorphCall() on the forms of tail call nodes that we assert.
12533                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
12534                     {
12535                         op1 = op1->gtOp.gtOp1;
12536                     }
12537
12538                     // If 'op1' is an expression, create an assignment node.
12539                     // This helps analyses (like CSE) work properly.
12540
12541                     if (op1->gtOper != GT_CALL)
12542                     {
12543                         op1 = gtUnusedValNode(op1);
12544                     }
12545
12546                     /* Append the value to the tree list */
12547                     goto SPILL_APPEND;
12548                 }
12549
12550                 /* No side effects - just throw the <BEEP> thing away */
12551             }
12552             break;
12553
12554             case CEE_DUP:
12555             {
12556                 if (tiVerificationNeeded)
12557                 {
12558                     // Dup could start the beginning of a delegate creation sequence, remember that
12559                     delegateCreateStart = codeAddr - 1;
12560                     impStackTop(0);
12561                 }
12562
12563                 // If the expression to dup is simple, just clone it.
12564                 // Otherwise spill it to a temp, and reload the temp
12565                 // twice.
12566                 StackEntry se   = impPopStack();
12567                 GenTree*   tree = se.val;
12568                 tiRetVal        = se.seTypeInfo;
12569                 op1             = tree;
12570
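                      // Cheap nodes (integral/FP zero constants and locals) can be cloned
                      // directly; anything more complex is first stored to a fresh temp so the
                      // expression is evaluated only once and both stack entries refer to it.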
12571                 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
12572                 {
12573                     const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
12574                     impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
12575                     var_types type = genActualType(lvaTable[tmpNum].TypeGet());
12576                     op1            = gtNewLclvNode(tmpNum, type);
12577
12578                     // Propagate type info to the temp from the stack and the original tree
12579                     if (type == TYP_REF)
12580                     {
12581                         lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
12582                     }
12583                 }
12584
12585                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12586                                    nullptr DEBUGARG("DUP instruction"));
12587
12588                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12589                 impPushOnStack(op1, tiRetVal);
12590                 impPushOnStack(op2, tiRetVal);
12591             }
12592             break;
12593
12594             case CEE_STIND_I1:
12595                 lclTyp = TYP_BYTE;
12596                 goto STIND;
12597             case CEE_STIND_I2:
12598                 lclTyp = TYP_SHORT;
12599                 goto STIND;
12600             case CEE_STIND_I4:
12601                 lclTyp = TYP_INT;
12602                 goto STIND;
12603             case CEE_STIND_I8:
12604                 lclTyp = TYP_LONG;
12605                 goto STIND;
12606             case CEE_STIND_I:
12607                 lclTyp = TYP_I_IMPL;
12608                 goto STIND;
12609             case CEE_STIND_REF:
12610                 lclTyp = TYP_REF;
12611                 goto STIND;
12612             case CEE_STIND_R4:
12613                 lclTyp = TYP_FLOAT;
12614                 goto STIND;
12615             case CEE_STIND_R8:
12616                 lclTyp = TYP_DOUBLE;
12617                 goto STIND;
12618             STIND:
12619
12620                 if (tiVerificationNeeded)
12621                 {
12622                     typeInfo instrType(lclTyp);
12623 #ifdef _TARGET_64BIT_
12624                     if (opcode == CEE_STIND_I)
12625                     {
12626                         instrType = typeInfo::nativeInt();
12627                     }
12628 #endif // _TARGET_64BIT_
12629                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12630                 }
12631                 else
12632                 {
12633                     compUnsafeCastUsed = true; // Have to go conservative
12634                 }
12635
12636             STIND_POST_VERIFY:
12637
12638                 op2 = impPopStack().val; // value to store
12639                 op1 = impPopStack().val; // address to store to
12640
12641                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12642                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12643
12644                 impBashVarAddrsToI(op1, op2);
12645
12646                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12647
12648 #ifdef _TARGET_64BIT_
12649                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12650                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12651                 {
12652                     op2->gtType = TYP_I_IMPL;
12653                 }
12654                 else
12655                 {
12656                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12657                     //
12658                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12659                     {
12660                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12661                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12662                     }
12663                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12664                     //
12665                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12666                     {
12667                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12668                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12669                     }
12670                 }
12671 #endif // _TARGET_64BIT_
12672
12673                 if (opcode == CEE_STIND_REF)
12674                 {
12675                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12676                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12677                     lclTyp = genActualType(op2->TypeGet());
12678                 }
12679
12680 // Check target type.
12681 #ifdef DEBUG
12682                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12683                 {
12684                     if (op2->gtType == TYP_BYREF)
12685                     {
12686                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12687                     }
12688                     else if (lclTyp == TYP_BYREF)
12689                     {
12690                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12691                     }
12692                 }
12693                 else
12694                 {
12695                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12696                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12697                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12698                 }
12699 #endif
12700
12701                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12702
12703                 // stind could point anywhere, for example a boxed class static int
12704                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12705
12706                 if (prefixFlags & PREFIX_VOLATILE)
12707                 {
12708                     assert(op1->OperGet() == GT_IND);
12709                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12710                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12711                     op1->gtFlags |= GTF_IND_VOLATILE;
12712                 }
12713
12714                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12715                 {
12716                     assert(op1->OperGet() == GT_IND);
12717                     op1->gtFlags |= GTF_IND_UNALIGNED;
12718                 }
12719
12720                 op1 = gtNewAssignNode(op1, op2);
12721                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12722
12723                 // Spill side-effects AND global-data-accesses
12724                 if (verCurrentState.esStackDepth > 0)
12725                 {
12726                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12727                 }
12728
12729                 goto APPEND;
12730
12731             case CEE_LDIND_I1:
12732                 lclTyp = TYP_BYTE;
12733                 goto LDIND;
12734             case CEE_LDIND_I2:
12735                 lclTyp = TYP_SHORT;
12736                 goto LDIND;
12737             case CEE_LDIND_U4:
12738             case CEE_LDIND_I4:
12739                 lclTyp = TYP_INT;
12740                 goto LDIND;
12741             case CEE_LDIND_I8:
12742                 lclTyp = TYP_LONG;
12743                 goto LDIND;
12744             case CEE_LDIND_REF:
12745                 lclTyp = TYP_REF;
12746                 goto LDIND;
12747             case CEE_LDIND_I:
12748                 lclTyp = TYP_I_IMPL;
12749                 goto LDIND;
12750             case CEE_LDIND_R4:
12751                 lclTyp = TYP_FLOAT;
12752                 goto LDIND;
12753             case CEE_LDIND_R8:
12754                 lclTyp = TYP_DOUBLE;
12755                 goto LDIND;
12756             case CEE_LDIND_U1:
12757                 lclTyp = TYP_UBYTE;
12758                 goto LDIND;
12759             case CEE_LDIND_U2:
12760                 lclTyp = TYP_USHORT;
12761                 goto LDIND;
12762             LDIND:
12763
12764                 if (tiVerificationNeeded)
12765                 {
12766                     typeInfo lclTiType(lclTyp);
12767 #ifdef _TARGET_64BIT_
12768                     if (opcode == CEE_LDIND_I)
12769                     {
12770                         lclTiType = typeInfo::nativeInt();
12771                     }
12772 #endif // _TARGET_64BIT_
12773                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12774                     tiRetVal.NormaliseForStack();
12775                 }
12776                 else
12777                 {
12778                     compUnsafeCastUsed = true; // Have to go conservative
12779                 }
12780
12781             LDIND_POST_VERIFY:
12782
12783                 op1 = impPopStack().val; // address to load from
12784                 impBashVarAddrsToI(op1);
12785
12786 #ifdef _TARGET_64BIT_
12787                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12788                 //
12789                 if (genActualType(op1->gtType) == TYP_INT)
12790                 {
12791                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12792                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12793                 }
12794 #endif
12795
12796                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12797
12798                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12799
12800                 // ldind could point anywhere, for example a boxed class static int
12801                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12802
12803                 if (prefixFlags & PREFIX_VOLATILE)
12804                 {
12805                     assert(op1->OperGet() == GT_IND);
12806                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12807                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12808                     op1->gtFlags |= GTF_IND_VOLATILE;
12809                 }
12810
12811                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12812                 {
12813                     assert(op1->OperGet() == GT_IND);
12814                     op1->gtFlags |= GTF_IND_UNALIGNED;
12815                 }
12816
12817                 impPushOnStack(op1, tiRetVal);
12818
12819                 break;
12820
12821             case CEE_UNALIGNED:
12822
12823                 assert(sz == 1);
12824                 val = getU1LittleEndian(codeAddr);
12825                 ++codeAddr;
12826                 JITDUMP(" %u", val);
12827                 if ((val != 1) && (val != 2) && (val != 4))
12828                 {
12829                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12830                 }
12831
12832                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12833                 prefixFlags |= PREFIX_UNALIGNED;
12834
12835                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12836
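                  // Shared tail for the prefix opcodes (unaligned., volatile., constrained., etc.):
                  // fetch the opcode that follows the prefix and jump back to the decoder.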
12837             PREFIX:
12838                 opcode     = (OPCODE)getU1LittleEndian(codeAddr);
12839                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12840                 codeAddr += sizeof(__int8);
12841                 goto DECODE_OPCODE;
12842
12843             case CEE_VOLATILE:
12844
12845                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12846                 prefixFlags |= PREFIX_VOLATILE;
12847
12848                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12849
12850                 assert(sz == 0);
12851                 goto PREFIX;
12852
12853             case CEE_LDFTN:
12854             {
12855                 // Need to do a lookup here so that we perform an access check
12856                 // and do a NOWAY if protections are violated
12857                 _impResolveToken(CORINFO_TOKENKIND_Method);
12858
12859                 JITDUMP(" %08X", resolvedToken.token);
12860
12861                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12862                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12863                               &callInfo);
12864
12865                 // This check really only applies to intrinsic Array.Address methods
12866                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12867                 {
12868                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12869                 }
12870
12871                 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12872                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12873
12874                 if (tiVerificationNeeded)
12875                 {
12876                     // LDFTN could start the beginning of a delegate creation sequence, remember that
12877                     delegateCreateStart = codeAddr - 2;
12878
12879                     // check any constraints on the callee's class and type parameters
12880                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12881                                    "method has unsatisfied class constraints");
12882                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12883                                                                                 resolvedToken.hMethod),
12884                                    "method has unsatisfied method constraints");
12885
12886                     mflags = callInfo.verMethodFlags;
12887                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12888                 }
12889
12890             DO_LDFTN:
12891                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12892                 if (compDonotInline())
12893                 {
12894                     return;
12895                 }
12896
12897                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12898                 impPushOnStack(op1, typeInfo(heapToken));
12899
12900                 break;
12901             }
12902
12903             case CEE_LDVIRTFTN:
12904             {
12905                 /* Get the method token */
12906
12907                 _impResolveToken(CORINFO_TOKENKIND_Method);
12908
12909                 JITDUMP(" %08X", resolvedToken.token);
12910
12911                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12912                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12913                                                     CORINFO_CALLINFO_CALLVIRT)),
12914                               &callInfo);
12915
12916                 // This check really only applies to intrinsic Array.Address methods
12917                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12918                 {
12919                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12920                 }
12921
12922                 mflags = callInfo.methodFlags;
12923
12924                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12925
12926                 if (compIsForInlining())
12927                 {
12928                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12929                     {
12930                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12931                         return;
12932                     }
12933                 }
12934
12935                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12936
12937                 if (tiVerificationNeeded)
12938                 {
12939
12940                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12941                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12942
12943                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12944                     typeInfo declType =
12945                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12946
12947                     typeInfo arg = impStackTop().seTypeInfo;
12948                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12949                            "bad ldvirtftn");
12950
12951                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12952                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12953                     {
12954                         instanceClassHnd = arg.GetClassHandleForObjRef();
12955                     }
12956
12957                     // check any constraints on the method's class and type parameters
12958                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12959                                    "method has unsatisfied class constraints");
12960                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12961                                                                                 resolvedToken.hMethod),
12962                                    "method has unsatisfied method constraints");
12963
12964                     if (mflags & CORINFO_FLG_PROTECTED)
12965                     {
12966                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12967                                "Accessing protected method through wrong type.");
12968                     }
12969                 }
12970
12971                 /* Get the object-ref */
12972                 op1 = impPopStack().val;
12973                 assertImp(op1->gtType == TYP_REF);
12974
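                      // If the code address can be determined without consulting the object's
                      // vtable (R2R cases that don't need the LDVIRTFTN helper, or final/static/
                      // non-virtual targets), keep the object ref only for its side effects and
                      // reuse the plain LDFTN path.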
12975                 if (opts.IsReadyToRun())
12976                 {
12977                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12978                     {
12979                         if (op1->gtFlags & GTF_SIDE_EFFECT)
12980                         {
12981                             op1 = gtUnusedValNode(op1);
12982                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12983                         }
12984                         goto DO_LDFTN;
12985                     }
12986                 }
12987                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12988                 {
12989                     if (op1->gtFlags & GTF_SIDE_EFFECT)
12990                     {
12991                         op1 = gtUnusedValNode(op1);
12992                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12993                     }
12994                     goto DO_LDFTN;
12995                 }
12996
12997                 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12998                 if (compDonotInline())
12999                 {
13000                     return;
13001                 }
13002
13003                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13004                 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
13005                 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
13006                 impPushOnStack(fptr, typeInfo(heapToken));
13007
13008                 break;
13009             }
13010
13011             case CEE_CONSTRAINED:
13012
13013                 assertImp(sz == sizeof(unsigned));
13014                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
13015                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
13016                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
13017
13018                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
13019                 prefixFlags |= PREFIX_CONSTRAINED;
13020
13021                 {
13022                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13023                     if (actualOpcode != CEE_CALLVIRT)
13024                     {
13025                         BADCODE("constrained. has to be followed by callvirt");
13026                     }
13027                 }
13028
13029                 goto PREFIX;
13030
13031             case CEE_READONLY:
13032                 JITDUMP(" readonly.");
13033
13034                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
13035                 prefixFlags |= PREFIX_READONLY;
13036
13037                 {
13038                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13039                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
13040                     {
13041                         BADCODE("readonly. has to be followed by ldelema or call");
13042                     }
13043                 }
13044
13045                 assert(sz == 0);
13046                 goto PREFIX;
13047
13048             case CEE_TAILCALL:
13049                 JITDUMP(" tail.");
13050
13051                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
13052                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13053
13054                 {
13055                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13056                     if (!impOpcodeIsCallOpcode(actualOpcode))
13057                     {
13058                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
13059                     }
13060                 }
13061                 assert(sz == 0);
13062                 goto PREFIX;
13063
13064             case CEE_NEWOBJ:
13065
13066                 /* Since we will implicitly insert newObjThisPtr at the start of the
13067                    argument list, spill any GTF_ORDER_SIDEEFF */
13068                 impSpillSpecialSideEff();
13069
13070                 /* NEWOBJ does not respond to TAIL */
13071                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
13072
13073                 /* NEWOBJ does not respond to CONSTRAINED */
13074                 prefixFlags &= ~PREFIX_CONSTRAINED;
13075
13076                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
13077
13078                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13079                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
13080                               &callInfo);
13081
13082                 if (compIsForInlining())
13083                 {
13084                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13085                     {
13086                         // Check to see if this call violates the boundary.
13087                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
13088                         return;
13089                     }
13090                 }
13091
13092                 mflags = callInfo.methodFlags;
13093
13094                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
13095                 {
13096                     BADCODE("newobj on static or abstract method");
13097                 }
13098
13099                 // Insert the security callout before any actual code is generated
13100                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13101
13102                 // There are three different cases for new:
13103                 //      1) Object is an array (arrays are treated specially by the EE)
13104                 //      2) Object is some other variable-sized object (e.g. String)
13105                 //      3) Class size can be determined beforehand (the normal case)
13106                 // In cases 1) and 2) the object size is variable (it depends on the arguments).
13107                 // In the first case, we need to call a NEWOBJ helper (multinewarray),
13108                 // in the second case we call the constructor with a '0' this pointer,
13109                 // and in the third case we allocate the memory and then call the constructor.
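                      // For example, a newobj of a String constructor is case 2 (variable-sized,
                      // non-array), while a newobj of an ordinary class or value type is case 3.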
13110
13111                 clsFlags = callInfo.classFlags;
13112                 if (clsFlags & CORINFO_FLG_ARRAY)
13113                 {
13114                     if (tiVerificationNeeded)
13115                     {
13116                         CORINFO_CLASS_HANDLE elemTypeHnd;
13117                         INDEBUG(CorInfoType corType =)
13118                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13119                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
13120                         Verify(elemTypeHnd == nullptr ||
13121                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13122                                "newarr of byref-like objects");
13123                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
13124                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
13125                                       &callInfo DEBUGARG(info.compFullName));
13126                     }
13127                     // Arrays need to call the NEWOBJ helper.
13128                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
13129
13130                     impImportNewObjArray(&resolvedToken, &callInfo);
13131                     if (compDonotInline())
13132                     {
13133                         return;
13134                     }
13135
13136                     callTyp = TYP_REF;
13137                     break;
13138                 }
13139                 // At present this can only be String
13140                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
13141                 {
13142                     if (IsTargetAbi(CORINFO_CORERT_ABI))
13143                     {
13144                         // The dummy argument does not exist in CoreRT
13145                         newObjThisPtr = nullptr;
13146                     }
13147                     else
13148                     {
13149                         // This is the case for variable-sized objects that are not
13150                         // arrays.  In this case, call the constructor with a null 'this'
13151                         // pointer
13152                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
13153                     }
13154
13155                     /* Remember that this basic block contains 'new' of an object */
13156                     block->bbFlags |= BBF_HAS_NEWOBJ;
13157                     optMethodFlags |= OMF_HAS_NEWOBJ;
13158                 }
13159                 else
13160                 {
13161                     // This is the normal case where the size of the object is
13162                     // fixed.  Allocate the memory and call the constructor.
13163
13164                     // Note: We cannot add a peep to avoid use of temp here
13165                     // because we don't have enough interference info to detect when
13166                     // sources and destination interfere, for example: s = new S(ref);
13167
13168                     // TODO: Find the correct place to introduce a general
13169                     // reverse copy prop for struct return values from newobj or
13170                     // any function returning structs.
13171
13172                     /* get a temporary for the new object */
13173                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
13174                     if (compDonotInline())
13175                     {
13176                         // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
13177                         assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
13178                         return;
13179                     }
13180
13181                     // In the value class case we only need clsHnd for size calcs.
13182                     //
13183                     // The lookup of the code pointer will be handled by CALL in this case
13184                     if (clsFlags & CORINFO_FLG_VALUECLASS)
13185                     {
13186                         if (compIsForInlining())
13187                         {
13188                             // If value class has GC fields, inform the inliner. It may choose to
13189                             // bail out on the inline.
13190                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13191                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
13192                             {
13193                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
13194                                 if (compInlineResult->IsFailure())
13195                                 {
13196                                     return;
13197                                 }
13198
13199                                 // Do further notification in the case where the call site is rare;
13200                                 // some policies do not track the relative hotness of call sites for
13201                                 // "always" inline cases.
13202                                 if (impInlineInfo->iciBlock->isRunRarely())
13203                                 {
13204                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
13205                                     if (compInlineResult->IsFailure())
13206                                     {
13207                                         return;
13208                                     }
13209                                 }
13210                             }
13211                         }
13212
13213                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
13214                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
13215
13216                         if (impIsPrimitive(jitTyp))
13217                         {
13218                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
13219                         }
13220                         else
13221                         {
13222                             // The local variable itself is the allocated space.
13223                             // Here we need the unsafe value cls check, since the address of the struct is taken for further use
13224                             // and is potentially exploitable.
13225                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
13226                         }
13227                         if (compIsForInlining() || fgStructTempNeedsExplicitZeroInit(lvaTable + lclNum, block))
13228                         {
13229                             // Append a tree to zero-out the temp
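                            // (conceptually: an init-block store of 'size' zero bytes over the new temp)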
13230                             newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
13231
13232                             newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
13233                                                            gtNewIconNode(0), // Value
13234                                                            size,             // Size
13235                                                            false,            // isVolatile
13236                                                            false);           // not copyBlock
13237                             impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
13238                         }
13239
13240                         // Obtain the address of the temp
13241                         newObjThisPtr =
13242                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
13243                     }
13244                     else
13245                     {
13246 #ifdef FEATURE_READYTORUN_COMPILER
13247                         if (opts.IsReadyToRun())
13248                         {
13249                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
13250                             usingReadyToRunHelper = (op1 != nullptr);
13251                         }
13252
13253                         if (!usingReadyToRunHelper)
13254 #endif
13255                         {
13256                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
13257                             if (op1 == nullptr)
13258                             { // compDonotInline()
13259                                 return;
13260                             }
13261
13262                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13263                             // and the newfast call with a single call to a dynamic R2R cell that will:
13264                             //      1) Load the context
13265                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
13266                             //      stub
13267                             //      3) Allocate and return the new object
13268                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13269
13270                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
13271                                                     resolvedToken.hClass, TYP_REF, op1);
13272                         }
13273
13274                         // Remember that this basic block contains 'new' of an object
13275                         block->bbFlags |= BBF_HAS_NEWOBJ;
13276                         optMethodFlags |= OMF_HAS_NEWOBJ;
13277
13278                         // Append the assignment to the temp/local. Don't need to spill
13279                         // at all as we are just calling an EE-Jit helper which can only
13280                         // cause an (async) OutOfMemoryException.
13281
13282                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
13283                         // to a temp. Note that the pattern "temp = allocObj" is required
13284                         // by the ObjectAllocator phase to be able to find GT_ALLOCOBJ nodes
13285                         // without an exhaustive walk over all expressions.
13286
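                        // I.e. the appended statement has the shape  lclNum = ALLOCOBJ(newHelper, clsHnd),
                        // and newObjThisPtr below is just a use of that temp as the 'this' argument.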
13287                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
13288                         lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
13289
13290                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
13291                     }
13292                 }
13293                 goto CALL;
13294
13295             case CEE_CALLI:
13296
13297                 /* The CONSTRAINED prefix does not apply to CALLI */
13298                 prefixFlags &= ~PREFIX_CONSTRAINED;
13299
13300                 if (compIsForInlining())
13301                 {
13302                     // CALLI doesn't have a method handle, so assume the worst.
13303                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13304                     {
13305                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
13306                         return;
13307                     }
13308                 }
13309
13310             // fall through
13311
13312             case CEE_CALLVIRT:
13313             case CEE_CALL:
13314
13315                 // We can't call getCallInfo on the token from a CALLI, but we need it in
13316                 // many other places.  We unfortunately embed that knowledge here.
13317                 if (opcode != CEE_CALLI)
13318                 {
13319                     _impResolveToken(CORINFO_TOKENKIND_Method);
13320
13321                     eeGetCallInfo(&resolvedToken,
13322                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
13323                                   // this is how impImportCall invokes getCallInfo
13324                                   addVerifyFlag(
13325                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
13326                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
13327                                                                        : CORINFO_CALLINFO_NONE)),
13328                                   &callInfo);
13329                 }
13330                 else
13331                 {
13332                     // Suppress uninitialized use warning.
13333                     memset(&resolvedToken, 0, sizeof(resolvedToken));
13334                     memset(&callInfo, 0, sizeof(callInfo));
13335
13336                     resolvedToken.token = getU4LittleEndian(codeAddr);
13337                 }
13338
13339             CALL: // memberRef should be set.
13340                 // newObjThisPtr should be set for CEE_NEWOBJ
13341
13342                 JITDUMP(" %08X", resolvedToken.token);
13343                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13344
13345                 bool newBBcreatedForTailcallStress;
13346
13347                 newBBcreatedForTailcallStress = false;
13348
13349                 if (compIsForInlining())
13350                 {
13351                     if (compDonotInline())
13352                     {
13353                         return;
13354                     }
13355                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13356                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13357                 }
13358                 else
13359                 {
13360                     if (compTailCallStress())
13361                     {
13362                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13363                         // Tail call stress only recognizes call+ret patterns and forces them to be
13364                         // explicit tail prefixed calls.  Also, under tail call stress, fgMakeBasicBlocks()
13365                         // doesn't import the 'ret' opcode following the call into the basic block containing
13366                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
13367                         // already checks that there is an opcode following the call, so it is safe here to
13368                         // read the next opcode without a bounds check.
13369                         newBBcreatedForTailcallStress =
13370                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
13371                                                              // make it jump to RET.
13372                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
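                        // I.e. the IL pattern being stress-converted here is simply:
                        //     call/callvirt/calli <target>
                        //     ret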
13373
13374                         if (newBBcreatedForTailcallStress &&
13375                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
13376                             verCheckTailCallConstraint(opcode, &resolvedToken,
13377                                                        constraintCall ? &constrainedResolvedToken : nullptr,
13378                                                        true) // Is it legal to do tailcall?
13379                             )
13380                         {
13381                             // Stress the tailcall.
13382                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13383                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13384                         }
13385                     }
13386                 }
13387
13388                 // This is split up to avoid goto flow warnings.
13389                 bool isRecursive;
13390                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13391
13392                 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed and
13393                 // hence will not be considered for implicit tail calling.
13394                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13395                 {
13396                     if (compIsForInlining())
13397                     {
13398 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13399                         // Are we inlining at an implicit tail call site? If so, then we can flag
13400                         // implicit tail call sites in the inline body. These call sites
13401                         // often end up in non BBJ_RETURN blocks, so only flag them when
13402                         // we're able to handle shared returns.
13403                         if (impInlineInfo->iciCall->IsImplicitTailCall())
13404                         {
13405                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13406                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13407                         }
13408 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13409                     }
13410                     else
13411                     {
13412                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13413                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13414                     }
13415                 }
13416
13417                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13418                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13419                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
13420
13421                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13422                 {
13423                     // All calls and delegates need a security callout.
13424                     // For delegates, this is the call to the delegate constructor, not the access check on the
13425                     // LD(virt)FTN.
13426                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13427
13428 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
13429
13430                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
13431                 // and the field it is reading, thus it is now unverifiable if the call is not immediately preceded by
13432                 // ldtoken <field token>, and we now check accessibility
13433                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
13434                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
13435                 {
13436                     if (prevOpcode != CEE_LDTOKEN)
13437                     {
13438                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
13439                     }
13440                     else
13441                     {
13442                         assert(lastLoadToken != NULL);
13443                         // Now that we know we have a token, verify that it is accessible for loading
13444                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
13445                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
13446                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
13447                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13448                     }
13449                 }
13450
13451 #endif // DevDiv 410397
13452                 }
13453
13454                 if (tiVerificationNeeded)
13455                 {
13456                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13457                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13458                                   &callInfo DEBUGARG(info.compFullName));
13459                 }
13460
13461                 // Insert delegate callout here.
13462                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
13463                 {
13464 #ifdef DEBUG
13465                     // We should do this only if verification is enabled
13466                     // If verification is disabled, delegateCreateStart will not be initialized correctly
13467                     if (tiVerificationNeeded)
13468                     {
13469                         mdMemberRef delegateMethodRef = mdMemberRefNil;
13470                         // We should get here only for well formed delegate creation.
13471                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
13472                     }
13473 #endif
13474                 }
13475
13476                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13477                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13478                 if (compDonotInline())
13479                 {
13480                     // We do not check for failures after lvaGrabTemp. That case is covered by the CoreCLR_13272 issue.
13481                     assert((callTyp == TYP_UNDEF) ||
13482                            (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
13483                     return;
13484                 }
13485
13486                 // If newBBcreatedForTailcallStress is true, we have created a new BB after the "call"
13487                 // instruction in fgMakeBasicBlocks(), so we need to jump to RET regardless.
13488                 if (explicitTailCall || newBBcreatedForTailcallStress)
13489                 {
13490                     assert(!compIsForInlining());
13491                     goto RET;
13492                 }
13493
13494                 break;
13495
13496             case CEE_LDFLD:
13497             case CEE_LDSFLD:
13498             case CEE_LDFLDA:
13499             case CEE_LDSFLDA:
13500             {
13501
13502                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
13503                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
13504
13505                 /* Get the CP_Fieldref index */
13506                 assertImp(sz == sizeof(unsigned));
13507
13508                 _impResolveToken(CORINFO_TOKENKIND_Field);
13509
13510                 JITDUMP(" %08X", resolvedToken.token);
13511
13512                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
13513
13514                 GenTreePtr           obj     = nullptr;
13515                 typeInfo*            tiObj   = nullptr;
13516                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
13517
13518                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
13519                 {
13520                     tiObj         = &impStackTop().seTypeInfo;
13521                     StackEntry se = impPopStack();
13522                     objType       = se.seTypeInfo.GetClassHandle();
13523                     obj           = se.val;
13524
13525                     if (impIsThis(obj))
13526                     {
13527                         aflags |= CORINFO_ACCESS_THIS;
13528
13529                         // An optimization for Contextful classes:
13530                         // we unwrap the proxy when we have a 'this reference'
13531
13532                         if (info.compUnwrapContextful)
13533                         {
13534                             aflags |= CORINFO_ACCESS_UNWRAP;
13535                         }
13536                     }
13537                 }
13538
13539                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13540
13541                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13542                 // handle
13543                 CorInfoType ciType = fieldInfo.fieldType;
13544                 clsHnd             = fieldInfo.structType;
13545
13546                 lclTyp = JITtype2varType(ciType);
13547
13548 #ifdef _TARGET_AMD64_
13549                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
13550 #endif // _TARGET_AMD64_
13551
13552                 if (compIsForInlining())
13553                 {
13554                     switch (fieldInfo.fieldAccessor)
13555                     {
13556                         case CORINFO_FIELD_INSTANCE_HELPER:
13557                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13558                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13559                         case CORINFO_FIELD_STATIC_TLS:
13560
13561                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
13562                             return;
13563
13564                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13565                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13566                             /* We may be able to inline the field accessors in specific instantiations of generic
13567                              * methods */
13568                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
13569                             return;
13570
13571                         default:
13572                             break;
13573                     }
13574
13575                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
13576                         clsHnd)
13577                     {
13578                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
13579                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
13580                         {
13581                             // Loading a static valuetype field usually will cause a JitHelper to be called
13582                             // for the static base. This will bloat the code.
13583                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
13584
13585                             if (compInlineResult->IsFailure())
13586                             {
13587                                 return;
13588                             }
13589                         }
13590                     }
13591                 }
13592
13593                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
13594                 if (isLoadAddress)
13595                 {
13596                     tiRetVal.MakeByRef();
13597                 }
13598                 else
13599                 {
13600                     tiRetVal.NormaliseForStack();
13601                 }
13602
13603                 // Always perform this check to ensure that we get field access exceptions even with
13604                 // SkipVerification.
13605                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13606
13607                 if (tiVerificationNeeded)
13608                 {
13609                     // You can also pass the unboxed struct to LDFLD
13610                     BOOL bAllowPlainValueTypeAsThis = FALSE;
13611                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13612                     {
13613                         bAllowPlainValueTypeAsThis = TRUE;
13614                     }
13615
13616                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13617
13618                     // If we're doing this on a heap object or from a 'safe' byref
13619                     // then the result is a safe byref too
13620                     if (isLoadAddress) // load address
13621                     {
13622                         if (fieldInfo.fieldFlags &
13623                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
13624                         {
13625                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13626                             {
13627                                 tiRetVal.SetIsPermanentHomeByRef();
13628                             }
13629                         }
13630                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13631                         {
13632                             // ldflda of byref is safe if done on a gc object or on a
13633                             // safe byref
13634                             tiRetVal.SetIsPermanentHomeByRef();
13635                         }
13636                     }
13637                 }
13638                 else
13639                 {
13640                     // tiVerificationNeeded is false.
13641                     // Raise InvalidProgramException if static load accesses non-static field
13642                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13643                     {
13644                         BADCODE("static access on an instance field");
13645                     }
13646                 }
13647
13648                 // We are using ldfld/a on a static field. We allow it, but we need to evaluate any side effects of obj.
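                // E.g. for an ldfld on an object expression with side effects that resolves to a static field, we
                // append the object expression as an unused value (so its side effects still happen) and then drop
                // it, since the static field's location does not depend on the object.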
13649                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13650                 {
13651                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13652                     {
13653                         obj = gtUnusedValNode(obj);
13654                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13655                     }
13656                     obj = nullptr;
13657                 }
13658
13659                 /* Preserve 'small' int types */
13660                 if (!varTypeIsSmall(lclTyp))
13661                 {
13662                     lclTyp = genActualType(lclTyp);
13663                 }
13664
13665                 bool usesHelper = false;
13666
13667                 switch (fieldInfo.fieldAccessor)
13668                 {
13669                     case CORINFO_FIELD_INSTANCE:
13670 #ifdef FEATURE_READYTORUN_COMPILER
13671                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13672 #endif
13673                     {
13674                         bool nullcheckNeeded = false;
13675
13676                         obj = impCheckForNullPointer(obj);
13677
13678                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13679                         {
13680                             nullcheckNeeded = true;
13681                         }
13682
13683                         // If the object is a struct, what we really want is
13684                         // for the field to operate on the address of the struct.
13685                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13686                         {
13687                             assert(opcode == CEE_LDFLD && objType != nullptr);
13688
13689                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13690                         }
13691
13692                         /* Create the data member node */
13693                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13694
13695 #ifdef FEATURE_READYTORUN_COMPILER
13696                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13697                         {
13698                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13699                         }
13700 #endif
13701
13702                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13703
13704                         if (fgAddrCouldBeNull(obj))
13705                         {
13706                             op1->gtFlags |= GTF_EXCEPT;
13707                         }
13708
13709                         // If gtFldObj is a BYREF then our target is a value class and
13710                         // it could point anywhere, for example a boxed class static int
13711                         if (obj->gtType == TYP_BYREF)
13712                         {
13713                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13714                         }
13715
13716                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13717                         if (StructHasOverlappingFields(typeFlags))
13718                         {
13719                             op1->gtField.gtFldMayOverlap = true;
13720                         }
13721
13722                         // Wrap it in an address-of operator if necessary
13723                         if (isLoadAddress)
13724                         {
13725                             op1 = gtNewOperNode(GT_ADDR,
13726                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13727                         }
13728                         else
13729                         {
13730                             if (compIsForInlining() &&
13731                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13732                                                                                    impInlineInfo->inlArgInfo))
13733                             {
13734                                 impInlineInfo->thisDereferencedFirst = true;
13735                             }
13736                         }
13737                     }
13738                     break;
13739
13740                     case CORINFO_FIELD_STATIC_TLS:
13741 #ifdef _TARGET_X86_
13742                         // Legacy TLS access is implemented as intrinsic on x86 only
13743
13744                         /* Create the data member node */
13745                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13746                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13747
13748                         if (isLoadAddress)
13749                         {
13750                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13751                         }
13752                         break;
13753 #else
13754                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13755
13756                         __fallthrough;
13757 #endif
13758
13759                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13760                     case CORINFO_FIELD_INSTANCE_HELPER:
13761                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13762                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13763                                                clsHnd, nullptr);
13764                         usesHelper = true;
13765                         break;
13766
13767                     case CORINFO_FIELD_STATIC_ADDRESS:
13768                         // Replace static read-only fields with constant if possible
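                        // (E.g. an already-initialized integral readonly static can be imported directly as a
                        // constant node holding its current value; the exact conditions are checked below.)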
13769                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13770                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13771                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13772                         {
13773                             CorInfoInitClassResult initClassResult =
13774                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13775                                                             impTokenLookupContextHandle);
13776
13777                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13778                             {
13779                                 void** pFldAddr = nullptr;
13780                                 void*  fldAddr =
13781                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13782
13783                                 // We should always be able to access this static's address directly
13784                                 assert(pFldAddr == nullptr);
13785
13786                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13787                                 goto FIELD_DONE;
13788                             }
13789                         }
13790
13791                         __fallthrough;
13792
13793                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13794                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13795                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13796                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13797                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13798                                                          lclTyp);
13799                         break;
13800
13801                     case CORINFO_FIELD_INTRINSIC_ZERO:
13802                     {
13803                         assert(aflags & CORINFO_ACCESS_GET);
13804                         op1 = gtNewIconNode(0, lclTyp);
13805                         goto FIELD_DONE;
13806                     }
13807                     break;
13808
13809                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13810                     {
13811                         assert(aflags & CORINFO_ACCESS_GET);
13812
13813                         LPVOID         pValue;
13814                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13815                         op1                = gtNewStringLiteralNode(iat, pValue);
13816                         goto FIELD_DONE;
13817                     }
13818                     break;
13819
13820                     case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
13821                     {
13822                         assert(aflags & CORINFO_ACCESS_GET);
13823 #if BIGENDIAN
13824                         op1 = gtNewIconNode(0, lclTyp);
13825 #else
13826                         op1                     = gtNewIconNode(1, lclTyp);
13827 #endif
13828                         goto FIELD_DONE;
13829                     }
13830                     break;
13831
13832                     default:
13833                         assert(!"Unexpected fieldAccessor");
13834                 }
13835
13836                 if (!isLoadAddress)
13837                 {
13838
13839                     if (prefixFlags & PREFIX_VOLATILE)
13840                     {
13841                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13842                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13843
13844                         if (!usesHelper)
13845                         {
13846                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13847                                    (op1->OperGet() == GT_OBJ));
13848                             op1->gtFlags |= GTF_IND_VOLATILE;
13849                         }
13850                     }
13851
13852                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13853                     {
13854                         if (!usesHelper)
13855                         {
13856                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13857                                    (op1->OperGet() == GT_OBJ));
13858                             op1->gtFlags |= GTF_IND_UNALIGNED;
13859                         }
13860                     }
13861                 }
13862
13863                 /* Check if the class needs explicit initialization */
13864
13865                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13866                 {
13867                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13868                     if (compDonotInline())
13869                     {
13870                         return;
13871                     }
13872                     if (helperNode != nullptr)
13873                     {
13874                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13875                     }
13876                 }
13877
13878             FIELD_DONE:
13879                 impPushOnStack(op1, tiRetVal);
13880             }
13881             break;
13882
13883             case CEE_STFLD:
13884             case CEE_STSFLD:
13885             {
13886
13887                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13888
13889                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13890
13891                 /* Get the CP_Fieldref index */
13892
13893                 assertImp(sz == sizeof(unsigned));
13894
13895                 _impResolveToken(CORINFO_TOKENKIND_Field);
13896
13897                 JITDUMP(" %08X", resolvedToken.token);
13898
13899                 int        aflags = CORINFO_ACCESS_SET;
13900                 GenTreePtr obj    = nullptr;
13901                 typeInfo*  tiObj  = nullptr;
13902                 typeInfo   tiVal;
13903
13904                 /* Pull the value from the stack */
13905                 StackEntry se = impPopStack();
13906                 op2           = se.val;
13907                 tiVal         = se.seTypeInfo;
13908                 clsHnd        = tiVal.GetClassHandle();
13909
13910                 if (opcode == CEE_STFLD)
13911                 {
13912                     tiObj = &impStackTop().seTypeInfo;
13913                     obj   = impPopStack().val;
13914
13915                     if (impIsThis(obj))
13916                     {
13917                         aflags |= CORINFO_ACCESS_THIS;
13918
13919                         // An optimization for Contextful classes:
13920                         // we unwrap the proxy when we have a 'this reference'
13921
13922                         if (info.compUnwrapContextful)
13923                         {
13924                             aflags |= CORINFO_ACCESS_UNWRAP;
13925                         }
13926                     }
13927                 }
13928
13929                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13930
13931                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13932                 // handle
13933                 CorInfoType ciType = fieldInfo.fieldType;
13934                 fieldClsHnd        = fieldInfo.structType;
13935
13936                 lclTyp = JITtype2varType(ciType);
13937
13938                 if (compIsForInlining())
13939                 {
13940                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or a
13941                      * per-inst static? */
13942
13943                     switch (fieldInfo.fieldAccessor)
13944                     {
13945                         case CORINFO_FIELD_INSTANCE_HELPER:
13946                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13947                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13948                         case CORINFO_FIELD_STATIC_TLS:
13949
13950                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13951                             return;
13952
13953                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13954                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13955                             /* We may be able to inline the field accessors in specific instantiations of generic
13956                              * methods */
13957                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13958                             return;
13959
13960                         default:
13961                             break;
13962                     }
13963                 }
13964
13965                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13966
13967                 if (tiVerificationNeeded)
13968                 {
13969                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13970                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13971                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13972                 }
13973                 else
13974                 {
13975                     // tiVerificationNeeded is false.
13976                     // Raise InvalidProgramException if static store accesses non-static field
13977                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13978                     {
13979                         BADCODE("static access on an instance field");
13980                     }
13981                 }
13982
13983                 // We are using stfld on a static field.
13984                 // We allow it, but we need to evaluate any side effects of obj
13985                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13986                 {
13987                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13988                     {
13989                         obj = gtUnusedValNode(obj);
13990                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13991                     }
13992                     obj = nullptr;
13993                 }
13994
13995                 /* Preserve 'small' int types */
13996                 if (!varTypeIsSmall(lclTyp))
13997                 {
13998                     lclTyp = genActualType(lclTyp);
13999                 }
14000
14001                 switch (fieldInfo.fieldAccessor)
14002                 {
14003                     case CORINFO_FIELD_INSTANCE:
14004 #ifdef FEATURE_READYTORUN_COMPILER
14005                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
14006 #endif
14007                     {
14008                         obj = impCheckForNullPointer(obj);
14009
14010                         /* Create the data member node */
14011                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14012                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14013                         if (StructHasOverlappingFields(typeFlags))
14014                         {
14015                             op1->gtField.gtFldMayOverlap = true;
14016                         }
14017
14018 #ifdef FEATURE_READYTORUN_COMPILER
14019                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14020                         {
14021                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14022                         }
14023 #endif
14024
14025                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14026
14027                         if (fgAddrCouldBeNull(obj))
14028                         {
14029                             op1->gtFlags |= GTF_EXCEPT;
14030                         }
14031
14032                         // If gtFldObj is a BYREF then our target is a value class and
14033                         // it could point anywhere, for example a boxed class static int
14034                         if (obj->gtType == TYP_BYREF)
14035                         {
14036                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
14037                         }
14038
14039                         if (compIsForInlining() &&
14040                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
14041                         {
14042                             impInlineInfo->thisDereferencedFirst = true;
14043                         }
14044                     }
14045                     break;
14046
14047                     case CORINFO_FIELD_STATIC_TLS:
14048 #ifdef _TARGET_X86_
14049                         // Legacy TLS access is implemented as intrinsic on x86 only
14050
14051                         /* Create the data member node */
14052                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14053                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14054
14055                         break;
14056 #else
14057                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14058
14059                         __fallthrough;
14060 #endif
14061
14062                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
14063                     case CORINFO_FIELD_INSTANCE_HELPER:
14064                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14065                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14066                                                clsHnd, op2);
14067                         goto SPILL_APPEND;
14068
14069                     case CORINFO_FIELD_STATIC_ADDRESS:
14070                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14071                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14072                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14073                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14074                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14075                                                          lclTyp);
14076                         break;
14077
14078                     default:
14079                         assert(!"Unexpected fieldAccessor");
14080                 }
14081
14082                 // Create the member assignment, unless we have a struct.
14083                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
14084                 bool deferStructAssign = varTypeIsStruct(lclTyp);
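                // (Struct-typed stores are not assembled here; they are deferred until after the spills below and
                // then built via impAssignStruct.)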
14085
14086                 if (!deferStructAssign)
14087                 {
14088                     if (prefixFlags & PREFIX_VOLATILE)
14089                     {
14090                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14091                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
14092                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14093                         op1->gtFlags |= GTF_IND_VOLATILE;
14094                     }
14095                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14096                     {
14097                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14098                         op1->gtFlags |= GTF_IND_UNALIGNED;
14099                     }
14100
14101                     /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
14102                        trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union
14103                        during importation and reads from the union as if it were a long during code generation. Though
14104                        this can potentially read garbage, one can get lucky and have it work correctly.
14105
14106                        This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
14107                        /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
14108                        dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
14109                        it works correctly always.
14110
14111                        Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
14112                        for V4.0.
14113                     */
14114                     CLANG_FORMAT_COMMENT_ANCHOR;
14115
14116 #ifndef _TARGET_64BIT_
14117                     // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
14118                     // generated for ARM as well as x86, so the following IR will be accepted:
14119                     //     *  STMT      void
14120                     //         |  /--*  CNS_INT   int    2
14121                     //         \--*  ASG       long
14122                     //            \--*  CLS_VAR   long
14123
14124                     if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
14125                         varTypeIsLong(op1->TypeGet()))
14126                     {
14127                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
14128                     }
14129 #endif
14130
14131 #ifdef _TARGET_64BIT_
14132                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
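                    // (E.g. storing a TYP_INT constant into a native-int-typed field on a 64-bit target just retypes
                    //  the constant to TYP_I_IMPL rather than inserting a cast.)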
14133                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
14134                     {
14135                         op2->gtType = TYP_I_IMPL;
14136                     }
14137                     else
14138                     {
14139                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
14140                         //
14141                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
14142                         {
14143                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
14144                         }
14145                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
14146                         //
14147                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
14148                         {
14149                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
14150                         }
14151                     }
14152 #endif
14153
14154 #if !FEATURE_X87_DOUBLES
14155                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
14156                     // We insert a cast to the dest 'op1' type
14157                     //
14158                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
14159                         varTypeIsFloating(op2->gtType))
14160                     {
14161                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
14162                     }
14163 #endif // !FEATURE_X87_DOUBLES
14164
14165                     op1 = gtNewAssignNode(op1, op2);
14166
14167                     /* Mark the expression as containing an assignment */
14168
14169                     op1->gtFlags |= GTF_ASG;
14170                 }
14171
14172                 /* Check if the class needs explicit initialization */
14173
14174                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14175                 {
14176                     GenTreePtr helperNode = impInitClass(&resolvedToken);
14177                     if (compDonotInline())
14178                     {
14179                         return;
14180                     }
14181                     if (helperNode != nullptr)
14182                     {
14183                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14184                     }
14185                 }
14186
14187                 /* stfld can interfere with value classes (consider the sequence
14188                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
14189                    spill all value class references from the stack. */
14190
14191                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
14192                 {
14193                     assert(tiObj);
14194
14195                     if (impIsValueType(tiObj))
14196                     {
14197                         impSpillEvalStack();
14198                     }
14199                     else
14200                     {
14201                         impSpillValueClasses();
14202                     }
14203                 }
14204
14205                 /* Spill any refs to the same member from the stack */
14206
14207                 impSpillLclRefs((ssize_t)resolvedToken.hField);
14208
14209                 /* stsfld also interferes with indirect accesses (for aliased
14210                    statics) and calls. But we don't need to spill other statics
14211                    as we have explicitly spilled this particular static field. */
14212
14213                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
14214
14215                 if (deferStructAssign)
14216                 {
14217                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
14218                 }
14219             }
14220                 goto APPEND;
14221
14222             case CEE_NEWARR:
14223             {
14224
14225                 /* Get the class type index operand */
14226
14227                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
14228
14229                 JITDUMP(" %08X", resolvedToken.token);
14230
14231                 if (!opts.IsReadyToRun())
14232                 {
14233                     // Need to restore array classes before creating array objects on the heap
14234                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14235                     if (op1 == nullptr)
14236                     { // compDonotInline()
14237                         return;
14238                     }
14239                 }
14240
14241                 if (tiVerificationNeeded)
14242                 {
14243                     // As per the ECMA spec, 'numElems' can be either an int32 or a native int.
14244                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
14245
14246                     CORINFO_CLASS_HANDLE elemTypeHnd;
14247                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
14248                     Verify(elemTypeHnd == nullptr ||
14249                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
14250                            "array of byref-like type");
14251                 }
14252
14253                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14254
14255                 accessAllowedResult =
14256                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14257                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14258
14259                 /* Form the arglist: array class handle, size */
14260                 op2 = impPopStack().val;
14261                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14262
14263 #ifdef FEATURE_READYTORUN_COMPILER
14264                 if (opts.IsReadyToRun())
14265                 {
14266                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
14267                                                     gtNewArgList(op2));
14268                     usingReadyToRunHelper = (op1 != nullptr);
14269
14270                     if (!usingReadyToRunHelper)
14271                     {
14272                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14273                         // and the newarr call with a single call to a dynamic R2R cell that will:
14274                         //      1) Load the context
14275                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14276                         //      3) Allocate the new array
14277                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14278
14279                         // Need to restore array classes before creating array objects on the heap
14280                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14281                         if (op1 == nullptr)
14282                         { // compDonotInline()
14283                             return;
14284                         }
14285                     }
14286                 }
14287
14288                 if (!usingReadyToRunHelper)
14289 #endif
14290                 {
14291                     args = gtNewArgList(op1, op2);
14292
14293                     /* Create a call to 'new' */
14294
14295                     // Note that this only works for shared generic code because the same helper is used for all
14296                     // reference array types
14297                     op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
14298                 }
14299
14300                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
14301
14302                 /* Remember that this basic block contains 'new' of a single-dimension (SD) array */
14303
14304                 block->bbFlags |= BBF_HAS_NEWARRAY;
14305                 optMethodFlags |= OMF_HAS_NEWARRAY;
14306
14307                 /* Push the result of the call on the stack */
14308
14309                 impPushOnStack(op1, tiRetVal);
14310
14311                 callTyp = TYP_REF;
14312             }
14313             break;
14314
14315             case CEE_LOCALLOC:
14316                 if (tiVerificationNeeded)
14317                 {
14318                     Verify(false, "bad opcode");
14319                 }
14320
14321                 // We don't allow locallocs inside handlers
14322                 if (block->hasHndIndex())
14323                 {
14324                     BADCODE("Localloc can't be inside handler");
14325                 }
14326
14327                 setNeedsGSSecurityCookie();
14328
14329                 // Get the size to allocate
14330
14331                 op2 = impPopStack().val;
14332                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14333
14334                 if (verCurrentState.esStackDepth != 0)
14335                 {
14336                     BADCODE("Localloc can only be used when the stack is empty");
14337                 }
14338
14339                 // If the localloc is not in a loop and its size is a small constant,
14340                 // create a new local var of TYP_BLK and return its address.
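                      // For example, a fixed-size `localloc 32` in a block reached only by
                      // forward control flow (no BBF_BACKWARD_JUMP) can become a 32-byte
                      // TYP_BLK local whose address is pushed, avoiding GT_LCLHEAP entirely.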
14341                 {
14342                     bool convertedToLocal = false;
14343
14344                     // Need to aggressively fold here, as even fixed-size locallocs
14345                     // will have casts in the way.
14346                     op2 = gtFoldExpr(op2);
14347
14348                     if (op2->IsIntegralConst())
14349                     {
14350                         const ssize_t allocSize = op2->AsIntCon()->IconValue();
14351
14352                         if (allocSize == 0)
14353                         {
14354                             // Result is nullptr
14355                             JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
14356                             op1              = gtNewIconNode(0, TYP_I_IMPL);
14357                             convertedToLocal = true;
14358                         }
14359                         else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
14360                         {
14361                             // Get the size threshold for local conversion
14362                             ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
14363
14364 #ifdef DEBUG
14365                             // Optionally allow this to be modified
14366                             maxSize = JitConfig.JitStackAllocToLocalSize();
14367 #endif // DEBUG
14368
14369                             if (allocSize <= maxSize)
14370                             {
14371                                 const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
14372                                 JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
14373                                         stackallocAsLocal);
14374                                 lvaTable[stackallocAsLocal].lvType           = TYP_BLK;
14375                                 lvaTable[stackallocAsLocal].lvExactSize      = (unsigned)allocSize;
14376                                 lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
14377                                 op1                      = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
14378                                 op1                      = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
14379                                 convertedToLocal         = true;
14380                                 compGSReorderStackLayout = true;
14381                             }
14382                         }
14383                     }
14384
14385                     if (!convertedToLocal)
14386                     {
14387                         // Bail out if inlining and the localloc was not converted.
14388                         //
14389                         // Note we might consider allowing the inline, if the call
14390                         // site is not in a loop.
14391                         if (compIsForInlining())
14392                         {
14393                             InlineObservation obs = op2->IsIntegralConst()
14394                                                         ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
14395                                                         : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
14396                             compInlineResult->NoteFatal(obs);
14397                             return;
14398                         }
14399
14400                         op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14401                         // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14402                         op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14403
14404                         /* The FP register may not be back to the original value at the end
14405                            of the method, even if the frame size is 0, as localloc may
14406                            have modified it. So we will HAVE to reset it */
14407                         compLocallocUsed = true;
14408                     }
14409                     else
14410                     {
14411                         compLocallocOptimized = true;
14412                     }
14413                 }
14414
14415                 impPushOnStack(op1, tiRetVal);
14416                 break;
14417
14418             case CEE_ISINST:
14419             {
14420                 /* Get the type token */
14421                 assertImp(sz == sizeof(unsigned));
14422
14423                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14424
14425                 JITDUMP(" %08X", resolvedToken.token);
14426
14427                 if (!opts.IsReadyToRun())
14428                 {
14429                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14430                     if (op2 == nullptr)
14431                     { // compDonotInline()
14432                         return;
14433                     }
14434                 }
14435
14436                 if (tiVerificationNeeded)
14437                 {
14438                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14439                     // Even if this is a value class, we know it is boxed.
14440                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14441                 }
14442                 accessAllowedResult =
14443                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14444                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14445
14446                 op1 = impPopStack().val;
14447
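                      // impOptimizeCastClassOrIsInst may fold the test at jit time (for
                      // instance, when the operand's exact class is known), in which case
                      // no runtime helper call is needed.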
14448                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
14449
14450                 if (optTree != nullptr)
14451                 {
14452                     impPushOnStack(optTree, tiRetVal);
14453                 }
14454                 else
14455                 {
14456
14457 #ifdef FEATURE_READYTORUN_COMPILER
14458                     if (opts.IsReadyToRun())
14459                     {
14460                         GenTreeCall* opLookup =
14461                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14462                                                       gtNewArgList(op1));
14463                         usingReadyToRunHelper = (opLookup != nullptr);
14464                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
14465
14466                         if (!usingReadyToRunHelper)
14467                         {
14468                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14469                             // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14470                             //      1) Load the context
14471                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
14472                             //      stub
14473                             //      3) Perform the 'is instance' check on the input object
14474                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14475
14476                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14477                             if (op2 == nullptr)
14478                             { // compDonotInline()
14479                                 return;
14480                             }
14481                         }
14482                     }
14483
14484                     if (!usingReadyToRunHelper)
14485 #endif
14486                     {
14487                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
14488                     }
14489                     if (compDonotInline())
14490                     {
14491                         return;
14492                     }
14493
14494                     impPushOnStack(op1, tiRetVal);
14495                 }
14496                 break;
14497             }
14498
14499             case CEE_REFANYVAL:
14500
14501                 // get the class handle and make a ICON node out of it
14502
14503                 _impResolveToken(CORINFO_TOKENKIND_Class);
14504
14505                 JITDUMP(" %08X", resolvedToken.token);
14506
14507                 op2 = impTokenToHandle(&resolvedToken);
14508                 if (op2 == nullptr)
14509                 { // compDonotInline()
14510                     return;
14511                 }
14512
14513                 if (tiVerificationNeeded)
14514                 {
14515                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14516                            "need refany");
14517                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
14518                 }
14519
14520                 op1 = impPopStack().val;
14521                 // make certain it is normalized;
14522                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14523
14524                 // Call helper GETREFANY(classHandle, op1);
14525                 args = gtNewArgList(op2, op1);
14526                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
14527
14528                 impPushOnStack(op1, tiRetVal);
14529                 break;
14530
14531             case CEE_REFANYTYPE:
14532
14533                 if (tiVerificationNeeded)
14534                 {
14535                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14536                            "need refany");
14537                 }
14538
14539                 op1 = impPopStack().val;
14540
14541                 // make certain it is normalized;
14542                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14543
14544                 if (op1->gtOper == GT_OBJ)
14545                 {
14546                     // Get the address of the refany
14547                     op1 = op1->gtOp.gtOp1;
14548
14549                     // Fetch the type from the correct slot
14550                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14551                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
14552                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
14553                 }
14554                 else
14555                 {
14556                     assertImp(op1->gtOper == GT_MKREFANY);
14557
14558                     // The pointer may have side-effects
14559                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
14560                     {
14561                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14562 #ifdef DEBUG
14563                         impNoteLastILoffs();
14564 #endif
14565                     }
14566
14567                     // We already have the class handle
14568                     op1 = op1->gtOp.gtOp2;
14569                 }
14570
14571                 // convert native TypeHandle to RuntimeTypeHandle
14572                 {
14573                     GenTreeArgList* helperArgs = gtNewArgList(op1);
14574
14575                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, helperArgs);
14576
14577                     // The handle struct is returned in register
14578                     op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14579
14580                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
14581                 }
14582
14583                 impPushOnStack(op1, tiRetVal);
14584                 break;
14585
14586             case CEE_LDTOKEN:
14587             {
14588                 /* Get the Class index */
14589                 assertImp(sz == sizeof(unsigned));
14590                 lastLoadToken = codeAddr;
14591                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
14592
14593                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
14594
14595                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14596                 if (op1 == nullptr)
14597                 { // compDonotInline()
14598                     return;
14599                 }
14600
14601                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
14602                 assert(resolvedToken.hClass != nullptr);
14603
14604                 if (resolvedToken.hMethod != nullptr)
14605                 {
14606                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
14607                 }
14608                 else if (resolvedToken.hField != nullptr)
14609                 {
14610                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
14611                 }
14612
14613                 GenTreeArgList* helperArgs = gtNewArgList(op1);
14614
14615                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
14616
14617                 // The handle struct is returned in register
14618                 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14619
14620                 tiRetVal = verMakeTypeInfo(tokenType);
14621                 impPushOnStack(op1, tiRetVal);
14622             }
14623             break;
14624
14625             case CEE_UNBOX:
14626             case CEE_UNBOX_ANY:
14627             {
14628                 /* Get the Class index */
14629                 assertImp(sz == sizeof(unsigned));
14630
14631                 _impResolveToken(CORINFO_TOKENKIND_Class);
14632
14633                 JITDUMP(" %08X", resolvedToken.token);
14634
14635                 BOOL runtimeLookup;
14636                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
14637                 if (op2 == nullptr)
14638                 {
14639                     assert(compDonotInline());
14640                     return;
14641                 }
14642
14643                 // Run this always so we can get access exceptions even with SkipVerification.
14644                 accessAllowedResult =
14645                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14646                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14647
14648                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
14649                 {
14650                     if (tiVerificationNeeded)
14651                     {
14652                         typeInfo tiUnbox = impStackTop().seTypeInfo;
14653                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
14654                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14655                         tiRetVal.NormaliseForStack();
14656                     }
14657                     JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
14658                     op1 = impPopStack().val;
14659                     goto CASTCLASS;
14660                 }
14661
14662                 /* Pop the object and create the unbox helper call */
14663                 /* You might think that for UNBOX_ANY we need to push a different */
14664                 /* (non-byref) type, but here we're making the tiRetVal that is used */
14665                 /* for the intermediate pointer which we then transfer onto the OBJ */
14666                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
14667                 if (tiVerificationNeeded)
14668                 {
14669                     typeInfo tiUnbox = impStackTop().seTypeInfo;
14670                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
14671
14672                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14673                     Verify(tiRetVal.IsValueClass(), "not value class");
14674                     tiRetVal.MakeByRef();
14675
14676                     // We always come from an objref, so this is a safe byref
14677                     tiRetVal.SetIsPermanentHomeByRef();
14678                     tiRetVal.SetIsReadonlyByRef();
14679                 }
14680
14681                 op1 = impPopStack().val;
14682                 assertImp(op1->gtType == TYP_REF);
14683
14684                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
14685                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
14686
14687                 // Check legality and profitability of inline expansion for unboxing.
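                      // Only the plain unbox helper has a cheap inline form; for Nullable<T>
                      // (CORINFO_HELP_UNBOX_NULLABLE) we always call the helper. We also skip
                      // the expansion in rarely-run blocks and under MinOpts/debug codegen,
                      // where code size and jit throughput matter more than speed.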
14688                 const bool canExpandInline    = (helper == CORINFO_HELP_UNBOX);
14689                 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
14690
14691                 if (canExpandInline && shouldExpandInline)
14692                 {
14693                     JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
14694                     // we are doing normal unboxing
14695                     // inline the common case of the unbox helper
14696                     // UNBOX(exp) morphs into
14697                     // clone = pop(exp);
14698                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14699                     // push(clone + TARGET_POINTER_SIZE)
14700                     //
14701                     GenTreePtr cloneOperand;
14702                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14703                                        nullptr DEBUGARG("inline UNBOX clone1"));
14704                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14705
14706                     GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14707
14708                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14709                                        nullptr DEBUGARG("inline UNBOX clone2"));
14710                     op2 = impTokenToHandle(&resolvedToken);
14711                     if (op2 == nullptr)
14712                     { // compDonotInline()
14713                         return;
14714                     }
14715                     args = gtNewArgList(op2, op1);
14716                     op1  = gtNewHelperCallNode(helper, TYP_VOID, args);
14717
14718                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14719                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14720                     condBox->gtFlags |= GTF_RELOP_QMARK;
14721
14722                     // QMARK nodes cannot reside on the evaluation stack. Because there
14723                     // may be other trees on the evaluation stack that side-effect the
14724                     // sources of the UNBOX operation we must spill the stack.
14725
14726                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14727
14728                     // Create the address-expression to reference past the object header
14729                     // to the beginning of the value-type. Today this means adjusting
14730                     // past the base of the object's vtable field, which is pointer sized.
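                          // That is, the unboxed payload of a boxed object starts at
                          // [obj + TARGET_POINTER_SIZE], immediately after the method table pointer.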
14731
14732                     op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
14733                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14734                 }
14735                 else
14736                 {
14737                     JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
14738                             canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
14739
14740                     // Don't optimize, just call the helper and be done with it
14741                     args = gtNewArgList(op2, op1);
14742                     op1 =
14743                         gtNewHelperCallNode(helper,
14744                                             (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
14745                 }
14746
14747                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
14748                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
14749                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
14750                        );
14751
14752                 /*
14753                   ----------------------------------------------------------------------
14754                   | \ helper  |                         |                              |
14755                   |   \       |                         |                              |
14756                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14757                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14758                   | opcode  \ |                         |                              |
14759                   |---------------------------------------------------------------------
14760                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14761                   |           |                         | push the BYREF to this local |
14762                   |---------------------------------------------------------------------
14763                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14764                   |           | the BYREF               | For Linux when the           |
14765                   |           |                         |  struct is returned in two   |
14766                   |           |                         |  registers create a temp     |
14767                   |           |                         |  which address is passed to  |
14768                   |           |                         |  the unbox_nullable helper.  |
14769                   |---------------------------------------------------------------------
14770                 */
14771
14772                 if (opcode == CEE_UNBOX)
14773                 {
14774                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14775                     {
14776                         // Unbox nullable helper returns a struct type.
14777                         // We need to spill it to a temp so than can take the address of it.
14778                         // We need to spill it to a temp so that we can take its address.
14779                         // Here we need the unsafe value cls check, since the address of the struct is taken to be used
14780                         // further along and could potentially be exploited.
14781                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14782                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14783
14784                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14785                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14786                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14787
14788                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14789                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14790                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14791                     }
14792
14793                     assert(op1->gtType == TYP_BYREF);
14794                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14795                 }
14796                 else
14797                 {
14798                     assert(opcode == CEE_UNBOX_ANY);
14799
14800                     if (helper == CORINFO_HELP_UNBOX)
14801                     {
14802                         // Normal unbox helper returns a TYP_BYREF.
14803                         impPushOnStack(op1, tiRetVal);
14804                         oper = GT_OBJ;
14805                         goto OBJ;
14806                     }
14807
14808                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14809
14810 #if FEATURE_MULTIREG_RET
14811
14812                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14813                     {
14814                         // Unbox nullable helper returns a TYP_STRUCT.
14815                         // For the multi-reg case we need to spill it to a temp so that
14816                         // we can pass the address to the unbox_nullable jit helper.
14817
14818                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14819                         lvaTable[tmp].lvIsMultiRegArg = true;
14820                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14821
14822                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14823                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14824                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14825
14826                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14827                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14828                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14829
14830                         // In this case the return value of the unbox helper is TYP_BYREF.
14831                         // Make sure the right type is placed on the operand type stack.
14832                         impPushOnStack(op1, tiRetVal);
14833
14834                         // Load the struct.
14835                         oper = GT_OBJ;
14836
14837                         assert(op1->gtType == TYP_BYREF);
14838                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14839
14840                         goto OBJ;
14841                     }
14842                     else
14843
14844 #endif // FEATURE_MULTIREG_RET
14845
14846                     {
14847                         // If the struct is not register passable, we have it materialized in the RetBuf.
14848                         assert(op1->gtType == TYP_STRUCT);
14849                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14850                         assert(tiRetVal.IsValueClass());
14851                     }
14852                 }
14853
14854                 impPushOnStack(op1, tiRetVal);
14855             }
14856             break;
14857
14858             case CEE_BOX:
14859             {
14860                 /* Get the Class index */
14861                 assertImp(sz == sizeof(unsigned));
14862
14863                 _impResolveToken(CORINFO_TOKENKIND_Box);
14864
14865                 JITDUMP(" %08X", resolvedToken.token);
14866
14867                 if (tiVerificationNeeded)
14868                 {
14869                     typeInfo tiActual = impStackTop().seTypeInfo;
14870                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14871
14872                     Verify(verIsBoxable(tiBox), "boxable type expected");
14873
14874                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14875                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14876                            "boxed type has unsatisfied class constraints");
14877
14878                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14879
14880                     // Observation: the following code introduces a boxed value class on the stack, but,
14881                     // according to the ECMA spec, one would simply expect: tiRetVal =
14882                     // typeInfo(TI_REF,impGetObjectClass());
14883
14884                     // Push the result back on the stack,
14885                     // even if clsHnd is a value class, we want the TI_REF
14886                     // we call back to the EE to find out what type we should push (for nullable<T> we push T)
14887                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14888                 }
14889
14890                 accessAllowedResult =
14891                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14892                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14893
14894                 // Note BOX can be used on things that are not value classes, in which
14895                 // case we get a NOP.  However the verifier's view of the type on the
14896                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14897                 if (!eeIsValueClass(resolvedToken.hClass))
14898                 {
14899                     JITDUMP("\n Importing BOX(refClass) as NOP\n");
14900                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14901                     break;
14902                 }
14903
14904                 // Look ahead for unbox.any
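                      // For example, C# such as `(int)(object)i` typically emits
                      //     box       <token>
                      //     unbox.any <token>
                      // with matching tokens; when the types provably match, both
                      // instructions can be elided.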
14905                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14906                 {
14907                     CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14908
14909                     impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14910
14911                     // See if the resolved tokens describe types that are equal.
14912                     const TypeCompareState compare =
14913                         info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
14914
14915                     // If so, box/unbox.any is a nop.
14916                     if (compare == TypeCompareState::Must)
14917                     {
14918                         JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
14919                         // Skip the next unbox.any instruction
14920                         sz += sizeof(mdToken) + 1;
14921                         break;
14922                     }
14923                 }
14924
14925                 impImportAndPushBox(&resolvedToken);
14926                 if (compDonotInline())
14927                 {
14928                     return;
14929                 }
14930             }
14931             break;
14932
14933             case CEE_SIZEOF:
14934
14935                 /* Get the Class index */
14936                 assertImp(sz == sizeof(unsigned));
14937
14938                 _impResolveToken(CORINFO_TOKENKIND_Class);
14939
14940                 JITDUMP(" %08X", resolvedToken.token);
14941
14942                 if (tiVerificationNeeded)
14943                 {
14944                     tiRetVal = typeInfo(TI_INT);
14945                 }
14946
14947                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14948                 impPushOnStack(op1, tiRetVal);
14949                 break;
14950
14951             case CEE_CASTCLASS:
14952
14953                 /* Get the Class index */
14954
14955                 assertImp(sz == sizeof(unsigned));
14956
14957                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14958
14959                 JITDUMP(" %08X", resolvedToken.token);
14960
14961                 if (!opts.IsReadyToRun())
14962                 {
14963                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14964                     if (op2 == nullptr)
14965                     { // compDonotInline()
14966                         return;
14967                     }
14968                 }
14969
14970                 if (tiVerificationNeeded)
14971                 {
14972                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14973                     // box it
14974                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14975                 }
14976
14977                 accessAllowedResult =
14978                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14979                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14980
14981                 op1 = impPopStack().val;
14982
14983             /* Pop the address and create the 'checked cast' helper call */
14984
14985             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14986             // and op2 to contain code that creates the type handle corresponding to typeRef
14987             CASTCLASS:
14988             {
14989                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
14990
14991                 if (optTree != nullptr)
14992                 {
14993                     impPushOnStack(optTree, tiRetVal);
14994                 }
14995                 else
14996                 {
14997
14998 #ifdef FEATURE_READYTORUN_COMPILER
14999                     if (opts.IsReadyToRun())
15000                     {
15001                         GenTreeCall* opLookup =
15002                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
15003                                                       gtNewArgList(op1));
15004                         usingReadyToRunHelper = (opLookup != nullptr);
15005                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
15006
15007                         if (!usingReadyToRunHelper)
15008                         {
15009                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
15010                             // and the chkcastany call with a single call to a dynamic R2R cell that will:
15011                             //      1) Load the context
15012                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
15013                             //      stub
15014                             //      3) Check the object on the stack for the type-cast
15015                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15016
15017                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15018                             if (op2 == nullptr)
15019                             { // compDonotInline()
15020                                 return;
15021                             }
15022                         }
15023                     }
15024
15025                     if (!usingReadyToRunHelper)
15026 #endif
15027                     {
15028                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
15029                     }
15030                     if (compDonotInline())
15031                     {
15032                         return;
15033                     }
15034
15035                     /* Push the result back on the stack */
15036                     impPushOnStack(op1, tiRetVal);
15037                 }
15038             }
15039             break;
15040
15041             case CEE_THROW:
15042
15043                 if (compIsForInlining())
15044                 {
15045                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15046                     // TODO: Will this be too strict, given that we will inline many basic blocks?
15047                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15048
15049                     /* Do we have just the exception on the stack ?*/
15050
15051                     if (verCurrentState.esStackDepth != 1)
15052                     {
15053                         /* if not, just don't inline the method */
15054
15055                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
15056                         return;
15057                     }
15058                 }
15059
15060                 if (tiVerificationNeeded)
15061                 {
15062                     tiRetVal = impStackTop().seTypeInfo;
15063                     Verify(tiRetVal.IsObjRef(), "object ref expected");
15064                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15065                     {
15066                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
15067                     }
15068                 }
15069
15070                 block->bbSetRunRarely(); // any block with a throw is rare
15071                 /* Pop the exception object and create the 'throw' helper call */
15072
15073                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
15074
15075             EVAL_APPEND:
15076                 if (verCurrentState.esStackDepth > 0)
15077                 {
15078                     impEvalSideEffects();
15079                 }
15080
15081                 assert(verCurrentState.esStackDepth == 0);
15082
15083                 goto APPEND;
15084
15085             case CEE_RETHROW:
15086
15087                 assert(!compIsForInlining());
15088
15089                 if (info.compXcptnsCount == 0)
15090                 {
15091                     BADCODE("rethrow outside catch");
15092                 }
15093
15094                 if (tiVerificationNeeded)
15095                 {
15096                     Verify(block->hasHndIndex(), "rethrow outside catch");
15097                     if (block->hasHndIndex())
15098                     {
15099                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
15100                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
15101                         if (HBtab->HasFilter())
15102                         {
15103                             // we better be in the handler clause part, not the filter part
15104                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
15105                                    "rethrow in filter");
15106                         }
15107                     }
15108                 }
15109
15110                 /* Create the 'rethrow' helper call */
15111
15112                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
15113
15114                 goto EVAL_APPEND;
15115
15116             case CEE_INITOBJ:
15117
15118                 assertImp(sz == sizeof(unsigned));
15119
15120                 _impResolveToken(CORINFO_TOKENKIND_Class);
15121
15122                 JITDUMP(" %08X", resolvedToken.token);
15123
15124                 if (tiVerificationNeeded)
15125                 {
15126                     typeInfo tiTo    = impStackTop().seTypeInfo;
15127                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15128
15129                     Verify(tiTo.IsByRef(), "byref expected");
15130                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15131
15132                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15133                            "type operand incompatible with type of address");
15134                 }
15135
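                      // initobj is imported as a zero-fill block store: a constant 0 written
                      // over getClassSize(hClass) bytes at the popped destination address.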
15136                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
15137                 op2  = gtNewIconNode(0);                                     // Value
15138                 op1  = impPopStack().val;                                    // Dest
15139                 op1  = gtNewBlockVal(op1, size);
15140                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15141                 goto SPILL_APPEND;
15142
15143             case CEE_INITBLK:
15144
15145                 if (tiVerificationNeeded)
15146                 {
15147                     Verify(false, "bad opcode");
15148                 }
15149
15150                 op3 = impPopStack().val; // Size
15151                 op2 = impPopStack().val; // Value
15152                 op1 = impPopStack().val; // Dest
15153
15154                 if (op3->IsCnsIntOrI())
15155                 {
15156                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15157                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15158                 }
15159                 else
15160                 {
15161                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15162                     size = 0;
15163                 }
15164                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15165
15166                 goto SPILL_APPEND;
15167
15168             case CEE_CPBLK:
15169
15170                 if (tiVerificationNeeded)
15171                 {
15172                     Verify(false, "bad opcode");
15173                 }
15174                 op3 = impPopStack().val; // Size
15175                 op2 = impPopStack().val; // Src
15176                 op1 = impPopStack().val; // Dest
15177
15178                 if (op3->IsCnsIntOrI())
15179                 {
15180                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15181                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15182                 }
15183                 else
15184                 {
15185                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15186                     size = 0;
15187                 }
15188                 if (op2->OperGet() == GT_ADDR)
15189                 {
15190                     op2 = op2->gtOp.gtOp1;
15191                 }
15192                 else
15193                 {
15194                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
15195                 }
15196
15197                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
15198                 goto SPILL_APPEND;
15199
15200             case CEE_CPOBJ:
15201
15202                 assertImp(sz == sizeof(unsigned));
15203
15204                 _impResolveToken(CORINFO_TOKENKIND_Class);
15205
15206                 JITDUMP(" %08X", resolvedToken.token);
15207
15208                 if (tiVerificationNeeded)
15209                 {
15210                     typeInfo tiFrom  = impStackTop().seTypeInfo;
15211                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
15212                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15213
15214                     Verify(tiFrom.IsByRef(), "expected byref source");
15215                     Verify(tiTo.IsByRef(), "expected byref destination");
15216
15217                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
15218                            "type of source address incompatible with type operand");
15219                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15220                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15221                            "type operand incompatible with type of destination address");
15222                 }
15223
15224                 if (!eeIsValueClass(resolvedToken.hClass))
15225                 {
15226                     op1 = impPopStack().val; // address to load from
15227
15228                     impBashVarAddrsToI(op1);
15229
15230                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
15231
15232                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
15233                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
15234
15235                     impPushOnStack(op1, typeInfo());
15236                     opcode = CEE_STIND_REF;
15237                     lclTyp = TYP_REF;
15238                     goto STIND_POST_VERIFY;
15239                 }
15240
15241                 op2 = impPopStack().val; // Src
15242                 op1 = impPopStack().val; // Dest
15243                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
15244                 goto SPILL_APPEND;
15245
15246             case CEE_STOBJ:
15247             {
15248                 assertImp(sz == sizeof(unsigned));
15249
15250                 _impResolveToken(CORINFO_TOKENKIND_Class);
15251
15252                 JITDUMP(" %08X", resolvedToken.token);
15253
15254                 if (eeIsValueClass(resolvedToken.hClass))
15255                 {
15256                     lclTyp = TYP_STRUCT;
15257                 }
15258                 else
15259                 {
15260                     lclTyp = TYP_REF;
15261                 }
15262
15263                 if (tiVerificationNeeded)
15264                 {
15265
15266                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
15267
15268                     // Make sure we have a good looking byref
15269                     Verify(tiPtr.IsByRef(), "pointer not byref");
15270                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
15271                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
15272                     {
15273                         compUnsafeCastUsed = true;
15274                     }
15275
15276                     typeInfo ptrVal = DereferenceByRef(tiPtr);
15277                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
15278
15279                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
15280                     {
15281                         Verify(false, "type of value incompatible with type operand");
15282                         compUnsafeCastUsed = true;
15283                     }
15284
15285                     if (!tiCompatibleWith(argVal, ptrVal, false))
15286                     {
15287                         Verify(false, "type operand incompatible with type of address");
15288                         compUnsafeCastUsed = true;
15289                     }
15290                 }
15291                 else
15292                 {
15293                     compUnsafeCastUsed = true;
15294                 }
15295
15296                 if (lclTyp == TYP_REF)
15297                 {
15298                     opcode = CEE_STIND_REF;
15299                     goto STIND_POST_VERIFY;
15300                 }
15301
15302                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15303                 if (impIsPrimitive(jitTyp))
15304                 {
15305                     lclTyp = JITtype2varType(jitTyp);
15306                     goto STIND_POST_VERIFY;
15307                 }
15308
15309                 op2 = impPopStack().val; // Value
15310                 op1 = impPopStack().val; // Ptr
15311
15312                 assertImp(varTypeIsStruct(op2));
15313
15314                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15315
15316                 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
15317                 {
15318                     op1->gtFlags |= GTF_BLK_UNALIGNED;
15319                 }
15320                 goto SPILL_APPEND;
15321             }
15322
15323             case CEE_MKREFANY:
15324
15325                 assert(!compIsForInlining());
15326
15327                 // Being lazy here. Refanys are tricky in terms of gc tracking.
15328                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
15329
15330                 JITDUMP("disabling struct promotion because of mkrefany\n");
15331                 fgNoStructPromotion = true;
15332
15333                 oper = GT_MKREFANY;
15334                 assertImp(sz == sizeof(unsigned));
15335
15336                 _impResolveToken(CORINFO_TOKENKIND_Class);
15337
15338                 JITDUMP(" %08X", resolvedToken.token);
15339
15340                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15341                 if (op2 == nullptr)
15342                 { // compDonotInline()
15343                     return;
15344                 }
15345
15346                 if (tiVerificationNeeded)
15347                 {
15348                     typeInfo tiPtr   = impStackTop().seTypeInfo;
15349                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15350
15351                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
15352                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
15353                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
15354                 }
15355
15356                 accessAllowedResult =
15357                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15358                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15359
15360                 op1 = impPopStack().val;
15361
15362                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
15363                 // But JIT32 allowed it, so we continue to allow it.
15364                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
15365
15366                 // MKREFANY returns a struct.  op2 is the class token.
15367                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
15368
15369                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
15370                 break;
15371
15372             case CEE_LDOBJ:
15373             {
15374                 oper = GT_OBJ;
15375                 assertImp(sz == sizeof(unsigned));
15376
15377                 _impResolveToken(CORINFO_TOKENKIND_Class);
15378
15379                 JITDUMP(" %08X", resolvedToken.token);
15380
15381             OBJ:
15382
15383                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15384
15385                 if (tiVerificationNeeded)
15386                 {
15387                     typeInfo tiPtr = impStackTop().seTypeInfo;
15388
15389                     // Make sure we have a byref
15390                     if (!tiPtr.IsByRef())
15391                     {
15392                         Verify(false, "pointer not byref");
15393                         compUnsafeCastUsed = true;
15394                     }
15395                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
15396
15397                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
15398                     {
15399                         Verify(false, "type of address incompatible with type operand");
15400                         compUnsafeCastUsed = true;
15401                     }
15402                     tiRetVal.NormaliseForStack();
15403                 }
15404                 else
15405                 {
15406                     compUnsafeCastUsed = true;
15407                 }
15408
15409                 if (eeIsValueClass(resolvedToken.hClass))
15410                 {
15411                     lclTyp = TYP_STRUCT;
15412                 }
15413                 else
15414                 {
15415                     lclTyp = TYP_REF;
15416                     opcode = CEE_LDIND_REF;
15417                     goto LDIND_POST_VERIFY;
15418                 }
15419
15420                 op1 = impPopStack().val;
15421
15422                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
15423
15424                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15425                 if (impIsPrimitive(jitTyp))
15426                 {
15427                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
15428
15429                     // Could point anywhere, for example a boxed class static int
15430                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15431                     assertImp(varTypeIsArithmetic(op1->gtType));
15432                 }
15433                 else
15434                 {
15435                     // OBJ returns a struct
15436                     // and an inline argument which is the class token of the loaded obj
15437                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
15438                 }
15439                 op1->gtFlags |= GTF_EXCEPT;
15440
15441                 if (prefixFlags & PREFIX_UNALIGNED)
15442                 {
15443                     op1->gtFlags |= GTF_IND_UNALIGNED;
15444                 }
15445
15446                 impPushOnStack(op1, tiRetVal);
15447                 break;
15448             }
15449
15450             case CEE_LDLEN:
15451                 if (tiVerificationNeeded)
15452                 {
15453                     typeInfo tiArray = impStackTop().seTypeInfo;
15454                     Verify(verIsSDArray(tiArray), "bad array");
15455                     tiRetVal = typeInfo(TI_INT);
15456                 }
15457
15458                 op1 = impPopStack().val;
15459                 if (!opts.MinOpts() && !opts.compDbgCode)
15460                 {
15461                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
15462                     GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
15463
15464                     /* Mark the block as containing a length expression */
15465
15466                     if (op1->gtOper == GT_LCL_VAR)
15467                     {
15468                         block->bbFlags |= BBF_HAS_IDX_LEN;
15469                     }
15470
15471                     op1 = arrLen;
15472                 }
15473                 else
15474                 {
15475                     /* Create the expression "*(array_addr + ArrLenOffs)" */
15476                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15477                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
15478                     op1 = gtNewIndir(TYP_INT, op1);
15479                     op1->gtFlags |= GTF_IND_ARR_LEN;
15480                 }
15481
15482                 /* Push the result back on the stack */
15483                 impPushOnStack(op1, tiRetVal);
15484                 break;
15485
15486             case CEE_BREAK:
15487                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
15488                 goto SPILL_APPEND;
15489
15490             case CEE_NOP:
15491                 if (opts.compDbgCode)
15492                 {
15493                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
15494                     goto SPILL_APPEND;
15495                 }
15496                 break;
15497
15498             /******************************** NYI *******************************/
15499
15500             case 0xCC:
15501                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
15502
15503             case CEE_ILLEGAL:
15504             case CEE_MACRO_END:
15505
15506             default:
15507                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
15508         }
15509
15510         codeAddr += sz;
15511         prevOpcode = opcode;
15512
15513         prefixFlags = 0;
15514     }
15515
15516     return;
15517 #undef _impResolveToken
15518 }
15519 #ifdef _PREFAST_
15520 #pragma warning(pop)
15521 #endif
15522
15523 // Push a local/argument tree on the operand stack
15524 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
15525 {
15526     tiRetVal.NormaliseForStack();
15527
15528     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
15529     {
15530         tiRetVal.SetUninitialisedObjRef();
15531     }
15532
15533     impPushOnStack(op, tiRetVal);
15534 }
15535
15536 // Load a local/argument on the operand stack
15537 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
15538 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
15539 {
15540     var_types lclTyp;
15541
15542     if (lvaTable[lclNum].lvNormalizeOnLoad())
15543     {
15544         lclTyp = lvaGetRealType(lclNum);
15545     }
15546     else
15547     {
15548         lclTyp = lvaGetActualType(lclNum);
15549     }
15550
15551     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
15552 }
15553
15554 // Load an argument on the operand stack
15555 // Shared by the various CEE_LDARG opcodes
15556 // ilArgNum is the argument index as specified in IL.
15557 // It will be mapped to the correct lvaTable index
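// (Illustrative note, not exhaustive: when a hidden parameter such as a return buffer is present,
// IL arg 1 typically maps to lvaTable index 2; compMapILargNum below performs that adjustment.)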
15558 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
15559 {
15560     Verify(ilArgNum < info.compILargsCount, "bad arg num");
15561
15562     if (compIsForInlining())
15563     {
15564         if (ilArgNum >= info.compArgsCount)
15565         {
15566             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
15567             return;
15568         }
15569
15570         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
15571                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
15572     }
15573     else
15574     {
15575         if (ilArgNum >= info.compArgsCount)
15576         {
15577             BADCODE("Bad IL");
15578         }
15579
15580         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
15581
15582         if (lclNum == info.compThisArg)
15583         {
15584             lclNum = lvaArg0Var;
15585         }
15586
15587         impLoadVar(lclNum, offset);
15588     }
15589 }
15590
15591 // Load a local on the operand stack
15592 // Shared by the various CEE_LDLOC opcodes
15593 // ilLclNum is the local index as specified in IL.
15594 // It will be mapped to the correct lvaTable index
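// (For example, in the non-inline path below, IL local 0 maps to lvaTable index info.compArgsCount + 0.)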
15595 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
15596 {
15597     if (tiVerificationNeeded)
15598     {
15599         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
15600         Verify(info.compInitMem, "initLocals not set");
15601     }
15602
15603     if (compIsForInlining())
15604     {
15605         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15606         {
15607             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
15608             return;
15609         }
15610
15611         // Get the local type
15612         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
15613
15614         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
15615
15616         /* Have we allocated a temp for this local? */
15617
15618         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
15619
15620         // All vars of inlined methods should be !lvNormalizeOnLoad()
15621
15622         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
15623         lclTyp = genActualType(lclTyp);
15624
15625         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
15626     }
15627     else
15628     {
15629         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15630         {
15631             BADCODE("Bad IL");
15632         }
15633
15634         unsigned lclNum = info.compArgsCount + ilLclNum;
15635
15636         impLoadVar(lclNum, offset);
15637     }
15638 }
15639
15640 #ifdef _TARGET_ARM_
15641 /**************************************************************************************
15642  *
15643  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
15644  *  dst struct, because struct promotion will turn it into a float/double variable while
15645  *  the rhs will be an int/long variable. We don't code generate assignment of int into
15646  *  a float, but there is nothing that would prevent us from doing so. The tree, however,
15647  *  would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
15648  *
15649  *  tmpNum - the lcl dst variable num that is a struct.
15650  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
15651  *  hClass - the type handle for the struct variable.
15652  *
15653  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
15654  *        however, we could do a codegen of transferring from int to float registers
15655  *        (transfer, not a cast.)
15656  *
15657  */
15658 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
15659 {
15660     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15661     {
15662         int       hfaSlots = GetHfaCount(hClass);
15663         var_types hfaType  = GetHfaType(hClass);
15664
15665         // If we have varargs, at import time we morph the method's return type to be "int" irrespective of
15666         // its original struct/float type, because the ABI specifies that the return is in integer registers.
15667         // We don't want struct promotion to replace an expression like this:
15668         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
15669         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
15670         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15671             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15672         {
15673             // Make sure this struct type stays as struct so we can receive the call in a struct.
15674             lvaTable[tmpNum].lvIsMultiRegRet = true;
15675         }
15676     }
15677 }
15678 #endif // _TARGET_ARM_
15679
15680 #if FEATURE_MULTIREG_RET
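// Spill a multi-register return value 'op' to a freshly allocated temp and return a use of that temp.
// The temp is flagged lvIsMultiRegRet so struct promotion keeps its fields together, and the use is
// marked GTF_DONT_CSE.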
15681 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
15682 {
15683     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
15684     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
15685     GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
15686
15687     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15688     ret->gtFlags |= GTF_DONT_CSE;
15689
15690     assert(IsMultiRegReturnedType(hClass));
15691
15692     // Mark the var so that fields are not promoted and stay together.
15693     lvaTable[tmpNum].lvIsMultiRegRet = true;
15694
15695     return ret;
15696 }
15697 #endif // FEATURE_MULTIREG_RET
15698
15699 // Do the import for a return instruction.
15700 // Returns false if inlining was aborted.
15701 // 'opcode' can be CEE_RET, or a call opcode in the case of a tail.call.
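// (Note: when compiling an inlinee, the return value is recorded in impInlineInfo->retExpr rather than
// being appended as a GT_RETURN node; see the inlining path below.)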
15702 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15703 {
15704     if (tiVerificationNeeded)
15705     {
15706         verVerifyThisPtrInitialised();
15707
15708         unsigned expectedStack = 0;
15709         if (info.compRetType != TYP_VOID)
15710         {
15711             typeInfo tiVal = impStackTop().seTypeInfo;
15712             typeInfo tiDeclared =
15713                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15714
15715             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15716
15717             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15718             expectedStack = 1;
15719         }
15720         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15721     }
15722
15723 #ifdef DEBUG
15724     // If we are importing an inlinee and have GC ref locals we always
15725     // need to have a spill temp for the return value.  This temp
15726     // should have been set up in advance, over in fgFindBasicBlocks.
15727     if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
15728     {
15729         assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
15730     }
15731 #endif // DEBUG
15732
15733     GenTree*             op2       = nullptr;
15734     GenTree*             op1       = nullptr;
15735     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15736
15737     if (info.compRetType != TYP_VOID)
15738     {
15739         StackEntry se = impPopStack();
15740         retClsHnd     = se.seTypeInfo.GetClassHandle();
15741         op2           = se.val;
15742
15743         if (!compIsForInlining())
15744         {
15745             impBashVarAddrsToI(op2);
15746             op2 = impImplicitIorI4Cast(op2, info.compRetType);
15747             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15748             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15749                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15750                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15751                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15752                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15753
15754 #ifdef DEBUG
15755             if (opts.compGcChecks && info.compRetType == TYP_REF)
15756             {
15757                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
15758                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15759                 // one-return BB.
15760
15761                 assert(op2->gtType == TYP_REF);
15762
15763                 // confirm that the argument is a GC pointer (for debugging (GC stress))
15764                 GenTreeArgList* args = gtNewArgList(op2);
15765                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
15766
15767                 if (verbose)
15768                 {
15769                     printf("\ncompGcChecks tree:\n");
15770                     gtDispTree(op2);
15771                 }
15772             }
15773 #endif
15774         }
15775         else
15776         {
15777             // inlinee's stack should be empty now.
15778             assert(verCurrentState.esStackDepth == 0);
15779
15780 #ifdef DEBUG
15781             if (verbose)
15782             {
15783                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15784                 gtDispTree(op2);
15785             }
15786 #endif
15787
15788             // Make sure the type matches the original call.
15789
15790             var_types returnType       = genActualType(op2->gtType);
15791             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15792             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15793             {
15794                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15795             }
15796
15797             if (returnType != originalCallType)
15798             {
15799                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15800                 return false;
15801             }
15802
15803             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15804             // expression. At this point, retExpr could already be set if there are multiple
15805             // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
15806             // the other blocks already set it. If there is only a single return block,
15807             // retExpr shouldn't be set. However, this is not true if we reimport a block
15808             // with a return. In that case, retExpr will be set, then the block will be
15809             // reimported, but retExpr won't get cleared as part of setting the block to
15810             // be reimported. The reimported retExpr value should be the same, so even if
15811             // we don't unconditionally overwrite it, it shouldn't matter.
15812             if (info.compRetNativeType != TYP_STRUCT)
15813             {
15814                 // compRetNativeType is not TYP_STRUCT.
15815                 // This implies it could be either a scalar type or SIMD vector type or
15816                 // a struct type that can be normalized to a scalar type.
15817
15818                 if (varTypeIsStruct(info.compRetType))
15819                 {
15820                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15821                     // adjust the type away from struct to integral
15822                     // and no normalizing
15823                     op2 = impFixupStructReturnType(op2, retClsHnd);
15824                 }
15825                 else
15826                 {
15827                     // Do we have to normalize?
15828                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15829                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15830                         fgCastNeeded(op2, fncRealRetType))
15831                     {
15832                         // Small-typed return values are normalized by the callee
15833                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15834                     }
15835                 }
15836
15837                 if (fgNeedReturnSpillTemp())
15838                 {
15839                     assert(info.compRetNativeType != TYP_VOID &&
15840                            (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
15841
15842                     // If this method returns a ref type, track the actual types seen
15843                     // in the returns.
15844                     if (info.compRetType == TYP_REF)
15845                     {
15846                         bool                 isExact      = false;
15847                         bool                 isNonNull    = false;
15848                         CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
15849
15850                         if (impInlineInfo->retExpr == nullptr)
15851                         {
15852                             // This is the first return, so best known type is the type
15853                             // of this return value.
15854                             impInlineInfo->retExprClassHnd        = returnClsHnd;
15855                             impInlineInfo->retExprClassHndIsExact = isExact;
15856                         }
15857                         else if (impInlineInfo->retExprClassHnd != returnClsHnd)
15858                         {
15859                             // This return site type differs from earlier seen sites,
15860                             // so reset the info and we'll fall back to using the method's
15861                             // declared return type for the return spill temp.
15862                             impInlineInfo->retExprClassHnd        = nullptr;
15863                             impInlineInfo->retExprClassHndIsExact = false;
15864                         }
15865                     }
15866
15867                     // This is a bit of a workaround...
15868                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15869                     // not a struct (for example, the struct is composed of exactly one int, and the native
15870                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15871                     // fgNeedReturnSpillTemp() == true, and lvaInlineeReturnSpillTemp is the index of a local var that is
15872                     // set to the *native* return type), and at least one of the return blocks is the result of
15873                     // a call, then we have a problem. The situation is like this (from a failed test case):
15874                     //
15875                     // inliner:
15876                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15877                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15878                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15879                     //
15880                     // inlinee:
15881                     //      ...
15882                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15883                     //      ret
15884                     //      ...
15885                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15886                     //      object&, class System.Func`1<!!0>)
15887                     //      ret
15888                     //
15889                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15890                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15891                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15892                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15893                     //
15894                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15895                     // native return type, which is what it will be set to eventually. We generate the
15896                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15897                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15898
15899                     bool restoreType = false;
15900                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15901                     {
15902                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15903                         op2->gtType = info.compRetNativeType;
15904                         restoreType = true;
15905                     }
15906
15907                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15908                                      (unsigned)CHECK_SPILL_ALL);
15909
15910                     GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15911
15912                     if (restoreType)
15913                     {
15914                         op2->gtType = TYP_STRUCT; // restore it to what it was
15915                     }
15916
15917                     op2 = tmpOp2;
15918
15919 #ifdef DEBUG
15920                     if (impInlineInfo->retExpr)
15921                     {
15922                         // Some other block(s) have seen the CEE_RET first.
15923                         // Better they spilled to the same temp.
15924                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15925                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15926                     }
15927 #endif
15928                 }
15929
15930 #ifdef DEBUG
15931                 if (verbose)
15932                 {
15933                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15934                     gtDispTree(op2);
15935                 }
15936 #endif
15937
15938                 // Report the return expression
15939                 impInlineInfo->retExpr = op2;
15940             }
15941             else
15942             {
15943                 // compRetNativeType is TYP_STRUCT.
15944                 // This implies that the struct is returned via a RetBuf arg or as a multi-reg struct return.
15945
15946                 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
15947
15948                 // Assign the inlinee return into a spill temp.
15949                 // spill temp only exists if there are multiple return points
15950                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15951                 {
15952                     // in this case we have to insert multiple struct copies to the temp
15953                     // and the retexpr is just the temp.
15954                     assert(info.compRetNativeType != TYP_VOID);
15955                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
15956
15957                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15958                                      (unsigned)CHECK_SPILL_ALL);
15959                 }
15960
15961 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15962 #if defined(_TARGET_ARM_)
15963                 // TODO-ARM64-NYI: HFA
15964                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15965                 // next ifdefs could be refactored into a single method with the ifdef inside.
15966                 if (IsHfa(retClsHnd))
15967                 {
15968 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15969 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15970                 ReturnTypeDesc retTypeDesc;
15971                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15972                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15973
15974                 if (retRegCount != 0)
15975                 {
15976                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15977                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes,
15978                     // the max allowed).
15979                     assert(retRegCount == MAX_RET_REG_COUNT);
15980                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15981                     CLANG_FORMAT_COMMENT_ANCHOR;
15982 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15983
15984                     if (fgNeedReturnSpillTemp())
15985                     {
15986                         if (!impInlineInfo->retExpr)
15987                         {
15988 #if defined(_TARGET_ARM_)
15989                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15990 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15991                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15992                             impInlineInfo->retExpr =
15993                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15994 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15995                         }
15996                     }
15997                     else
15998                     {
15999                         impInlineInfo->retExpr = op2;
16000                     }
16001                 }
16002                 else
16003 #elif defined(_TARGET_ARM64_)
16004                 ReturnTypeDesc retTypeDesc;
16005                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16006                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16007
16008                 if (retRegCount != 0)
16009                 {
16010                     assert(!iciCall->HasRetBufArg());
16011                     assert(retRegCount >= 2);
16012                     if (fgNeedReturnSpillTemp())
16013                     {
16014                         if (!impInlineInfo->retExpr)
16015                         {
16016                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16017                             impInlineInfo->retExpr =
16018                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16019                         }
16020                     }
16021                     else
16022                     {
16023                         impInlineInfo->retExpr = op2;
16024                     }
16025                 }
16026                 else
16027 #endif // defined(_TARGET_ARM64_)
16028                 {
16029                     assert(iciCall->HasRetBufArg());
16030                     GenTreePtr dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
16031                     // spill temp only exists if there are multiple return points
16032                     if (fgNeedReturnSpillTemp())
16033                     {
16034                         // if this is the first return we have seen set the retExpr
16035                         if (!impInlineInfo->retExpr)
16036                         {
16037                             impInlineInfo->retExpr =
16038                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
16039                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
16040                         }
16041                     }
16042                     else
16043                     {
16044                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16045                     }
16046                 }
16047             }
16048         }
16049     }
16050
16051     if (compIsForInlining())
16052     {
16053         return true;
16054     }
16055
16056     if (info.compRetType == TYP_VOID)
16057     {
16058         // return void
16059         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16060     }
16061     else if (info.compRetBuffArg != BAD_VAR_NUM)
16062     {
16063         // Assign value to return buff (first param)
16064         GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
16065
16066         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16067         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16068
16069         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
16070         CLANG_FORMAT_COMMENT_ANCHOR;
16071
16072 #if defined(_TARGET_AMD64_)
16073
16074         // The x64 (System V and Win64) calling convention requires us to
16075         // return the implicit return buffer explicitly (in RAX).
16076         // Change the return type to be BYREF.
16077         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16078 #else  // !defined(_TARGET_AMD64_)
16079         // On non-AMD64 targets the profiler hook requires us to return the implicit RetBuf explicitly (in the return register).
16080         // In that case the return type of the function is changed to BYREF.
16081         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
16082         if (compIsProfilerHookNeeded())
16083         {
16084             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16085         }
16086         else
16087         {
16088             // return void
16089             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16090         }
16091 #endif // !defined(_TARGET_AMD64_)
16092     }
16093     else if (varTypeIsStruct(info.compRetType))
16094     {
16095 #if !FEATURE_MULTIREG_RET
16096         // For both ARM architectures the HFA native types are maintained as structs.
16097         // On System V AMD64 the multi-reg struct returns are also left as structs.
16098         noway_assert(info.compRetNativeType != TYP_STRUCT);
16099 #endif
16100         op2 = impFixupStructReturnType(op2, retClsHnd);
16101         // return op2
16102         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
16103     }
16104     else
16105     {
16106         // return op2
16107         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
16108     }
16109
16110     // We must have imported a tailcall and jumped to RET
16111     if (prefixFlags & PREFIX_TAILCALL)
16112     {
16113 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
16114         // Jit64 compat:
16115         // This cannot be asserted on Amd64 since we permit the following IL pattern:
16116         //      tail.call
16117         //      pop
16118         //      ret
16119         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
16120 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
16121
16122         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
16123
16124         // impImportCall() would have already appended TYP_VOID calls
16125         if (info.compRetType == TYP_VOID)
16126         {
16127             return true;
16128         }
16129     }
16130
16131     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16132 #ifdef DEBUG
16133     // Remember at which BC offset the tree was finished
16134     impNoteLastILoffs();
16135 #endif
16136     return true;
16137 }
16138
16139 /*****************************************************************************
16140  *  Mark the block as unimported.
16141  *  Note that the caller is responsible for calling impImportBlockPending(),
16142  *  with the appropriate stack-state
16143  */
16144
16145 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
16146 {
16147 #ifdef DEBUG
16148     if (verbose && (block->bbFlags & BBF_IMPORTED))
16149     {
16150         printf("\nBB%02u will be reimported\n", block->bbNum);
16151     }
16152 #endif
16153
16154     block->bbFlags &= ~BBF_IMPORTED;
16155 }
16156
16157 /*****************************************************************************
16158  *  Mark the successors of the given block as unimported.
16159  *  Note that the caller is responsible for calling impImportBlockPending()
16160  *  for all the successors, with the appropriate stack-state.
16161  */
16162
16163 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
16164 {
16165     const unsigned numSuccs = block->NumSucc();
16166     for (unsigned i = 0; i < numSuccs; i++)
16167     {
16168         impReimportMarkBlock(block->GetSucc(i));
16169     }
16170 }
16171
16172 /*****************************************************************************
16173  *
16174  *  Filter wrapper that handles only the verification exception code
16175  *  (any other exception continues the search).
16176  */
16177
16178 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
16179 {
16180     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
16181     {
16182         return EXCEPTION_EXECUTE_HANDLER;
16183     }
16184
16185     return EXCEPTION_CONTINUE_SEARCH;
16186 }
16187
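// impVerifyEHBlock: walk the EH regions enclosing 'block', check the verification state on entry to a
// try region, set up the verification stack for each handler/filter, and queue the handler and filter
// entry blocks for importing.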
16188 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
16189 {
16190     assert(block->hasTryIndex());
16191     assert(!compIsForInlining());
16192
16193     unsigned  tryIndex = block->getTryIndex();
16194     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
16195
16196     if (isTryStart)
16197     {
16198         assert(block->bbFlags & BBF_TRY_BEG);
16199
16200         // The Stack must be empty
16201         //
16202         if (block->bbStkDepth != 0)
16203         {
16204             BADCODE("Evaluation stack must be empty on entry into a try block");
16205         }
16206     }
16207
16208     // Save the stack contents, we'll need to restore it later
16209     //
16210     SavedStack blockState;
16211     impSaveStackState(&blockState, false);
16212
16213     while (HBtab != nullptr)
16214     {
16215         if (isTryStart)
16216         {
16217             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
16218             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
16219             //
16220             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
16221             {
16222                 // We trigger an invalid program exception here unless we have a try/fault region.
16223                 //
16224                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
16225                 {
16226                     BADCODE(
16227                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
16228                 }
16229                 else
16230                 {
16231                     // Allow a try/fault region to proceed.
16232                     assert(HBtab->HasFaultHandler());
16233                 }
16234             }
16235
16236             /* Recursively process the handler block */
16237             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
16238
16239             //  Construct the proper verification stack state
16240             //   either empty or one that contains just
16241             //   the Exception Object that we are dealing with
16242             //
16243             verCurrentState.esStackDepth = 0;
16244
16245             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
16246             {
16247                 CORINFO_CLASS_HANDLE clsHnd;
16248
16249                 if (HBtab->HasFilter())
16250                 {
16251                     clsHnd = impGetObjectClass();
16252                 }
16253                 else
16254                 {
16255                     CORINFO_RESOLVED_TOKEN resolvedToken;
16256
16257                     resolvedToken.tokenContext = impTokenLookupContextHandle;
16258                     resolvedToken.tokenScope   = info.compScopeHnd;
16259                     resolvedToken.token        = HBtab->ebdTyp;
16260                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
16261                     info.compCompHnd->resolveToken(&resolvedToken);
16262
16263                     clsHnd = resolvedToken.hClass;
16264                 }
16265
16266                 // push catch arg on the stack, spill to a temp if necessary
16267                 // Note: can update HBtab->ebdHndBeg!
16268                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
16269             }
16270
16271             // Queue up the handler for importing
16272             //
16273             impImportBlockPending(hndBegBB);
16274
16275             if (HBtab->HasFilter())
16276             {
16277                 /* @VERIFICATION : Ideally the end of filter state should get
16278                    propagated to the catch handler; this is an incompleteness,
16279                    but is not a security/compliance issue, since the only
16280                    interesting state is the 'thisInit' state.
16281                    */
16282
16283                 verCurrentState.esStackDepth = 0;
16284
16285                 BasicBlock* filterBB = HBtab->ebdFilter;
16286
16287                 // push catch arg on the stack, spill to a temp if necessary
16288                 // Note: can update HBtab->ebdFilter!
16289                 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
16290                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
16291
16292                 impImportBlockPending(filterBB);
16293             }
16294         }
16295         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
16296         {
16297             /* Recursively process the handler block */
16298
16299             verCurrentState.esStackDepth = 0;
16300
16301             // Queue up the fault handler for importing
16302             //
16303             impImportBlockPending(HBtab->ebdHndBeg);
16304         }
16305
16306         // Now process our enclosing try index (if any)
16307         //
16308         tryIndex = HBtab->ebdEnclosingTryIndex;
16309         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
16310         {
16311             HBtab = nullptr;
16312         }
16313         else
16314         {
16315             HBtab = ehGetDsc(tryIndex);
16316         }
16317     }
16318
16319     // Restore the stack contents
16320     impRestoreStackState(&blockState);
16321 }
16322
16323 //***************************************************************
16324 // Import the instructions for the given basic block.  Perform
16325 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
16326 // time, or whose verification pre-state is changed.
16327
16328 #ifdef _PREFAST_
16329 #pragma warning(push)
16330 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
16331 #endif
16332 void Compiler::impImportBlock(BasicBlock* block)
16333 {
16334     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
16335     // handle them specially. In particular, there is no IL to import for them, but we do need
16336     // to mark them as imported and put their successors on the pending import list.
16337     if (block->bbFlags & BBF_INTERNAL)
16338     {
16339         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
16340         block->bbFlags |= BBF_IMPORTED;
16341
16342         const unsigned numSuccs = block->NumSucc();
16343         for (unsigned i = 0; i < numSuccs; i++)
16344         {
16345             impImportBlockPending(block->GetSucc(i));
16346         }
16347
16348         return;
16349     }
16350
16351     bool markImport;
16352
16353     assert(block);
16354
16355     /* Make the block globally available */
16356
16357     compCurBB = block;
16358
16359 #ifdef DEBUG
16360     /* Initialize the debug variables */
16361     impCurOpcName = "unknown";
16362     impCurOpcOffs = block->bbCodeOffs;
16363 #endif
16364
16365     /* Set the current stack state to the merged result */
16366     verResetCurrentState(block, &verCurrentState);
16367
16368     /* Now walk the code and import the IL into GenTrees */
16369
16370     struct FilterVerificationExceptionsParam
16371     {
16372         Compiler*   pThis;
16373         BasicBlock* block;
16374     };
16375     FilterVerificationExceptionsParam param;
16376
16377     param.pThis = this;
16378     param.block = block;
16379
16380     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
16381     {
16382         /* @VERIFICATION : For now, the only state propagation from try
16383            to its handler is the "thisInit" state (stack is empty at start of try).
16384            In general, for state that we track in verification, we need to
16385            model the possibility that an exception might happen at any IL
16386            instruction, so we really need to merge all states that obtain
16387            between IL instructions in a try block into the start states of
16388            all handlers.
16389
16390            However we do not allow the 'this' pointer to be uninitialized when
16391            entering most kinds of try regions (only try/fault are allowed to have
16392            an uninitialized this pointer on entry to the try)
16393
16394            Fortunately, the stack is thrown away when an exception
16395            leads to a handler, so we don't have to worry about that.
16396            We DO, however, have to worry about the "thisInit" state.
16397            But only for the try/fault case.
16398
16399            The only allowed transition is from TIS_Uninit to TIS_Init.
16400
16401            So for a try/fault region for the fault handler block
16402            we will merge the start state of the try begin
16403            and the post-state of each block that is part of this try region
16404         */
16405
16406         // merge the start state of the try begin
16407         //
16408         if (pParam->block->bbFlags & BBF_TRY_BEG)
16409         {
16410             pParam->pThis->impVerifyEHBlock(pParam->block, true);
16411         }
16412
16413         pParam->pThis->impImportBlockCode(pParam->block);
16414
16415         // As discussed above:
16416         // merge the post-state of each block that is part of this try region
16417         //
16418         if (pParam->block->hasTryIndex())
16419         {
16420             pParam->pThis->impVerifyEHBlock(pParam->block, false);
16421         }
16422     }
16423     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
16424     {
16425         verHandleVerificationFailure(block DEBUGARG(false));
16426     }
16427     PAL_ENDTRY
16428
16429     if (compDonotInline())
16430     {
16431         return;
16432     }
16433
16434     assert(!compDonotInline());
16435
16436     markImport = false;
16437
16438 SPILLSTACK:
16439
16440     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
16441     bool        reimportSpillClique = false;
16442     BasicBlock* tgtBlock            = nullptr;
16443
16444     /* If the stack is non-empty, we might have to spill its contents */
16445
16446     if (verCurrentState.esStackDepth != 0)
16447     {
16448         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
16449                                   // on the stack, its lifetime is hard to determine, simply
16450                                   // don't reuse such temps.
16451
16452         GenTreePtr addStmt = nullptr;
16453
16454         /* Do the successors of 'block' have any other predecessors ?
16455            We do not want to do some of the optimizations related to multiRef
16456            if we can reimport blocks */
16457
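        // (When reimporting is possible, multRef starts as all ones below, so every successor is treated
        // as multiply-referenced and the single-reference assumptions are not made.)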
16458         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
16459
16460         switch (block->bbJumpKind)
16461         {
16462             case BBJ_COND:
16463
16464                 /* Temporarily remove the 'jtrue' from the end of the tree list */
16465
16466                 assert(impTreeLast);
16467                 assert(impTreeLast->gtOper == GT_STMT);
16468                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
16469
16470                 addStmt     = impTreeLast;
16471                 impTreeLast = impTreeLast->gtPrev;
16472
16473                 /* Note if the next block has more than one ancestor */
16474
16475                 multRef |= block->bbNext->bbRefs;
16476
16477                 /* Does the next block have temps assigned? */
16478
16479                 baseTmp  = block->bbNext->bbStkTempsIn;
16480                 tgtBlock = block->bbNext;
16481
16482                 if (baseTmp != NO_BASE_TMP)
16483                 {
16484                     break;
16485                 }
16486
16487                 /* Try the target of the jump then */
16488
16489                 multRef |= block->bbJumpDest->bbRefs;
16490                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16491                 tgtBlock = block->bbJumpDest;
16492                 break;
16493
16494             case BBJ_ALWAYS:
16495                 multRef |= block->bbJumpDest->bbRefs;
16496                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16497                 tgtBlock = block->bbJumpDest;
16498                 break;
16499
16500             case BBJ_NONE:
16501                 multRef |= block->bbNext->bbRefs;
16502                 baseTmp  = block->bbNext->bbStkTempsIn;
16503                 tgtBlock = block->bbNext;
16504                 break;
16505
16506             case BBJ_SWITCH:
16507
16508                 BasicBlock** jmpTab;
16509                 unsigned     jmpCnt;
16510
16511                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
16512
16513                 assert(impTreeLast);
16514                 assert(impTreeLast->gtOper == GT_STMT);
16515                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
16516
16517                 addStmt     = impTreeLast;
16518                 impTreeLast = impTreeLast->gtPrev;
16519
16520                 jmpCnt = block->bbJumpSwt->bbsCount;
16521                 jmpTab = block->bbJumpSwt->bbsDstTab;
16522
16523                 do
16524                 {
16525                     tgtBlock = (*jmpTab);
16526
16527                     multRef |= tgtBlock->bbRefs;
16528
16529                     // Thanks to spill cliques, we should have assigned all or none
16530                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
16531                     baseTmp = tgtBlock->bbStkTempsIn;
16532                     if (multRef > 1)
16533                     {
16534                         break;
16535                     }
16536                 } while (++jmpTab, --jmpCnt);
16537
16538                 break;
16539
16540             case BBJ_CALLFINALLY:
16541             case BBJ_EHCATCHRET:
16542             case BBJ_RETURN:
16543             case BBJ_EHFINALLYRET:
16544             case BBJ_EHFILTERRET:
16545             case BBJ_THROW:
16546                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
16547                 break;
16548
16549             default:
16550                 noway_assert(!"Unexpected bbJumpKind");
16551                 break;
16552         }
16553
16554         assert(multRef >= 1);
16555
16556         /* Do we have a base temp number? */
16557
16558         bool newTemps = (baseTmp == NO_BASE_TMP);
16559
16560         if (newTemps)
16561         {
16562             /* Grab enough temps for the whole stack */
16563             baseTmp = impGetSpillTmpBase(block);
16564         }
16565
16566         /* Spill all stack entries into temps */
16567         unsigned level, tempNum;
16568
16569         JITDUMP("\nSpilling stack entries into temps\n");
16570         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
16571         {
16572             GenTreePtr tree = verCurrentState.esStack[level].val;
16573
16574             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
16575                the other. This should merge to a byref in unverifiable code.
16576                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
16577                successor would be imported assuming there was a TYP_I_IMPL on
16578                the stack. Thus the value would not get GC-tracked. Hence,
16579                change the temp to TYP_BYREF and reimport the successors.
16580                Note: We should only allow this in unverifiable code.
16581             */
16582             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
16583             {
16584                 lvaTable[tempNum].lvType = TYP_BYREF;
16585                 impReimportMarkSuccessors(block);
16586                 markImport = true;
16587             }
16588
16589 #ifdef _TARGET_64BIT_
16590             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
16591             {
16592                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
16593                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
16594                 {
16595                     // Merge the current state into the entry state of block;
16596                     // the call to verMergeEntryStates must have changed
16597                     // the entry state of the block by merging the int local var
16598                     // and the native-int stack entry.
16599                     bool changed = false;
16600                     if (verMergeEntryStates(tgtBlock, &changed))
16601                     {
16602                         impRetypeEntryStateTemps(tgtBlock);
16603                         impReimportBlockPending(tgtBlock);
16604                         assert(changed);
16605                     }
16606                     else
16607                     {
16608                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
16609                         break;
16610                     }
16611                 }
16612
16613                 // Some other block in the spill clique set this to "int", but now we have "native int".
16614                 // Change the type and go back to re-import any blocks that used the wrong type.
16615                 lvaTable[tempNum].lvType = TYP_I_IMPL;
16616                 reimportSpillClique      = true;
16617             }
16618             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
16619             {
16620                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
16621                 // Insert a sign-extension to "native int" so we match the clique.
16622                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
16623             }
16624
16625             // Consider the case where one branch left a 'byref' on the stack and the other leaves
16626             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
16627             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
16628             // behavior instead of asserting and then generating bad code (where we save/restore the
16629             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
16630             // imported already, we need to change the type of the local and reimport the spill clique.
16631             // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
16632             // the 'byref' size.
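            // (Illustrative sketch: one predecessor pushes 'ldloca.s V_0' (a byref) and another pushes
            // 'ldc.i4.0' (an int); the shared spill temp must end up TYP_BYREF, so if the int side was
            // imported first we retype the temp and reimport, otherwise we widen the int side with a cast.)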
16633             if (!tiVerificationNeeded)
16634             {
16635                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
16636                 {
16637                     // Some other block in the spill clique set this to "int", but now we have "byref".
16638                     // Change the type and go back to re-import any blocks that used the wrong type.
16639                     lvaTable[tempNum].lvType = TYP_BYREF;
16640                     reimportSpillClique      = true;
16641                 }
16642                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
16643                 {
16644                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
16645                     // Insert a sign-extension to "native int" so we match the clique size.
16646                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
16647                 }
16648             }
16649 #endif // _TARGET_64BIT_
16650
16651 #if FEATURE_X87_DOUBLES
16652             // X87 stack doesn't differentiate between float/double
16653             // so promoting is no big deal.
16654             // For everybody else keep it as float until we have a collision and then promote
16655             // Just like for x64's TYP_INT<->TYP_I_IMPL
16656
16657             if (multRef > 1 && tree->gtType == TYP_FLOAT)
16658             {
16659                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16660             }
16661
16662 #else // !FEATURE_X87_DOUBLES
16663
16664             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
16665             {
16666                 // Some other block in the spill clique set this to "float", but now we have "double".
16667                 // Change the type and go back to re-import any blocks that used the wrong type.
16668                 lvaTable[tempNum].lvType = TYP_DOUBLE;
16669                 reimportSpillClique      = true;
16670             }
16671             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
16672             {
16673                 // Spill clique has decided this should be "double", but this block only pushes a "float".
16674                 // Insert a cast to "double" so we match the clique.
16675                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16676             }
16677
16678 #endif // FEATURE_X87_DOUBLES
16679
16680             /* If addStmt has a reference to tempNum (can only happen if we
16681                are spilling to the temps already used by a previous block),
16682                we need to spill addStmt */
16683
16684             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
16685             {
16686                 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
16687
16688                 if (addTree->gtOper == GT_JTRUE)
16689                 {
16690                     GenTreePtr relOp = addTree->gtOp.gtOp1;
16691                     assert(relOp->OperIsCompare());
16692
16693                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
16694
16695                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
16696                     {
16697                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
16698                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
16699                         type              = genActualType(lvaTable[temp].TypeGet());
16700                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
16701                     }
16702
16703                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
16704                     {
16705                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
16706                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
16707                         type              = genActualType(lvaTable[temp].TypeGet());
16708                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
16709                     }
16710                 }
16711                 else
16712                 {
16713                     assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
16714
16715                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
16716                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
16717                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
16718                 }
16719             }
16720
16721             /* Spill the stack entry, and replace with the temp */
16722
16723             if (!impSpillStackEntry(level, tempNum
16724 #ifdef DEBUG
16725                                     ,
16726                                     true, "Spill Stack Entry"
16727 #endif
16728                                     ))
16729             {
16730                 if (markImport)
16731                 {
16732                     BADCODE("bad stack state");
16733                 }
16734
16735                 // Oops. Something went wrong when spilling. Bad code.
16736                 verHandleVerificationFailure(block DEBUGARG(true));
16737
16738                 goto SPILLSTACK;
16739             }
16740         }
16741
16742         /* Put back the 'jtrue'/'switch' if we removed it earlier */
16743
16744         if (addStmt)
16745         {
16746             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16747         }
16748     }
16749
16750     // Some of the append/spill logic works on compCurBB
16751
16752     assert(compCurBB == block);
16753
16754     /* Save the tree list in the block */
16755     impEndTreeList(block);
16756
16757     // impEndTreeList sets BBF_IMPORTED on the block
16758     // We do *NOT* want to set it later than this because
16759     // impReimportSpillClique might clear it if this block is both a
16760     // predecessor and successor in the current spill clique
16761     assert(block->bbFlags & BBF_IMPORTED);
16762
16763     // If we had a int/native int, or float/double collision, we need to re-import
16764     if (reimportSpillClique)
16765     {
16766         // This will re-import all the successors of block (as well as each of their predecessors)
16767         impReimportSpillClique(block);
16768
16769         // For blocks that haven't been imported yet, we still need to mark them as pending import.
16770         const unsigned numSuccs = block->NumSucc();
16771         for (unsigned i = 0; i < numSuccs; i++)
16772         {
16773             BasicBlock* succ = block->GetSucc(i);
16774             if ((succ->bbFlags & BBF_IMPORTED) == 0)
16775             {
16776                 impImportBlockPending(succ);
16777             }
16778         }
16779     }
16780     else // the normal case
16781     {
16782         // otherwise just import the successors of block
16783
16784         /* Does this block jump to any other blocks? */
16785         const unsigned numSuccs = block->NumSucc();
16786         for (unsigned i = 0; i < numSuccs; i++)
16787         {
16788             impImportBlockPending(block->GetSucc(i));
16789         }
16790     }
16791 }
16792 #ifdef _PREFAST_
16793 #pragma warning(pop)
16794 #endif
16795
16796 /*****************************************************************************/
16797 //
16798 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16799 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
16800 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16801 // (its "pre-state").
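//
// Added commentary (illustrative, not from the original source): the "merge" here is the
// verification type merge; e.g. if one predecessor leaves a System.String on a stack slot
// and another leaves a System.Object, the block's pre-state for that slot widens to
// System.Object. If merging changes the pre-state, the block is queued (or re-queued) so
// it is imported with the merged types.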
16802
16803 void Compiler::impImportBlockPending(BasicBlock* block)
16804 {
16805 #ifdef DEBUG
16806     if (verbose)
16807     {
16808         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16809     }
16810 #endif
16811
16812     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16813     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16814     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16815
16816     // If the block has not been imported, add to pending set.
16817     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16818
16819     // Initialize bbEntryState just the first time we try to add this block to the pending list.
16820     // Note that a NULL bbEntryState doesn't mean the pre-state wasn't previously set:
16821     // we use NULL to indicate the 'common' state, to avoid a memory allocation.
16822     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16823         (impGetPendingBlockMember(block) == 0))
16824     {
16825         verInitBBEntryState(block, &verCurrentState);
16826         assert(block->bbStkDepth == 0);
16827         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16828         assert(addToPending);
16829         assert(impGetPendingBlockMember(block) == 0);
16830     }
16831     else
16832     {
16833         // The stack should have the same height on entry to the block from all its predecessors.
16834         if (block->bbStkDepth != verCurrentState.esStackDepth)
16835         {
16836 #ifdef DEBUG
16837             char buffer[400];
16838             sprintf_s(buffer, sizeof(buffer),
16839                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16840                       "Previous depth was %d, current depth is %d",
16841                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16842                       verCurrentState.esStackDepth);
16843             buffer[400 - 1] = 0;
16844             NO_WAY(buffer);
16845 #else
16846             NO_WAY("Block entered with different stack depths");
16847 #endif
16848         }
16849
16850         // Additionally, if we need to verify, merge the verification state.
16851         if (tiVerificationNeeded)
16852         {
16853             // Merge the current state into the entry state of block; if this does not change the entry state
16854             // by merging, do not add the block to the pending-list.
16855             bool changed = false;
16856             if (!verMergeEntryStates(block, &changed))
16857             {
16858                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16859                 addToPending = true; // We will pop it off, and check the flag set above.
16860             }
16861             else if (changed)
16862             {
16863                 addToPending = true;
16864
16865                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16866             }
16867         }
16868
16869         if (!addToPending)
16870         {
16871             return;
16872         }
16873
16874         if (block->bbStkDepth > 0)
16875         {
16876             // We need to fix the types of any spill temps that might have changed:
16877             //   int->native int, float->double, int->byref, etc.
16878             impRetypeEntryStateTemps(block);
16879         }
16880
16881         // OK, we must add to the pending list, if it's not already in it.
16882         if (impGetPendingBlockMember(block) != 0)
16883         {
16884             return;
16885         }
16886     }
16887
16888     // Get an entry to add to the pending list
16889
16890     PendingDsc* dsc;
16891
16892     if (impPendingFree)
16893     {
16894         // We can reuse one of the freed up dscs.
16895         dsc            = impPendingFree;
16896         impPendingFree = dsc->pdNext;
16897     }
16898     else
16899     {
16900         // We have to create a new dsc
16901         dsc = new (this, CMK_Unknown) PendingDsc;
16902     }
16903
16904     dsc->pdBB                 = block;
16905     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16906     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16907
16908     // Save the stack trees for later
16909
16910     if (verCurrentState.esStackDepth)
16911     {
16912         impSaveStackState(&dsc->pdSavedStack, false);
16913     }
16914
16915     // Add the entry to the pending list
16916
16917     dsc->pdNext    = impPendingList;
16918     impPendingList = dsc;
16919     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16920
16921     // Various assertions require us to now consider the block as not imported (at least for
16922     // the final time...)
16923     block->bbFlags &= ~BBF_IMPORTED;
16924
16925 #ifdef DEBUG
16926     if (verbose && 0)
16927     {
16928         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16929     }
16930 #endif
16931 }
16932
16933 /*****************************************************************************/
16934 //
16935 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16936 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
16937 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16938
16939 void Compiler::impReimportBlockPending(BasicBlock* block)
16940 {
16941     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16942
16943     assert(block->bbFlags & BBF_IMPORTED);
16944
16945     // OK, we must add to the pending list, if it's not already in it.
16946     if (impGetPendingBlockMember(block) != 0)
16947     {
16948         return;
16949     }
16950
16951     // Get an entry to add to the pending list
16952
16953     PendingDsc* dsc;
16954
16955     if (impPendingFree)
16956     {
16957         // We can reuse one of the freed up dscs.
16958         dsc            = impPendingFree;
16959         impPendingFree = dsc->pdNext;
16960     }
16961     else
16962     {
16963         // We have to create a new dsc
16964         dsc = new (this, CMK_ImpStack) PendingDsc;
16965     }
16966
16967     dsc->pdBB = block;
16968
16969     if (block->bbEntryState)
16970     {
16971         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16972         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16973         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16974     }
16975     else
16976     {
16977         dsc->pdThisPtrInit        = TIS_Bottom;
16978         dsc->pdSavedStack.ssDepth = 0;
16979         dsc->pdSavedStack.ssTrees = nullptr;
16980     }
16981
16982     // Add the entry to the pending list
16983
16984     dsc->pdNext    = impPendingList;
16985     impPendingList = dsc;
16986     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16987
16988     // Various assertions require us to now consider the block as not imported (at least for
16989     // the final time...)
16990     block->bbFlags &= ~BBF_IMPORTED;
16991
16992 #ifdef DEBUG
16993     if (verbose && 0)
16994     {
16995         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16996     }
16997 #endif
16998 }
16999
17000 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
17001 {
17002     if (comp->impBlockListNodeFreeList == nullptr)
17003     {
17004         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
17005     }
17006     else
17007     {
17008         BlockListNode* res             = comp->impBlockListNodeFreeList;
17009         comp->impBlockListNodeFreeList = res->m_next;
17010         return res;
17011     }
17012 }
17013
17014 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
17015 {
17016     node->m_next             = impBlockListNodeFreeList;
17017     impBlockListNodeFreeList = node;
17018 }
17019
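// Added commentary (illustrative, not from the original source): a spill clique is the set
// of blocks connected (transitively) by edges across which the IL stack is non-empty.
// For example:
//
//     BB01 --+              +-- BB10
//            +--> BB20 <----+
//     BB02 --+
//
// If BB01, BB02 and BB10 all flow into BB20 with a value on the stack, they form one clique:
// {BB01, BB02, BB10} on the predecessor side and {BB20} on the successor side, and all of
// them must agree on the spill temps used for the values live across those edges. The walk
// below alternates between the successor and predecessor to-do lists until no new members
// are found.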
17020 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
17021 {
17022     bool toDo = true;
17023
17024     noway_assert(!fgComputePredsDone);
17025     if (!fgCheapPredsValid)
17026     {
17027         fgComputeCheapPreds();
17028     }
17029
17030     BlockListNode* succCliqueToDo = nullptr;
17031     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
17032     while (toDo)
17033     {
17034         toDo = false;
17035         // Look at the successors of every member of the predecessor to-do list.
17036         while (predCliqueToDo != nullptr)
17037         {
17038             BlockListNode* node = predCliqueToDo;
17039             predCliqueToDo      = node->m_next;
17040             BasicBlock* blk     = node->m_blk;
17041             FreeBlockListNode(node);
17042
17043             const unsigned numSuccs = blk->NumSucc();
17044             for (unsigned succNum = 0; succNum < numSuccs; succNum++)
17045             {
17046                 BasicBlock* succ = blk->GetSucc(succNum);
17047                 // If it's not already in the clique, add it, and also add it
17048                 // as a member of the successor "toDo" set.
17049                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
17050                 {
17051                     callback->Visit(SpillCliqueSucc, succ);
17052                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
17053                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
17054                     toDo           = true;
17055                 }
17056             }
17057         }
17058         // Look at the predecessors of every member of the successor to-do list.
17059         while (succCliqueToDo != nullptr)
17060         {
17061             BlockListNode* node = succCliqueToDo;
17062             succCliqueToDo      = node->m_next;
17063             BasicBlock* blk     = node->m_blk;
17064             FreeBlockListNode(node);
17065
17066             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
17067             {
17068                 BasicBlock* predBlock = pred->block;
17069                 // If it's not already in the clique, add it, and also add it
17070                 // as a member of the predecessor "toDo" set.
17071                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
17072                 {
17073                     callback->Visit(SpillCliquePred, predBlock);
17074                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
17075                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
17076                     toDo           = true;
17077                 }
17078             }
17079         }
17080     }
17081
17082     // If this fails, it means we didn't walk the spill clique properly and somehow managed
17083     // to miss walking back to include the predecessor we started from.
17084     // The most likely cause is missing or out-of-date bbPreds.
17085     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
17086 }
17087
17088 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17089 {
17090     if (predOrSucc == SpillCliqueSucc)
17091     {
17092         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
17093         blk->bbStkTempsIn = m_baseTmp;
17094     }
17095     else
17096     {
17097         assert(predOrSucc == SpillCliquePred);
17098         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
17099         blk->bbStkTempsOut = m_baseTmp;
17100     }
17101 }
17102
17103 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17104 {
17105     // For Preds we could be a little smarter and just find the existing store
17106     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
17107     // just re-import the whole block (just like we do for successors)
17108
17109     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
17110     {
17111         // If we haven't imported this block and we're not going to (because it isn't on
17112         // the pending list) then just ignore it for now.
17113
17114         // This block has either never been imported (EntryState == NULL) or it failed
17115         // verification. Neither state requires us to force it to be imported now.
17116         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
17117         return;
17118     }
17119
17120     // For successors we have a valid verCurrentState, so just mark them for reimport
17121     // the 'normal' way.
17122     // Unlike predecessors, we *DO* need to reimport the current block because the
17123     // initial import had the wrong entry state types.
17124     // Similarly, blocks that are currently on the pending list still need to call
17125     // impImportBlockPending to fix up their entry state.
17126     if (predOrSucc == SpillCliqueSucc)
17127     {
17128         m_pComp->impReimportMarkBlock(blk);
17129
17130         // Set the current stack state to that of the blk->bbEntryState
17131         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
17132         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
17133
17134         m_pComp->impImportBlockPending(blk);
17135     }
17136     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
17137     {
17138         // As described above, we are only visiting predecessors so they can
17139         // add the appropriate casts, since we have already done that for the current
17140         // block, it does not need to be reimported.
17141         // Nor do we need to reimport blocks that are still pending, but not yet
17142         // imported.
17143         //
17144         // For predecessors, we have no state to seed the EntryState, so we just have
17145         // to assume the existing one is correct.
17146         // If the block is also a successor, it will get the EntryState properly
17147         // updated when it is visited as a successor in the above "if" block.
17148         assert(predOrSucc == SpillCliquePred);
17149         m_pComp->impReimportBlockPending(blk);
17150     }
17151 }
17152
17153 // Re-type the incoming lclVar nodes to match the varDsc.
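// Added commentary (not from the original source): if a later block in the spill clique
// widened a spill temp (e.g. int -> native int, or float -> double), any GT_LCL_VAR /
// GT_LCL_FLD nodes recorded in this block's entry state still carry the old type; this
// updates their gtType to the temp's current type.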
17154 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
17155 {
17156     if (blk->bbEntryState != nullptr)
17157     {
17158         EntryState* es = blk->bbEntryState;
17159         for (unsigned level = 0; level < es->esStackDepth; level++)
17160         {
17161             GenTreePtr tree = es->esStack[level].val;
17162             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
17163             {
17164                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
17165                 noway_assert(lclNum < lvaCount);
17166                 LclVarDsc* varDsc              = lvaTable + lclNum;
17167                 es->esStack[level].val->gtType = varDsc->TypeGet();
17168             }
17169         }
17170     }
17171 }
17172
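// Added commentary (not from the original source): the base temp is shared by the whole
// spill clique; e.g. with a stack depth of 2, every predecessor spills into temps
// (baseTmp, baseTmp + 1) and every successor reloads from the same two temps, so values
// flow correctly across every edge in the clique.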
17173 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
17174 {
17175     if (block->bbStkTempsOut != NO_BASE_TMP)
17176     {
17177         return block->bbStkTempsOut;
17178     }
17179
17180 #ifdef DEBUG
17181     if (verbose)
17182     {
17183         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
17184     }
17185 #endif // DEBUG
17186
17187     // Otherwise, choose one, and propagate to all members of the spill clique.
17188     // Grab enough temps for the whole stack.
17189     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
17190     SetSpillTempsBase callback(baseTmp);
17191
17192     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
17193     // to one spill clique, and similarly can only be the successor to one spill clique.
17194     impWalkSpillCliqueFromPred(block, &callback);
17195
17196     return baseTmp;
17197 }
17198
17199 void Compiler::impReimportSpillClique(BasicBlock* block)
17200 {
17201 #ifdef DEBUG
17202     if (verbose)
17203     {
17204         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
17205     }
17206 #endif // DEBUG
17207
17208     // If we get here, it is because this block is already part of a spill clique
17209     // and one predecessor had an outgoing live stack slot of type int, and this
17210     // block has an outgoing live stack slot of type native int.
17211     // We need to reset these before traversal because they have already been set
17212     // by the previous walk to determine all the members of the spill clique.
17213     impInlineRoot()->impSpillCliquePredMembers.Reset();
17214     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
17215
17216     ReimportSpillClique callback(this);
17217
17218     impWalkSpillCliqueFromPred(block, &callback);
17219 }
17220
17221 // Set the pre-state of "block" (which should not have a pre-state allocated) to
17222 // a copy of "srcState", cloning tree pointers as required.
17223 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
17224 {
17225     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
17226     {
17227         block->bbEntryState = nullptr;
17228         return;
17229     }
17230
17231     block->bbEntryState = (EntryState*)compGetMem(sizeof(EntryState));
17232
17233     // block->bbEntryState.esRefcount = 1;
17234
17235     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
17236     block->bbEntryState->thisInitialized = TIS_Bottom;
17237
17238     if (srcState->esStackDepth > 0)
17239     {
17240         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
17241         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
17242
17243         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
17244         for (unsigned level = 0; level < srcState->esStackDepth; level++)
17245         {
17246             GenTreePtr tree                         = srcState->esStack[level].val;
17247             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
17248         }
17249     }
17250
17251     if (verTrackObjCtorInitState)
17252     {
17253         verSetThisInit(block, srcState->thisInitialized);
17254     }
17255
17256     return;
17257 }
17258
17259 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
17260 {
17261     assert(tis != TIS_Bottom); // Precondition.
17262     if (block->bbEntryState == nullptr)
17263     {
17264         block->bbEntryState = new (this, CMK_Unknown) EntryState();
17265     }
17266
17267     block->bbEntryState->thisInitialized = tis;
17268 }
17269
17270 /*
17271  * Resets the current state to the state at the start of the basic block
17272  */
17273 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
17274 {
17275
17276     if (block->bbEntryState == nullptr)
17277     {
17278         destState->esStackDepth    = 0;
17279         destState->thisInitialized = TIS_Bottom;
17280         return;
17281     }
17282
17283     destState->esStackDepth = block->bbEntryState->esStackDepth;
17284
17285     if (destState->esStackDepth > 0)
17286     {
17287         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
17288
17289         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
17290     }
17291
17292     destState->thisInitialized = block->bbThisOnEntry();
17293
17294     return;
17295 }
17296
17297 ThisInitState BasicBlock::bbThisOnEntry()
17298 {
17299     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
17300 }
17301
17302 unsigned BasicBlock::bbStackDepthOnEntry()
17303 {
17304     return (bbEntryState ? bbEntryState->esStackDepth : 0);
17305 }
17306
17307 void BasicBlock::bbSetStack(void* stackBuffer)
17308 {
17309     assert(bbEntryState);
17310     assert(stackBuffer);
17311     bbEntryState->esStack = (StackEntry*)stackBuffer;
17312 }
17313
17314 StackEntry* BasicBlock::bbStackOnEntry()
17315 {
17316     assert(bbEntryState);
17317     return bbEntryState->esStack;
17318 }
17319
17320 void Compiler::verInitCurrentState()
17321 {
17322     verTrackObjCtorInitState        = FALSE;
17323     verCurrentState.thisInitialized = TIS_Bottom;
17324
17325     if (tiVerificationNeeded)
17326     {
17327         // Track this ptr initialization
17328         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
17329         {
17330             verTrackObjCtorInitState        = TRUE;
17331             verCurrentState.thisInitialized = TIS_Uninit;
17332         }
17333     }
17334
17335     // initialize stack info
17336
17337     verCurrentState.esStackDepth = 0;
17338     assert(verCurrentState.esStack != nullptr);
17339
17340     // copy current state to entry state of first BB
17341     verInitBBEntryState(fgFirstBB, &verCurrentState);
17342 }
17343
17344 Compiler* Compiler::impInlineRoot()
17345 {
17346     if (impInlineInfo == nullptr)
17347     {
17348         return this;
17349     }
17350     else
17351     {
17352         return impInlineInfo->InlineRoot;
17353     }
17354 }
17355
17356 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
17357 {
17358     if (predOrSucc == SpillCliquePred)
17359     {
17360         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
17361     }
17362     else
17363     {
17364         assert(predOrSucc == SpillCliqueSucc);
17365         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
17366     }
17367 }
17368
17369 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
17370 {
17371     if (predOrSucc == SpillCliquePred)
17372     {
17373         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
17374     }
17375     else
17376     {
17377         assert(predOrSucc == SpillCliqueSucc);
17378         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
17379     }
17380 }
17381
17382 /*****************************************************************************
17383  *
17384  *  Convert the instrs ("import") into our internal format (trees). The
17385  *  basic flowgraph has already been constructed and is passed in.
17386  */
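//
// Added commentary (illustrative, not from the original source): import is driven by a
// simple worklist. In outline:
//
//     push the entry block (with an empty stack) onto impPendingList;
//     while the list is non-empty:
//         pop a block, restore its saved entry stack state, and import it;
//         queue any successor that has not been imported yet, or whose pre-state
//         changed when merged with this block's exit state.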
17387
17388 void Compiler::impImport(BasicBlock* method)
17389 {
17390 #ifdef DEBUG
17391     if (verbose)
17392     {
17393         printf("*************** In impImport() for %s\n", info.compFullName);
17394     }
17395 #endif
17396
17397     /* Allocate the stack contents */
17398
17399     if (info.compMaxStack <= _countof(impSmallStack))
17400     {
17401         /* Use local variable, don't waste time allocating on the heap */
17402
17403         impStkSize              = _countof(impSmallStack);
17404         verCurrentState.esStack = impSmallStack;
17405     }
17406     else
17407     {
17408         impStkSize              = info.compMaxStack;
17409         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17410     }
17411
17412     // initialize the entry state at start of method
17413     verInitCurrentState();
17414
17415     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
17416     Compiler* inlineRoot = impInlineRoot();
17417     if (this == inlineRoot) // These are only used on the root of the inlining tree.
17418     {
17419         // We have initialized these previously, but to size 0.  Make them larger.
17420         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
17421         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
17422         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
17423     }
17424     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
17425     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
17426     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
17427     impBlockListNodeFreeList = nullptr;
17428
17429 #ifdef DEBUG
17430     impLastILoffsStmt   = nullptr;
17431     impNestedStackSpill = false;
17432 #endif
17433     impBoxTemp = BAD_VAR_NUM;
17434
17435     impPendingList = impPendingFree = nullptr;
17436
17437     /* Add the entry-point to the worker-list */
17438
17439     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
17440     // from EH normalization.
17441     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
17442     // out.
17443     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
17444     {
17445         // Treat these as imported.
17446         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
17447         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
17448         method->bbFlags |= BBF_IMPORTED;
17449     }
17450
17451     impImportBlockPending(method);
17452
17453     /* Import blocks in the worker-list until there are no more */
17454
17455     while (impPendingList)
17456     {
17457         /* Remove the entry at the front of the list */
17458
17459         PendingDsc* dsc = impPendingList;
17460         impPendingList  = impPendingList->pdNext;
17461         impSetPendingBlockMember(dsc->pdBB, 0);
17462
17463         /* Restore the stack state */
17464
17465         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
17466         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
17467         if (verCurrentState.esStackDepth)
17468         {
17469             impRestoreStackState(&dsc->pdSavedStack);
17470         }
17471
17472         /* Add the entry to the free list for reuse */
17473
17474         dsc->pdNext    = impPendingFree;
17475         impPendingFree = dsc;
17476
17477         /* Now import the block */
17478
17479         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
17480         {
17481
17482 #ifdef _TARGET_64BIT_
17483             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
17484             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
17485             // method for further explanation on why we raise this exception instead of making the jitted
17486             // code throw the verification exception during execution.
17487             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
17488             {
17489                 BADCODE("Basic block marked as not verifiable");
17490             }
17491             else
17492 #endif // _TARGET_64BIT_
17493             {
17494                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
17495                 impEndTreeList(dsc->pdBB);
17496             }
17497         }
17498         else
17499         {
17500             impImportBlock(dsc->pdBB);
17501
17502             if (compDonotInline())
17503             {
17504                 return;
17505             }
17506             if (compIsForImportOnly() && !tiVerificationNeeded)
17507             {
17508                 return;
17509             }
17510         }
17511     }
17512
17513 #ifdef DEBUG
17514     if (verbose && info.compXcptnsCount)
17515     {
17516         printf("\nAfter impImport() added blocks for try, catch, finally");
17517         fgDispBasicBlocks();
17518         printf("\n");
17519     }
17520
17521     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
17522     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
17523     {
17524         block->bbFlags &= ~BBF_VISITED;
17525     }
17526 #endif
17527
17528     assert(!compIsForInlining() || !tiVerificationNeeded);
17529 }
17530
17531 // Checks if a typeinfo (usually stored in the type stack) is a struct.
17532 // The invariant here is that if it's not a ref or a method and it has a class handle,
17533 // then it's a value type.
17534 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
17535 {
17536     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
17537     {
17538         return true;
17539     }
17540     else
17541     {
17542         return false;
17543     }
17544 }
17545
17546 /*****************************************************************************
17547  *  Check to see if the tree is the address of a local or
17548  *  the address of a field in a local.
17549  *
17550  *  *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
17551  *
17552  */
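//
//  Added illustration (not from the original source): this returns TRUE for
//  ADDR(LCL_VAR) and for the address of a (possibly nested) field of a local,
//  e.g. ADDR(FIELD(ADDR(LCL_VAR))); it returns FALSE once the field chain reaches
//  a static field, where gtFldObj is NULL.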
17553
17554 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
17555 {
17556     if (tree->gtOper != GT_ADDR)
17557     {
17558         return FALSE;
17559     }
17560
17561     GenTreePtr op = tree->gtOp.gtOp1;
17562     while (op->gtOper == GT_FIELD)
17563     {
17564         op = op->gtField.gtFldObj;
17565         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
17566         {
17567             op = op->gtOp.gtOp1;
17568         }
17569         else
17570         {
17571             return false;
17572         }
17573     }
17574
17575     if (op->gtOper == GT_LCL_VAR)
17576     {
17577         *lclVarTreeOut = op;
17578         return TRUE;
17579     }
17580     else
17581     {
17582         return FALSE;
17583     }
17584 }
17585
17586 //------------------------------------------------------------------------
17587 // impMakeDiscretionaryInlineObservations: make observations that help
17588 // determine the profitability of a discretionary inline
17589 //
17590 // Arguments:
17591 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
17592 //    inlineResult -- InlineResult accumulating information about this inline
17593 //
17594 // Notes:
17595 //    If inlining or prejitting the root, this method also makes
17596 //    various observations about the method that factor into inline
17597 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
17598
17599 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
17600 {
17601     assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining.
17602            (pInlineInfo == nullptr && !compIsForInlining())   // Calculate the static inlining hint for ngen.
17603            );
17604
17605     // If we're really inlining, we should just have one result in play.
17606     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
17607
17608     // If this is a "forceinline" method, the JIT probably shouldn't have gone
17609     // to the trouble of estimating the native code size. Even if it did, it
17610     // shouldn't be relying on the result of this method.
17611     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
17612
17613     // Note if the caller contains NEWOBJ or NEWARR.
17614     Compiler* rootCompiler = impInlineRoot();
17615
17616     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
17617     {
17618         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
17619     }
17620
17621     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
17622     {
17623         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
17624     }
17625
17626     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
17627     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
17628
17629     if (isSpecialMethod)
17630     {
17631         if (calleeIsStatic)
17632         {
17633             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
17634         }
17635         else
17636         {
17637             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
17638         }
17639     }
17640     else if (!calleeIsStatic)
17641     {
17642         // Callee is an instance method.
17643         //
17644         // Check if the callee has the same 'this' as the root.
17645         if (pInlineInfo != nullptr)
17646         {
17647             GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
17648             assert(thisArg);
17649             bool isSameThis = impIsThis(thisArg);
17650             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
17651         }
17652     }
17653
17654     // Note if the callee's class is a promotable struct
17655     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
17656     {
17657         lvaStructPromotionInfo structPromotionInfo;
17658         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
17659         if (structPromotionInfo.canPromote)
17660         {
17661             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
17662         }
17663     }
17664
17665 #ifdef FEATURE_SIMD
17666
17667     // Note if this method has SIMD args or a SIMD return value
17668     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
17669     {
17670         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
17671     }
17672
17673 #endif // FEATURE_SIMD
17674
17675     // Roughly classify callsite frequency.
17676     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
17677
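    // Added summary (commentary only, not from the original source): the buckets below,
    // checked in order, are HOT (prejit root or maximally hot block), LOOP (call site in
    // a block with a backward jump, excluding direct recursion), WARM (block has a
    // non-zero profile weight), RARE (run-rarely block, or the callee is a class
    // constructor), and BORING (everything else).
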
17678     // If this is a prejit root, or a maximally hot block...
17679     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
17680     {
17681         frequency = InlineCallsiteFrequency::HOT;
17682     }
17683     // No training data.  Look for loop-like things.
17684     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
17685     // However, give it to things nearby.
17686     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
17687              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
17688     {
17689         frequency = InlineCallsiteFrequency::LOOP;
17690     }
17691     else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
17692     {
17693         frequency = InlineCallsiteFrequency::WARM;
17694     }
17695     // Now modify the multiplier based on where we're called from.
17696     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
17697     {
17698         frequency = InlineCallsiteFrequency::RARE;
17699     }
17700     else
17701     {
17702         frequency = InlineCallsiteFrequency::BORING;
17703     }
17704
17705     // Also capture the block weight of the call site.  In the prejit
17706     // root case, assume there's some hot call site for this method.
17707     unsigned weight = 0;
17708
17709     if (pInlineInfo != nullptr)
17710     {
17711         weight = pInlineInfo->iciBlock->bbWeight;
17712     }
17713     else
17714     {
17715         weight = BB_MAX_WEIGHT;
17716     }
17717
17718     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
17719     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
17720 }
17721
17722 /*****************************************************************************
17723  This method makes STATIC inlining decision based on the IL code.
17724  It should not make any inlining decision based on the context.
17725  If forceInline is true, then the inlining decision should not depend on
17726  performance heuristics (code size, etc.).
17727  */
17728
17729 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
17730                               CORINFO_METHOD_INFO*  methInfo,
17731                               bool                  forceInline,
17732                               InlineResult*         inlineResult)
17733 {
17734     unsigned codeSize = methInfo->ILCodeSize;
17735
17736     // We shouldn't have made up our minds yet...
17737     assert(!inlineResult->IsDecided());
17738
17739     if (methInfo->EHcount)
17740     {
17741         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
17742         return;
17743     }
17744
17745     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
17746     {
17747         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17748         return;
17749     }
17750
17751     // For now we don't inline varargs (import code can't handle it)
17752
17753     if (methInfo->args.isVarArg())
17754     {
17755         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17756         return;
17757     }
17758
17759     // Reject if it has too many locals.
17760     // This is currently an implementation limit due to fixed-size arrays in the
17761     // inline info, rather than a performance heuristic.
17762
17763     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17764
17765     if (methInfo->locals.numArgs > MAX_INL_LCLS)
17766     {
17767         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17768         return;
17769     }
17770
17771     // Make sure there aren't too many arguments.
17772     // This is currently an implementation limit due to fixed-size arrays in the
17773     // inline info, rather than a performance heuristic.
17774
17775     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17776
17777     if (methInfo->args.numArgs > MAX_INL_ARGS)
17778     {
17779         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17780         return;
17781     }
17782
17783     // Note force inline state
17784
17785     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17786
17787     // Note IL code size
17788
17789     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17790
17791     if (inlineResult->IsFailure())
17792     {
17793         return;
17794     }
17795
17796     // Make sure maxstack is not too big
17797
17798     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17799
17800     if (inlineResult->IsFailure())
17801     {
17802         return;
17803     }
17804 }
17805
17806 /*****************************************************************************
17807  */
17808
17809 void Compiler::impCheckCanInline(GenTreePtr             call,
17810                                  CORINFO_METHOD_HANDLE  fncHandle,
17811                                  unsigned               methAttr,
17812                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17813                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17814                                  InlineResult*          inlineResult)
17815 {
17816     // Either EE or JIT might throw exceptions below.
17817     // If that happens, just don't inline the method.
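    // Added commentary (not from the original source): the state below is packaged into a
    // local Param struct and passed by address to eeRunWithErrorTrap, since the callback
    // run under the error trap cannot capture locals; any EE/JIT exception raised inside
    // it is swallowed and simply reported as a failed inline.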
17818
17819     struct Param
17820     {
17821         Compiler*              pThis;
17822         GenTreePtr             call;
17823         CORINFO_METHOD_HANDLE  fncHandle;
17824         unsigned               methAttr;
17825         CORINFO_CONTEXT_HANDLE exactContextHnd;
17826         InlineResult*          result;
17827         InlineCandidateInfo**  ppInlineCandidateInfo;
17828     } param;
17829     memset(&param, 0, sizeof(param));
17830
17831     param.pThis                 = this;
17832     param.call                  = call;
17833     param.fncHandle             = fncHandle;
17834     param.methAttr              = methAttr;
17835     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17836     param.result                = inlineResult;
17837     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17838
17839     bool success = eeRunWithErrorTrap<Param>(
17840         [](Param* pParam) {
17841             DWORD                  dwRestrictions = 0;
17842             CorInfoInitClassResult initClassResult;
17843
17844 #ifdef DEBUG
17845             const char* methodName;
17846             const char* className;
17847             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17848
17849             if (JitConfig.JitNoInline())
17850             {
17851                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17852                 goto _exit;
17853             }
17854 #endif
17855
17856             /* Try to get the code address/size for the method */
17857
17858             CORINFO_METHOD_INFO methInfo;
17859             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17860             {
17861                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17862                 goto _exit;
17863             }
17864
17865             bool forceInline;
17866             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17867
17868             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17869
17870             if (pParam->result->IsFailure())
17871             {
17872                 assert(pParam->result->IsNever());
17873                 goto _exit;
17874             }
17875
17876             // Speculatively check if initClass() can be done.
17877             // If it can be done, we will try to inline the method. If inlining
17878             // succeeds, then we will do the non-speculative initClass() and commit it.
17879             // If this speculative call to initClass() fails, there is no point
17880             // trying to inline this method.
17881             initClassResult =
17882                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17883                                                            pParam->exactContextHnd /* context */,
17884                                                            TRUE /* speculative */);
17885
17886             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17887             {
17888                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17889                 goto _exit;
17890             }
17891
17892             // Give the EE the final say in whether to inline or not.
17893             // This should be done last since, for verifiable code, it can be expensive.
17894
17895             /* VM Inline check also ensures that the method is verifiable if needed */
17896             CorInfoInline vmResult;
17897             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17898                                                                   &dwRestrictions);
17899
17900             if (vmResult == INLINE_FAIL)
17901             {
17902                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17903             }
17904             else if (vmResult == INLINE_NEVER)
17905             {
17906                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17907             }
17908
17909             if (pParam->result->IsFailure())
17910             {
17911                 // Make sure not to report this one.  It was already reported by the VM.
17912                 pParam->result->SetReported();
17913                 goto _exit;
17914             }
17915
17916             // check for unsupported inlining restrictions
17917             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17918
17919             if (dwRestrictions & INLINE_SAME_THIS)
17920             {
17921                 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17922                 assert(thisArg);
17923
17924                 if (!pParam->pThis->impIsThis(thisArg))
17925                 {
17926                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17927                     goto _exit;
17928                 }
17929             }
17930
17931             /* Get the method properties */
17932
17933             CORINFO_CLASS_HANDLE clsHandle;
17934             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17935             unsigned clsAttr;
17936             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17937
17938             /* Get the return type */
17939
17940             var_types fncRetType;
17941             fncRetType = pParam->call->TypeGet();
17942
17943 #ifdef DEBUG
17944             var_types fncRealRetType;
17945             fncRealRetType = JITtype2varType(methInfo.args.retType);
17946
17947             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17948                    // <BUGNUM> VSW 288602 </BUGNUM>
17949                    // In case of IJW, we allow to assign a native pointer to a BYREF.
17950                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17951                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17952 #endif
17953
17954             //
17955             // Allocate an InlineCandidateInfo structure
17956             //
17957             InlineCandidateInfo* pInfo;
17958             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17959
17960             pInfo->dwRestrictions  = dwRestrictions;
17961             pInfo->methInfo        = methInfo;
17962             pInfo->methAttr        = pParam->methAttr;
17963             pInfo->clsHandle       = clsHandle;
17964             pInfo->clsAttr         = clsAttr;
17965             pInfo->fncRetType      = fncRetType;
17966             pInfo->exactContextHnd = pParam->exactContextHnd;
17967             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17968             pInfo->initClassResult = initClassResult;
17969
17970             *(pParam->ppInlineCandidateInfo) = pInfo;
17971
17972         _exit:;
17973         },
17974         &param);
17975     if (!success)
17976     {
17977         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17978     }
17979 }
17980
17981 //------------------------------------------------------------------------
17982 // impInlineRecordArgInfo: record information about an inline candidate argument
17983 //
17984 // Arguments:
17985 //   pInlineInfo - inline info for the inline candidate
17986 //   curArgVal - tree for the caller actual argument value
17987 //   argNum - logical index of this argument
17988 //   inlineResult - result of ongoing inline evaluation
17989 //
17990 // Notes:
17991 //
17992 //   Checks for various inline blocking conditions and makes notes in
17993 //   the inline info arg table about the properties of the actual. These
17994 //   properties are used later by impFetchArg to determine how best to
17995 //   pass the argument into the inlinee.
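//
//   Added illustration (not from the original source): for example, passing the address
//   of a local struct sets argIsByRefToStructLocal, passing a constant or the address of
//   a local sets argIsInvariant, and passing a null constant for 'this' aborts the inline
//   with CALLSITE_ARG_HAS_NULL_THIS.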
17996
17997 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
17998                                       GenTree*      curArgVal,
17999                                       unsigned      argNum,
18000                                       InlineResult* inlineResult)
18001 {
18002     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
18003
18004     if (curArgVal->gtOper == GT_MKREFANY)
18005     {
18006         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
18007         return;
18008     }
18009
18010     inlCurArgInfo->argNode = curArgVal;
18011
18012     GenTreePtr lclVarTree;
18013     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
18014     {
18015         inlCurArgInfo->argIsByRefToStructLocal = true;
18016 #ifdef FEATURE_SIMD
18017         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
18018         {
18019             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
18020         }
18021 #endif // FEATURE_SIMD
18022     }
18023
18024     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
18025     {
18026         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
18027         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
18028     }
18029
18030     if (curArgVal->gtOper == GT_LCL_VAR)
18031     {
18032         inlCurArgInfo->argIsLclVar = true;
18033
18034         /* Remember the "original" argument number */
18035         curArgVal->gtLclVar.gtLclILoffs = argNum;
18036     }
18037
18038     if ((curArgVal->OperKind() & GTK_CONST) ||
18039         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
18040     {
18041         inlCurArgInfo->argIsInvariant = true;
18042         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
18043         {
18044             // Abort inlining at this call site
18045             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
18046             return;
18047         }
18048     }
18049
18050     // If the arg is a local that is address-taken, we can't safely
18051     // directly substitute it into the inlinee.
18052     //
18053     // Previously we'd accomplish this by setting "argHasLdargaOp" but
18054     // that has a stronger meaning: that the arg value can change in
18055     // the method body. Using that flag prevents type propagation,
18056     // which is safe in this case.
18057     //
18058     // Instead mark the arg as having a caller local ref.
18059     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
18060     {
18061         inlCurArgInfo->argHasCallerLocalRef = true;
18062     }
18063
18064 #ifdef DEBUG
18065     if (verbose)
18066     {
18067         if (inlCurArgInfo->argIsThis)
18068         {
18069             printf("thisArg:");
18070         }
18071         else
18072         {
18073             printf("\nArgument #%u:", argNum);
18074         }
18075         if (inlCurArgInfo->argIsLclVar)
18076         {
18077             printf(" is a local var");
18078         }
18079         if (inlCurArgInfo->argIsInvariant)
18080         {
18081             printf(" is a constant");
18082         }
18083         if (inlCurArgInfo->argHasGlobRef)
18084         {
18085             printf(" has global refs");
18086         }
18087         if (inlCurArgInfo->argHasCallerLocalRef)
18088         {
18089             printf(" has caller local ref");
18090         }
18091         if (inlCurArgInfo->argHasSideEff)
18092         {
18093             printf(" has side effects");
18094         }
18095         if (inlCurArgInfo->argHasLdargaOp)
18096         {
18097             printf(" has ldarga effect");
18098         }
18099         if (inlCurArgInfo->argHasStargOp)
18100         {
18101             printf(" has starg effect");
18102         }
18103         if (inlCurArgInfo->argIsByRefToStructLocal)
18104         {
18105             printf(" is byref to a struct local");
18106         }
18107
18108         printf("\n");
18109         gtDispTree(curArgVal);
18110         printf("\n");
18111     }
18112 #endif
18113 }
18114
18115 //------------------------------------------------------------------------
18116 // impInlineInitVars: setup inline information for inlinee args and locals
18117 //
18118 // Arguments:
18119 //    pInlineInfo - inline info for the inline candidate
18120 //
18121 // Notes:
18122 //    This method primarily adds caller-supplied info to the inlArgInfo
18123 //    and sets up the lclVarInfo table.
18124 //
18125 //    For args, the inlArgInfo records properties of the actual argument
18126 //    including the tree node that produces the arg value. This node is
18127 //    usually the tree node present at the call, but may also differ in
18128 //    various ways:
18129 //    - when the call arg is a GT_RET_EXPR, we search back through the ret
18130 //      expr chain for the actual node. Note this will either be the original
18131 //      call (which will be a failed inline by this point), or the return
18132 //      expression from some set of inlines.
18133 //    - when argument type casting is needed the necessary casts are added
18134 //      around the argument node.
18135 //    - if an argument can be simplified by folding then the node here is the
18136 //      folded value.
18137 //
18138 //   The method may make observations that lead to marking this candidate as
18139 //   a failed inline. If this happens the initialization is abandoned immediately
18140 //   to try and reduce the jit time cost for a failed inline.
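//
//   Added illustration (not from the original source): for the GT_RET_EXPR case, if the
//   caller contains x = Foo(Bar()) and Bar() was itself successfully inlined, the recorded
//   arg node is Bar's return expression rather than the original call node, obtained via
//   gtRetExprVal().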
18141
18142 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
18143 {
18144     assert(!compIsForInlining());
18145
18146     GenTreePtr           call         = pInlineInfo->iciCall;
18147     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
18148     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
18149     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
18150     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
18151     InlineResult*        inlineResult = pInlineInfo->inlineResult;
18152
18153     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
18154
18155     /* init the argument struct */
18156
18157     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
18158
18159     /* Get hold of the 'this' pointer and the argument list proper */
18160
18161     GenTreePtr thisArg = call->gtCall.gtCallObjp;
18162     GenTreePtr argList = call->gtCall.gtCallArgs;
18163     unsigned   argCnt  = 0; // Count of the arguments
18164
18165     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
18166
18167     if (thisArg)
18168     {
18169         inlArgInfo[0].argIsThis = true;
18170         GenTree* actualThisArg  = thisArg->gtRetExprVal();
18171         impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, inlineResult);
18172
18173         if (inlineResult->IsFailure())
18174         {
18175             return;
18176         }
18177
18178         /* Increment the argument count */
18179         argCnt++;
18180     }
18181
18182     /* Record some information about each of the arguments */
18183     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
18184
18185 #if USER_ARGS_COME_LAST
18186     unsigned typeCtxtArg = thisArg ? 1 : 0;
18187 #else  // USER_ARGS_COME_LAST
18188     unsigned typeCtxtArg = methInfo->args.totalILArgs();
18189 #endif // USER_ARGS_COME_LAST
18190
18191     for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
18192     {
18193         if (argTmp == argList && hasRetBuffArg)
18194         {
18195             continue;
18196         }
18197
18198         // Ignore the type context argument
18199         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
18200         {
18201             pInlineInfo->typeContextArg = typeCtxtArg;
18202             typeCtxtArg                 = 0xFFFFFFFF;
18203             continue;
18204         }
18205
18206         assert(argTmp->gtOper == GT_LIST);
18207         GenTree* arg       = argTmp->gtOp.gtOp1;
18208         GenTree* actualArg = arg->gtRetExprVal();
18209         impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
18210
18211         if (inlineResult->IsFailure())
18212         {
18213             return;
18214         }
18215
18216         /* Increment the argument count */
18217         argCnt++;
18218     }
18219
18220     /* Make sure we got the arg number right */
18221     assert(argCnt == methInfo->args.totalILArgs());
18222
18223 #ifdef FEATURE_SIMD
18224     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
18225 #endif // FEATURE_SIMD
18226
18227     /* We have typeless opcodes, get type information from the signature */
18228
18229     if (thisArg)
18230     {
18231         var_types sigType;
18232
18233         if (clsAttr & CORINFO_FLG_VALUECLASS)
18234         {
18235             sigType = TYP_BYREF;
18236         }
18237         else
18238         {
18239             sigType = TYP_REF;
18240         }
18241
18242         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
18243         lclVarInfo[0].lclHasLdlocaOp = false;
18244
18245 #ifdef FEATURE_SIMD
18246         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
18247         // the inlining multiplier) for anything in that assembly.
18248         // But we only need to normalize it if it is a TYP_STRUCT
18249         // (which we need to do even if we have already set foundSIMDType).
18250         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
18251         {
18252             if (sigType == TYP_STRUCT)
18253             {
18254                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
18255             }
18256             foundSIMDType = true;
18257         }
18258 #endif // FEATURE_SIMD
18259         lclVarInfo[0].lclTypeInfo = sigType;
18260
18261         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
18262                (thisArg->gtType == TYP_I_IMPL && // "this" is unmgd but the method's class doesn't care
18263                 (clsAttr & CORINFO_FLG_VALUECLASS)));
18264
18265         if (genActualType(thisArg->gtType) != genActualType(sigType))
18266         {
18267             if (sigType == TYP_REF)
18268             {
18269                 /* The argument cannot be bashed into a ref (see bug 750871) */
18270                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
18271                 return;
18272             }
18273
18274             /* This can only happen with byrefs <-> ints/shorts */
18275
18276             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
18277             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
18278
18279             if (sigType == TYP_BYREF)
18280             {
18281                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18282             }
18283             else if (thisArg->gtType == TYP_BYREF)
18284             {
18285                 assert(sigType == TYP_I_IMPL);
18286
18287                 /* If possible change the BYREF to an int */
18288                 if (thisArg->IsVarAddr())
18289                 {
18290                     thisArg->gtType              = TYP_I_IMPL;
18291                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18292                 }
18293                 else
18294                 {
18295                     /* Arguments 'int <- byref' cannot be bashed */
18296                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18297                     return;
18298                 }
18299             }
18300         }
18301     }
18302
18303     /* Init the types of the arguments and make sure the types
18304      * from the trees match the types in the signature */
18305
18306     CORINFO_ARG_LIST_HANDLE argLst;
18307     argLst = methInfo->args.args;
18308
18309     unsigned i;
18310     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
18311     {
18312         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
18313
18314         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
18315
18316 #ifdef FEATURE_SIMD
18317         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
18318         {
18319             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
18320             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
18321             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
18322             foundSIMDType = true;
18323             if (sigType == TYP_STRUCT)
18324             {
18325                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
18326                 sigType              = structType;
18327             }
18328         }
18329 #endif // FEATURE_SIMD
18330
18331         lclVarInfo[i].lclTypeInfo    = sigType;
18332         lclVarInfo[i].lclHasLdlocaOp = false;
18333
18334         /* Does the tree type match the signature type? */
18335
18336         GenTreePtr inlArgNode = inlArgInfo[i].argNode;
18337
18338         if (sigType != inlArgNode->gtType)
18339         {
18340             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
18341                but in bad IL cases with caller-callee signature mismatches we can see other types.
18342                Intentionally reject (fail the inline for) mismatched cases so the jit stays robust
18343                when it encounters bad IL. */
18344
18345             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
18346                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
18347                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
18348
18349             if (!isPlausibleTypeMatch)
18350             {
18351                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
18352                 return;
18353             }
18354
18355             /* Is it a narrowing or widening cast?
18356              * Widening casts are ok since the value computed is already
18357              * normalized to an int (on the IL stack) */
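            /* Illustrative example (assumed signature, not from a specific test):
               if the callee parameter is declared as an int8 but the caller passes
               an int32 expression, a cast to the small type is appended below; when
               the argument is a constant the cast is folded right away, so the
               recorded argument node remains a constant. */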
18358
18359             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
18360             {
18361                 if (sigType == TYP_BYREF)
18362                 {
18363                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18364                 }
18365                 else if (inlArgNode->gtType == TYP_BYREF)
18366                 {
18367                     assert(varTypeIsIntOrI(sigType));
18368
18369                     /* If possible bash the BYREF to an int */
18370                     if (inlArgNode->IsVarAddr())
18371                     {
18372                         inlArgNode->gtType           = TYP_I_IMPL;
18373                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18374                     }
18375                     else
18376                     {
18377                         /* Arguments 'int <- byref' cannot be changed */
18378                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18379                         return;
18380                     }
18381                 }
18382                 else if (genTypeSize(sigType) < EA_PTRSIZE)
18383                 {
18384                     /* Narrowing cast */
18385
18386                     if (inlArgNode->gtOper == GT_LCL_VAR &&
18387                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
18388                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
18389                     {
18390                         /* We don't need to insert a cast here as the variable
18391                            was assigned a normalized value of the right type */
18392
18393                         continue;
18394                     }
18395
18396                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
18397
18398                     inlArgInfo[i].argIsLclVar = false;
18399
18400                     /* Try to fold the node in case we have constant arguments */
18401
18402                     if (inlArgInfo[i].argIsInvariant)
18403                     {
18404                         inlArgNode            = gtFoldExprConst(inlArgNode);
18405                         inlArgInfo[i].argNode = inlArgNode;
18406                         assert(inlArgNode->OperIsConst());
18407                     }
18408                 }
18409 #ifdef _TARGET_64BIT_
18410                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
18411                 {
18412                     // This should only happen for int -> native int widening
18413                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
18414
18415                     inlArgInfo[i].argIsLclVar = false;
18416
18417                     /* Try to fold the node in case we have constant arguments */
18418
18419                     if (inlArgInfo[i].argIsInvariant)
18420                     {
18421                         inlArgNode            = gtFoldExprConst(inlArgNode);
18422                         inlArgInfo[i].argNode = inlArgNode;
18423                         assert(inlArgNode->OperIsConst());
18424                     }
18425                 }
18426 #endif // _TARGET_64BIT_
18427             }
18428         }
18429     }
18430
18431     /* Init the types of the local variables */
18432
18433     CORINFO_ARG_LIST_HANDLE localsSig;
18434     localsSig = methInfo->locals.args;
18435
18436     for (i = 0; i < methInfo->locals.numArgs; i++)
18437     {
18438         bool      isPinned;
18439         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
18440
18441         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
18442         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
18443         lclVarInfo[i + argCnt].lclTypeInfo    = type;
18444
18445         if (varTypeIsGC(type))
18446         {
18447             pInlineInfo->numberOfGcRefLocals++;
18448         }
18449
18450         if (isPinned)
18451         {
18452             // Pinned locals may cause inlines to fail.
18453             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
18454             if (inlineResult->IsFailure())
18455             {
18456                 return;
18457             }
18458         }
18459
18460         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
18461
18462         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
18463         // out on the inline.
18464         if (type == TYP_STRUCT)
18465         {
18466             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
18467             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
18468             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
18469             {
18470                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
18471                 if (inlineResult->IsFailure())
18472                 {
18473                     return;
18474                 }
18475
18476                 // Do further notification in the case where the call site is rare; some policies do
18477                 // not track the relative hotness of call sites for "always" inline cases.
18478                 if (pInlineInfo->iciBlock->isRunRarely())
18479                 {
18480                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
18481                     if (inlineResult->IsFailure())
18482                     {
18484                         return;
18485                     }
18486                 }
18487             }
18488         }
18489
18490         localsSig = info.compCompHnd->getArgNext(localsSig);
18491
18492 #ifdef FEATURE_SIMD
18493         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
18494         {
18495             foundSIMDType = true;
18496             if (featureSIMD && type == TYP_STRUCT)
18497             {
18498                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
18499                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
18500             }
18501         }
18502 #endif // FEATURE_SIMD
18503     }
18504
18505 #ifdef FEATURE_SIMD
18506     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
18507     {
18508         foundSIMDType = true;
18509     }
18510     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
18511 #endif // FEATURE_SIMD
18512 }
18513
18514 //------------------------------------------------------------------------
18515 // impInlineFetchLocal: get a local var that represents an inlinee local
18516 //
18517 // Arguments:
18518 //    lclNum -- number of the inlinee local
18519 //    reason -- debug string describing purpose of the local var
18520 //
18521 // Returns:
18522 //    Number of the local to use
18523 //
18524 // Notes:
18525 //    This method is invoked only for locals actually used in the
18526 //    inlinee body.
18527 //
18528 //    Allocates a new temp if necessary, and copies key properties
18529 //    over from the inlinee local var info.
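//
//    Illustrative example (assumed numbering): the first reference to inlinee
//    local #2 grabs a fresh temp in the caller (say V15) and copies over its
//    type, pinned-ness, and ldloca/stloc properties; later references find V15
//    cached in lclTmpNum and reuse it.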
18530
18531 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
18532 {
18533     assert(compIsForInlining());
18534
18535     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
18536
18537     if (tmpNum == BAD_VAR_NUM)
18538     {
18539         const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
18540         const var_types      lclTyp       = inlineeLocal.lclTypeInfo;
18541
18542         // The lifetime of this local might span multiple BBs.
18543         // So it is a long lifetime local.
18544         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
18545
18546         // Copy over key info
18547         lvaTable[tmpNum].lvType                 = lclTyp;
18548         lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
18549         lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
18550         lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
18551         lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
18552
18553         // Copy over class handle for ref types. Note this may be a
18554         // shared type -- someday perhaps we can get the exact
18555         // signature and pass in a more precise type.
18556         if (lclTyp == TYP_REF)
18557         {
18558             lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
18559         }
18560
18561         if (inlineeLocal.lclVerTypeInfo.IsStruct())
18562         {
18563             if (varTypeIsStruct(lclTyp))
18564             {
18565                 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18566             }
18567             else
18568             {
18569                 // This is a wrapped primitive.  Make sure the verstate knows that
18570                 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
18571             }
18572         }
18573
18574 #ifdef DEBUG
18575         // Sanity check that we're properly prepared for gc ref locals.
18576         if (varTypeIsGC(lclTyp))
18577         {
18578             // Since there are gc locals we should have seen them earlier
18579             // and if there was a return value, set up the spill temp.
18580             assert(impInlineInfo->HasGcRefLocals());
18581             assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
18582         }
18583         else
18584         {
18585             // Make sure all pinned locals count as gc refs.
18586             assert(!inlineeLocal.lclIsPinned);
18587         }
18588 #endif // DEBUG
18589     }
18590
18591     return tmpNum;
18592 }
18593
18594 //------------------------------------------------------------------------
18595 // impInlineFetchArg: return tree node for argument value in an inlinee
18596 //
18597 // Arguments:
18598 //    lclNum -- argument number in inlinee IL
18599 //    inlArgInfo -- argument info for inlinee
18600 //    lclVarInfo -- var info for inlinee
18601 //
18602 // Returns:
18603 //    Tree for the argument's value. Often an inlinee-scoped temp
18604 //    GT_LCL_VAR but can be other tree kinds, if the argument
18605 //    expression from the caller can be directly substituted into the
18606 //    inlinee body.
18607 //
18608 // Notes:
18609 //    Must be used only for arguments -- use impInlineFetchLocal for
18610 //    inlinee locals.
18611 //
18612 //    Direct substitution is performed when the formal argument cannot
18613 //    change value in the inlinee body (no starg or ldarga), and the
18614 //    actual argument expression's value cannot be changed if it is
18615 //    substituted into the inlinee body.
18616 //
18617 //    Even if an inlinee-scoped temp is returned here, it may later be
18618 //    "bashed" to a caller-supplied tree when arguments are actually
18619 //    passed (see fgInlinePrependStatements). Bashing can happen if
18620 //    the argument ends up being single use and other conditions are
18621 //    met. So the contents of the tree returned here may not end up
18622 //    being the ones ultimately used for the argument.
18623 //
18624 //    This method will side effect inlArgInfo. It should only be called
18625 //    for actual uses of the argument in the inlinee.
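//
//    Illustrative example (assumed call shapes): if the caller passes the
//    constant 5 and the inlinee never writes the argument, each use gets a
//    clone of the constant; if the caller passes a complex expression such as
//    a call result, the value is spilled to an inlinee temp and each use reads
//    that temp instead.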
18626
18627 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
18628 {
18629     // Cache the relevant arg and lcl info for this argument.
18630     // We will modify argInfo but not lclVarInfo.
18631     InlArgInfo&          argInfo          = inlArgInfo[lclNum];
18632     const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
18633     const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
18634     const var_types      lclTyp           = lclInfo.lclTypeInfo;
18635     GenTreePtr           op1              = nullptr;
18636
18637     if (argInfo.argIsInvariant && !argCanBeModified)
18638     {
18639         // Directly substitute constants or addresses of locals
18640         //
18641         // Clone the constant. Note that we cannot directly use
18642         // argNode in the trees even if !argInfo.argIsUsed as this
18643         // would introduce aliasing between inlArgInfo[].argNode and
18644         // impInlineExpr. Then gtFoldExpr() could change it, causing
18645         // further references to the argument to work off of the
18646         // bashed copy.
18647         op1 = gtCloneExpr(argInfo.argNode);
18648         PREFIX_ASSUME(op1 != nullptr);
18649         argInfo.argTmpNum = BAD_VAR_NUM;
18650     }
18651     else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
18652     {
18653         // Directly substitute unaliased caller locals for args that cannot be modified
18654         //
18655         // Use the caller-supplied node if this is the first use.
18656         op1               = argInfo.argNode;
18657         argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
18658
18659         // Use an equivalent copy if this is the second or subsequent
18660         // use, or if we need to retype.
18661         //
18662         // Note argument type mismatches that prevent inlining should
18663         // have been caught in impInlineInitVars.
18664         if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
18665         {
18666             assert(op1->gtOper == GT_LCL_VAR);
18667             assert(lclNum == op1->gtLclVar.gtLclILoffs);
18668
18669             var_types newTyp = lclTyp;
18670
18671             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
18672             {
18673                 newTyp = genActualType(lclTyp);
18674             }
18675
18676             // Create a new lcl var node - remember the argument lclNum
18677             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
18678         }
18679     }
18680     else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
18681     {
18682         /* Argument is a by-ref address to a struct, a normed struct, or its field.
18683            In these cases, don't spill the byref to a local, simply clone the tree and use it.
18684            This way we will increase the chance for this byref to be optimized away by
18685            a subsequent "dereference" operation.
18686
18687            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
18688            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
18689            For example, if the caller is:
18690                 ldloca.s   V_1  // V_1 is a local struct
18691                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
18692            and the callee being inlined has:
18693                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
18694                     ldarga.s   ptrToInts
18695                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
18696            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
18697            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
18698         */
18699         assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
18700         op1 = gtCloneExpr(argInfo.argNode);
18701     }
18702     else
18703     {
18704         /* Argument is a complex expression - it must be evaluated into a temp */
18705
18706         if (argInfo.argHasTmp)
18707         {
18708             assert(argInfo.argIsUsed);
18709             assert(argInfo.argTmpNum < lvaCount);
18710
18711             /* Create a new lcl var node - remember the argument lclNum */
18712             op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
18713
18714             /* This is the second or later use of this argument,
18715             so we have to use the temp (instead of the actual arg) */
18716             argInfo.argBashTmpNode = nullptr;
18717         }
18718         else
18719         {
18720             /* First time use */
18721             assert(!argInfo.argIsUsed);
18722
18723             /* Reserve a temp for the expression.
18724             * Use a large size node as we may change it later */
18725
18726             const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
18727
18728             lvaTable[tmpNum].lvType = lclTyp;
18729
18730             // For ref types, determine the type of the temp.
18731             if (lclTyp == TYP_REF)
18732             {
18733                 if (!argCanBeModified)
18734                 {
18735                     // If the arg can't be modified in the method
18736                     // body, use the type of the value, if
18737                     // known. Otherwise, use the declared type.
18738                     lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18739                 }
18740                 else
18741                 {
18742                     // Arg might be modified, use the declared type of
18743                     // the argument.
18744                     lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18745                 }
18746             }
18747
18748             assert(lvaTable[tmpNum].lvAddrExposed == 0);
18749             if (argInfo.argHasLdargaOp)
18750             {
18751                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
18752             }
18753
18754             if (lclInfo.lclVerTypeInfo.IsStruct())
18755             {
18756                 if (varTypeIsStruct(lclTyp))
18757                 {
18758                     lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18759                 }
18760                 else
18761                 {
18762                     // This is a wrapped primitive.  Make sure the verstate knows that
18763                     lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
18764                 }
18765             }
18766
18767             argInfo.argHasTmp = true;
18768             argInfo.argTmpNum = tmpNum;
18769
18770             // If we require strict exception order, then arguments must
18771             // be evaluated in sequence before the body of the inlined method.
18772             // So we need to evaluate them to a temp.
18773             // Also, if arguments have global or local references, we need to
18774             // evaluate them to a temp before the inlined body as the
18775             // inlined body may be modifying the global ref.
18776             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
18777             // if it is a struct, because it requires some additional handling.
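            // Illustrative note (assumption about typical cases): an argument that
            // reads a static field carries a global reference, so it is evaluated
            // into the temp ahead of the inlined body (which might write that
            // static) and takes the small-node path below with no bashing. An
            // argument with no side effects and no global or caller-local
            // references takes the large-node path, and its temp may later be
            // bashed back to the caller's original tree if the argument turns out
            // to be single-use.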
18778
18779             if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
18780                 !argInfo.argHasCallerLocalRef)
18781             {
18782                 /* Get a *LARGE* LCL_VAR node */
18783                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
18784
18785                 /* Record op1 as the very first use of this argument.
18786                 If there are no further uses of the arg, we may be
18787                 able to use the actual arg node instead of the temp.
18788                 If we do see any further uses, we will clear this. */
18789                 argInfo.argBashTmpNode = op1;
18790             }
18791             else
18792             {
18793                 /* Get a small LCL_VAR node */
18794                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
18795                 /* No bashing of this argument */
18796                 argInfo.argBashTmpNode = nullptr;
18797             }
18798         }
18799     }
18800
18801     // Mark this argument as used.
18802     argInfo.argIsUsed = true;
18803
18804     return op1;
18805 }
18806
18807 /******************************************************************************
18808  Is this the original "this" argument to the call being inlined?
18809
18810  Note that we do not inline methods that contain "starg 0", so we do not need to
18811  worry about 'this' being reassigned.
18812 */
18813
18814 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
18815 {
18816     assert(compIsForInlining());
18817     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
18818 }
18819
18820 //-----------------------------------------------------------------------------
18821 // This function checks if a dereference in the inlinee can guarantee that
18822 // the "this" is non-NULL.
18823 // If we haven't hit a branch or a side effect, and we are dereferencing
18824 // from 'this' to access a field or to make a GTF_CALL_NULLCHECK call,
18825 // then we can avoid a separate null pointer check.
18826 //
18827 // "additionalTreesToBeEvaluatedBefore"
18828 // is the set of pending trees that have not yet been added to the statement list,
18829 // and which have been removed from verCurrentState.esStack[]
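//
// Illustrative example (assumed IL): if the inlinee body begins with
//     ldarg.0
//     ldfld   int32 C::m_field
// and nothing with globally visible side effects has been imported or is still
// pending, the field access itself will fault on a null 'this', so the separate
// null check can be omitted.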
18830
18831 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
18832                                                                   GenTreePtr  variableBeingDereferenced,
18833                                                                   InlArgInfo* inlArgInfo)
18834 {
18835     assert(compIsForInlining());
18836     assert(opts.OptEnabled(CLFLG_INLINING));
18837
18838     BasicBlock* block = compCurBB;
18839
18840     GenTreePtr stmt;
18841     GenTreePtr expr;
18842
18843     if (block != fgFirstBB)
18844     {
18845         return FALSE;
18846     }
18847
18848     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
18849     {
18850         return FALSE;
18851     }
18852
18853     if (additionalTreesToBeEvaluatedBefore &&
18854         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
18855     {
18856         return FALSE;
18857     }
18858
18859     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
18860     {
18861         expr = stmt->gtStmt.gtStmtExpr;
18862
18863         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
18864         {
18865             return FALSE;
18866         }
18867     }
18868
18869     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
18870     {
18871         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
18872         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
18873         {
18874             return FALSE;
18875         }
18876     }
18877
18878     return TRUE;
18879 }
18880
18881 //------------------------------------------------------------------------
18882 // impMarkInlineCandidate: determine if this call can be subsequently inlined
18883 //
18884 // Arguments:
18885 //    callNode -- call under scrutiny
18886 //    exactContextHnd -- context handle for inlining
18887 //    exactContextNeedsRuntimeLookup -- true if context required runtime lookup
18888 //    callInfo -- call info from VM
18889 //
18890 // Notes:
18891 //    If callNode is an inline candidate, this method sets the flag
18892 //    GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
18893 //    filled in the associated InlineCandidateInfo.
18894 //
18895 //    If callNode is not an inline candidate, and the reason is
18896 //    something that is inherent to the method being called, the
18897 //    method may be marked as "noinline" to short-circuit any
18898 //    future assessments of calls to this method.
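//
//    Illustrative note (assumption): a callee-side observation such as
//    CALLEE_IS_SYNCHRONIZED is inherent to the method rather than to the call
//    site, so the method may be flagged noinline and later call sites can skip
//    these checks entirely; a call-site observation such as
//    CALLSITE_IS_WITHIN_FILTER only rejects this particular call.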
18899
18900 void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
18901                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
18902                                       bool                   exactContextNeedsRuntimeLookup,
18903                                       CORINFO_CALL_INFO*     callInfo)
18904 {
18905     // Let the strategy know there's another call
18906     impInlineRoot()->m_inlineStrategy->NoteCall();
18907
18908     if (!opts.OptEnabled(CLFLG_INLINING))
18909     {
18910         /* XXX Mon 8/18/2008
18911          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
18912          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
18913          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
18914          * figure out why we did not set MAXOPT for this compile.
18915          */
18916         assert(!compIsForInlining());
18917         return;
18918     }
18919
18920     if (compIsForImportOnly())
18921     {
18922         // Don't bother creating the inline candidate during verification.
18923         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18924         // that leads to the creation of multiple instances of Compiler.
18925         return;
18926     }
18927
18928     GenTreeCall* call = callNode->AsCall();
18929     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18930
18931     // Don't inline if not optimizing root method
18932     if (opts.compDbgCode)
18933     {
18934         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18935         return;
18936     }
18937
18938     // Don't inline if inlining into root method is disabled.
18939     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18940     {
18941         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18942         return;
18943     }
18944
18945     // Inlining candidate determination needs to honor only IL tail prefix.
18946     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18947     if (call->IsTailPrefixedCall())
18948     {
18949         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18950         return;
18951     }
18952
18953     // Tail recursion elimination takes precedence over inlining.
18954     // TODO: We may want to do some of the additional checks from fgMorphCall
18955     // here, to reduce the chance of rejecting an inline for a call that would not
18956     // have been optimized as a fast tail call or turned into a loop anyway.
18957     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18958     {
18959         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18960         return;
18961     }
18962
18963     if (call->IsVirtual())
18964     {
18965         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18966         return;
18967     }
18968
18969     /* Ignore helper calls */
18970
18971     if (call->gtCallType == CT_HELPER)
18972     {
18973         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18974         return;
18975     }
18976
18977     /* Ignore indirect calls */
18978     if (call->gtCallType == CT_INDIRECT)
18979     {
18980         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
18981         return;
18982     }
18983
18984     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
18985      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
18986      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
18987
18988     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
18989     unsigned              methAttr;
18990
18991     // Reuse method flags from the original callInfo if possible
18992     if (fncHandle == callInfo->hMethod)
18993     {
18994         methAttr = callInfo->methodFlags;
18995     }
18996     else
18997     {
18998         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
18999     }
19000
19001 #ifdef DEBUG
19002     if (compStressCompile(STRESS_FORCE_INLINE, 0))
19003     {
19004         methAttr |= CORINFO_FLG_FORCEINLINE;
19005     }
19006 #endif
19007
19008     // Check for COMPlus_AggressiveInlining
19009     if (compDoAggressiveInlining)
19010     {
19011         methAttr |= CORINFO_FLG_FORCEINLINE;
19012     }
19013
19014     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
19015     {
19016         /* Don't bother inlining call sites that are in a catch handler or filter region */
19017         if (bbInCatchHandlerILRange(compCurBB))
19018         {
19019 #ifdef DEBUG
19020             if (verbose)
19021             {
19022                 printf("\nWill not inline blocks that are in the catch handler region\n");
19023             }
19024
19025 #endif
19026
19027             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
19028             return;
19029         }
19030
19031         if (bbInFilterILRange(compCurBB))
19032         {
19033 #ifdef DEBUG
19034             if (verbose)
19035             {
19036                 printf("\nWill not inline blocks that are in the filter region\n");
19037             }
19038 #endif
19039
19040             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
19041             return;
19042         }
19043     }
19044
19045     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
19046
19047     if (opts.compNeedSecurityCheck)
19048     {
19049         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
19050         return;
19051     }
19052
19053     /* Check if we tried to inline this method before */
19054
19055     if (methAttr & CORINFO_FLG_DONT_INLINE)
19056     {
19057         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
19058         return;
19059     }
19060
19061     /* Cannot inline synchronized methods */
19062
19063     if (methAttr & CORINFO_FLG_SYNCH)
19064     {
19065         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
19066         return;
19067     }
19068
19069     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
19070
19071     if (methAttr & CORINFO_FLG_SECURITYCHECK)
19072     {
19073         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
19074         return;
19075     }
19076
19077     InlineCandidateInfo* inlineCandidateInfo = nullptr;
19078     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
19079
19080     if (inlineResult.IsFailure())
19081     {
19082         return;
19083     }
19084
19085     // The old value should be NULL
19086     assert(call->gtInlineCandidateInfo == nullptr);
19087
19088     // The new value should not be NULL.
19089     assert(inlineCandidateInfo != nullptr);
19090     inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
19091
19092     call->gtInlineCandidateInfo = inlineCandidateInfo;
19093
19094     // Mark the call node as inline candidate.
19095     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
19096
19097     // Let the strategy know there's another candidate.
19098     impInlineRoot()->m_inlineStrategy->NoteCandidate();
19099
19100     // Since we're not actually inlining yet, and this call site is
19101     // still just an inline candidate, there's nothing to report.
19102     inlineResult.SetReported();
19103 }
19104
19105 /******************************************************************************/
19106 // Returns true if the given intrinsic will be implemented by target-specific
19107 // instructions
19108
19109 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
19110 {
19111 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
19112     switch (intrinsicId)
19113     {
19114         // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
19115         // instructions to directly compute round/ceiling/floor.
19116         //
19117         // TODO: Because the x86 backend only targets SSE for floating-point code,
19118         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
19119         //       implemented those intrinsics as x87 instructions). If this poses
19120         //       a CQ problem, it may be necessary to change the implementation of
19121         //       the helper calls to decrease call overhead or switch back to the
19122         //       x87 instructions. This is tracked by #7097.
19123         case CORINFO_INTRINSIC_Sqrt:
19124         case CORINFO_INTRINSIC_Abs:
19125             return true;
19126
19127         case CORINFO_INTRINSIC_Round:
19128         case CORINFO_INTRINSIC_Ceiling:
19129         case CORINFO_INTRINSIC_Floor:
19130             return compSupports(InstructionSet_SSE41);
19131
19132         default:
19133             return false;
19134     }
19135 #elif defined(_TARGET_ARM64_)
19136     switch (intrinsicId)
19137     {
19138         case CORINFO_INTRINSIC_Sqrt:
19139         case CORINFO_INTRINSIC_Abs:
19140         case CORINFO_INTRINSIC_Round:
19141         case CORINFO_INTRINSIC_Floor:
19142         case CORINFO_INTRINSIC_Ceiling:
19143             return true;
19144
19145         default:
19146             return false;
19147     }
19148 #elif defined(_TARGET_ARM_)
19149     switch (intrinsicId)
19150     {
19151         case CORINFO_INTRINSIC_Sqrt:
19152         case CORINFO_INTRINSIC_Abs:
19153         case CORINFO_INTRINSIC_Round:
19154             return true;
19155
19156         default:
19157             return false;
19158     }
19159 #elif defined(_TARGET_X86_)
19160     switch (intrinsicId)
19161     {
19162         case CORINFO_INTRINSIC_Sin:
19163         case CORINFO_INTRINSIC_Cos:
19164         case CORINFO_INTRINSIC_Sqrt:
19165         case CORINFO_INTRINSIC_Abs:
19166         case CORINFO_INTRINSIC_Round:
19167             return true;
19168
19169         default:
19170             return false;
19171     }
19172 #else
19173     // TODO: This portion of logic is not implemented for other architectures.
19174     // The reason for returning true is that, on all other architectures, the only
19175     // intrinsics enabled are target intrinsics.
19176     return true;
19177 #endif //_TARGET_AMD64_
19178 }
19179
19180 /******************************************************************************/
19181 // Returns true if the given intrinsic will be implemented by calling System.Math
19182 // methods.
19183
19184 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
19185 {
19186     // Currently, if a math intrinsic is not implemented by target-specific
19187     // instructions, it will be implemented by a System.Math call. In the
19188     // future, if we turn to implementing some of them with helper calls,
19189     // this predicate needs to be revisited.
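    //
    // Illustrative example (assumed target/configuration): on x64 without SSE4.1
    // support, CORINFO_INTRINSIC_Floor is not a target intrinsic (see
    // IsTargetIntrinsic above), so Math.Floor remains an ordinary call into
    // System.Math.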
19190     return !IsTargetIntrinsic(intrinsicId);
19191 }
19192
19193 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
19194 {
19195     switch (intrinsicId)
19196     {
19197         case CORINFO_INTRINSIC_Sin:
19198         case CORINFO_INTRINSIC_Cbrt:
19199         case CORINFO_INTRINSIC_Sqrt:
19200         case CORINFO_INTRINSIC_Abs:
19201         case CORINFO_INTRINSIC_Cos:
19202         case CORINFO_INTRINSIC_Round:
19203         case CORINFO_INTRINSIC_Cosh:
19204         case CORINFO_INTRINSIC_Sinh:
19205         case CORINFO_INTRINSIC_Tan:
19206         case CORINFO_INTRINSIC_Tanh:
19207         case CORINFO_INTRINSIC_Asin:
19208         case CORINFO_INTRINSIC_Asinh:
19209         case CORINFO_INTRINSIC_Acos:
19210         case CORINFO_INTRINSIC_Acosh:
19211         case CORINFO_INTRINSIC_Atan:
19212         case CORINFO_INTRINSIC_Atan2:
19213         case CORINFO_INTRINSIC_Atanh:
19214         case CORINFO_INTRINSIC_Log10:
19215         case CORINFO_INTRINSIC_Pow:
19216         case CORINFO_INTRINSIC_Exp:
19217         case CORINFO_INTRINSIC_Ceiling:
19218         case CORINFO_INTRINSIC_Floor:
19219             return true;
19220         default:
19221             return false;
19222     }
19223 }
19224
19225 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
19226 {
19227     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
19228 }
19229
19230 //------------------------------------------------------------------------
19231 // impDevirtualizeCall: Attempt to change a virtual vtable call into a
19232 //   normal call
19233 //
19234 // Arguments:
19235 //     call -- the call node to examine/modify
19236 //     method   -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
19237 //     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
19238 //     contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
19239 //     exactContextHnd -- [OUT] updated context handle iff call devirtualized
19240 //
19241 // Notes:
19242 //     Virtual calls in IL will always "invoke" the base class method.
19243 //
19244 //     This transformation looks for evidence that the type of 'this'
19245 //     in the call is exactly known, is a final class or would invoke
19246 //     a final method, and if that and other safety checks pan out,
19247 //     modifies the call and the call info to create a direct call.
19248 //
19249 //     This transformation is initially done in the importer and not
19250 //     in some subsequent optimization pass because we want it to be
19251 //     upstream of inline candidate identification.
19252 //
19253 //     However, later phases may supply improved type information that
19254 //     can enable further devirtualization. We currently reinvoke this
19255 //     code after inlining, if the return value of the inlined call is
19256 //     the 'this obj' of a subsequent virtual call.
19257 //
19258 //     If devirtualization succeeds and the call's this object is the
19259 //     result of a box, the jit will ask the EE for the unboxed entry
19260 //     point. If this exists, the jit will see if it can rework the box
19261 //     to instead make a local copy. If that is doable, the call is
19262 //     updated to invoke the unboxed entry on the local copy.
19263 //
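//     Illustrative example (assumed types, not from a specific test):
//         sealed class Widget : Base { public override void M() { ... } }
//         Base b = GetWidget();   // jit knows the exact type is Widget
//         b.M();                  // IL: callvirt Base::M
//     Knowing the exact (or final) type of 'b' lets the callvirt be rewritten as
//     a direct call to Widget::M, which in turn makes it an inline candidate.
//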
19264 void Compiler::impDevirtualizeCall(GenTreeCall*            call,
19265                                    CORINFO_METHOD_HANDLE*  method,
19266                                    unsigned*               methodFlags,
19267                                    CORINFO_CONTEXT_HANDLE* contextHandle,
19268                                    CORINFO_CONTEXT_HANDLE* exactContextHandle)
19269 {
19270     assert(call != nullptr);
19271     assert(method != nullptr);
19272     assert(methodFlags != nullptr);
19273     assert(contextHandle != nullptr);
19274
19275     // This should be a virtual vtable or virtual stub call.
19276     assert(call->IsVirtual());
19277
19278     // Bail if not optimizing
19279     if (opts.MinOpts())
19280     {
19281         return;
19282     }
19283
19284     // Bail if debuggable codegen
19285     if (opts.compDbgCode)
19286     {
19287         return;
19288     }
19289
19290 #if defined(DEBUG)
19291     // Bail if devirt is disabled.
19292     if (JitConfig.JitEnableDevirtualization() == 0)
19293     {
19294         return;
19295     }
19296
19297     const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
19298 #endif // DEBUG
19299
19300     // Fetch information about the virtual method we're calling.
19301     CORINFO_METHOD_HANDLE baseMethod        = *method;
19302     unsigned              baseMethodAttribs = *methodFlags;
19303
19304     if (baseMethodAttribs == 0)
19305     {
19306         // For late devirt we may not have method attributes, so fetch them.
19307         baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19308     }
19309     else
19310     {
19311 #if defined(DEBUG)
19312         // Validate that callInfo has up to date method flags
19313         const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19314
19315         // All the base method attributes should agree, save that
19316         // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
19317         // because of concurrent jitting activity.
19318         //
19319         // Note we don't look at this particular flag bit below, and
19320         // later on (if we do try and inline) we will rediscover why
19321         // the method can't be inlined, so there's no danger here in
19322         // seeing this particular flag bit in different states between
19323         // the cached and fresh values.
19324         if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
19325         {
19326             assert(!"mismatched method attributes");
19327         }
19328 #endif // DEBUG
19329     }
19330
19331     // In R2R mode, we might see virtual stub calls to
19332     // non-virtuals. For instance cases where the non-virtual method
19333     // is in a different assembly but is called via CALLVIRT. For
19334     // version resilience we must allow for the fact that the method
19335     // might become virtual in some update.
19336     //
19337     // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
19338     // regular call+nullcheck upstream, so we won't reach this
19339     // point.
19340     if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
19341     {
19342         assert(call->IsVirtualStub());
19343         assert(opts.IsReadyToRun());
19344         JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
19345         return;
19346     }
19347
19348     // See what we know about the type of 'this' in the call.
19349     GenTree*             thisObj       = call->gtCallObjp->gtEffectiveVal(false);
19350     GenTree*             actualThisObj = nullptr;
19351     bool                 isExact       = false;
19352     bool                 objIsNonNull  = false;
19353     CORINFO_CLASS_HANDLE objClass      = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
19354
19355     // See if we have special knowledge that can get us a type or a better type.
19356     if ((objClass == nullptr) || !isExact)
19357     {
19358         actualThisObj = thisObj;
19359
19360         // Walk back through any return expression placeholders
19361         while (actualThisObj->OperGet() == GT_RET_EXPR)
19362         {
19363             actualThisObj = actualThisObj->gtRetExpr.gtInlineCandidate;
19364         }
19365
19366         // See if we landed on a call to a special intrinsic method
19367         if (actualThisObj->IsCall())
19368         {
19369             GenTreeCall* thisObjCall = actualThisObj->AsCall();
19370             if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
19371             {
19372                 assert(thisObjCall->gtCallType == CT_USER_FUNC);
19373                 CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
19374                 CORINFO_CLASS_HANDLE  specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
19375                 if (specialObjClass != nullptr)
19376                 {
19377                     objClass     = specialObjClass;
19378                     isExact      = true;
19379                     objIsNonNull = true;
19380                 }
19381             }
19382         }
19383     }
19384
19385     // Bail if we know nothing.
19386     if (objClass == nullptr)
19387     {
19388         JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
19389         return;
19390     }
19391
19392     // Fetch information about the class that introduced the virtual method.
19393     CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
19394     const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
19395
19396 #if !defined(FEATURE_CORECLR)
19397     // If base class is not beforefieldinit then devirtualizing may
19398     // cause us to miss a base class init trigger. Spec says we don't
19399     // need a trigger for ref class callvirts but desktop seems to
19400     // have one anyway. So defer.
19401     if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
19402     {
19403         JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
19404         return;
19405     }
19406 #endif // FEATURE_CORECLR
19407
19408     // Is the call an interface call?
19409     const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
19410
19411     // If the objClass is sealed (final), then we may be able to devirtualize.
19412     const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
19413     const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
19414
19415 #if defined(DEBUG)
19416     const char* callKind       = isInterface ? "interface" : "virtual";
19417     const char* objClassNote   = "[?]";
19418     const char* objClassName   = "?objClass";
19419     const char* baseClassName  = "?baseClass";
19420     const char* baseMethodName = "?baseMethod";
19421
19422     if (verbose || doPrint)
19423     {
19424         objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
19425         objClassName   = info.compCompHnd->getClassName(objClass);
19426         baseClassName  = info.compCompHnd->getClassName(baseClass);
19427         baseMethodName = eeGetMethodName(baseMethod, nullptr);
19428
19429         if (verbose)
19430         {
19431             printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
19432                    "    class for 'this' is %s%s (attrib %08x)\n"
19433                    "    base method is %s::%s\n",
19434                    callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
19435         }
19436     }
19437 #endif // defined(DEBUG)
19438
19439     // Bail if obj class is an interface.
19440     // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
19441     //   IL_021d:  ldloc.0
19442     //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
19443     if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
19444     {
19445         JITDUMP("--- obj class is interface, sorry\n");
19446         return;
19447     }
19448
19449     if (isInterface)
19450     {
19451         assert(call->IsVirtualStub());
19452         JITDUMP("--- base class is interface\n");
19453     }
19454
19455     // Fetch the method that would be called based on the declared type of 'this'
19456     CORINFO_CONTEXT_HANDLE ownerType     = *contextHandle;
19457     CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
19458
19459     // If we failed to get a handle, we can't devirtualize.  This can
19460     // happen when prejitting, if the devirtualization crosses
19461     // servicing bubble boundaries.
19462     if (derivedMethod == nullptr)
19463     {
19464         JITDUMP("--- no derived method, sorry\n");
19465         return;
19466     }
19467
19468     // Fetch method attributes to see if method is marked final.
19469     const DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
19470     const bool  derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
19471
19472 #if defined(DEBUG)
19473     const char* derivedClassName  = "?derivedClass";
19474     const char* derivedMethodName = "?derivedMethod";
19475
19476     const char* note = "speculative";
19477     if (isExact)
19478     {
19479         note = "exact";
19480     }
19481     else if (objClassIsFinal)
19482     {
19483         note = "final class";
19484     }
19485     else if (derivedMethodIsFinal)
19486     {
19487         note = "final method";
19488     }
19489
19490     if (verbose || doPrint)
19491     {
19492         derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
19493         if (verbose)
19494         {
19495             printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
19496             gtDispTree(call);
19497         }
19498     }
19499 #endif // defined(DEBUG)
19500
19501     if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
19502     {
19503         // Type is not exact, and neither the class nor the method is final.
19504         //
19505         // We could speculatively devirtualize, but there's no
19506         // reason to believe the derived method is the one that
19507         // is likely to be invoked.
19508         //
19509         // If there's currently no further overriding (that is, at
19510         // the time of jitting, objClass has no subclasses that
19511         // override this method), then perhaps we'd be willing to
19512         // make a bet...?
19513         JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
19514         return;
19515     }
19516
19517     // For interface calls we must have an exact type or final class.
19518     if (isInterface && !isExact && !objClassIsFinal)
19519     {
19520         JITDUMP("    Class not final or exact for interface, no devirtualization\n");
19521         return;
19522     }
19523
19524     JITDUMP("    %s; can devirtualize\n", note);
19525
19526     // Make the updates.
19527     call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
19528     call->gtFlags &= ~GTF_CALL_VIRT_STUB;
19529     call->gtCallMethHnd = derivedMethod;
19530     call->gtCallType    = CT_USER_FUNC;
19531
19532     // Virtual calls include an implicit null check, which we may
19533     // now need to make explicit.
19534     if (!objIsNonNull)
19535     {
19536         call->gtFlags |= GTF_CALL_NULLCHECK;
19537     }
19538
19539     // Clear the inline candidate info (may be non-null since
19540     // it's a union field used for other things by virtual
19541     // stubs)
19542     call->gtInlineCandidateInfo = nullptr;
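    // In rough pseudo-tree terms (illustrative only), the call has now gone from
    //
    //   CALLV (stub|vtable) baseMethod(this, args...)   // indirect; implicit null check
    // to
    //   CALL  derivedMethod(this, args...)              // direct; explicit GTF_CALL_NULLCHECK
    //                                                    // added above if 'this' may be null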
19543
19544 #if defined(DEBUG)
19545     if (verbose)
19546     {
19547         printf("... after devirt...\n");
19548         gtDispTree(call);
19549     }
19550
19551     if (doPrint)
19552     {
19553         printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
19554                baseMethodName, derivedClassName, derivedMethodName, note);
19555     }
19556 #endif // defined(DEBUG)
19557
19558     // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
19559     if (thisObj->IsBoxedValue())
19560     {
19561         JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
19562
19563         // Note for some shared methods the unboxed entry point requires an extra parameter.
19564         // We defer optimizing if so.
19565         bool                  requiresInstMethodTableArg = false;
19566         CORINFO_METHOD_HANDLE unboxedEntryMethod =
19567             info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
19568
19569         if (unboxedEntryMethod != nullptr)
19570         {
19571             // Since the call is the only consumer of the box, and the call is only passed an
19572             // interior pointer into the box, we know the box can't escape.
19573             //
19574             // So, revise the box to simply create a local copy, use the address of that copy
19575             // as the this pointer, and update the entry point to the unboxed entry.
19576             //
19577             // Ideally, we then inline the boxed method and, if it turns out not to modify
19578             // the copy, we can undo the copy too.
19579             if (requiresInstMethodTableArg)
19580             {
19581                 // We can likely handle this case by grabbing the argument passed to
19582                 // the newobj in the box. But defer for now.
19583                 JITDUMP("Found unboxed entry point, but it needs method table arg, deferring\n");
19584             }
19585             else
19586             {
19587                 JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
19588                 GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
19589
19590                 if (localCopyThis != nullptr)
19591                 {
19592                     JITDUMP("Success! invoking unboxed entry point on local copy\n");
19593                     call->gtCallObjp    = localCopyThis;
19594                     call->gtCallMethHnd = unboxedEntryMethod;
19595                     derivedMethod       = unboxedEntryMethod;
19596                 }
19597                 else
19598                 {
19599                     JITDUMP("Sorry, failed to undo the box\n");
19600                 }
19601             }
19602         }
19603         else
19604         {
19605             // Many of the low-level methods on value classes won't have unboxed entries,
19606             // as they need access to the type of the object.
19607             //
19608             // Note this may be a cue for us to stack allocate the boxed object, since
19609             // we probably know that these objects don't escape.
19610             JITDUMP("Sorry, failed to find unboxed entry point\n");
19611         }
19612     }
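    // Illustration only (not CoreCLR code): the box rewrite attempted above is, conceptually,
    //
    //   before:  tmpBox  = box(V);   call derivedMethod(tmpBox, args...)
    //   after:   tmpCopy = V;        call unboxedEntryMethod(&tmpCopy, args...)
    //
    // i.e. the boxed object is replaced with a local copy of the value, and the call is
    // redirected to the unboxed entry point, which takes a byref to the value instead of
    // an object reference.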
19613
19614     // Fetch the class that introduced the derived method.
19615     //
19616     // Note this may not equal objClass, if there is a
19617     // final method that objClass inherits.
19618     CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
19619
19620     // Need to update call info too. This is fragile
19621     // but hopefully the derived method conforms to
19622     // the base in most other ways.
19623     *method        = derivedMethod;
19624     *methodFlags   = derivedMethodAttribs;
19625     *contextHandle = MAKE_METHODCONTEXT(derivedMethod);
19626
19627     // Update context handle.
19628     if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
19629     {
19630         *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
19631     }
19632
19633 #ifdef FEATURE_READYTORUN_COMPILER
19634     if (opts.IsReadyToRun())
19635     {
19636         // For R2R, getCallInfo triggers bookkeeping on the zap
19637         // side so we need to call it here.
19638         //
19639         // First, cons up a suitable resolved token.
19640         CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
19641
19642         derivedResolvedToken.tokenScope   = info.compScopeHnd;
19643         derivedResolvedToken.tokenContext = *contextHandle;
19644         derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
19645         derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
19646         derivedResolvedToken.hClass       = derivedClass;
19647         derivedResolvedToken.hMethod      = derivedMethod;
19648
19649         // Look up the new call info.
19650         CORINFO_CALL_INFO derivedCallInfo;
19651         eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
19652
19653         // Update the call.
19654         call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
19655         call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
19656         call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
19657     }
19658 #endif // FEATURE_READYTORUN_COMPILER
19659 }
19660
19661 //------------------------------------------------------------------------
19662 // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
19663 //   to an intrinsic returns an exact type
19664 //
19665 // Arguments:
19666 //     methodHnd -- handle for the special intrinsic method
19667 //
19668 // Returns:
19669 //     Exact class handle returned by the intrinsic call, if known.
19670 //     Nullptr if not known, or not likely to lead to beneficial optimization.
19671
19672 CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
19673 {
19674     JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
19675
19676     CORINFO_CLASS_HANDLE result = nullptr;
19677
19678     // See what intrinsic we have...
19679     const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
19680     switch (ni)
19681     {
19682         case NI_System_Collections_Generic_EqualityComparer_get_Default:
19683         {
19684             // Expect one class generic parameter; figure out which it is.
19685             CORINFO_SIG_INFO sig;
19686             info.compCompHnd->getMethodSig(methodHnd, &sig);
19687             assert(sig.sigInst.classInstCount == 1);
19688             CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
19689             assert(typeHnd != nullptr);
19690
19691             // Lookup can be incorrect when we have __Canon, as it won't appear
19692             // to implement any interface types.
19693             //
19694             // And if we do not have a final type, devirt & inlining is
19695             // unlikely to result in much simplification.
19696             //
19697             // We can use CORINFO_FLG_FINAL to screen out both of these cases.
19698             const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
19699             const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
19700
19701             if (isFinalType)
19702             {
19703                 result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
19704                 JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
19705                         result != nullptr ? eeGetClassName(result) : "unknown");
19706             }
19707             else
19708             {
19709                 JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
19710             }
19711
19712             break;
19713         }
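        // Example: for a call to EqualityComparer<T>.Default where T is a final (sealed)
        // type, getDefaultEqualityComparerClass can report the exact comparer class the
        // runtime will hand back; knowing that exact type lets later Equals/GetHashCode
        // calls on the comparer be devirtualized and potentially inlined. For a non-final
        // (or __Canon) T we give up, as above.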
19714
19715         default:
19716         {
19717             JITDUMP("This special intrinsic not handled, sorry...\n");
19718             break;
19719         }
19720     }
19721
19722     return result;
19723 }
19724
19725 //------------------------------------------------------------------------
19726 // impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it.
19727 //
19728 // Arguments:
19729 //    token - init value for the allocated token.
19730 //
19731 // Return Value:
19732 //    pointer to token into jit-allocated memory.
19733 CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
19734 {
19735     CORINFO_RESOLVED_TOKEN* memory = (CORINFO_RESOLVED_TOKEN*)compGetMem(sizeof(token));
19736     *memory                        = token;
19737     return memory;
19738 }
19739
19740 //------------------------------------------------------------------------
19741 // SpillRetExprHelper: iterate through the call's argument trees and spill any ret_exprs to local variables.
19742 //
19743 class SpillRetExprHelper
19744 {
19745 public:
19746     SpillRetExprHelper(Compiler* comp) : comp(comp)
19747     {
19748     }
19749
19750     void StoreRetExprResultsInArgs(GenTreeCall* call)
19751     {
19752         GenTreePtr args = call->gtCallArgs;
19753         if (args != nullptr)
19754         {
19755             comp->fgWalkTreePre(&args, SpillRetExprVisitor, this);
19756         }
19757         GenTreePtr thisArg = call->gtCallObjp;
19758         if (thisArg != nullptr)
19759         {
19760             comp->fgWalkTreePre(&thisArg, SpillRetExprVisitor, this);
19761         }
19762     }
19763
19764 private:
19765     static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
19766     {
19767         assert((pTree != nullptr) && (*pTree != nullptr));
19768         GenTreePtr tree = *pTree;
19769         if ((tree->gtFlags & GTF_CALL) == 0)
19770         {
19771             // Trees with ret_expr are marked as GTF_CALL.
19772             return Compiler::WALK_SKIP_SUBTREES;
19773         }
19774         if (tree->OperGet() == GT_RET_EXPR)
19775         {
19776             SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
19777             walker->StoreRetExprAsLocalVar(pTree);
19778         }
19779         return Compiler::WALK_CONTINUE;
19780     }
19781
19782     void StoreRetExprAsLocalVar(GenTreePtr* pRetExpr)
19783     {
19784         GenTreePtr retExpr = *pRetExpr;
19785         assert(retExpr->OperGet() == GT_RET_EXPR);
19786         JITDUMP("Store return expression %u as a local var.\n", retExpr->gtTreeID);
19787         unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
19788         comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
19789         *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
19790     }
19791
19792 private:
19793     Compiler* comp;
19794 };
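// Illustration only (rough sketch): a GT_RET_EXPR is a placeholder for the yet-to-be-
// determined result of an inline candidate and cannot be cloned. If an argument tree such as
//
//   CALL foo(GT_RET_EXPR(bar()), x)
//
// had to be duplicated, the placeholder would be duplicated with it. Spilling rewrites the
// argument to
//
//   tmp = GT_RET_EXPR(bar());   CALL foo(tmp, x)
//
// so that only the local variable reference gets cloned.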
19795
19796 //------------------------------------------------------------------------
19797 // addFatPointerCandidate: mark the call and the method as having a fat pointer candidate.
19798 //                         Spill any ret_exprs under the call node, because they can't be cloned.
19799 //
19800 // Arguments:
19801 //    call - fat calli candidate
19802 //
19803 void Compiler::addFatPointerCandidate(GenTreeCall* call)
19804 {
19805     setMethodHasFatPointer();
19806     call->SetFatPointerCandidate();
19807     SpillRetExprHelper helper(this);
19808     helper.StoreRetExprResultsInArgs(call);
19809 }
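// Illustration only (rough sketch of the later transformation, not of this method, and
// with a hypothetical helper name): a fat pointer candidate is a calli whose target may be
// either an ordinary code pointer or a "fat" pointer carrying an extra generic
// instantiation argument. The candidate is later expanded into a guarded form along the
// lines of
//
//   if (looksLikeOrdinaryPointer(fptr))
//       result = calli fptr(args...);
//   else
//       result = calli [target from fat descriptor](args..., [inst arg from fat descriptor]);
//
// Both arms need their own copy of the call and its arguments, which is why any
// GT_RET_EXPR nodes in the arguments are spilled to locals here, before that cloning
// can happen.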