src / jit / importer.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
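// Illustrative note on the macros above: Verify(cond, "msg") records (and possibly raises) a
// verification error when 'cond' is false; VerifyOrReturn additionally returns from the enclosing
// void function; VerifyOrReturnSpeculative returns false instead, raising only when 'speculative' is false.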
61
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
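// Pushes a null object reference (a zero-valued TYP_REF constant) on the stack, with verification type TI_NULL.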
148 inline void Compiler::impPushNullObjRefOnStack()
149 {
150     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
151 }
152
153 // This method gets called when we run into unverifiable code
154 // (and we are verifying the method)
155
156 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
157                                                           DEBUGARG(unsigned line))
158 {
159     // Remember that the code is not verifiable
160     // Note that the method may yet pass canSkipMethodVerification(),
161     // and so the presence of unverifiable code may not be an issue.
162     tiIsVerifiableCode = FALSE;
163
164 #ifdef DEBUG
165     const char* tail = strrchr(file, '\\');
166     if (tail)
167     {
168         file = tail + 1;
169     }
170
171     if (JitConfig.JitBreakOnUnsafeCode())
172     {
173         assert(!"Unsafe code detected");
174     }
175 #endif
176
177     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
178             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
179
180     if (verNeedsVerification() || compIsForImportOnly())
181     {
182         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
183                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
184         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
185     }
186 }
187
188 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
189                                                                     DEBUGARG(unsigned line))
190 {
191     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
192             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
193
194 #ifdef DEBUG
195     //    BreakIfDebuggerPresent();
196     if (getBreakOnBadCode())
197     {
198         assert(!"Typechecking error");
199     }
200 #endif
201
202     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
203     UNREACHABLE();
204 }
205
206 // Helper function that tells us whether the IL instruction at the given code address
207 // consumes an address from the top of the stack. We use it to avoid unnecessarily
208 // marking locals as address-taken (lvAddrTaken).
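// For example (illustrative): in the IL sequence "ldloca.0; ldfld <non-small field>", the ldfld
// consumes the address pushed by ldloca, so the local itself need not be marked as address-taken.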
209 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
210 {
211     assert(!compIsForInlining());
212
213     OPCODE opcode;
214
215     opcode = (OPCODE)getU1LittleEndian(codeAddr);
216
217     switch (opcode)
218     {
219         // case CEE_LDFLDA: We're taking this one out as if you have a sequence
220         // like
221         //
222         //          ldloca.0
223         //          ldflda whatever
224         //
225         // of a primitive-like struct, you end up after morphing with the address of a local
226         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
227         // for structs that contain other structs, which isn't a case we handle very
228         // well right now for other reasons.
229
230         case CEE_LDFLD:
231         {
232             // We won't collapse small fields. This is probably not the right place to have this
233             // check, but we're only using the function for this purpose, and it is easy to factor
234             // out if we need to do so.
235
236             CORINFO_RESOLVED_TOKEN resolvedToken;
237             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
238
239             CORINFO_CLASS_HANDLE clsHnd;
240             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
241
242             // Preserve 'small' int types
243             if (!varTypeIsSmall(lclTyp))
244             {
245                 lclTyp = genActualType(lclTyp);
246             }
247
248             if (varTypeIsSmall(lclTyp))
249             {
250                 return false;
251             }
252
253             return true;
254         }
255         default:
256             break;
257     }
258
259     return false;
260 }
261
262 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
263 {
264     pResolvedToken->tokenContext = impTokenLookupContextHandle;
265     pResolvedToken->tokenScope   = info.compScopeHnd;
266     pResolvedToken->token        = getU4LittleEndian(addr);
267     pResolvedToken->tokenType    = kind;
268
269     if (!tiVerificationNeeded)
270     {
271         info.compCompHnd->resolveToken(pResolvedToken);
272     }
273     else
274     {
275         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
276     }
277 }
278
279 /*****************************************************************************
280  *
281  *  Pop one tree from the stack.
282  */
283
284 StackEntry Compiler::impPopStack()
285 {
286     if (verCurrentState.esStackDepth == 0)
287     {
288         BADCODE("stack underflow");
289     }
290
291 #ifdef DEBUG
292 #if VERBOSE_VERIFY
293     if (VERBOSE && tiVerificationNeeded)
294     {
295         JITDUMP("\n");
296         printf(TI_DUMP_PADDING);
297         printf("About to pop from the stack: ");
298         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
299         ti.Dump();
300     }
301 #endif // VERBOSE_VERIFY
302 #endif // DEBUG
303
304     return verCurrentState.esStack[--verCurrentState.esStackDepth];
305 }
306
307 /*****************************************************************************
308  *
309  *  Peek at the n'th (0-based) tree from the top of the stack.
310  */
311
312 StackEntry& Compiler::impStackTop(unsigned n)
313 {
314     if (verCurrentState.esStackDepth <= n)
315     {
316         BADCODE("stack underflow");
317     }
318
319     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
320 }
321
322 unsigned Compiler::impStackHeight()
323 {
324     return verCurrentState.esStackDepth;
325 }
326
327 /*****************************************************************************
328  *  Some of the trees are spilled in a special way. When unspilling them, or
329  *  making a copy, these need to be handled specially. This function
330  *  enumerates the operators that are possible after spilling.
331  */
332
333 #ifdef DEBUG // only used in asserts
334 static bool impValidSpilledStackEntry(GenTree* tree)
335 {
336     if (tree->gtOper == GT_LCL_VAR)
337     {
338         return true;
339     }
340
341     if (tree->OperIsConst())
342     {
343         return true;
344     }
345
346     return false;
347 }
348 #endif
349
350 /*****************************************************************************
351  *
352  *  The following logic is used to save/restore stack contents.
353  *  If 'copy' is true, then we make a copy of the trees on the stack. These
354  *  have to all be cloneable/spilled values.
355  */
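// Note: when 'copy' is requested, every stack entry must be a GT_LCL_VAR or a constant node
// (see impValidSpilledStackEntry above); each such entry is duplicated with gtCloneExpr.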
356
357 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
358 {
359     savePtr->ssDepth = verCurrentState.esStackDepth;
360
361     if (verCurrentState.esStackDepth)
362     {
363         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
364         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
365
366         if (copy)
367         {
368             StackEntry* table = savePtr->ssTrees;
369
370             /* Make a fresh copy of all the stack entries */
371
372             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
373             {
374                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
375                 GenTree* tree     = verCurrentState.esStack[level].val;
376
377                 assert(impValidSpilledStackEntry(tree));
378
379                 switch (tree->gtOper)
380                 {
381                     case GT_CNS_INT:
382                     case GT_CNS_LNG:
383                     case GT_CNS_DBL:
384                     case GT_CNS_STR:
385                     case GT_LCL_VAR:
386                         table->val = gtCloneExpr(tree);
387                         break;
388
389                     default:
390                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
391                         break;
392                 }
393             }
394         }
395         else
396         {
397             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
398         }
399     }
400 }
401
402 void Compiler::impRestoreStackState(SavedStack* savePtr)
403 {
404     verCurrentState.esStackDepth = savePtr->ssDepth;
405
406     if (verCurrentState.esStackDepth)
407     {
408         memcpy(verCurrentState.esStack, savePtr->ssTrees,
409                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
410     }
411 }
412
413 /*****************************************************************************
414  *
415  *  Get the tree list started for a new basic block.
416  */
417 inline void Compiler::impBeginTreeList()
418 {
419     assert(impTreeList == nullptr && impTreeLast == nullptr);
420
421     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
422 }
423
424 /*****************************************************************************
425  *
426  *  Store the given start and end stmt in the given basic block. This is
427  *  mostly called by impEndTreeList(BasicBlock *block). It is called
428  *  directly only for handling CEE_LEAVEs out of finally-protected try's.
429  */
430
431 inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
432 {
433     assert(firstStmt->gtOper == GT_STMT);
434     assert(lastStmt->gtOper == GT_STMT);
435
436     /* Make the list circular, so that we can easily walk it backwards */
437
438     firstStmt->gtPrev = lastStmt;
439
440     /* Store the tree list in the basic block */
441
442     block->bbTreeList = firstStmt;
443
444     /* The block should not already be marked as imported */
445     assert((block->bbFlags & BBF_IMPORTED) == 0);
446
447     block->bbFlags |= BBF_IMPORTED;
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the current tree list in the given basic block.
453  */
454
455 inline void Compiler::impEndTreeList(BasicBlock* block)
456 {
457     assert(impTreeList->gtOper == GT_BEG_STMTS);
458
459     GenTree* firstTree = impTreeList->gtNext;
460
461     if (!firstTree)
462     {
463         /* The block should not already be marked as imported */
464         assert((block->bbFlags & BBF_IMPORTED) == 0);
465
466         // Empty block. Just mark it as imported
467         block->bbFlags |= BBF_IMPORTED;
468     }
469     else
470     {
471         // Ignore the GT_BEG_STMTS
472         assert(firstTree->gtPrev == impTreeList);
473
474         impEndTreeList(block, firstTree, impTreeLast);
475     }
476
477 #ifdef DEBUG
478     if (impLastILoffsStmt != nullptr)
479     {
480         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
481         impLastILoffsStmt                          = nullptr;
482     }
483
484     impTreeList = impTreeLast = nullptr;
485 #endif
486 }
487
488 /*****************************************************************************
489  *
490  *  Check that storing the given tree doesn't mess up the semantic order. Note
491  *  that this has only limited value as we can only check [0..chkLevel).
492  */
493
494 inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
495 {
496 #ifndef DEBUG
497     return;
498 #else
499     assert(stmt->gtOper == GT_STMT);
500
501     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
502     {
503         chkLevel = verCurrentState.esStackDepth;
504     }
505
506     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
507     {
508         return;
509     }
510
511     GenTree* tree = stmt->gtStmt.gtStmtExpr;
512
513     // Calls can only be appended if there are no trees with GTF_GLOB_EFFECT on the stack
514
515     if (tree->gtFlags & GTF_CALL)
516     {
517         for (unsigned level = 0; level < chkLevel; level++)
518         {
519             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
520         }
521     }
522
523     if (tree->gtOper == GT_ASG)
524     {
525         // For an assignment to a local variable, all references of that
526         // variable have to be spilled. If it is aliased, all calls and
527         // indirect accesses have to be spilled
528
529         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
530         {
531             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
532             for (unsigned level = 0; level < chkLevel; level++)
533             {
534                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
535                 assert(!lvaTable[lclNum].lvAddrExposed ||
536                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
537             }
538         }
539
540         // If the access may be to global memory, all side effects have to be spilled.
541
542         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
543         {
544             for (unsigned level = 0; level < chkLevel; level++)
545             {
546                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
547             }
548         }
549     }
550 #endif
551 }
552
553 /*****************************************************************************
554  *
555  *  Append the given GT_STMT node to the current block's tree list.
556  *  [0..chkLevel) is the portion of the stack which we will check for
557  *    interference with stmt and spill if needed.
558  */
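// Note: chkLevel may also be one of the sentinels CHECK_SPILL_ALL (check the entire evaluation
// stack) or CHECK_SPILL_NONE (perform no interference checking); both are handled below.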
559
560 inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
561 {
562     assert(stmt->gtOper == GT_STMT);
563     noway_assert(impTreeLast != nullptr);
564
565     /* If the statement being appended has any side-effects, check the stack
566        to see if anything needs to be spilled to preserve correct ordering. */
567
568     GenTree* expr  = stmt->gtStmt.gtStmtExpr;
569     unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
570
571     // Assignments to (unaliased) locals don't count as a side-effect, as
572     // we handle them specially using impSpillLclRefs(). Temp locals should
573     // be fine too.
574
575     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
576         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
577     {
578         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
579         assert(flags == (op2Flags | GTF_ASG));
580         flags = op2Flags;
581     }
582
583     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
584     {
585         chkLevel = verCurrentState.esStackDepth;
586     }
587
588     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
589     {
590         assert(chkLevel <= verCurrentState.esStackDepth);
591
592         if (flags)
593         {
594             // If there is a call, we have to spill global refs
595             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
596
597             if (expr->gtOper == GT_ASG)
598             {
599                 GenTree* lhs = expr->gtGetOp1();
600                 // If we are assigning to a global ref, we have to spill global refs on stack.
601                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
602                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
603                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
604                 if (!expr->OperIsBlkOp())
605                 {
606                     // If we are assigning to a global ref, we have to spill global refs on stack
607                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
608                     {
609                         spillGlobEffects = true;
610                     }
611                 }
612                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
613                          ((lhs->OperGet() == GT_LCL_VAR) &&
614                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
615                 {
616                     spillGlobEffects = true;
617                 }
618             }
619
620             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
621         }
622         else
623         {
624             impSpillSpecialSideEff();
625         }
626     }
627
628     impAppendStmtCheck(stmt, chkLevel);
629
630     /* Point 'prev' at the previous node, so that we can walk backwards */
631
632     stmt->gtPrev = impTreeLast;
633
634     /* Append the expression statement to the list */
635
636     impTreeLast->gtNext = stmt;
637     impTreeLast         = stmt;
638
639 #ifdef FEATURE_SIMD
640     impMarkContiguousSIMDFieldAssignments(stmt);
641 #endif
642
643     /* Once we set impCurStmtOffs in an appended tree, we are ready to
644        report the following offsets. So reset impCurStmtOffs */
645
646     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
647     {
648         impCurStmtOffsSet(BAD_IL_OFFSET);
649     }
650
651 #ifdef DEBUG
652     if (impLastILoffsStmt == nullptr)
653     {
654         impLastILoffsStmt = stmt;
655     }
656
657     if (verbose)
658     {
659         printf("\n\n");
660         gtDispTree(stmt);
661     }
662 #endif
663 }
664
665 /*****************************************************************************
666  *
667  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
668  */
669
670 inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
671 {
672     assert(stmt->gtOper == GT_STMT);
673     assert(stmtBefore->gtOper == GT_STMT);
674
675     GenTree* stmtPrev  = stmtBefore->gtPrev;
676     stmt->gtPrev       = stmtPrev;
677     stmt->gtNext       = stmtBefore;
678     stmtPrev->gtNext   = stmt;
679     stmtBefore->gtPrev = stmt;
680 }
681
682 /*****************************************************************************
683  *
684  *  Append the given expression tree to the current block's tree list.
685  *  Return the newly created statement.
686  */
687
688 GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
689 {
690     assert(tree);
691
692     /* Allocate an 'expression statement' node */
693
694     GenTree* expr = gtNewStmt(tree, offset);
695
696     /* Append the statement to the current block's stmt list */
697
698     impAppendStmt(expr, chkLevel);
699
700     return expr;
701 }
702
703 /*****************************************************************************
704  *
705  *  Insert the given expression tree before GT_STMT "stmtBefore"
706  */
707
708 void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
709 {
710     assert(stmtBefore->gtOper == GT_STMT);
711
712     /* Allocate an 'expression statement' node */
713
714     GenTree* expr = gtNewStmt(tree, offset);
715
716     /* Insert the statement into the current block's stmt list, before 'stmtBefore' */
717
718     impInsertStmtBefore(expr, stmtBefore);
719 }
720
721 /*****************************************************************************
722  *
723  *  Append an assignment of the given value to a temp to the current tree list.
724  *  curLevel is the stack level for which the spill to the temp is being done.
725  */
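//
// Illustrative usage (the temp and its name here are hypothetical):
//     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("example temp"));
//     impAssignTempGen(tmpNum, val, (unsigned)CHECK_SPILL_ALL);
// which appends "tmpNum = val" to the current tree list, spilling interfering stack entries first if needed.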
726
727 void Compiler::impAssignTempGen(unsigned    tmp,
728                                 GenTree*    val,
729                                 unsigned    curLevel,
730                                 GenTree**   pAfterStmt, /* = NULL */
731                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
732                                 BasicBlock* block       /* = NULL */
733                                 )
734 {
735     GenTree* asg = gtNewTempAssign(tmp, val);
736
737     if (!asg->IsNothingNode())
738     {
739         if (pAfterStmt)
740         {
741             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
742             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
743         }
744         else
745         {
746             impAppendTree(asg, curLevel, impCurStmtOffs);
747         }
748     }
749 }
750
751 /*****************************************************************************
752  * same as above, but handle the valueclass case too
753  */
754
755 void Compiler::impAssignTempGen(unsigned             tmpNum,
756                                 GenTree*             val,
757                                 CORINFO_CLASS_HANDLE structType,
758                                 unsigned             curLevel,
759                                 GenTree**            pAfterStmt, /* = NULL */
760                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
761                                 BasicBlock*          block       /* = NULL */
762                                 )
763 {
764     GenTree* asg;
765
766     if (varTypeIsStruct(val))
767     {
768         assert(tmpNum < lvaCount);
769         assert(structType != NO_CLASS_HANDLE);
770
771         // If the method is non-verifiable, the assert may not hold,
772         // so at least skip the check when verification is turned on,
773         // since any block that tries to use the temp would have failed verification.
774         var_types varType = lvaTable[tmpNum].lvType;
775         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
776         lvaSetStruct(tmpNum, structType, false);
777
778         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
779         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
780         // that has been passed in for the value being assigned to the temp, in which case we
781         // need to set 'val' to that same type.
782         // Note also that if we always normalized the types of any node that might be a struct
783         // type, this would not be necessary - but that requires additional JIT/EE interface
784         // calls that may not actually be required - e.g. if we only access a field of a struct.
785
786         val->gtType = lvaTable[tmpNum].lvType;
787
788         GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
789         asg          = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
790     }
791     else
792     {
793         asg = gtNewTempAssign(tmpNum, val);
794     }
795
796     if (!asg->IsNothingNode())
797     {
798         if (pAfterStmt)
799         {
800             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
801             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
802         }
803         else
804         {
805             impAppendTree(asg, curLevel, impCurStmtOffs);
806         }
807     }
808 }
809
810 /*****************************************************************************
811  *
812  *  Pop the given number of values from the stack and return a list node with
813  *  their values.
814  *  The 'prefixTree' argument may optionally contain an argument
815  *  list that is prepended to the list returned from this function.
816  *
817  *  The notion of prepended is a bit misleading in that the list is backwards
818  *  from the way I would expect: The first element popped is at the end of
819  *  the returned list, and prefixTree is 'before' that, meaning closer to
820  *  the end of the list.  To get to prefixTree, you have to walk to the
821  *  end of the list.
822  *
823  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
824  *  such we reverse its meaning such that returnValue has a reversed
825  *  prefixTree at the head of the list.
826  */
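//
// Illustrative example (for an ARG_ORDER_R2L target): popping three entries a (stack top), b, c
// with a single-node prefixTree p yields the list p -> c -> b -> a, i.e. the first entry popped
// ends up at the tail of the returned list and the (reversed) prefixTree sits at its head.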
827
828 GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
829 {
830     assert(sig == nullptr || count == sig->numArgs);
831
832     CORINFO_CLASS_HANDLE structType;
833     GenTreeArgList*      treeList;
834
835     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
836     {
837         treeList = nullptr;
838     }
839     else
840     { // ARG_ORDER_L2R
841         treeList = prefixTree;
842     }
843
844     while (count--)
845     {
846         StackEntry se   = impPopStack();
847         typeInfo   ti   = se.seTypeInfo;
848         GenTree*   temp = se.val;
849
850         if (varTypeIsStruct(temp))
851         {
852             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
853             assert(ti.IsType(TI_STRUCT));
854             structType = ti.GetClassHandleForValueClass();
855 #ifdef DEBUG
856             if (verbose)
857             {
858                 printf("Calling impNormStructVal on:\n");
859                 gtDispTree(temp);
860             }
861 #endif
862             temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
863 #ifdef DEBUG
864             if (verbose)
865             {
866                 printf("resulting tree:\n");
867                 gtDispTree(temp);
868             }
869 #endif
870         }
871
872         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
873         treeList = gtNewListNode(temp, treeList);
874     }
875
876     if (sig != nullptr)
877     {
878         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
879             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
880         {
881             // Make sure that all valuetypes (including enums) that we push are loaded.
882             // This is to guarantee that if a GC is triggered from the prestub of this method,
883             // all valuetypes in the method signature are already loaded.
884             // We need to be able to find the size of the valuetypes, but we cannot
885             // do a class-load from within GC.
886             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
887         }
888
889         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
890         CORINFO_CLASS_HANDLE    argClass;
891         CORINFO_CLASS_HANDLE    argRealClass;
892         GenTreeArgList*         args;
893
894         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
895         {
896             PREFIX_ASSUME(args != nullptr);
897
898             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
899
900             // insert implied casts (from float to double or double to float)
901
902             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
903             {
904                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), false, TYP_DOUBLE);
905             }
906             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
907             {
908                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), false, TYP_FLOAT);
909             }
910
911             // insert any widening or narrowing casts for backwards compatibility
912
913             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
914
915             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
916                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
917             {
918                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
919                 // but it stopped working in Whidbey when we started passing simple valuetypes as their underlying
920                 // primitive types.
921                 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
922                 // details).
923                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
924                 {
925                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
926                 }
927
928                 // Make sure that all valuetypes (including enums) that we push are loaded.
929                 // This is to guarantee that if a GC is triggered from the prestub of this method,
930                 // all valuetypes in the method signature are already loaded.
931                 // We need to be able to find the size of the valuetypes, but we cannot
932                 // do a class-load from within GC.
933                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
934             }
935
936             argLst = info.compCompHnd->getArgNext(argLst);
937         }
938     }
939
940     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
941     {
942         // Prepend the prefixTree
943
944         // Simple in-place reversal to place treeList
945         // at the end of a reversed prefixTree
946         while (prefixTree != nullptr)
947         {
948             GenTreeArgList* next = prefixTree->Rest();
949             prefixTree->Rest()   = treeList;
950             treeList             = prefixTree;
951             prefixTree           = next;
952         }
953     }
954     return treeList;
955 }
956
957 /*****************************************************************************
958  *
959  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
960  *  The first "skipReverseCount" items are not reversed.
961  */
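//
// Illustrative example: with count = 4 and skipReverseCount = 1, an incoming list n1 -> n2 -> n3 -> n4
// is returned as n1 -> n4 -> n3 -> n2; the first node keeps its position and the remainder is reversed in place.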
962
963 GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
964
965 {
966     assert(skipReverseCount <= count);
967
968     GenTreeArgList* list = impPopList(count, sig);
969
970     // reverse the list
971     if (list == nullptr || skipReverseCount == count)
972     {
973         return list;
974     }
975
976     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
977     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
978
979     if (skipReverseCount == 0)
980     {
981         ptr = list;
982     }
983     else
984     {
985         lastSkipNode = list;
986         // Get to the first node that needs to be reversed
987         for (unsigned i = 0; i < skipReverseCount - 1; i++)
988         {
989             lastSkipNode = lastSkipNode->Rest();
990         }
991
992         PREFIX_ASSUME(lastSkipNode != nullptr);
993         ptr = lastSkipNode->Rest();
994     }
995
996     GenTreeArgList* reversedList = nullptr;
997
998     do
999     {
1000         GenTreeArgList* tmp = ptr->Rest();
1001         ptr->Rest()         = reversedList;
1002         reversedList        = ptr;
1003         ptr                 = tmp;
1004     } while (ptr != nullptr);
1005
1006     if (skipReverseCount)
1007     {
1008         lastSkipNode->Rest() = reversedList;
1009         return list;
1010     }
1011     else
1012     {
1013         return reversedList;
1014     }
1015 }
1016
1017 /*****************************************************************************
1018    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1019    class of type 'structHnd'.  It returns the tree that should be appended to the
1020    statement list that represents the assignment.
1021    Temp assignments may be appended to impTreeList if spilling is necessary.
1022    curLevel is the stack level for which a spill may be being done.
1023  */
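//
// Flow (descriptive): GT_COMMA destinations are peeled by appending their op1 side effects first;
// a self-assignment (the same lclVar as both 'dest' and 'src') degenerates to a nothing node;
// otherwise the destination is re-expressed as an address and handed to impAssignStructPtr.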
1024
1025 GenTree* Compiler::impAssignStruct(GenTree*             dest,
1026                                    GenTree*             src,
1027                                    CORINFO_CLASS_HANDLE structHnd,
1028                                    unsigned             curLevel,
1029                                    GenTree**            pAfterStmt, /* = NULL */
1030                                    BasicBlock*          block       /* = NULL */
1031                                    )
1032 {
1033     assert(varTypeIsStruct(dest));
1034
1035     while (dest->gtOper == GT_COMMA)
1036     {
1037         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1038
1039         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1040         if (pAfterStmt)
1041         {
1042             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1043         }
1044         else
1045         {
1046             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1047         }
1048
1049         // set dest to the second thing
1050         dest = dest->gtOp.gtOp2;
1051     }
1052
1053     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1054            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1055
1056     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1057         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1058     {
1059         // Make this a NOP
1060         return gtNewNothingNode();
1061     }
1062
1063     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1064     // or re-creating a Blk node if it is.
1065     GenTree* destAddr;
1066
1067     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1068     {
1069         destAddr = dest->gtOp.gtOp1;
1070     }
1071     else
1072     {
1073         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1074     }
1075
1076     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1077 }
1078
1079 /*****************************************************************************/
1080
1081 GenTree* Compiler::impAssignStructPtr(GenTree*             destAddr,
1082                                       GenTree*             src,
1083                                       CORINFO_CLASS_HANDLE structHnd,
1084                                       unsigned             curLevel,
1085                                       GenTree**            pAfterStmt, /* = NULL */
1086                                       BasicBlock*          block       /* = NULL */
1087                                       )
1088 {
1089     var_types destType;
1090     GenTree*  dest      = nullptr;
1091     unsigned  destFlags = 0;
1092
1093 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1094     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1095     // TODO-ARM-BUG: Does ARM need this?
1096     // TODO-ARM64-BUG: Does ARM64 need this?
1097     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1098            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1099            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1100            (src->TypeGet() != TYP_STRUCT &&
1101             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1102 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1103     assert(varTypeIsStruct(src));
1104
1105     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1106            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1107            src->gtOper == GT_COMMA ||
1108            (src->TypeGet() != TYP_STRUCT &&
1109             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1110 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1111     if (destAddr->OperGet() == GT_ADDR)
1112     {
1113         GenTree* destNode = destAddr->gtGetOp1();
1114         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1115         // will be morphed, don't insert an OBJ(ADDR).
1116         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1117 #ifndef LEGACY_BACKEND
1118             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1119 #endif // !LEGACY_BACKEND
1120                 )
1121         {
1122             dest = destNode;
1123         }
1124         destType = destNode->TypeGet();
1125     }
1126     else
1127     {
1128         destType = src->TypeGet();
1129     }
1130
1131     var_types asgType = src->TypeGet();
1132
1133     if (src->gtOper == GT_CALL)
1134     {
1135         if (src->AsCall()->TreatAsHasRetBufArg(this))
1136         {
1137             // Case of call returning a struct via hidden retbuf arg
1138
1139             // insert the return value buffer into the argument list as first byref parameter
1140             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1141
1142             // now returns void, not a struct
1143             src->gtType = TYP_VOID;
1144
1145             // return the morphed call node
1146             return src;
1147         }
1148         else
1149         {
1150             // Case of call returning a struct in one or more registers.
1151
1152             var_types returnType = (var_types)src->gtCall.gtReturnType;
1153
1154             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1155             src->gtType = genActualType(returnType);
1156
1157             // First we try to change this to "LclVar/LclFld = call"
1158             //
1159             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1160             {
1161                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1162                 // That is, the IR will be of the form lclVar = call for multi-reg return
1163                 //
1164                 GenTree* lcl = destAddr->gtOp.gtOp1;
1165                 if (src->AsCall()->HasMultiRegRetVal())
1166                 {
1167                     // Mark the struct LclVar as used in a MultiReg return context
1168                     //  which currently makes it non promotable.
1169                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1170                     // handle multireg returns.
1171                     lcl->gtFlags |= GTF_DONT_CSE;
1172                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1173                 }
1174                 else // The call result is not a multireg return
1175                 {
1176                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1177                     lcl->ChangeOper(GT_LCL_FLD);
1178                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1179                     lcl->gtType = src->gtType;
1180                     asgType     = src->gtType;
1181                 }
1182
1183                 dest = lcl;
1184
1185 #if defined(_TARGET_ARM_)
1186                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1187                 // but that method has not been updated to include ARM.
1188                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1189                 lcl->gtFlags |= GTF_DONT_CSE;
1190 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1191                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1192                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1193
1194                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1195                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1196                 // handle multireg returns.
1197                 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1198                 // non-multireg returns.
1199                 lcl->gtFlags |= GTF_DONT_CSE;
1200                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1201 #endif
1202             }
1203             else // we don't have a GT_ADDR of a GT_LCL_VAR
1204             {
1205                 // !!! The destination could be on stack. !!!
1206                 // This flag will let us choose the correct write barrier.
1207                 asgType   = returnType;
1208                 destFlags = GTF_IND_TGTANYWHERE;
1209             }
1210         }
1211     }
1212     else if (src->gtOper == GT_RET_EXPR)
1213     {
1214         GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1215         noway_assert(call->gtOper == GT_CALL);
1216
1217         if (call->HasRetBufArg())
1218         {
1219             // insert the return value buffer into the argument list as first byref parameter
1220             call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1221
1222             // now returns void, not a struct
1223             src->gtType  = TYP_VOID;
1224             call->gtType = TYP_VOID;
1225
1226             // We have already appended the write to 'dest' via the GT_CALL's args
1227             // So now we just return an empty node (pruning the GT_RET_EXPR)
1228             return src;
1229         }
1230         else
1231         {
1232             // Case of inline method returning a struct in one or more registers.
1233             //
1234             var_types returnType = (var_types)call->gtReturnType;
1235
1236             // We won't need a return buffer
1237             asgType      = returnType;
1238             src->gtType  = genActualType(returnType);
1239             call->gtType = src->gtType;
1240
1241             // If we've changed the type, and it no longer matches a local destination,
1242             // we must use an indirection.
1243             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1244             {
1245                 dest = nullptr;
1246             }
1247
1248             // !!! The destination could be on stack. !!!
1249             // This flag will let us choose the correct write barrier.
1250             destFlags = GTF_IND_TGTANYWHERE;
1251         }
1252     }
1253     else if (src->OperIsBlk())
1254     {
1255         asgType = impNormStructType(structHnd);
1256         if (src->gtOper == GT_OBJ)
1257         {
1258             assert(src->gtObj.gtClass == structHnd);
1259         }
1260     }
1261     else if (src->gtOper == GT_INDEX)
1262     {
1263         asgType = impNormStructType(structHnd);
1264         assert(src->gtIndex.gtStructElemClass == structHnd);
1265     }
1266     else if (src->gtOper == GT_MKREFANY)
1267     {
1268         // Since we are assigning the result of a GT_MKREFANY,
1269         // "destAddr" must point to a refany.
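        // A refany (TypedReference) is laid out as two pointer-sized fields: the data pointer at
        // offset 0 (asserted below via offsetof(CORINFO_RefAny, dataPtr)) followed by the type field,
        // so the assignment below is split into two stores, one per field.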
1270
1271         GenTree* destAddrClone;
1272         destAddr =
1273             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1274
1275         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1276         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1277         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1278         GenTree*       ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1279         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1280         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1281         GenTree* typeSlot =
1282             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1283
1284         // append the assign of the pointer value
1285         GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1286         if (pAfterStmt)
1287         {
1288             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1289         }
1290         else
1291         {
1292             impAppendTree(asg, curLevel, impCurStmtOffs);
1293         }
1294
1295         // return the assign of the type value, to be appended
1296         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1297     }
1298     else if (src->gtOper == GT_COMMA)
1299     {
1300         // The second thing is the struct or its address.
1301         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1302         if (pAfterStmt)
1303         {
1304             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1305         }
1306         else
1307         {
1308             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1309         }
1310
1311         // Evaluate the second thing using recursion.
1312         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1313     }
1314     else if (src->IsLocal())
1315     {
1316         asgType = src->TypeGet();
1317     }
1318     else if (asgType == TYP_STRUCT)
1319     {
1320         asgType     = impNormStructType(structHnd);
1321         src->gtType = asgType;
1322 #ifdef LEGACY_BACKEND
1323         if (asgType == TYP_STRUCT)
1324         {
1325             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1326             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1327         }
1328 #endif
1329     }
1330     if (dest == nullptr)
1331     {
1332         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1333         // if this is a known struct type.
1334         if (asgType == TYP_STRUCT)
1335         {
1336             dest = gtNewObjNode(structHnd, destAddr);
1337             gtSetObjGcInfo(dest->AsObj());
1338             // Although an obj as a call argument was always assumed to be a globRef
1339             // (which is itself overly conservative), that is not true of the operands
1340             // of a block assignment.
1341             dest->gtFlags &= ~GTF_GLOB_REF;
1342             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1343         }
1344         else if (varTypeIsStruct(asgType))
1345         {
1346             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1347         }
1348         else
1349         {
1350             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1351         }
1352     }
1353     else
1354     {
1355         dest->gtType = asgType;
1356     }
1357
1358     dest->gtFlags |= destFlags;
1359     destFlags = dest->gtFlags;
1360
1361     // return an assignment node, to be appended
1362     GenTree* asgNode = gtNewAssignNode(dest, src);
1363     gtBlockOpInit(asgNode, dest, src, false);
1364
1365     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1366     // of assignments.
1367     if ((destFlags & GTF_DONT_CSE) == 0)
1368     {
1369         dest->gtFlags &= ~(GTF_DONT_CSE);
1370     }
1371     return asgNode;
1372 }
1373
1374 /*****************************************************************************
1375    Given a struct value, and the class handle for that structure, return
1376    the expression for the address for that structure value.
1377
1378    willDeref - whether the caller guarantees that it will dereference the pointer.
1379 */
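/* For example (an illustrative sketch, not an exhaustive list of cases): if structVal is
   already a GT_OBJ and willDeref is true, the OBJ's address operand is returned directly;
   a GT_CALL or GT_RET_EXPR value is first spilled to a new temp and GT_ADDR(tmp) is
   returned; otherwise a GT_ADDR of the value itself is returned. */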
1380
1381 GenTree* Compiler::impGetStructAddr(GenTree*             structVal,
1382                                     CORINFO_CLASS_HANDLE structHnd,
1383                                     unsigned             curLevel,
1384                                     bool                 willDeref)
1385 {
1386     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1387
1388     var_types type = structVal->TypeGet();
1389
1390     genTreeOps oper = structVal->gtOper;
1391
1392     if (oper == GT_OBJ && willDeref)
1393     {
1394         assert(structVal->gtObj.gtClass == structHnd);
1395         return (structVal->gtObj.Addr());
1396     }
1397     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
1398              structVal->OperIsSimdHWIntrinsic())
1399     {
1400         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1401
1402         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1403
1404         // The 'return value' is now the temp itself
1405
1406         type          = genActualType(lvaTable[tmpNum].TypeGet());
1407         GenTree* temp = gtNewLclvNode(tmpNum, type);
1408         temp          = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1409         return temp;
1410     }
1411     else if (oper == GT_COMMA)
1412     {
1413         assert(structVal->gtOp.gtOp2->gtType == type); // Second operand is the struct
1414
1415         GenTree* oldTreeLast  = impTreeLast;
1416         structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1417         structVal->gtType     = TYP_BYREF;
1418
1419         if (oldTreeLast != impTreeLast)
1420         {
1421             // Some temp assignment statement was placed on the statement list
1422             // for Op2, but that would be out of order with op1, so we need to
1423             // spill op1 onto the statement list after whatever was last
1424             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1425             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1426             structVal->gtOp.gtOp1 = gtNewNothingNode();
1427         }
1428
1429         return (structVal);
1430     }
1431
1432     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1433 }
1434
1435 //------------------------------------------------------------------------
1436 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1437 //                    and optionally determine the GC layout of the struct.
1438 //
1439 // Arguments:
1440 //    structHnd       - The class handle for the struct type of interest.
1441 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1442 //                      into which the gcLayout will be written.
1443 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1444 //                      which will be set to the number of GC fields in the struct.
1445 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1446 //                      type, set to the SIMD base type
1447 //
1448 // Return Value:
1449 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1450 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1451 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1452 //
1453 // Assumptions:
1454 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1455 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1456 //
1457 // Notes:
1458 //    Normalizing the type involves examining the struct type to determine if it should
1459 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1460 //    for full enregistration, e.g. TYP_SIMD16.
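//
//    Example (an illustrative sketch): with FEATURE_SIMD enabled, a 16-byte struct of four
//    floats (e.g. System.Numerics.Vector4) would typically normalize to TYP_SIMD16, while a
//    struct containing an object reference stays TYP_STRUCT and reports that field through
//    gcLayout / pNumGCVars.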
1461
1462 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1463                                       BYTE*                gcLayout,
1464                                       unsigned*            pNumGCVars,
1465                                       var_types*           pSimdBaseType)
1466 {
1467     assert(structHnd != NO_CLASS_HANDLE);
1468
1469     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1470     var_types   structType  = TYP_STRUCT;
1471
1472     // On CoreCLR the check for GC pointers includes a "may" to account for the special
1473     // ByRef-like structs such as Span<T>. The "CONTAINS_STACK_PTR" flag is the relevant bit:
1474     // when it is set, the struct contains a ByRef that could be either a GC pointer or a
1475     // native pointer.
1476     const bool mayContainGCPtrs =
1477         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1478
1479 #ifdef FEATURE_SIMD
1480     // Check to see if this is a SIMD type.
1481     if (featureSIMD && !mayContainGCPtrs)
1482     {
1483         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1484
1485         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1486         {
1487             unsigned int sizeBytes;
1488             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1489             if (simdBaseType != TYP_UNKNOWN)
1490             {
1491                 assert(sizeBytes == originalSize);
1492                 structType = getSIMDTypeForSize(sizeBytes);
1493                 if (pSimdBaseType != nullptr)
1494                 {
1495                     *pSimdBaseType = simdBaseType;
1496                 }
1497                 // Also indicate that we use floating point registers.
1498                 compFloatingPointUsed = true;
1499             }
1500         }
1501     }
1502 #endif // FEATURE_SIMD
1503
1504     // Fetch GC layout info if requested
1505     if (gcLayout != nullptr)
1506     {
1507         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1508
1509         // Verify that the quick test up above via the class attributes gave a
1510         // safe view of the type's GCness.
1511         //
1512         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1513         // does not report any gc fields.
1514
1515         assert(mayContainGCPtrs || (numGCVars == 0));
1516
1517         if (pNumGCVars != nullptr)
1518         {
1519             *pNumGCVars = numGCVars;
1520         }
1521     }
1522     else
1523     {
1524         // Can't safely ask for number of GC pointers without also
1525         // asking for layout.
1526         assert(pNumGCVars == nullptr);
1527     }
1528
1529     return structType;
1530 }
1531
1532 //****************************************************************************
1533 //  Given a TYP_STRUCT value 'structVal', make sure it is 'canonical'; that is, it is
1534 //  either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1535 //
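//  For example (an illustrative sketch): a GT_LCL_VAR of struct type is wrapped as
//  OBJ(ADDR(LCL_VAR)), and a struct-returning GT_CALL is first spilled to a temp so that
//  OBJ(ADDR(tmp)) can be used in its place.
//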
1536 GenTree* Compiler::impNormStructVal(GenTree*             structVal,
1537                                     CORINFO_CLASS_HANDLE structHnd,
1538                                     unsigned             curLevel,
1539                                     bool                 forceNormalization /*=false*/)
1540 {
1541     assert(forceNormalization || varTypeIsStruct(structVal));
1542     assert(structHnd != NO_CLASS_HANDLE);
1543     var_types structType = structVal->TypeGet();
1544     bool      makeTemp   = false;
1545     if (structType == TYP_STRUCT)
1546     {
1547         structType = impNormStructType(structHnd);
1548     }
1549     bool                 alreadyNormalized = false;
1550     GenTreeLclVarCommon* structLcl         = nullptr;
1551
1552     genTreeOps oper = structVal->OperGet();
1553     switch (oper)
1554     {
1555         // GT_RETURN and GT_MKREFANY don't capture the handle.
1556         case GT_RETURN:
1557             break;
1558         case GT_MKREFANY:
1559             alreadyNormalized = true;
1560             break;
1561
1562         case GT_CALL:
1563             structVal->gtCall.gtRetClsHnd = structHnd;
1564             makeTemp                      = true;
1565             break;
1566
1567         case GT_RET_EXPR:
1568             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1569             makeTemp                         = true;
1570             break;
1571
1572         case GT_ARGPLACE:
1573             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1574             break;
1575
1576         case GT_INDEX:
1577             // This will be transformed to an OBJ later.
1578             alreadyNormalized                    = true;
1579             structVal->gtIndex.gtStructElemClass = structHnd;
1580             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1581             break;
1582
1583         case GT_FIELD:
1584             // Wrap it in a GT_OBJ.
1585             structVal->gtType = structType;
1586             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1587             break;
1588
1589         case GT_LCL_VAR:
1590         case GT_LCL_FLD:
1591             structLcl = structVal->AsLclVarCommon();
1592             // Wrap it in a GT_OBJ.
1593             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1594             __fallthrough;
1595
1596         case GT_OBJ:
1597         case GT_BLK:
1598         case GT_DYN_BLK:
1599         case GT_ASG:
1600             // These should already have the appropriate type.
1601             assert(structVal->gtType == structType);
1602             alreadyNormalized = true;
1603             break;
1604
1605         case GT_IND:
1606             assert(structVal->gtType == structType);
1607             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1608             alreadyNormalized = true;
1609             break;
1610
1611 #ifdef FEATURE_SIMD
1612         case GT_SIMD:
1613             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1614             break;
1615 #endif // FEATURE_SIMD
1616 #ifdef FEATURE_HW_INTRINSICS
1617         case GT_HWIntrinsic:
1618             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1619             break;
1620 #endif
1621
1622         case GT_COMMA:
1623         {
1624             // The second operand can be a block node, a GT_FIELD, a GT_SIMD, or a GT_COMMA node.
1625             GenTree* blockNode = structVal->gtOp.gtOp2;
1626             assert(blockNode->gtType == structType);
1627
1628             // Is this GT_COMMA(op1, GT_COMMA())?
1629             GenTree* parent = structVal;
1630             if (blockNode->OperGet() == GT_COMMA)
1631             {
1632                 // Find the last node in the comma chain.
1633                 do
1634                 {
1635                     assert(blockNode->gtType == structType);
1636                     parent    = blockNode;
1637                     blockNode = blockNode->gtOp.gtOp2;
1638                 } while (blockNode->OperGet() == GT_COMMA);
1639             }
1640
1641             if (blockNode->OperGet() == GT_FIELD)
1642             {
1643                 // If we have a GT_FIELD then wrap it in a GT_OBJ.
1644                 blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
1645             }
1646
1647 #ifdef FEATURE_SIMD
1648             if (blockNode->OperIsSIMDorSimdHWintrinsic())
1649             {
1650                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1651                 alreadyNormalized  = true;
1652             }
1653             else
1654 #endif
1655             {
1656                 noway_assert(blockNode->OperIsBlk());
1657
1658                 // Sink the GT_COMMA below the blockNode addr.
1659                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1660                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1661                 //
1662                 // In the case of a chained GT_COMMA, we sink the last
1663                 // GT_COMMA below the blockNode addr.
1664                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1665                 assert(blockNodeAddr->gtType == TYP_BYREF);
1666                 GenTree* commaNode    = parent;
1667                 commaNode->gtType     = TYP_BYREF;
1668                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1669                 blockNode->gtOp.gtOp1 = commaNode;
1670                 if (parent == structVal)
1671                 {
1672                     structVal = blockNode;
1673                 }
1674                 alreadyNormalized = true;
1675             }
1676         }
1677         break;
1678
1679         default:
1680             noway_assert(!"Unexpected node in impNormStructVal()");
1681             break;
1682     }
1683     structVal->gtType  = structType;
1684     GenTree* structObj = structVal;
1685
1686     if (!alreadyNormalized || forceNormalization)
1687     {
1688         if (makeTemp)
1689         {
1690             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1691
1692             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1693
1694             // The structVal is now the temp itself
1695
1696             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1697             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1698             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1699         }
1700         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1701         {
1702             // Wrap it in a GT_OBJ
1703             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1704         }
1705     }
1706
1707     if (structLcl != nullptr)
1708     {
1709         // An OBJ on an ADDR(LCL_VAR) can never raise an exception,
1710         // so we don't set GTF_EXCEPT here.
1711         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1712         {
1713             structObj->gtFlags &= ~GTF_GLOB_REF;
1714         }
1715     }
1716     else
1717     {
1718         // In general an OBJ is an indirection and could raise an exception.
1719         structObj->gtFlags |= GTF_EXCEPT;
1720     }
1721     return (structObj);
1722 }
1723
1724 /******************************************************************************/
1725 // Given a type token, generate code that will evaluate to the correct
1726 // handle representation of that token (type handle, field handle, or method handle)
1727 //
1728 // For most cases, the handle is determined at compile-time, and the code
1729 // generated is simply an embedded handle.
1730 //
1731 // Run-time lookup is required if the enclosing method is shared between instantiations
1732 // and the token refers to formal type parameters whose instantiation is not known
1733 // at compile-time.
1734 //
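// For example (an illustrative sketch): an ldtoken of a concrete, statically known class
// typically becomes an embedded class-handle constant, whereas the same token referring to a
// type parameter inside code shared across generic instantiations produces a dictionary
// lookup tree built by impRuntimeLookupToTree.
//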
1735 GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1736                                     BOOL*                   pRuntimeLookup /* = NULL */,
1737                                     BOOL                    mustRestoreHandle /* = FALSE */,
1738                                     BOOL                    importParent /* = FALSE */)
1739 {
1740     assert(!fgGlobalMorph);
1741
1742     CORINFO_GENERICHANDLE_RESULT embedInfo;
1743     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1744
1745     if (pRuntimeLookup)
1746     {
1747         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1748     }
1749
1750     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1751     {
1752         switch (embedInfo.handleType)
1753         {
1754             case CORINFO_HANDLETYPE_CLASS:
1755                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1756                 break;
1757
1758             case CORINFO_HANDLETYPE_METHOD:
1759                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1760                 break;
1761
1762             case CORINFO_HANDLETYPE_FIELD:
1763                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1764                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1765                 break;
1766
1767             default:
1768                 break;
1769         }
1770     }
1771
1772     // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
1773     GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1774                                       embedInfo.compileTimeHandle);
1775
1776     // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
1777     if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
1778     {
1779         result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
1780     }
1781
1782     return result;
1783 }
1784
1785 GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1786                                    CORINFO_LOOKUP*         pLookup,
1787                                    unsigned                handleFlags,
1788                                    void*                   compileTimeHandle)
1789 {
1790     if (!pLookup->lookupKind.needsRuntimeLookup)
1791     {
1792         // No runtime lookup is required.
1793         // Access is a direct or memory-indirect (of a fixed address) reference.
1794
1795         CORINFO_GENERIC_HANDLE handle       = nullptr;
1796         void*                  pIndirection = nullptr;
1797         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1798
1799         if (pLookup->constLookup.accessType == IAT_VALUE)
1800         {
1801             handle = pLookup->constLookup.handle;
1802         }
1803         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1804         {
1805             pIndirection = pLookup->constLookup.addr;
1806         }
1807         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1808     }
1809     else if (compIsForInlining())
1810     {
1811         // Don't import runtime lookups when inlining
1812         // Inlining has to be aborted in such a case
1813         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1814         return nullptr;
1815     }
1816     else
1817     {
1818         // Need to use dictionary-based access, which depends on the typeContext
1819         // that is only available at runtime, not at compile-time.
1820
1821         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1822     }
1823 }
1824
1825 #ifdef FEATURE_READYTORUN_COMPILER
1826 GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1827                                              unsigned              handleFlags,
1828                                              void*                 compileTimeHandle)
1829 {
1830     CORINFO_GENERIC_HANDLE handle       = nullptr;
1831     void*                  pIndirection = nullptr;
1832     assert(pLookup->accessType != IAT_PPVALUE);
1833
1834     if (pLookup->accessType == IAT_VALUE)
1835     {
1836         handle = pLookup->handle;
1837     }
1838     else if (pLookup->accessType == IAT_PVALUE)
1839     {
1840         pIndirection = pLookup->addr;
1841     }
1842     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1843 }
1844
1845 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1846     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1847     CorInfoHelpFunc         helper,
1848     var_types               type,
1849     GenTreeArgList*         args /* =NULL*/,
1850     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1851 {
1852     CORINFO_CONST_LOOKUP lookup;
1853     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1854     {
1855         return nullptr;
1856     }
1857
1858     GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
1859
1860     op1->setEntryPoint(lookup);
1861
1862     return op1;
1863 }
1864 #endif
1865
1866 GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1867 {
1868     GenTree* op1 = nullptr;
1869
1870     switch (pCallInfo->kind)
1871     {
1872         case CORINFO_CALL:
1873             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1874
1875 #ifdef FEATURE_READYTORUN_COMPILER
1876             if (opts.IsReadyToRun())
1877             {
1878                 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1879             }
1880             else
1881             {
1882                 op1->gtFptrVal.gtEntryPoint.addr       = nullptr;
1883                 op1->gtFptrVal.gtEntryPoint.accessType = IAT_VALUE;
1884             }
1885 #endif
1886             break;
1887
1888         case CORINFO_CALL_CODE_POINTER:
1889             if (compIsForInlining())
1890             {
1891                 // Don't import runtime lookups when inlining
1892                 // Inlining has to be aborted in such a case
1893                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1894                 return nullptr;
1895             }
1896
1897             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1898             break;
1899
1900         default:
1901             noway_assert(!"unknown call kind");
1902             break;
1903     }
1904
1905     return op1;
1906 }
1907
1908 //------------------------------------------------------------------------
1909 // getRuntimeContextTree: find pointer to context for runtime lookup.
1910 //
1911 // Arguments:
1912 //    kind - lookup kind.
1913 //
1914 // Return Value:
1915 //    Return GenTree pointer to generic shared context.
1916 //
1917 // Notes:
1918 //    Records that the generic context is used.
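//    For example (an illustrative sketch): for CORINFO_LOOKUP_THISOBJ the context is the
//    method table of 'this', loaded as an IND of the object pointer; for
//    CORINFO_LOOKUP_METHODPARAM and CORINFO_LOOKUP_CLASSPARAM it is the hidden instantiation
//    argument held in info.compTypeCtxtArg.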
1919
1920 GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1921 {
1922     GenTree* ctxTree = nullptr;
1923
1924     // Collectible types require that, for shared generic code, any use of the generic context
1925     // parameter is reported. (This is a conservative approach; we could detect some cases, in
1926     // particular when the context parameter is 'this', where the eager reporting logic is not needed.)
1927     lvaGenericsContextUseCount++;
1928
1929     if (kind == CORINFO_LOOKUP_THISOBJ)
1930     {
1931         // this Object
1932         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1933
1934         // Vtable pointer of this object
1935         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1936         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1937         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1938     }
1939     else
1940     {
1941         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1942
1943         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1944     }
1945     return ctxTree;
1946 }
1947
1948 /*****************************************************************************/
1949 /* Import a dictionary lookup to access a handle in code shared between
1950    generic instantiations.
1951    The lookup depends on the typeContext which is only available at
1952    runtime, and not at compile-time.
1953    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1954    The cases are:
1955
1956    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1957       instantiation-specific handle, and the tokens to lookup the handle.
1958    2. pLookup->indirections != CORINFO_USEHELPER :
1959       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1960           to get the handle.
1961       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1962           If it is non-NULL, it is the handle required. Else, call a helper
1963           to lookup the handle.
1964  */
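/* For example (an illustrative sketch of case 2b, testForNull == true), the importer builds
   roughly:

       handle = GT_IND(slotPtr)                      ; dereference the dictionary slot
       tmp    = QMARK(GT_NE(handle, 0),
                      nothing                        ; handle already present - keep it
                      : CALL helper(ctx, signature)) ; otherwise call the lookup helper

   and the QMARK result is spilled to a temp so it can be used like an ordinary local. */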
1965
1966 GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1967                                           CORINFO_LOOKUP*         pLookup,
1968                                           void*                   compileTimeHandle)
1969 {
1970
1971     // This method can only be called from the importer instance of the Compiler.
1972     // In other words, it cannot be called by the instance of the Compiler for the inlinee.
1973     assert(!compIsForInlining());
1974
1975     GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1976
1977     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1978     // It's available only via the run-time helper function
1979     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1980     {
1981 #ifdef FEATURE_READYTORUN_COMPILER
1982         if (opts.IsReadyToRun())
1983         {
1984             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1985                                              gtNewArgList(ctxTree), &pLookup->lookupKind);
1986         }
1987 #endif
1988         GenTree* argNode =
1989             gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
1990         GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
1991
1992         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
1993     }
1994
1995     // Slot pointer
1996     GenTree* slotPtrTree = ctxTree;
1997
1998     if (pRuntimeLookup->testForNull)
1999     {
2000         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2001                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2002     }
2003
2004     GenTree* indOffTree = nullptr;
2005
2006     // Apply the repeated indirections
2007     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2008     {
2009         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2010         {
2011             indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2012                                       nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
2013         }
2014
2015         if (i != 0)
2016         {
2017             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2018             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2019             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2020         }
2021
2022         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2023         {
2024             slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
2025         }
2026
2027         if (pRuntimeLookup->offsets[i] != 0)
2028         {
2029             slotPtrTree =
2030                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2031         }
2032     }
2033
2034     // No null test required
2035     if (!pRuntimeLookup->testForNull)
2036     {
2037         if (pRuntimeLookup->indirections == 0)
2038         {
2039             return slotPtrTree;
2040         }
2041
2042         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2043         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2044
2045         if (!pRuntimeLookup->testForFixup)
2046         {
2047             return slotPtrTree;
2048         }
2049
2050         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2051
2052         unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2053         impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2054
2055         GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2056         // downcast the pointer to a TYP_INT on 64-bit targets
2057         slot = impImplicitIorI4Cast(slot, TYP_INT);
2058         // Use a GT_AND to check for the lowest bit and indirect if it is set
2059         GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2060         GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2061         relop->gtFlags |= GTF_RELOP_QMARK;
2062
2063         // slot = GT_IND(slot - 1)
2064         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2065         GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2066         GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2067         indir->gtFlags |= GTF_IND_NONFAULTING;
2068         indir->gtFlags |= GTF_IND_INVARIANT;
2069
2070         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2071         GenTree* asg   = gtNewAssignNode(slot, indir);
2072         GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2073         GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2074         impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2075
2076         return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2077     }
2078
2079     assert(pRuntimeLookup->indirections != 0);
2080
2081     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2082
2083     // Extract the handle
2084     GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2085     handle->gtFlags |= GTF_IND_NONFAULTING;
2086
2087     GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2088                                        nullptr DEBUGARG("impRuntimeLookup typehandle"));
2089
2090     // Call to helper
2091     GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2092
2093     GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2094     GenTree*        helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2095
2096     // Check for null and possibly call helper
2097     GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2098     relop->gtFlags |= GTF_RELOP_QMARK;
2099
2100     GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2101                                                        gtNewNothingNode(), // do nothing if nonnull
2102                                                        helperCall);
2103
2104     GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2105
2106     unsigned tmp;
2107     if (handleCopy->IsLocal())
2108     {
2109         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2110     }
2111     else
2112     {
2113         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2114     }
2115
2116     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2117     return gtNewLclvNode(tmp, TYP_I_IMPL);
2118 }
2119
2120 /******************************************************************************
2121  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2122  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2123  *     else, grab a new temp.
2124  *  For structs (which can be pushed on the stack using obj, etc),
2125  *  special handling is needed
2126  */
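/* For example (an illustrative sketch): spilling a GT_ADD sitting at esStack[level] appends a
   statement of the form "tmpN = GT_ADD(...)" to the current statement list and replaces
   esStack[level].val with a GT_LCL_VAR use of tmpN. */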
2127
2128 struct RecursiveGuard
2129 {
2130 public:
2131     RecursiveGuard()
2132     {
2133         m_pAddress = nullptr;
2134     }
2135
2136     ~RecursiveGuard()
2137     {
2138         if (m_pAddress)
2139         {
2140             *m_pAddress = false;
2141         }
2142     }
2143
2144     void Init(bool* pAddress, bool bInitialize)
2145     {
2146         assert(pAddress && *pAddress == false && "Recursive guard violation");
2147         m_pAddress = pAddress;
2148
2149         if (bInitialize)
2150         {
2151             *m_pAddress = true;
2152         }
2153     }
2154
2155 protected:
2156     bool* m_pAddress;
2157 };
2158
2159 bool Compiler::impSpillStackEntry(unsigned level,
2160                                   unsigned tnum
2161 #ifdef DEBUG
2162                                   ,
2163                                   bool        bAssertOnRecursion,
2164                                   const char* reason
2165 #endif
2166                                   )
2167 {
2168
2169 #ifdef DEBUG
2170     RecursiveGuard guard;
2171     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2172 #endif
2173
2174     GenTree* tree = verCurrentState.esStack[level].val;
2175
2176     /* Allocate a temp if we haven't been asked to use a particular one */
2177
2178     if (tiVerificationNeeded)
2179     {
2180         // Ignore bad temp requests (they will happen with bad code and will be
2181         // caught when importing the destblock)
2182         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2183         {
2184             return false;
2185         }
2186     }
2187     else
2188     {
2189         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2190         {
2191             return false;
2192         }
2193     }
2194
2195     bool isNewTemp = false;
2196
2197     if (tnum == BAD_VAR_NUM)
2198     {
2199         tnum      = lvaGrabTemp(true DEBUGARG(reason));
2200         isNewTemp = true;
2201     }
2202     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2203     {
2204         // if verification is needed and tnum's type is incompatible with
2205         // type on that stack, we grab a new temp. This is safe since
2206         // we will throw a verification exception in the dest block.
2207
2208         var_types valTyp = tree->TypeGet();
2209         var_types dstTyp = lvaTable[tnum].TypeGet();
2210
2211         // if the two types are different, we return. This will only happen with bad code and will
2212         // be caught when importing the destblock. We still allow int/byref and float/double differences.
2213         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2214             !(
2215 #ifndef _TARGET_64BIT_
2216                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2217 #endif // !_TARGET_64BIT_
2218                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2219         {
2220             if (verNeedsVerification())
2221             {
2222                 return false;
2223             }
2224         }
2225     }
2226
2227     /* Assign the spilled entry to the temp */
2228     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2229
2230     // If temp is newly introduced and a ref type, grab what type info we can.
2231     if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2232     {
2233         CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2234         lvaSetClass(tnum, tree, stkHnd);
2235     }
2236
2237     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2238     var_types type                     = genActualType(lvaTable[tnum].TypeGet());
2239     GenTree*  temp                     = gtNewLclvNode(tnum, type);
2240     verCurrentState.esStack[level].val = temp;
2241
2242     return true;
2243 }
2244
2245 /*****************************************************************************
2246  *
2247  *  Ensure that the stack has only spilled values
2248  */
2249
2250 void Compiler::impSpillStackEnsure(bool spillLeaves)
2251 {
2252     assert(!spillLeaves || opts.compDbgCode);
2253
2254     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2255     {
2256         GenTree* tree = verCurrentState.esStack[level].val;
2257
2258         if (!spillLeaves && tree->OperIsLeaf())
2259         {
2260             continue;
2261         }
2262
2263         // Temps introduced by the importer itself don't need to be spilled
2264
2265         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2266
2267         if (isTempLcl)
2268         {
2269             continue;
2270         }
2271
2272         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2273     }
2274 }
2275
2276 void Compiler::impSpillEvalStack()
2277 {
2278     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2279     {
2280         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2281     }
2282 }
2283
2284 /*****************************************************************************
2285  *
2286  *  If the stack contains any trees with side effects in them, assign those
2287  *  trees to temps and append the assignments to the statement list.
2288  *  On return the stack is guaranteed to be empty.
2289  */
2290
2291 inline void Compiler::impEvalSideEffects()
2292 {
2293     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2294     verCurrentState.esStackDepth = 0;
2295 }
2296
2297 /*****************************************************************************
2298  *
2299  *  If the stack contains any trees with side effects in them, assign those
2300  *  trees to temps and replace them on the stack with refs to their temps.
2301  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2302  */
2303
2304 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2305 {
2306     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2307
2308     /* Before we make any appends to the tree list we must spill the
2309      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2310
2311     impSpillSpecialSideEff();
2312
2313     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2314     {
2315         chkLevel = verCurrentState.esStackDepth;
2316     }
2317
2318     assert(chkLevel <= verCurrentState.esStackDepth);
2319
2320     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2321
2322     for (unsigned i = 0; i < chkLevel; i++)
2323     {
2324         GenTree* tree = verCurrentState.esStack[i].val;
2325
2326         GenTree* lclVarTree;
2327
2328         if ((tree->gtFlags & spillFlags) != 0 ||
2329             (spillGlobEffects &&                        // Only consider the following when spillGlobEffects == TRUE
2330              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2331              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2332                                            // lvAddrTaken flag.
2333         {
2334             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2335         }
2336     }
2337 }
2338
2339 /*****************************************************************************
2340  *
2341  *  If the stack contains any trees with special side effects in them, assign
2342  *  those trees to temps and replace them on the stack with refs to their temps.
2343  */
2344
2345 inline void Compiler::impSpillSpecialSideEff()
2346 {
2347     // Only exception objects need to be carefully handled
2348
2349     if (!compCurBB->bbCatchTyp)
2350     {
2351         return;
2352     }
2353
2354     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2355     {
2356         GenTree* tree = verCurrentState.esStack[level].val;
2357         // If there is an exception object anywhere in the subtree, spill this stack entry.
2358         if (gtHasCatchArg(tree))
2359         {
2360             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2361         }
2362     }
2363 }
2364
2365 /*****************************************************************************
2366  *
2367  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2368  */
2369
2370 void Compiler::impSpillValueClasses()
2371 {
2372     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2373     {
2374         GenTree* tree = verCurrentState.esStack[level].val;
2375
2376         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2377         {
2378             // Tree walk was aborted, which means that we found a
2379             // value class on the stack.  Need to spill that
2380             // stack entry.
2381
2382             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2383         }
2384     }
2385 }
2386
2387 /*****************************************************************************
2388  *
2389  *  Callback that checks if a tree node is TYP_STRUCT
2390  */
2391
2392 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
2393 {
2394     fgWalkResult walkResult = WALK_CONTINUE;
2395
2396     if ((*pTree)->gtType == TYP_STRUCT)
2397     {
2398         // Abort the walk and indicate that we found a value class
2399
2400         walkResult = WALK_ABORT;
2401     }
2402
2403     return walkResult;
2404 }
2405
2406 /*****************************************************************************
2407  *
2408  *  If the stack contains any trees with references to local #lclNum, assign
2409  *  those trees to temps and replace their place on the stack with refs to
2410  *  their temps.
2411  */
2412
2413 void Compiler::impSpillLclRefs(ssize_t lclNum)
2414 {
2415     /* Before we make any appends to the tree list we must spill the
2416      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2417
2418     impSpillSpecialSideEff();
2419
2420     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2421     {
2422         GenTree* tree = verCurrentState.esStack[level].val;
2423
2424         /* If the tree may throw an exception, and the block has a handler,
2425            then we need to spill assignments to the local if the local is
2426            live on entry to the handler.
2427            Just spill 'em all without considering the liveness */
2428
2429         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2430
2431         /* Skip the tree if it doesn't have an affected reference,
2432            unless xcptnCaught */
2433
2434         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2435         {
2436             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2437         }
2438     }
2439 }
2440
2441 /*****************************************************************************
2442  *
2443  *  Push catch arg onto the stack.
2444  *  If there are jumps to the beginning of the handler, insert basic block
2445  *  and spill catch arg to a temp. Update the handler block if necessary.
2446  *
2447  *  Returns the basic block of the actual handler.
2448  */
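/* For example (an illustrative sketch): if the handler's first block has more than one
   reference (or under stress), a new BBJ_NONE block is inserted in front of it that assigns
   GT_CATCH_ARG to a temp, and a GT_LCL_VAR use of that temp is pushed on the stack instead
   of the raw GT_CATCH_ARG node. */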
2449
2450 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2451 {
2452     // Do not inject the basic block twice on reimport. This should be
2453     // hit only under JIT stress. See if the block is the one we injected.
2454     // Note that EH canonicalization can inject internal blocks here. We might
2455     // be able to re-use such a block (but we don't, right now).
2456     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2457         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2458     {
2459         GenTree* tree = hndBlk->bbTreeList;
2460
2461         if (tree != nullptr && tree->gtOper == GT_STMT)
2462         {
2463             tree = tree->gtStmt.gtStmtExpr;
2464             assert(tree != nullptr);
2465
2466             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2467                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2468             {
2469                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2470
2471                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2472
2473                 return hndBlk->bbNext;
2474             }
2475         }
2476
2477         // If we get here, it must have been some other kind of internal block. It's possible that
2478         // someone prepended something to our injected block, but that's unlikely.
2479     }
2480
2481     /* Push the exception address value on the stack */
2482     GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2483
2484     /* Mark the node as having a side-effect - i.e. cannot be
2485      * moved around since it is tied to a fixed location (EAX) */
2486     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2487
2488 #if defined(JIT32_GCENCODER)
2489     const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2490 #else
2491     const bool forceInsertNewBlock                                     = compStressCompile(STRESS_CATCH_ARG, 5);
2492 #endif // defined(JIT32_GCENCODER)
2493
2494     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2495     if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2496     {
2497         if (hndBlk->bbRefs == 1)
2498         {
2499             hndBlk->bbRefs++;
2500         }
2501
2502         /* Create extra basic block for the spill */
2503         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2504         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2505         newBlk->setBBWeight(hndBlk->bbWeight);
2506         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2507
2508         /* Account for the new link we are about to create */
2509         hndBlk->bbRefs++;
2510
2511         /* Spill into a temp */
2512         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2513         lvaTable[tempNum].lvType = TYP_REF;
2514         arg                      = gtNewTempAssign(tempNum, arg);
2515
2516         hndBlk->bbStkTempsIn = tempNum;
2517
2518         /* Report the debug info. impImportBlockCode won't treat
2519          * the actual handler as an exception block and thus won't do it for us. */
2520         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2521         {
2522             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2523             arg            = gtNewStmt(arg, impCurStmtOffs);
2524         }
2525
2526         fgInsertStmtAtEnd(newBlk, arg);
2527
2528         arg = gtNewLclvNode(tempNum, TYP_REF);
2529     }
2530
2531     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2532
2533     return hndBlk;
2534 }
2535
2536 /*****************************************************************************
2537  *
2538  *  Given a tree, clone it. *pClone is set to the cloned tree.
2539  *  Returns the original tree if the cloning was easy,
2540  *   else returns the temp to which the tree had to be spilled to.
2541  *  If the tree has side-effects, it will be spilled to a temp.
2542  */
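/* For example (an illustrative sketch): cloning a GT_LCL_VAR or a small constant simply
   duplicates the node and returns the original tree, whereas a tree with global effects is
   assigned to a fresh temp and two GT_LCL_VAR uses of that temp are handed back. */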
2543
2544 GenTree* Compiler::impCloneExpr(GenTree*             tree,
2545                                 GenTree**            pClone,
2546                                 CORINFO_CLASS_HANDLE structHnd,
2547                                 unsigned             curLevel,
2548                                 GenTree** pAfterStmt DEBUGARG(const char* reason))
2549 {
2550     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2551     {
2552         GenTree* clone = gtClone(tree, true);
2553
2554         if (clone)
2555         {
2556             *pClone = clone;
2557             return tree;
2558         }
2559     }
2560
2561     /* Store the operand in a temp and return the temp */
2562
2563     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2564
2565     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2566     // return a struct type. It also may modify the struct type to a more
2567     // specialized type (e.g. a SIMD type).  So we will get the type from
2568     // the lclVar AFTER calling impAssignTempGen().
2569
2570     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2571     var_types type = genActualType(lvaTable[temp].TypeGet());
2572
2573     *pClone = gtNewLclvNode(temp, type);
2574     return gtNewLclvNode(temp, type);
2575 }
2576
2577 /*****************************************************************************
2578  * Remember the IL offset (including stack-empty info) for the trees we will
2579  * generate now.
2580  */
2581
2582 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2583 {
2584     if (compIsForInlining())
2585     {
2586         GenTree* callStmt = impInlineInfo->iciStmt;
2587         assert(callStmt->gtOper == GT_STMT);
2588         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2589     }
2590     else
2591     {
2592         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2593         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2594         impCurStmtOffs    = offs | stkBit;
2595     }
2596 }
2597
2598 /*****************************************************************************
2599  * Returns current IL offset with stack-empty and call-instruction info incorporated
2600  */
2601 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2602 {
2603     if (compIsForInlining())
2604     {
2605         return BAD_IL_OFFSET;
2606     }
2607     else
2608     {
2609         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2610         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2611         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2612         return offs | stkBit | callInstructionBit;
2613     }
2614 }
2615
2616 //------------------------------------------------------------------------
2617 // impCanSpillNow: check whether it is possible to spill all values from the evaluation stack to local variables.
2618 //
2619 // Arguments:
2620 //    prevOpcode - last importer opcode
2621 //
2622 // Return Value:
2623 //    true if it is legal, false if it could be a sequence that we do not want to divide.
2624 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2625 {
2626     // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence.
2627     // Avoid breaking up the sequence so that impInitializeArrayIntrinsic can still recognize it.
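    // A typical sequence (illustrative) that must not be broken up is:
    //     newarr <elemType>; dup; ldtoken <precomputed data field>; call RuntimeHelpers.InitializeArray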
2628     return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
2629 }
2630
2631 /*****************************************************************************
2632  *
2633  *  Remember the instr offset for the statements
2634  *
2635  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2636  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2637  *  as some of the trees corresponding to code up to impCurOpcOffs might
2638  *  still be sitting on the stack.
2639  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2640  *  This should be called when an opcode finally/explicitly causes
2641  *  impAppendTree(tree) to be called (as opposed to being called because of
2642  *  a spill caused by the opcode)
2643  */
2644
2645 #ifdef DEBUG
2646
2647 void Compiler::impNoteLastILoffs()
2648 {
2649     if (impLastILoffsStmt == nullptr)
2650     {
2651         // We should have added a statement for the current basic block
2652         // Is this assert correct ?
2653
2654         assert(impTreeLast);
2655         assert(impTreeLast->gtOper == GT_STMT);
2656
2657         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2658     }
2659     else
2660     {
2661         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2662         impLastILoffsStmt                          = nullptr;
2663     }
2664 }
2665
2666 #endif // DEBUG
2667
2668 /*****************************************************************************
2669  * We don't create any GenTree (excluding spills) for a branch.
2670  * For debugging info, we need a placeholder so that we can note
2671  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2672  */
2673
2674 void Compiler::impNoteBranchOffs()
2675 {
2676     if (opts.compDbgCode)
2677     {
2678         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2679     }
2680 }
2681
2682 /*****************************************************************************
2683  * Locate the next stmt boundary for which we need to record info.
2684  * We will have to spill the stack at such boundaries if it is not
2685  * already empty.
2686  * Returns the next stmt boundary (after the start of the block)
2687  */
2688
2689 unsigned Compiler::impInitBlockLineInfo()
2690 {
2691     /* Assume the block does not correspond with any IL offset. This prevents
2692        us from reporting extra offsets. Extra mappings can cause confusing
2693        stepping, especially if the extra mapping is a jump-target, and the
2694        debugger does not ignore extra mappings, but instead rewinds to the
2695        nearest known offset */
2696
2697     impCurStmtOffsSet(BAD_IL_OFFSET);
2698
2699     if (compIsForInlining())
2700     {
2701         return ~0;
2702     }
2703
2704     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2705
2706     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2707     {
2708         impCurStmtOffsSet(blockOffs);
2709     }
2710
2711     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2712     {
2713         impCurStmtOffsSet(blockOffs);
2714     }
2715
2716     /* Always report IL offset 0 or some tests get confused.
2717        Probably a good idea anyway. */
2718
2719     if (blockOffs == 0)
2720     {
2721         impCurStmtOffsSet(blockOffs);
2722     }
2723
2724     if (!info.compStmtOffsetsCount)
2725     {
2726         return ~0;
2727     }
2728
2729     /* Find the lowest explicit stmt boundary within the block */
2730
2731     /* Start looking at an entry that is based on our instr offset */
2732
2733     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2734
2735     if (index >= info.compStmtOffsetsCount)
2736     {
2737         index = info.compStmtOffsetsCount - 1;
2738     }
2739
2740     /* If we've guessed too far, back up */
2741
2742     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2743     {
2744         index--;
2745     }
2746
2747     /* If we guessed short, advance ahead */
2748
2749     while (info.compStmtOffsets[index] < blockOffs)
2750     {
2751         index++;
2752
2753         if (index == info.compStmtOffsetsCount)
2754         {
2755             return info.compStmtOffsetsCount;
2756         }
2757     }
2758
2759     assert(index < info.compStmtOffsetsCount);
2760
2761     if (info.compStmtOffsets[index] == blockOffs)
2762     {
2763         /* There is an explicit boundary for the start of this basic block.
2764            So we will start with bbCodeOffs. Else we will wait until we
2765            get to the next explicit boundary */
2766
2767         impCurStmtOffsSet(blockOffs);
2768
2769         index++;
2770     }
2771
2772     return index;
2773 }
2774
2775 /*****************************************************************************/
2776
2777 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2778 {
2779     switch (opcode)
2780     {
2781         case CEE_CALL:
2782         case CEE_CALLI:
2783         case CEE_CALLVIRT:
2784             return true;
2785
2786         default:
2787             return false;
2788     }
2789 }
2790
2791 /*****************************************************************************/
2792
2793 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2794 {
2795     switch (opcode)
2796     {
2797         case CEE_CALL:
2798         case CEE_CALLI:
2799         case CEE_CALLVIRT:
2800         case CEE_JMP:
2801         case CEE_NEWOBJ:
2802         case CEE_NEWARR:
2803             return true;
2804
2805         default:
2806             return false;
2807     }
2808 }
2809
2810 /*****************************************************************************/
2811
2812 // One might think it is worth caching these values, but results indicate
2813 // that it isn't.
2814 // In addition, caching them causes SuperPMI to be unable to completely
2815 // encapsulate an individual method context.
2816 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2817 {
2818     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2819     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2820     return refAnyClass;
2821 }
2822
2823 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2824 {
2825     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2826     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2827     return typeHandleClass;
2828 }
2829
2830 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2831 {
2832     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2833     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2834     return argIteratorClass;
2835 }
2836
2837 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2838 {
2839     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2840     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2841     return stringClass;
2842 }
2843
2844 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2845 {
2846     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2847     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2848     return objectClass;
2849 }
2850
2851 /*****************************************************************************
2852  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2853  *  set its type to TYP_BYREF when we create it. Whether it can be
2854  *  changed to TYP_I_IMPL is known only at the point where we use it
2855  */
2856
2857 /* static */
2858 void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
2859 {
2860     if (tree1->IsVarAddr())
2861     {
2862         tree1->gtType = TYP_I_IMPL;
2863     }
2864
2865     if (tree2 && tree2->IsVarAddr())
2866     {
2867         tree2->gtType = TYP_I_IMPL;
2868     }
2869 }
2870
2871 /*****************************************************************************
2872  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2873  *  to make that an explicit cast in our trees, so any implicit casts that
2874  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2875  *  turned into explicit casts here.
2876  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2877  */
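/*
 *  A usage sketch, borrowed from the Span<T>.get_Item expansion later in this file,
 *  where a TYP_INT index is widened to TYP_I_IMPL before being scaled by the element
 *  size:
 *
 *      GenTree* indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
 *      GenTree* mulNode     = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
 */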
2878
2879 GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
2880 {
2881     var_types currType   = genActualType(tree->gtType);
2882     var_types wantedType = genActualType(dstTyp);
2883
2884     if (wantedType != currType)
2885     {
2886         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2887         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2888         {
2889             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2890             {
2891                 tree->gtType = TYP_I_IMPL;
2892             }
2893         }
2894 #ifdef _TARGET_64BIT_
2895         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2896         {
2897             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2898             tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
2899         }
2900         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2901         {
2902             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2903             tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
2904         }
2905 #endif // _TARGET_64BIT_
2906     }
2907
2908     return tree;
2909 }
2910
2911 /*****************************************************************************
2912  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2913  *  but we want to make that an explicit cast in our trees, so any implicit casts
2914  *  that exist in the IL are turned into explicit casts here.
2915  */
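/*
 *  A minimal sketch of the effect (tree names are illustrative only): a TYP_FLOAT
 *  tree that must be consumed as TYP_DOUBLE gets wrapped in an explicit cast:
 *
 *      tree = impImplicitR4orR8Cast(tree, TYP_DOUBLE);
 *      // equivalent to: tree = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE)
 */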
2916
2917 GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
2918 {
2919 #ifndef LEGACY_BACKEND
2920     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2921     {
2922         tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
2923     }
2924 #endif // !LEGACY_BACKEND
2925
2926     return tree;
2927 }
2928
2929 //------------------------------------------------------------------------
2930 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2931 //    with a GT_COPYBLK node.
2932 //
2933 // Arguments:
2934 //    sig - The InitializeArray signature.
2935 //
2936 // Return Value:
2937 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2938 //    nullptr otherwise.
2939 //
2940 // Notes:
2941 //    The function recognizes the following IL pattern:
2942 //      ldc <length> or a list of ldc <lower bound>/<length>
2943 //      newarr or newobj
2944 //      dup
2945 //      ldtoken <field handle>
2946 //      call InitializeArray
2947 //    The lower bounds need not be constant except when the array rank is 1.
2948 //    The function recognizes all kinds of arrays thus enabling a small runtime
2949 //    such as CoreRT to skip providing an implementation for InitializeArray.
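//
//    As an illustrative example (typical compiler output, not something this
//    function depends on), a C# initializer such as
//        static readonly int[] Primes = { 2, 3, 5, 7 };
//    is usually emitted as: ldc.i4.4; newarr int32; dup; ldtoken <data field>;
//    call InitializeArray -- i.e. exactly the pattern listed above.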
2950
2951 GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2952 {
2953     assert(sig->numArgs == 2);
2954
2955     GenTree* fieldTokenNode = impStackTop(0).val;
2956     GenTree* arrayLocalNode = impStackTop(1).val;
2957
2958     //
2959     // Verify that the field token is known and valid.  Note that it's also
2960     // possible for the token to come from reflection, in which case we cannot do
2961     // the optimization and must therefore revert to calling the helper.  You can
2962     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2963     //
2964
2965     // Check to see if the ldtoken helper call is what we see here.
2966     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2967         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2968     {
2969         return nullptr;
2970     }
2971
2972     // Strip helper call away
2973     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2974
2975     if (fieldTokenNode->gtOper == GT_IND)
2976     {
2977         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2978     }
2979
2980     // Check for constant
2981     if (fieldTokenNode->gtOper != GT_CNS_INT)
2982     {
2983         return nullptr;
2984     }
2985
2986     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2987     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2988     {
2989         return nullptr;
2990     }
2991
2992     //
2993     // We need to get the number of elements in the array and the size of each element.
2994     // We verify that the newarr statement is exactly what we expect it to be.
2995     // If it's not, then we just return nullptr and don't optimize this call
2996     //
2997
2998     //
2999     // It is possible that we don't have any statements in the block yet
3000     //
3001     if (impTreeLast->gtOper != GT_STMT)
3002     {
3003         assert(impTreeLast->gtOper == GT_BEG_STMTS);
3004         return nullptr;
3005     }
3006
3007     //
3008     // We start by looking at the last statement, making sure it's an assignment, and
3009     // that the target of the assignment is the array passed to InitializeArray.
3010     //
3011     GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
3012     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
3013         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
3014         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
3015     {
3016         return nullptr;
3017     }
3018
3019     //
3020     // Make sure that the object being assigned is a helper call.
3021     //
3022
3023     GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
3024     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
3025     {
3026         return nullptr;
3027     }
3028
3029     //
3030     // Verify that it is one of the new array helpers.
3031     //
3032
3033     bool isMDArray = false;
3034
3035     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
3036         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
3037         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3038         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3039 #ifdef FEATURE_READYTORUN_COMPILER
3040         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3041         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3042 #endif
3043             )
3044     {
3045         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3046         {
3047             return nullptr;
3048         }
3049
3050         isMDArray = true;
3051     }
3052
3053     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3054
3055     //
3056     // Make sure we found a compile time handle to the array
3057     //
3058
3059     if (!arrayClsHnd)
3060     {
3061         return nullptr;
3062     }
3063
3064     unsigned rank = 0;
3065     S_UINT32 numElements;
3066
3067     if (isMDArray)
3068     {
3069         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3070
3071         if (rank == 0)
3072         {
3073             return nullptr;
3074         }
3075
3076         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3077         assert(tokenArg != nullptr);
3078         GenTreeArgList* numArgsArg = tokenArg->Rest();
3079         assert(numArgsArg != nullptr);
3080         GenTreeArgList* argsArg = numArgsArg->Rest();
3081         assert(argsArg != nullptr);
3082
3083         //
3084         // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3085         // so at least one length must be present and the rank can't exceed 32 so there can
3086         // be at most 64 arguments - 32 lengths and 32 lower bounds.
3087         //
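        // For example (array shapes assumed for illustration): "new int[2, 3]" passes
        // numArgs == 2 (lengths only), while creating the same rank-2 array with
        // explicit lower bounds passes numArgs == 4 (a lower bound/length pair per
        // dimension).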
3088
3089         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3090             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3091         {
3092             return nullptr;
3093         }
3094
3095         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3096         bool     lowerBoundsSpecified;
3097
3098         if (numArgs == rank * 2)
3099         {
3100             lowerBoundsSpecified = true;
3101         }
3102         else if (numArgs == rank)
3103         {
3104             lowerBoundsSpecified = false;
3105
3106             //
3107             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3108             // an SDArray. Note that even if a lower bound is specified it can be 0 and then
3109             // we get an SDArray as well; see the for loop below.
3110             //
3111
3112             if (rank == 1)
3113             {
3114                 isMDArray = false;
3115             }
3116         }
3117         else
3118         {
3119             return nullptr;
3120         }
3121
3122         //
3123         // The rank is known to be at least 1 so we can start with numElements being 1
3124         // to avoid the need to special case the first dimension.
3125         //
3126
3127         numElements = S_UINT32(1);
3128
3129         struct Match
3130         {
3131             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3132             {
3133                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3134                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3135             }
3136
3137             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3138             {
3139                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3140                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3141                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3142             }
3143
3144             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3145             {
3146                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3147                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3148             }
3149
3150             static bool IsComma(GenTree* tree)
3151             {
3152                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3153             }
3154         };
3155
3156         unsigned argIndex = 0;
3157         GenTree* comma;
3158
3159         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3160         {
3161             if (lowerBoundsSpecified)
3162             {
3163                 //
3164                 // In general lower bounds can be ignored because they're not needed to
3165                 // calculate the total number of elements. But for single dimensional arrays
3166                 // we need to know if the lower bound is 0 because in this case the runtime
3167                 // creates an SDArray and this affects the way the array data offset is calculated.
3168                 //
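                // For instance, if the rank-1 lower bound below turns out to be the
                // constant 0, isMDArray is cleared and the data offset is later computed
                // with eeGetArrayDataOffset rather than eeGetMDArrayDataOffset.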
3169
3170                 if (rank == 1)
3171                 {
3172                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3173                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3174                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3175
3176                     if (lowerBoundNode->IsIntegralConst(0))
3177                     {
3178                         isMDArray = false;
3179                     }
3180                 }
3181
3182                 comma = comma->gtGetOp2();
3183                 argIndex++;
3184             }
3185
3186             GenTree* lengthNodeAssign = comma->gtGetOp1();
3187             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3188             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3189
3190             if (!lengthNode->IsCnsIntOrI())
3191             {
3192                 return nullptr;
3193             }
3194
3195             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3196             argIndex++;
3197         }
3198
3199         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3200
3201         if (argIndex != numArgs)
3202         {
3203             return nullptr;
3204         }
3205     }
3206     else
3207     {
3208         //
3209         // Make sure there are exactly two arguments:  the array class and
3210         // the number of elements.
3211         //
3212
3213         GenTree* arrayLengthNode;
3214
3215         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3216 #ifdef FEATURE_READYTORUN_COMPILER
3217         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3218         {
3219             // Array length is 1st argument for readytorun helper
3220             arrayLengthNode = args->Current();
3221         }
3222         else
3223 #endif
3224         {
3225             // Array length is 2nd argument for regular helper
3226             arrayLengthNode = args->Rest()->Current();
3227         }
3228
3229         //
3230         // Make sure that the number of elements looks valid.
3231         //
3232         if (arrayLengthNode->gtOper != GT_CNS_INT)
3233         {
3234             return nullptr;
3235         }
3236
3237         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3238
3239         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3240         {
3241             return nullptr;
3242         }
3243     }
3244
3245     CORINFO_CLASS_HANDLE elemClsHnd;
3246     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3247
3248     //
3249     // Note that genTypeSize will return zero for non-primitive types, which is exactly
3250     // what we want (size will then be 0, and we will catch this in the conditional below).
3251     // Note that we don't expect this to fail for valid binaries, so we assert in the
3252     // non-verification case (the verification case should not assert but rather correctly
3253     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3254     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3255     // why.
3256     //
3257
3258     S_UINT32 elemSize(genTypeSize(elementType));
3259     S_UINT32 size = elemSize * S_UINT32(numElements);
3260
3261     if (size.IsOverflow())
3262     {
3263         return nullptr;
3264     }
3265
3266     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3267     {
3268         assert(verNeedsVerification());
3269         return nullptr;
3270     }
3271
3272     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3273     if (!initData)
3274     {
3275         return nullptr;
3276     }
3277
3278     //
3279     // At this point we are ready to commit to implementing the InitializeArray
3280     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3281     // return the struct assignment node.
3282     //
3283
3284     impPopStack();
3285     impPopStack();
3286
3287     const unsigned blkSize = size.Value();
3288     unsigned       dataOffset;
3289
3290     if (isMDArray)
3291     {
3292         dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3293     }
3294     else
3295     {
3296         dataOffset = eeGetArrayDataOffset(elementType);
3297     }
3298
3299     GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3300     GenTree* blk = gtNewBlockVal(dst, blkSize);
3301     GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
3302
3303     return gtNewBlkOpNode(blk,     // dst
3304                           src,     // src
3305                           blkSize, // size
3306                           false,   // volatil
3307                           true);   // copyBlock
3308 }
3309
3310 //------------------------------------------------------------------------
3311 // impIntrinsic: possibly expand intrinsic call into alternate IR sequence
3312 //
3313 // Arguments:
3314 //    newobjThis - for constructor calls, the tree for the newly allocated object
3315 //    clsHnd - handle for the intrinsic method's class
3316 //    method - handle for the intrinsic method
3317 //    sig    - signature of the intrinsic method
3318 //    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
3319 //    memberRef - the token for the intrinsic method
3320 //    readonlyCall - true if call has a readonly prefix
3321 //    tailCall - true if call is in tail position
3322 //    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
3323 //       if call is not constrained
3324 //    constraintCallThisTransform -- this transform to apply for a constrained call
3325 //    pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
3326 //       for "traditional" jit intrinsics
3327 //    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
3328 //       that is amenable to special downstream optimization opportunities
3329 //
3330 // Returns:
3331 //    IR tree to use in place of the call, or nullptr if the jit should treat
3332 //    the intrinsic call like a normal call.
3333 //
3334 //    pIntrinsicID set to non-illegal value if the call is recognized as a
3335 //    traditional jit intrinsic, even if the intrinsic is not expanded.
3336 //
3337 //    isSpecial set true if the expansion is subject to special
3338 //    optimizations later in the jit processing
3339 //
3340 // Notes:
3341 //    On success the IR tree may be a call to a different method or an inline
3342 //    sequence. If it is a call, then the intrinsic processing here is responsible
3343 //    for handling all the special cases, as upon return to impImportCall
3344 //    expanded intrinsics bypass most of the normal call processing.
3345 //
3346 //    Intrinsics are generally not recognized in minopts and debug codegen.
3347 //
3348 //    However, certain traditional intrinsics are identified as "must expand"
3349 //    if there is no fallback implementation to invoke; these must be handled
3350 //    in all codegen modes.
3351 //
3352 //    New style intrinsics (where the fallback implementation is in IL) are
3353 //    identified as "must expand" if they are invoked from within their
3354 //    own method bodies.
3355 //
3356
3357 GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
3358                                 CORINFO_CLASS_HANDLE    clsHnd,
3359                                 CORINFO_METHOD_HANDLE   method,
3360                                 CORINFO_SIG_INFO*       sig,
3361                                 unsigned                methodFlags,
3362                                 int                     memberRef,
3363                                 bool                    readonlyCall,
3364                                 bool                    tailCall,
3365                                 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
3366                                 CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
3367                                 CorInfoIntrinsics*      pIntrinsicID,
3368                                 bool*                   isSpecialIntrinsic)
3369 {
3370     assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
3371
3372     bool              mustExpand  = false;
3373     bool              isSpecial   = false;
3374     CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
3375     NamedIntrinsic    ni          = NI_Illegal;
3376
3377     if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
3378     {
3379         intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3380     }
3381
3382     if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
3383     {
3384         // The recursive calls to Jit intrinsics are must-expand by convention.
3385         mustExpand = mustExpand || gtIsRecursiveCall(method);
3386
3387         if (intrinsicID == CORINFO_INTRINSIC_Illegal)
3388         {
3389             ni = lookupNamedIntrinsic(method);
3390
3391 #ifdef FEATURE_HW_INTRINSICS
3392             if (ni > NI_HW_INTRINSIC_START && ni < NI_HW_INTRINSIC_END)
3393             {
3394                 return impHWIntrinsic(ni, method, sig, mustExpand);
3395             }
3396 #endif // FEATURE_HW_INTRINSICS
3397         }
3398     }
3399
3400     *pIntrinsicID = intrinsicID;
3401
3402 #ifndef _TARGET_ARM_
3403     genTreeOps interlockedOperator;
3404 #endif
3405
3406     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3407     {
3408         // must be done regardless of DbgCode and MinOpts
3409         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3410     }
3411 #ifdef _TARGET_64BIT_
3412     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3413     {
3414         // must be done regardless of DbgCode and MinOpts
3415         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3416     }
3417 #else
3418     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3419 #endif
3420
3421     GenTree* retNode = nullptr;
3422
3423     // Under debug and minopts, only expand what is required.
3424     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3425     {
3426         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3427         return retNode;
3428     }
3429
3430     var_types callType = JITtype2varType(sig->retType);
3431
3432     /* First do the intrinsics which are always smaller than a call */
3433
3434     switch (intrinsicID)
3435     {
3436         GenTree* op1;
3437         GenTree* op2;
3438
3439         case CORINFO_INTRINSIC_Sin:
3440         case CORINFO_INTRINSIC_Cbrt:
3441         case CORINFO_INTRINSIC_Sqrt:
3442         case CORINFO_INTRINSIC_Abs:
3443         case CORINFO_INTRINSIC_Cos:
3444         case CORINFO_INTRINSIC_Round:
3445         case CORINFO_INTRINSIC_Cosh:
3446         case CORINFO_INTRINSIC_Sinh:
3447         case CORINFO_INTRINSIC_Tan:
3448         case CORINFO_INTRINSIC_Tanh:
3449         case CORINFO_INTRINSIC_Asin:
3450         case CORINFO_INTRINSIC_Asinh:
3451         case CORINFO_INTRINSIC_Acos:
3452         case CORINFO_INTRINSIC_Acosh:
3453         case CORINFO_INTRINSIC_Atan:
3454         case CORINFO_INTRINSIC_Atan2:
3455         case CORINFO_INTRINSIC_Atanh:
3456         case CORINFO_INTRINSIC_Log10:
3457         case CORINFO_INTRINSIC_Pow:
3458         case CORINFO_INTRINSIC_Exp:
3459         case CORINFO_INTRINSIC_Ceiling:
3460         case CORINFO_INTRINSIC_Floor:
3461             retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
3462             break;
3463
3464 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3465         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3466         case CORINFO_INTRINSIC_InterlockedAdd32:
3467             interlockedOperator = GT_LOCKADD;
3468             goto InterlockedBinOpCommon;
3469         case CORINFO_INTRINSIC_InterlockedXAdd32:
3470             interlockedOperator = GT_XADD;
3471             goto InterlockedBinOpCommon;
3472         case CORINFO_INTRINSIC_InterlockedXchg32:
3473             interlockedOperator = GT_XCHG;
3474             goto InterlockedBinOpCommon;
3475
3476 #ifdef _TARGET_64BIT_
3477         case CORINFO_INTRINSIC_InterlockedAdd64:
3478             interlockedOperator = GT_LOCKADD;
3479             goto InterlockedBinOpCommon;
3480         case CORINFO_INTRINSIC_InterlockedXAdd64:
3481             interlockedOperator = GT_XADD;
3482             goto InterlockedBinOpCommon;
3483         case CORINFO_INTRINSIC_InterlockedXchg64:
3484             interlockedOperator = GT_XCHG;
3485             goto InterlockedBinOpCommon;
3486 #endif // _TARGET_64BIT_
3487
3488         InterlockedBinOpCommon:
3489             assert(callType != TYP_STRUCT);
3490             assert(sig->numArgs == 2);
3491
3492             op2 = impPopStack().val;
3493             op1 = impPopStack().val;
3494
3495             // This creates:
3496             //   val
3497             // XAdd
3498             //   addr
3499             //     field (for example)
3500             //
3501             // In the case where the first argument is the address of a local, we might
3502             // want to make this *not* make the var address-taken -- but atomic instructions
3503             // on a local are probably pretty useless anyway, so we probably don't care.
3504
3505             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3506             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3507             retNode = op1;
3508             break;
3509 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3510
3511         case CORINFO_INTRINSIC_MemoryBarrier:
3512
3513             assert(sig->numArgs == 0);
3514
3515             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3516             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3517             retNode = op1;
3518             break;
3519
3520 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3521         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3522         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3523 #ifdef _TARGET_64BIT_
3524         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3525 #endif
3526         {
3527             assert(callType != TYP_STRUCT);
3528             assert(sig->numArgs == 3);
3529             GenTree* op3;
3530
3531             op3 = impPopStack().val; // comparand
3532             op2 = impPopStack().val; // value
3533             op1 = impPopStack().val; // location
3534
3535             GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3536
3537             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3538             retNode = node;
3539             break;
3540         }
3541 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3542
3543         case CORINFO_INTRINSIC_StringLength:
3544             op1 = impPopStack().val;
3545             if (!opts.MinOpts() && !opts.compDbgCode)
3546             {
3547                 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3548                 op1                   = arrLen;
3549             }
3550             else
3551             {
3552                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3553                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3554                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3555                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3556             }
3557
3558             // Getting the length of a null string should throw
3559             op1->gtFlags |= GTF_EXCEPT;
3560
3561             retNode = op1;
3562             break;
3563
3564         case CORINFO_INTRINSIC_StringGetChar:
3565             op2 = impPopStack().val;
3566             op1 = impPopStack().val;
3567             op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
3568             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3569             retNode = op1;
3570             break;
3571
3572         case CORINFO_INTRINSIC_InitializeArray:
3573             retNode = impInitializeArrayIntrinsic(sig);
3574             break;
3575
3576         case CORINFO_INTRINSIC_Array_Address:
3577         case CORINFO_INTRINSIC_Array_Get:
3578         case CORINFO_INTRINSIC_Array_Set:
3579             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3580             break;
3581
3582         case CORINFO_INTRINSIC_GetTypeFromHandle:
3583             op1 = impStackTop(0).val;
3584             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3585                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3586             {
3587                 op1 = impPopStack().val;
3588                 // Change call to return RuntimeType directly.
3589                 op1->gtType = TYP_REF;
3590                 retNode     = op1;
3591             }
3592             // Call the regular function.
3593             break;
3594
3595         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3596             op1 = impStackTop(0).val;
3597             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3598                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3599             {
3600                 // Old tree
3601                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3602                 //
3603                 // New tree
3604                 // TreeToGetNativeTypeHandle
3605
3606                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3607                 // to that helper.
3608
3609                 op1 = impPopStack().val;
3610
3611                 // Get native TypeHandle argument to old helper
3612                 op1 = op1->gtCall.gtCallArgs;
3613                 assert(op1->OperIsList());
3614                 assert(op1->gtOp.gtOp2 == nullptr);
3615                 op1     = op1->gtOp.gtOp1;
3616                 retNode = op1;
3617             }
3618             // Call the regular function.
3619             break;
3620
3621 #ifndef LEGACY_BACKEND
3622         case CORINFO_INTRINSIC_Object_GetType:
3623         {
3624             JITDUMP("\n impIntrinsic: call to Object.GetType\n");
3625             op1 = impStackTop(0).val;
3626
3627             // If we're calling GetType on a boxed value, just get the type directly.
3628             if (op1->IsBoxedValue())
3629             {
3630                 JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
3631
3632                 // Try and clean up the box. Obtain the handle we
3633                 // were going to pass to the newobj.
3634                 GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
3635
3636                 if (boxTypeHandle != nullptr)
3637                 {
3638                     // Note we don't need to play the TYP_STRUCT games here like
3639                     // we do for LDTOKEN since the return value of this operator is Type,
3640                     // not RuntimeTypeHandle.
3641                     impPopStack();
3642                     GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
3643                     GenTree*        runtimeType =
3644                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3645                     retNode = runtimeType;
3646                 }
3647             }
3648
3649             // If we have a constrained callvirt with a "box this" transform
3650             // we know we have a value class and hence an exact type.
3651             //
3652             // If so, instead of boxing and then extracting the type, just
3653             // construct the type directly.
3654             if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
3655                 (constraintCallThisTransform == CORINFO_BOX_THIS))
3656             {
3657                 // Ensure this is one of the simple box cases (in particular, rule out nullables).
3658                 const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
3659                 const bool            isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
3660
3661                 if (isSafeToOptimize)
3662                 {
3663                     JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
3664                     impPopStack();
3665                     GenTree* typeHandleOp =
3666                         impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
3667                     if (typeHandleOp == nullptr)
3668                     {
3669                         assert(compDonotInline());
3670                         return nullptr;
3671                     }
3672                     GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
3673                     GenTree*        runtimeType =
3674                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3675                     retNode = runtimeType;
3676                 }
3677             }
3678
3679 #ifdef DEBUG
3680             if (retNode != nullptr)
3681             {
3682                 JITDUMP("Optimized result for call to GetType is\n");
3683                 if (verbose)
3684                 {
3685                     gtDispTree(retNode);
3686                 }
3687             }
3688 #endif
3689
3690             // Else expand as an intrinsic, unless the call is constrained,
3691             // in which case we defer expansion to allow impImportCall to do the
3692             // special constraint processing.
3693             if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
3694             {
3695                 JITDUMP("Expanding as special intrinsic\n");
3696                 impPopStack();
3697                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3698
3699                 // Set the CALL flag to indicate that the operator is implemented by a call.
3700                 // Set also the EXCEPTION flag because the native implementation of
3701                 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3702                 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3703                 retNode = op1;
3704                 // Might be further optimizable, so arrange to leave a mark behind
3705                 isSpecial = true;
3706             }
3707
3708             if (retNode == nullptr)
3709             {
3710                 JITDUMP("Leaving as normal call\n");
3711                 // Might be further optimizable, so arrange to leave a mark behind
3712                 isSpecial = true;
3713             }
3714
3715             break;
3716         }
3717
3718 #endif
3719         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3720         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3721         // substitution.  The parameter byref will be assigned into the newly allocated object.
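        // A sketch of the transform performed below: instead of emitting the ctor call,
        // the incoming byref argument is assigned directly into field 0 of clsHnd on the
        // newly allocated object, and the initialized struct value is pushed back onto
        // the importer's stack.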
3722         case CORINFO_INTRINSIC_ByReference_Ctor:
3723         {
3724             // Remove call to constructor and directly assign the byref passed
3725             // to the call to the first slot of the ByReference struct.
3726             op1                                    = impPopStack().val;
3727             GenTree*             thisptr           = newobjThis;
3728             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3729             GenTree*             field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3730             GenTree*             assign            = gtNewAssignNode(field, op1);
3731             GenTree*             byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3732             assert(byReferenceStruct != nullptr);
3733             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3734             retNode = assign;
3735             break;
3736         }
3737         // Implement ptr value getter for ByReference struct.
3738         case CORINFO_INTRINSIC_ByReference_Value:
3739         {
3740             op1                         = impPopStack().val;
3741             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3742             GenTree*             field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3743             retNode                     = field;
3744             break;
3745         }
3746         case CORINFO_INTRINSIC_Span_GetItem:
3747         case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3748         {
3749             // Have index and a pointer to Span<T> (s) on the stack. Expand to:
3750             //
3751             // For Span<T>
3752             //   Comma
3753             //     BoundsCheck(index, s->_length)
3754             //     s->_pointer + index * sizeof(T)
3755             //
3756             // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
3757             //
3758             // Signature should show one class type parameter, which
3759             // we need to examine.
3760             assert(sig->sigInst.classInstCount == 1);
3761             CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3762             const unsigned       elemSize    = info.compCompHnd->getClassSize(spanElemHnd);
3763             assert(elemSize > 0);
3764
3765             const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3766
3767             JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3768                     info.compCompHnd->getClassName(spanElemHnd), elemSize);
3769
3770             GenTree* index          = impPopStack().val;
3771             GenTree* ptrToSpan      = impPopStack().val;
3772             GenTree* indexClone     = nullptr;
3773             GenTree* ptrToSpanClone = nullptr;
3774
3775 #if defined(DEBUG)
3776             if (verbose)
3777             {
3778                 printf("with ptr-to-span\n");
3779                 gtDispTree(ptrToSpan);
3780                 printf("and index\n");
3781                 gtDispTree(index);
3782             }
3783 #endif // defined(DEBUG)
3784
3785             // We need to use both index and ptr-to-span twice, so clone or spill.
3786             index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3787                                  nullptr DEBUGARG("Span.get_Item index"));
3788             ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3789                                      nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3790
3791             // Bounds check
3792             CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
3793             const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3794             GenTree*             length       = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
3795             GenTree*             boundsCheck  = new (this, GT_ARR_BOUNDS_CHECK)
3796                 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3797
3798             // Element access
3799             GenTree*             indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3800             GenTree*             sizeofNode  = gtNewIconNode(elemSize);
3801             GenTree*             mulNode     = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3802             CORINFO_FIELD_HANDLE ptrHnd      = info.compCompHnd->getFieldInClass(clsHnd, 0);
3803             const unsigned       ptrOffset   = info.compCompHnd->getFieldOffset(ptrHnd);
3804             GenTree*             data        = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
3805             GenTree*             result      = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3806
3807             // Prepare result
3808             var_types resultType = JITtype2varType(sig->retType);
3809             assert(resultType == result->TypeGet());
3810             retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3811
3812             break;
3813         }
3814
3815         case CORINFO_INTRINSIC_GetRawHandle:
3816         {
3817             noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3818             CORINFO_RESOLVED_TOKEN resolvedToken;
3819             resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3820             resolvedToken.tokenScope   = info.compScopeHnd;
3821             resolvedToken.token        = memberRef;
3822             resolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
3823
3824             CORINFO_GENERICHANDLE_RESULT embedInfo;
3825             info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3826
3827             GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3828                                                  embedInfo.compileTimeHandle);
3829             if (rawHandle == nullptr)
3830             {
3831                 return nullptr;
3832             }
3833
3834             noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3835
3836             unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3837             impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3838
3839             GenTree*  lclVar     = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3840             GenTree*  lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3841             var_types resultType = JITtype2varType(sig->retType);
3842             retNode              = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3843
3844             break;
3845         }
3846
3847         case CORINFO_INTRINSIC_TypeEQ:
3848         case CORINFO_INTRINSIC_TypeNEQ:
3849         {
3850             JITDUMP("Importing Type.op_*Equality intrinsic\n");
3851             op1              = impStackTop(1).val;
3852             op2              = impStackTop(0).val;
3853             GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
3854             if (optTree != nullptr)
3855             {
3856                 // Success, clean up the evaluation stack.
3857                 impPopStack();
3858                 impPopStack();
3859
3860                 // See if we can optimize even further, to a handle compare.
3861                 optTree = gtFoldTypeCompare(optTree);
3862
3863                 // See if we can now fold a handle compare to a constant.
3864                 optTree = gtFoldExpr(optTree);
3865
3866                 retNode = optTree;
3867             }
3868             else
3869             {
3870                 // Retry optimizing these later
3871                 isSpecial = true;
3872             }
3873             break;
3874         }
3875
3876         case CORINFO_INTRINSIC_GetCurrentManagedThread:
3877         case CORINFO_INTRINSIC_GetManagedThreadId:
3878         {
3879             // Retry optimizing these during morph
3880             isSpecial = true;
3881             break;
3882         }
3883
3884         default:
3885             /* Unknown intrinsic */
3886             intrinsicID = CORINFO_INTRINSIC_Illegal;
3887             break;
3888     }
3889
3890     // Look for new-style jit intrinsics by name
3891     if (ni != NI_Illegal)
3892     {
3893         assert(retNode == nullptr);
3894         switch (ni)
3895         {
3896             case NI_System_Enum_HasFlag:
3897             {
3898                 GenTree* thisOp  = impStackTop(1).val;
3899                 GenTree* flagOp  = impStackTop(0).val;
3900                 GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
3901
3902                 if (optTree != nullptr)
3903                 {
3904                     // Optimization successful. Pop the stack for real.
3905                     impPopStack();
3906                     impPopStack();
3907                     retNode = optTree;
3908                 }
3909                 else
3910                 {
3911                     // Retry optimizing this during morph.
3912                     isSpecial = true;
3913                 }
3914
3915                 break;
3916             }
3917
3918             case NI_MathF_Round:
3919             case NI_Math_Round:
3920             {
3921                 // Math.Round and MathF.Round used to be traditional JIT intrinsics. In order
3922                 // to simplify the transition, we will just treat it as if it was still the
3923                 // old intrinsic, CORINFO_INTRINSIC_Round. This should end up flowing properly
3924                 // everywhere else.
3925
3926                 retNode = impMathIntrinsic(method, sig, callType, CORINFO_INTRINSIC_Round, tailCall);
3927                 break;
3928             }
3929
3930             case NI_System_Collections_Generic_EqualityComparer_get_Default:
3931             {
3932                 // Flag for later handling during devirtualization.
3933                 isSpecial = true;
3934                 break;
3935             }
3936
3937             default:
3938                 break;
3939         }
3940     }
3941
3942     if (mustExpand)
3943     {
3944         if (retNode == nullptr)
3945         {
3946             NO_WAY("JIT must expand the intrinsic!");
3947         }
3948     }
3949
3950     // Optionally report if this intrinsic is special
3951     // (that is, potentially re-optimizable during morph).
3952     if (isSpecialIntrinsic != nullptr)
3953     {
3954         *isSpecialIntrinsic = isSpecial;
3955     }
3956
3957     return retNode;
3958 }
3959
3960 GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
3961                                     CORINFO_SIG_INFO*     sig,
3962                                     var_types             callType,
3963                                     CorInfoIntrinsics     intrinsicID,
3964                                     bool                  tailCall)
3965 {
3966     GenTree* op1;
3967     GenTree* op2;
3968
3969     assert(callType != TYP_STRUCT);
3970     assert((intrinsicID == CORINFO_INTRINSIC_Sin) || intrinsicID == CORINFO_INTRINSIC_Cbrt ||
3971            (intrinsicID == CORINFO_INTRINSIC_Sqrt) || (intrinsicID == CORINFO_INTRINSIC_Abs) ||
3972            (intrinsicID == CORINFO_INTRINSIC_Cos) || (intrinsicID == CORINFO_INTRINSIC_Round) ||
3973            (intrinsicID == CORINFO_INTRINSIC_Cosh) || (intrinsicID == CORINFO_INTRINSIC_Sinh) ||
3974            (intrinsicID == CORINFO_INTRINSIC_Tan) || (intrinsicID == CORINFO_INTRINSIC_Tanh) ||
3975            (intrinsicID == CORINFO_INTRINSIC_Asin) || (intrinsicID == CORINFO_INTRINSIC_Asinh) ||
3976            (intrinsicID == CORINFO_INTRINSIC_Acos) || (intrinsicID == CORINFO_INTRINSIC_Acosh) ||
3977            (intrinsicID == CORINFO_INTRINSIC_Atan) || (intrinsicID == CORINFO_INTRINSIC_Atan2) ||
3978            (intrinsicID == CORINFO_INTRINSIC_Atanh) || (intrinsicID == CORINFO_INTRINSIC_Log10) ||
3979            (intrinsicID == CORINFO_INTRINSIC_Pow) || (intrinsicID == CORINFO_INTRINSIC_Exp) ||
3980            (intrinsicID == CORINFO_INTRINSIC_Ceiling) || (intrinsicID == CORINFO_INTRINSIC_Floor));
3981
3982     op1 = nullptr;
3983
3984 #if defined(LEGACY_BACKEND)
3985     if (IsTargetIntrinsic(intrinsicID))
3986 #elif !defined(_TARGET_X86_)
3987     // Intrinsics that are not implemented directly by target instructions will
3988     // be re-materialized as user calls in rationalizer. For prefixed tail calls,
3989     // don't do this optimization, because
3990     //  a) For backward compatibility reasons on desktop .NET 4.6 / 4.6.1
3991     //  b) It would be a non-trivial task, or too late, to re-materialize a surviving
3992     //     tail-prefixed GT_INTRINSIC as a tail call in rationalizer.
3993     if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3994 #else
3995     // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3996     // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3997     // code generation for certain EH constructs.
3998     if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3999 #endif
4000     {
4001         switch (sig->numArgs)
4002         {
4003             case 1:
4004                 op1 = impPopStack().val;
4005
4006 #if FEATURE_X87_DOUBLES
4007
4008                 // X87 stack doesn't differentiate between float/double
4009                 // so it doesn't need a cast, but everybody else does
4010                 // Just double check it is at least a FP type
4011                 noway_assert(varTypeIsFloating(op1));
4012
4013 #else // FEATURE_X87_DOUBLES
4014                 assert(varTypeIsFloating(op1));
4015
4016                 if (op1->TypeGet() != callType)
4017                 {
4018                     op1 = gtNewCastNode(callType, op1, false, callType);
4019                 }
4020
4021 #endif // FEATURE_X87_DOUBLES
4022
4023                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
4024                 break;
4025
4026             case 2:
4027                 op2 = impPopStack().val;
4028                 op1 = impPopStack().val;
4029
4030 #if FEATURE_X87_DOUBLES
4031
4032                 // X87 stack doesn't differentiate between float/double
4033                 // so it doesn't need a cast, but everybody else does
4034                 // Just double check it is at least a FP type
4035                 noway_assert(varTypeIsFloating(op2));
4036                 noway_assert(varTypeIsFloating(op1));
4037
4038 #else // FEATURE_X87_DOUBLES
4039                 assert(varTypeIsFloating(op1));
4040                 assert(varTypeIsFloating(op2));
4041
4042                 if (op2->TypeGet() != callType)
4043                 {
4044                     op2 = gtNewCastNode(callType, op2, false, callType);
4045                 }
4046                 if (op1->TypeGet() != callType)
4047                 {
4048                     op1 = gtNewCastNode(callType, op1, false, callType);
4049                 }
4050
4051 #endif // FEATURE_X87_DOUBLES
4052
4053                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
4054                 break;
4055
4056             default:
4057                 NO_WAY("Unsupported number of args for Math Intrinsic");
4058         }
4059
4060 #ifndef LEGACY_BACKEND
4061         if (IsIntrinsicImplementedByUserCall(intrinsicID))
4062         {
4063             op1->gtFlags |= GTF_CALL;
4064         }
4065 #endif
4066     }
4067
4068     return op1;
4069 }
4070
4071 //------------------------------------------------------------------------
4072 // lookupNamedIntrinsic: map method to jit named intrinsic value
4073 //
4074 // Arguments:
4075 //    method -- method handle for method
4076 //
4077 // Return Value:
4078 //    Id for the named intrinsic, or Illegal if none.
4079 //
4080 // Notes:
4081 //    method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
4082 //    otherwise it is not a named jit intrinsic.
4083 //
4084
4085 NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
4086 {
4087     NamedIntrinsic result = NI_Illegal;
4088
4089     const char* className     = nullptr;
4090     const char* namespaceName = nullptr;
4091     const char* methodName    = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
4092
4093     if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
4094     {
4095         return result;
4096     }
4097
4098     if (strcmp(namespaceName, "System") == 0)
4099     {
4100         if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
4101         {
4102             result = NI_System_Enum_HasFlag;
4103         }
4104         else if ((strcmp(className, "MathF") == 0) && (strcmp(methodName, "Round") == 0))
4105         {
4106             result = NI_MathF_Round;
4107         }
4108         else if ((strcmp(className, "Math") == 0) && (strcmp(methodName, "Round") == 0))
4109         {
4110             result = NI_Math_Round;
4111         }
4112     }
4113     else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
4114     {
4115         if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
4116         {
4117             result = NI_System_Collections_Generic_EqualityComparer_get_Default;
4118         }
4119     }
4120
4121 #ifdef FEATURE_HW_INTRINSICS
4122 #if defined(_TARGET_XARCH_)
4123     if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.X86") == 0)
4124     {
4125         InstructionSet isa = lookupHWIntrinsicISA(className);
4126         result             = lookupHWIntrinsic(methodName, isa);
4127     }
4128 #elif defined(_TARGET_ARM64_)
4129     if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.Arm.Arm64") == 0)
4130     {
4131         result = lookupHWIntrinsic(className, methodName);
4132     }
4133 #else // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4134 #error Unsupported platform
4135 #endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4136 #endif // FEATURE_HW_INTRINSICS
4137     return result;
4138 }
4139
4140 /*****************************************************************************/
4141
4142 GenTree* Compiler::impArrayAccessIntrinsic(
4143     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
4144 {
4145     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
4146        the following, as it generates fatter code.
4147     */
4148
4149     if (compCodeOpt() == SMALL_CODE)
4150     {
4151         return nullptr;
4152     }
4153
4154     /* These intrinsics generate fatter (but faster) code and are only
4155        done if we don't need SMALL_CODE */
4156
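    // For Array_Set the trailing argument is the value being stored, so it does not
    // count toward the array rank.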
4157     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
4158
4159     // The rank 1 case is special because it has to handle two array formats,
4160     // so we simply do not handle that case here.
4161     if (rank > GT_ARR_MAX_RANK || rank <= 1)
4162     {
4163         return nullptr;
4164     }
4165
4166     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
4167     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
4168
4169     // For the ref case, we will only be able to inline if the types match
4170     // (the verifier checks for this; we don't care about the nonverified case)
4171     // and the type is final (so we don't need to do the cast).
4172     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
4173     {
4174         // Get the call site signature
4175         CORINFO_SIG_INFO LocalSig;
4176         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
4177         assert(LocalSig.hasThis());
4178
4179         CORINFO_CLASS_HANDLE actualElemClsHnd;
4180
4181         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4182         {
4183             // Fetch the last argument, the one that indicates the type we are setting.
4184             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
4185             for (unsigned r = 0; r < rank; r++)
4186             {
4187                 argType = info.compCompHnd->getArgNext(argType);
4188             }
4189
4190             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
4191             actualElemClsHnd = argInfo.GetClassHandle();
4192         }
4193         else
4194         {
4195             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
4196
4197             // Fetch the return type
4198             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
4199             assert(retInfo.IsByRef());
4200             actualElemClsHnd = retInfo.GetClassHandle();
4201         }
4202
4203         // if it's not final, we can't do the optimization
4204         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
4205         {
4206             return nullptr;
4207         }
4208     }
4209
4210     unsigned arrayElemSize;
4211     if (elemType == TYP_STRUCT)
4212     {
4213         assert(arrElemClsHnd);
4214
4215         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
4216     }
4217     else
4218     {
4219         arrayElemSize = genTypeSize(elemType);
4220     }
4221
4222     if ((unsigned char)arrayElemSize != arrayElemSize)
4223     {
4224         // arrayElemSize would be truncated as an unsigned char.
4225         // This means the array element is too large. Don't do the optimization.
4226         return nullptr;
4227     }
4228
4229     GenTree* val = nullptr;
4230
4231     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4232     {
4233         // Assignment of a struct is more work, and there are more gets than sets.
4234         if (elemType == TYP_STRUCT)
4235         {
4236             return nullptr;
4237         }
4238
4239         val = impPopStack().val;
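        // The value on the stack may legitimately differ from the element type in a few
        // cases (float/double mismatches and a byref value stored into an int element),
        // so the assert below tolerates those combinations.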
4240         assert(genActualType(elemType) == genActualType(val->gtType) ||
4241                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
4242                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
4243                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
4244     }
4245
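    // GenTreeArrElem stores the rank and element size as unsigned chars, so make sure
    // GT_ARR_MAX_RANK itself fits in one.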
4246     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
4247
4248     GenTree* inds[GT_ARR_MAX_RANK];
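    // Pop the indices in reverse order; the last index is on top of the stack, so
    // inds[] ends up in left-to-right source order.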
4249     for (unsigned k = rank; k > 0; k--)
4250     {
4251         inds[k - 1] = impPopStack().val;
4252     }
4253
4254     GenTree* arr = impPopStack().val;
4255     assert(arr->gtType == TYP_REF);
4256
4257     GenTree* arrElem =
4258         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
4259                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
4260
4261     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
4262     {
4263         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
4264     }
4265
4266     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4267     {
4268         assert(val != nullptr);
4269         return gtNewAssignNode(arrElem, val);
4270     }
4271     else
4272     {
4273         return arrElem;
4274     }
4275 }
4276
4277 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
4278 {
4279     unsigned i;
4280
4281     // do some basic checks first
4282     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
4283     {
4284         return FALSE;
4285     }
4286
4287     if (verCurrentState.esStackDepth > 0)
4288     {
4289         // merge stack types
4290         StackEntry* parentStack = block->bbStackOnEntry();
4291         StackEntry* childStack  = verCurrentState.esStack;
4292
4293         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
4294         {
4295             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
4296             {
4297                 return FALSE;
4298             }
4299         }
4300     }
4301
4302     // merge initialization status of this ptr
4303
4304     if (verTrackObjCtorInitState)
4305     {
4306         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
4307         assert(verCurrentState.thisInitialized != TIS_Bottom);
4308
4309         // If the successor block's thisInit state is unknown, copy it from the current state.
4310         if (block->bbThisOnEntry() == TIS_Bottom)
4311         {
4312             *changed = true;
4313             verSetThisInit(block, verCurrentState.thisInitialized);
4314         }
4315         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
4316         {
4317             if (block->bbThisOnEntry() != TIS_Top)
4318             {
4319                 *changed = true;
4320                 verSetThisInit(block, TIS_Top);
4321
4322                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
4323                 {
4324                     // The block is bad. Control can flow through the block to any handler that catches the
4325                     // verification exception, but the importer ignores bad blocks and therefore won't model
4326                     // this flow in the normal way. To complete the merge into the bad block, the new state
4327                     // needs to be manually pushed to the handlers that may be reached after the verification
4328                     // exception occurs.
4329                     //
4330                     // Usually, the new state was already propagated to the relevant handlers while processing
4331                     // the predecessors of the bad block. The exception is when the bad block is at the start
4332                     // of a try region, meaning it is protected by additional handlers that do not protect its
4333                     // predecessors.
4334                     //
4335                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
4336                     {
4337                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
4338                         // recursive calls back into this code path (if successors of the current bad block are
4339                         // also bad blocks).
4340                         //
4341                         ThisInitState origTIS           = verCurrentState.thisInitialized;
4342                         verCurrentState.thisInitialized = TIS_Top;
4343                         impVerifyEHBlock(block, true);
4344                         verCurrentState.thisInitialized = origTIS;
4345                     }
4346                 }
4347             }
4348         }
4349     }
4350     else
4351     {
4352         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4353     }
4354
4355     return TRUE;
4356 }
4357
4358 /*****************************************************************************
4359  * 'logMsg' is true if a log message needs to be logged. false if the caller has
4360  *   already logged it (presumably in a more detailed fashion than done here)
4361  * 'bVerificationException' is true for a verification exception, false for a
4362  *   "call unauthorized by host" exception.
4363  */
4364
4365 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4366 {
4367     block->bbJumpKind = BBJ_THROW;
4368     block->bbFlags |= BBF_FAILED_VERIFICATION;
4369
4370     impCurStmtOffsSet(block->bbCodeOffs);
4371
4372 #ifdef DEBUG
4373     // we need this since BeginTreeList asserts otherwise
4374     impTreeList = impTreeLast = nullptr;
4375     block->bbFlags &= ~BBF_IMPORTED;
4376
4377     if (logMsg)
4378     {
4379         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4380                 block->bbCodeOffs, block->bbCodeOffsEnd));
4381         if (verbose)
4382         {
4383             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4384         }
4385     }
4386
4387     if (JitConfig.DebugBreakOnVerificationFailure())
4388     {
4389         DebugBreak();
4390     }
4391 #endif
4392
4393     impBeginTreeList();
4394
4395     // if the stack is non-empty evaluate all the side-effects
4396     if (verCurrentState.esStackDepth > 0)
4397     {
4398         impEvalSideEffects();
4399     }
4400     assert(verCurrentState.esStackDepth == 0);
4401
4402     GenTree* op1 =
4403         gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4404     // verCurrentState.esStackDepth = 0;
4405     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4406
4407     // The inliner is not able to handle methods that require a throw block, so
4408     // make sure this method never gets inlined.
4409     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4410 }
4411
4412 /*****************************************************************************
4413  *
4414  */
4415 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4416
4417 {
4418     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4419     // slightly different mechanism in which it calls the JIT to perform IL verification:
4420     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4421     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4422     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4423     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
4424     // up the exception; instead it embeds a throw inside the offending basic block and lets it
4425     // fail at run time when the jitted method executes.
4426     //
4427     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4428     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
4429     // just try to find out whether to fail this method before even actually jitting it.  So, in case
4430     // we detect these two conditions, instead of generating a throw statement inside the offending
4431     // basic block, we immediately fail to JIT and notify the VM so that the IsVerifiable() predicate
4432     // returns false, making RyuJIT behave the same way JIT64 does.
4433     //
4434     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4435     // RyuJIT for the time being until we completely replace JIT64.
4436     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4437
4438     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4439     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
4440     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4441     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4442     // be turned off during importation).
4443     CLANG_FORMAT_COMMENT_ANCHOR;
4444
4445 #ifdef _TARGET_64BIT_
4446
4447 #ifdef DEBUG
4448     bool canSkipVerificationResult =
4449         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4450     assert(tiVerificationNeeded || canSkipVerificationResult);
4451 #endif // DEBUG
4452
4453     // Add the non verifiable flag to the compiler
4454     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4455     {
4456         tiIsVerifiableCode = FALSE;
4457     }
4458 #endif //_TARGET_64BIT_
4459     verResetCurrentState(block, &verCurrentState);
4460     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4461
4462 #ifdef DEBUG
4463     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4464 #endif                   // DEBUG
4465 }
4466
4467 /******************************************************************************/
4468 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4469 {
4470     assert(ciType < CORINFO_TYPE_COUNT);
4471
4472     typeInfo tiResult;
4473     switch (ciType)
4474     {
4475         case CORINFO_TYPE_STRING:
4476         case CORINFO_TYPE_CLASS:
4477             tiResult = verMakeTypeInfo(clsHnd);
4478             if (!tiResult.IsType(TI_REF))
4479             { // type must be consistent with element type
4480                 return typeInfo();
4481             }
4482             break;
4483
4484 #ifdef _TARGET_64BIT_
4485         case CORINFO_TYPE_NATIVEINT:
4486         case CORINFO_TYPE_NATIVEUINT:
4487             if (clsHnd)
4488             {
4489                 // If we have more precise information, use it
4490                 return verMakeTypeInfo(clsHnd);
4491             }
4492             else
4493             {
4494                 return typeInfo::nativeInt();
4495             }
4496             break;
4497 #endif // _TARGET_64BIT_
4498
4499         case CORINFO_TYPE_VALUECLASS:
4500         case CORINFO_TYPE_REFANY:
4501             tiResult = verMakeTypeInfo(clsHnd);
4502             // type must be consistent with element type;
4503             if (!tiResult.IsValueClass())
4504             {
4505                 return typeInfo();
4506             }
4507             break;
4508         case CORINFO_TYPE_VAR:
4509             return verMakeTypeInfo(clsHnd);
4510
4511         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4512         case CORINFO_TYPE_VOID:
4513             return typeInfo();
4514             break;
4515
4516         case CORINFO_TYPE_BYREF:
4517         {
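            // Build the type info for the pointed-to type and wrap it as a byref.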
4518             CORINFO_CLASS_HANDLE childClassHandle;
4519             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4520             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4521         }
4522         break;
4523
4524         default:
4525             if (clsHnd)
4526             { // If we have more precise information, use it
4527                 return typeInfo(TI_STRUCT, clsHnd);
4528             }
4529             else
4530             {
4531                 return typeInfo(JITtype2tiType(ciType));
4532             }
4533     }
4534     return tiResult;
4535 }
4536
4537 /******************************************************************************/
4538
4539 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4540 {
4541     if (clsHnd == nullptr)
4542     {
4543         return typeInfo();
4544     }
4545
4546     // Byrefs should only occur in method and local signatures, which are accessed
4547     // using ICorClassInfo and ICorClassInfo.getChildType.
4548     // So findClass() and getClassAttribs() should not be called for byrefs
4549
4550     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4551     {
4552         assert(!"Did findClass() return a Byref?");
4553         return typeInfo();
4554     }
4555
4556     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4557
4558     if (attribs & CORINFO_FLG_VALUECLASS)
4559     {
4560         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
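        // For primitive value classes (e.g. System.Int32) this yields the corresponding
        // primitive type; for other value classes t is CORINFO_TYPE_UNDEF and we fall
        // through to the struct/ref handling below.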
4561
4562         // Meta-data validation should ensure that CORINFO_TYPE_BYREF should
4563         // not occur here, so we may want to change this to an assert instead.
4564         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4565         {
4566             return typeInfo();
4567         }
4568
4569 #ifdef _TARGET_64BIT_
4570         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4571         {
4572             return typeInfo::nativeInt();
4573         }
4574 #endif // _TARGET_64BIT_
4575
4576         if (t != CORINFO_TYPE_UNDEF)
4577         {
4578             return (typeInfo(JITtype2tiType(t)));
4579         }
4580         else if (bashStructToRef)
4581         {
4582             return (typeInfo(TI_REF, clsHnd));
4583         }
4584         else
4585         {
4586             return (typeInfo(TI_STRUCT, clsHnd));
4587         }
4588     }
4589     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4590     {
4591         // See comment in _typeInfo.h for why we do it this way.
4592         return (typeInfo(TI_REF, clsHnd, true));
4593     }
4594     else
4595     {
4596         return (typeInfo(TI_REF, clsHnd));
4597     }
4598 }
4599
4600 /******************************************************************************/
4601 BOOL Compiler::verIsSDArray(typeInfo ti)
4602 {
4603     if (ti.IsNullObjRef())
4604     { // nulls are SD arrays
4605         return TRUE;
4606     }
4607
4608     if (!ti.IsType(TI_REF))
4609     {
4610         return FALSE;
4611     }
4612
4613     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4614     {
4615         return FALSE;
4616     }
4617     return TRUE;
4618 }
4619
4620 /******************************************************************************/
4621 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4622 /* Returns an error type if anything goes wrong */
4623
4624 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4625 {
4626     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4627
4628     if (!verIsSDArray(arrayObjectType))
4629     {
4630         return typeInfo();
4631     }
4632
4633     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4634     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4635
4636     return verMakeTypeInfo(ciType, childClassHandle);
4637 }
4638
4639 /*****************************************************************************
4640  */
4641 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4642 {
4643     CORINFO_CLASS_HANDLE classHandle;
4644     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4645
4646     var_types type = JITtype2varType(ciType);
4647     if (varTypeIsGC(type))
4648     {
4649         // For efficiency, getArgType only returns something in classHandle for
4650         // value types.  For other types that have additional type info, you
4651         // have to call back explicitly
4652         classHandle = info.compCompHnd->getArgClass(sig, args);
4653         if (!classHandle)
4654         {
4655             NO_WAY("Could not figure out Class specified in argument or local signature");
4656         }
4657     }
4658
4659     return verMakeTypeInfo(ciType, classHandle);
4660 }
4661
4662 /*****************************************************************************/
4663
4664 // This does the expensive check to figure out whether the method
4665 // needs to be verified. It is called only when we fail verification,
4666 // just before throwing the verification exception.
4667
4668 BOOL Compiler::verNeedsVerification()
4669 {
4670     // If we have previously determined that verification is NOT needed
4671     // (for example in Compiler::compCompile), that means verification is really not needed.
4672     // Return the same decision we made before.
4673     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4674
4675     if (!tiVerificationNeeded)
4676     {
4677         return tiVerificationNeeded;
4678     }
4679
4680     assert(tiVerificationNeeded);
4681
4682     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4683     // obtain the answer.
4684     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4685         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4686
4687     // canSkipVerification will return one of the following three values:
4688     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4689     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4690     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4691     //     but need to insert a callout to the VM to ask during runtime
4692     //     whether to skip verification or not.
4693
4694     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4695     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4696     {
4697         tiRuntimeCalloutNeeded = true;
4698     }
4699
4700     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4701     {
4702         // Dev10 706080 - Testers don't like the assert, so just silence it
4703         // by not using the macros that invoke debugAssert.
4704         badCode();
4705     }
4706
4707     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4708     // The following line means we will NOT do jit time verification if canSkipVerification
4709     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4710     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4711     return tiVerificationNeeded;
4712 }
4713
4714 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4715 {
4716     if (ti.IsByRef())
4717     {
4718         return TRUE;
4719     }
4720     if (!ti.IsType(TI_STRUCT))
4721     {
4722         return FALSE;
4723     }
4724     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4725 }
4726
4727 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4728 {
4729     if (ti.IsPermanentHomeByRef())
4730     {
4731         return TRUE;
4732     }
4733     else
4734     {
4735         return FALSE;
4736     }
4737 }
4738
4739 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4740 {
4741     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4742             || ti.IsUnboxedGenericTypeVar() ||
4743             (ti.IsType(TI_STRUCT) &&
4744              // exclude byreflike structs
4745              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4746 }
4747
4748 // Is it a boxed value type?
4749 bool Compiler::verIsBoxedValueType(typeInfo ti)
4750 {
4751     if (ti.GetType() == TI_REF)
4752     {
4753         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4754         return !!eeIsValueClass(clsHnd);
4755     }
4756     else
4757     {
4758         return false;
4759     }
4760 }
4761
4762 /*****************************************************************************
4763  *
4764  *  Check if a TailCall is legal.
4765  */
4766
4767 bool Compiler::verCheckTailCallConstraint(
4768     OPCODE                  opcode,
4769     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4770     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4771     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4772                                                        // return false to the caller.
4773                                                        // If false, it will throw.
4774     )
4775 {
4776     DWORD            mflags;
4777     CORINFO_SIG_INFO sig;
4778     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4779                                    // this counter is used to keep track of how many items have been
4780                                    // virtually popped
4781
4782     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4783     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4784     unsigned              methodClassFlgs = 0;
4785
4786     assert(impOpcodeIsCallOpcode(opcode));
4787
4788     if (compIsForInlining())
4789     {
4790         return false;
4791     }
4792
4793     // for calli, VerifyOrReturn that this is not a virtual method
4794     if (opcode == CEE_CALLI)
4795     {
4796         /* Get the call sig */
4797         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4798
4799         // We don't know the target method, so we have to infer the flags, or
4800         // assume the worst-case.
4801         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4802     }
4803     else
4804     {
4805         methodHnd = pResolvedToken->hMethod;
4806
4807         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4808
4809         // When verifying generic code we pair the method handle with its
4810         // owning class to get the exact method signature.
4811         methodClassHnd = pResolvedToken->hClass;
4812         assert(methodClassHnd);
4813
4814         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4815
4816         // opcode specific check
4817         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4818     }
4819
4820     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4821     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4822
4823     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4824     {
4825         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4826     }
4827
4828     // check compatibility of the arguments
4829     unsigned int argCount;
4830     argCount = sig.numArgs;
4831     CORINFO_ARG_LIST_HANDLE args;
4832     args = sig.args;
4833     while (argCount--)
4834     {
4835         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4836
4837         // check that the argument is not a byref for tailcalls
4838         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4839
4840         // For unsafe code, we might have parameters containing pointers to stack locations.
4841         // Disallow the tailcall in that case.
4842         CORINFO_CLASS_HANDLE classHandle;
4843         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4844         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4845
4846         args = info.compCompHnd->getArgNext(args);
4847     }
4848
4849     // update popCount
4850     popCount += sig.numArgs;
4851
4852     // check for 'this', which is present on non-static methods not called via NEWOBJ
4853     if (!(mflags & CORINFO_FLG_STATIC))
4854     {
4855         // Always update the popCount.
4856         // This is crucial for the stack calculation to be correct.
4857         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4858         popCount++;
4859
4860         if (opcode == CEE_CALLI)
4861         {
4862             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4863             // on the stack.
4864             if (tiThis.IsValueClass())
4865             {
4866                 tiThis.MakeByRef();
4867             }
4868             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4869         }
4870         else
4871         {
4872             // Check type compatibility of the this argument
4873             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4874             if (tiDeclaredThis.IsValueClass())
4875             {
4876                 tiDeclaredThis.MakeByRef();
4877             }
4878
4879             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4880         }
4881     }
4882
4883     // Tail calls on constrained calls should be illegal too:
4884     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4885     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4886
4887     // Get the exact view of the signature for an array method
4888     if (sig.retType != CORINFO_TYPE_VOID)
4889     {
4890         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4891         {
4892             assert(opcode != CEE_CALLI);
4893             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4894         }
4895     }
4896
4897     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4898     typeInfo tiCallerRetType =
4899         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4900
4901     // void return type gets morphed into the error type, so we have to treat it specially here
4902     if (sig.retType == CORINFO_TYPE_VOID)
4903     {
4904         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4905                                   speculative);
4906     }
4907     else
4908     {
4909         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4910                                                    NormaliseForStack(tiCallerRetType), true),
4911                                   "tailcall return mismatch", speculative);
4912     }
4913
4914     // for tailcall, stack must be empty
4915     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4916
4917     return true; // Yes, tailcall is legal
4918 }
4919
4920 /*****************************************************************************
4921  *
4922  *  Checks the IL verification rules for the call
4923  */
4924
4925 void Compiler::verVerifyCall(OPCODE                  opcode,
4926                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4927                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4928                              bool                    tailCall,
4929                              bool                    readonlyCall,
4930                              const BYTE*             delegateCreateStart,
4931                              const BYTE*             codeAddr,
4932                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4933 {
4934     DWORD             mflags;
4935     CORINFO_SIG_INFO* sig      = nullptr;
4936     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4937                                     // this counter is used to keep track of how many items have been
4938                                     // virtually popped
4939
4940     // for calli, VerifyOrReturn that this is not a virtual method
4941     if (opcode == CEE_CALLI)
4942     {
4943         Verify(false, "Calli not verifiable");
4944         return;
4945     }
4946
4947     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4948     mflags = callInfo->verMethodFlags;
4949
4950     sig = &callInfo->verSig;
4951
4952     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4953     {
4954         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4955     }
4956
4957     // opcode specific check
4958     unsigned methodClassFlgs = callInfo->classFlags;
4959     switch (opcode)
4960     {
4961         case CEE_CALLVIRT:
4962             // cannot do callvirt on valuetypes
4963             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4964             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4965             break;
4966
4967         case CEE_NEWOBJ:
4968         {
4969             assert(!tailCall); // Importer should not allow this
4970             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4971                            "newobj must be on instance");
4972
4973             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4974             {
4975                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4976                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4977                 typeInfo tiDeclaredFtn =
4978                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4979                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4980
4981                 assert(popCount == 0);
4982                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4983                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4984
4985                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4986                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4987                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4988                                "delegate object type mismatch");
4989
4990                 CORINFO_CLASS_HANDLE objTypeHandle =
4991                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4992
4993                 // the method signature must be compatible with the delegate's invoke method
4994
4995                 // check that for virtual functions, the type of the object used to get the
4996                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4997                 // since this is a bit of work to determine in general, we pattern match stylized
4998                 // code sequences
4999
5000                 // the delegate creation code check, which used to be done later, is now done here
5001                 // so we can read delegateMethodRef directly
5002                 // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
5003                 // we then use it in our call to isCompatibleDelegate().
5004
5005                 mdMemberRef delegateMethodRef = mdMemberRefNil;
5006                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
5007                                "must create delegates with certain IL");
5008
5009                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
5010                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
5011                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
5012                 delegateResolvedToken.token        = delegateMethodRef;
5013                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
5014                 info.compCompHnd->resolveToken(&delegateResolvedToken);
5015
5016                 CORINFO_CALL_INFO delegateCallInfo;
5017                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
5018                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
5019
5020                 BOOL isOpenDelegate = FALSE;
5021                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
5022                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
5023                                                                       &isOpenDelegate),
5024                                "function incompatible with delegate");
5025
5026                 // check the constraints on the target method
5027                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
5028                                "delegate target has unsatisfied class constraints");
5029                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
5030                                                                             tiActualFtn.GetMethod()),
5031                                "delegate target has unsatisfied method constraints");
5032
5033                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
5034                 // for additional verification rules for delegates
5035                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
5036                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
5037                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5038                 {
5039
5040                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
5041 #ifdef DEBUG
5042                         && StrictCheckForNonVirtualCallToVirtualMethod()
5043 #endif
5044                             )
5045                     {
5046                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5047                         {
5048                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
5049                                                verIsBoxedValueType(tiActualObj),
5050                                            "The 'this' parameter to the call must be either the calling method's "
5051                                            "'this' parameter or "
5052                                            "a boxed value type.");
5053                         }
5054                     }
5055                 }
5056
5057                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
5058                 {
5059                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
5060
5061                     Verify(targetIsStatic || !isOpenDelegate,
5062                            "Unverifiable creation of an open instance delegate for a protected member.");
5063
5064                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
5065                                                                 ? info.compClassHnd
5066                                                                 : tiActualObj.GetClassHandleForObjRef();
5067
5068                     // In the case of protected methods, it is a requirement that the 'this'
5069                     // pointer be a subclass of the current context.  Perform this check.
5070                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5071                            "Accessing protected method through wrong type.");
5072                 }
5073                 goto DONE_ARGS;
5074             }
5075         }
5076         // fall thru to default checks
5077         default:
5078             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
5079     }
5080     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
5081                    "can only newobj a delegate constructor");
5082
5083     // check compatibility of the arguments
5084     unsigned int argCount;
5085     argCount = sig->numArgs;
5086     CORINFO_ARG_LIST_HANDLE args;
5087     args = sig->args;
5088     while (argCount--)
5089     {
5090         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
5091
5092         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
5093         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
5094
5095         args = info.compCompHnd->getArgNext(args);
5096     }
5097
5098 DONE_ARGS:
5099
5100     // update popCount
5101     popCount += sig->numArgs;
5102
5103     // check for 'this', which is present on non-static methods not called via NEWOBJ
5104     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
5105     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
5106     {
5107         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5108         popCount++;
5109
5110         // If it is null, we assume we can access it (since it will AV shortly)
5111         // If it is anything but a reference class, there is no hierarchy, so
5112         // again, we don't need the precise instance class to compute 'protected' access
5113         if (tiThis.IsType(TI_REF))
5114         {
5115             instanceClassHnd = tiThis.GetClassHandleForObjRef();
5116         }
5117
5118         // Check type compatibility of the this argument
5119         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
5120         if (tiDeclaredThis.IsValueClass())
5121         {
5122             tiDeclaredThis.MakeByRef();
5123         }
5124
5125         // If this is a call to the base class .ctor, set thisPtr Init for
5126         // this block.
5127         if (mflags & CORINFO_FLG_CONSTRUCTOR)
5128         {
5129             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
5130                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
5131             {
5132                 assert(verCurrentState.thisInitialized !=
5133                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
5134                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
5135                                "Call to base class constructor when 'this' is possibly initialized");
5136                 // Otherwise, 'this' is now initialized.
5137                 verCurrentState.thisInitialized = TIS_Init;
5138                 tiThis.SetInitialisedObjRef();
5139             }
5140             else
5141             {
5142                 // We allow direct calls to value type constructors
5143                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
5144                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
5145                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
5146                                "Bad call to a constructor");
5147             }
5148         }
5149
5150         if (pConstrainedResolvedToken != nullptr)
5151         {
5152             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
5153
5154             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
5155
5156             // We just dereference this and test for equality
5157             tiThis.DereferenceByRef();
5158             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
5159                            "this type mismatch with constrained type operand");
5160
5161             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
5162             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
5163         }
5164
5165         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
5166         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
5167         {
5168             tiDeclaredThis.SetIsReadonlyByRef();
5169         }
5170
5171         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
5172
5173         if (tiThis.IsByRef())
5174         {
5175             // Find the actual type where the method exists (as opposed to what is declared
5176             // in the metadata). This is to prevent passing a byref as the "this" argument
5177             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
5178
5179             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
5180             VerifyOrReturn(eeIsValueClass(actualClassHnd),
5181                            "Call to base type of valuetype (which is never a valuetype)");
5182         }
5183
5184         // Rules for non-virtual call to a non-final virtual method:
5185
5186         // Define:
5187         // The "this" pointer is considered to be "possibly written" if
5188         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
5189         //   (or)
5190         //   2. It has been stored to (STARG.0) anywhere in the method.
5191
5192         // A non-virtual call to a non-final virtual method is only allowed if
5193         //   1. The this pointer passed to the callee is an instance of a boxed value type.
5194         //   (or)
5195         //   2. The this pointer passed to the callee is the current method's this pointer.
5196         //      (and) The current method's this pointer is not "possibly written".
5197
5198         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
5199         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
5200         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
5201         // harder and more error prone.
5202
5203         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
5204 #ifdef DEBUG
5205             && StrictCheckForNonVirtualCallToVirtualMethod()
5206 #endif
5207                 )
5208         {
5209             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5210             {
5211                 VerifyOrReturn(
5212                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
5213                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
5214                     "a boxed value type.");
5215             }
5216         }
5217     }
5218
5219     // check any constraints on the callee's class and type parameters
5220     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
5221                    "method has unsatisfied class constraints");
5222     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
5223                    "method has unsatisfied method constraints");
5224
5225     if (mflags & CORINFO_FLG_PROTECTED)
5226     {
5227         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5228                        "Can't access protected method");
5229     }
5230
5231     // Get the exact view of the signature for an array method
5232     if (sig->retType != CORINFO_TYPE_VOID)
5233     {
5234         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
5235     }
5236
5237     // "readonly." prefixed calls only allowed for the Address operation on arrays.
5238     // The methods supported by array types are under the control of the EE
5239     // so we can trust that only the Address operation returns a byref.
5240     if (readonlyCall)
5241     {
5242         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
5243         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
5244                        "unexpected use of readonly prefix");
5245     }
5246
5247     // Verify the tailcall
5248     if (tailCall)
5249     {
5250         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
5251     }
5252 }
5253
5254 /*****************************************************************************
5255  *  Checks that a delegate creation is done using the following pattern:
5256  *     dup
5257  *     ldvirtftn targetMemberRef
5258  *  OR
5259  *     ldftn targetMemberRef
5260  *
5261  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
5262  *  not in this basic block)
5263  *
5264  *  targetMemberRef is read from the code sequence.
5265  *  targetMemberRef is validated iff verificationNeeded.
5266  */
5267
5268 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
5269                                         const BYTE*  codeAddr,
5270                                         mdMemberRef& targetMemberRef)
5271 {
5272     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5273     {
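        // The method token follows the two-byte ldftn opcode.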
5274         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
5275         return TRUE;
5276     }
5277     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
5278     {
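        // The method token follows dup (one byte) plus the two-byte ldvirtftn opcode.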
5279         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
5280         return TRUE;
5281     }
5282
5283     return FALSE;
5284 }
5285
5286 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
5287 {
5288     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
5289     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
5290     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
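    // If the value being stored is not compatible with the pointed-to type, report the
    // verification failure and note that an unsafe cast was needed.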
5291     if (!tiCompatibleWith(value, normPtrVal, true))
5292     {
5293         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
5294         compUnsafeCastUsed = true;
5295     }
5296     return ptrVal;
5297 }
5298
5299 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
5300 {
5301     assert(!instrType.IsStruct());
5302
5303     typeInfo ptrVal;
5304     if (ptr.IsByRef())
5305     {
5306         ptrVal = DereferenceByRef(ptr);
5307         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
5308         {
5309             Verify(false, "bad pointer");
5310             compUnsafeCastUsed = true;
5311         }
5312         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
5313         {
5314             Verify(false, "pointer not consistent with instr");
5315             compUnsafeCastUsed = true;
5316         }
5317     }
5318     else
5319     {
5320         Verify(false, "pointer not byref");
5321         compUnsafeCastUsed = true;
5322     }
5323
5324     return ptrVal;
5325 }
5326
5327 // Verify that the field is used properly.  'tiThis' is NULL for statics,
5328 // 'fieldFlags' is the field's attributes, and 'mutator' is TRUE if it is a
5329 // ld*flda or a st*fld.
5330 // 'enclosingClass' is given if we are accessing a field in some specific type.
5331
5332 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
5333                               const CORINFO_FIELD_INFO& fieldInfo,
5334                               const typeInfo*           tiThis,
5335                               BOOL                      mutator,
5336                               BOOL                      allowPlainStructAsThis)
5337 {
5338     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5339     unsigned             fieldFlags     = fieldInfo.fieldFlags;
5340     CORINFO_CLASS_HANDLE instanceClass =
5341         info.compClassHnd; // for statics, we imagine the instance is the current class.
5342
5343     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5344     if (mutator)
5345     {
5346         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
5347         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5348         {
5349             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5350                        info.compIsStatic == isStaticField,
5351                    "bad use of initonly field (set or address taken)");
5352         }
5353     }
5354
5355     if (tiThis == nullptr)
5356     {
5357         Verify(isStaticField, "used static opcode with non-static field");
5358     }
5359     else
5360     {
5361         typeInfo tThis = *tiThis;
5362
5363         if (allowPlainStructAsThis && tThis.IsValueClass())
5364         {
5365             tThis.MakeByRef();
5366         }
5367
5368         // If it is null, we assume we can access it (since it will AV shortly)
5369         // If it is anything but a reference class, there is no hierarchy, so
5370         // again, we don't need the precise instance class to compute 'protected' access
5371         if (tiThis->IsType(TI_REF))
5372         {
5373             instanceClass = tiThis->GetClassHandleForObjRef();
5374         }
5375
5376         // Note that even if the field is static, we require that the this pointer
5377         // satisfy the same constraints as a non-static field.  This happens to
5378         // be simpler and seems reasonable
5379         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5380         if (tiDeclaredThis.IsValueClass())
5381         {
5382             tiDeclaredThis.MakeByRef();
5383
5384             // we allow read-only tThis, on any field access (even stores!), because if the
5385             // class implementor wants to prohibit stores he should make the field private.
5386             // We do this by setting the read-only bit on the type we compare tThis to.
5387             tiDeclaredThis.SetIsReadonlyByRef();
5388         }
5389         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5390         {
5391             // Any field access is legal on "uninitialized" this pointers.
5392             // The easiest way to implement this is to simply set the
5393             // initialized bit for the duration of the type check on the
5394             // field access only.  It does not change the state of the "this"
5395             // for the function as a whole. Note that the "tThis" is a copy
5396             // of the original "this" type (*tiThis) passed in.
5397             tThis.SetInitialisedObjRef();
5398         }
5399
5400         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5401     }
5402
5403     // Presently the JIT does not check that we don't store or take the address of init-only fields
5404     // since we cannot guarantee their immutability and it is not a security issue.
5405
5406     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5407     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5408                    "field has unsatisfied class constraints");
5409     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5410     {
5411         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5412                "Accessing protected method through wrong type.");
5413     }
5414 }
5415
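//------------------------------------------------------------------------
// verVerifyCond: verification check that the two operands of a comparison
//    or conditional-branch opcode have compatible types.
//
// Arguments:
//    tiOp1  - type of the first operand
//    tiOp2  - type of the second operand
//    opcode - the IL compare/branch opcode being verified
//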
5416 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5417 {
5418     if (tiOp1.IsNumberType())
5419     {
5420 #ifdef _TARGET_64BIT_
5421         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5422 #else  // !_TARGET_64BIT_
5423         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5424         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5425         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5426         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5427 #endif // !_TARGET_64BIT_
5428     }
5429     else if (tiOp1.IsObjRef())
5430     {
5431         switch (opcode)
5432         {
5433             case CEE_BEQ_S:
5434             case CEE_BEQ:
5435             case CEE_BNE_UN_S:
5436             case CEE_BNE_UN:
5437             case CEE_CEQ:
5438             case CEE_CGT_UN:
5439                 break;
5440             default:
5441                 Verify(FALSE, "Cond not allowed on object types");
5442         }
5443         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5444     }
5445     else if (tiOp1.IsByRef())
5446     {
5447         Verify(tiOp2.IsByRef(), "Cond type mismatch");
5448     }
5449     else
5450     {
5451         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5452     }
5453 }
5454
5455 void Compiler::verVerifyThisPtrInitialised()
5456 {
5457     if (verTrackObjCtorInitState)
5458     {
5459         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5460     }
5461 }
5462
5463 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5464 {
5465     // Either target == context, in which case we are calling an alternate .ctor,
5466     // or target is the immediate parent of context.
5467
5468     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
5469 }
5470
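//------------------------------------------------------------------------
// impImportLdvirtftn: import a ldvirtftn opcode by building a tree that
//    computes the address of the virtual method for the given 'this'.
//
// Arguments:
//    thisPtr        - tree for the object reference
//    pResolvedToken - resolved token for the method
//    pCallInfo      - EE call info for the method
//
// Return Value:
//    Tree yielding the target address (typically a helper call), or
//    nullptr if the importer must abort an inline attempt.
//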
5471 GenTree* Compiler::impImportLdvirtftn(GenTree*                thisPtr,
5472                                       CORINFO_RESOLVED_TOKEN* pResolvedToken,
5473                                       CORINFO_CALL_INFO*      pCallInfo)
5474 {
5475     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5476     {
5477         NO_WAY("Virtual call to a function added via EnC is not supported");
5478     }
5479
5480     // CoreRT generic virtual method
5481     if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5482     {
5483         GenTree* runtimeMethodHandle = nullptr;
5484         if (pCallInfo->exactContextNeedsRuntimeLookup)
5485         {
5486             runtimeMethodHandle =
5487                 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5488         }
5489         else
5490         {
5491             runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5492         }
5493         return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5494                                    gtNewArgList(thisPtr, runtimeMethodHandle));
5495     }
5496
5497 #ifdef FEATURE_READYTORUN_COMPILER
5498     if (opts.IsReadyToRun())
5499     {
5500         if (!pCallInfo->exactContextNeedsRuntimeLookup)
5501         {
5502             GenTreeCall* call =
5503                 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5504
5505             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5506
5507             return call;
5508         }
5509
5510         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5511         if (IsTargetAbi(CORINFO_CORERT_ABI))
5512         {
5513             GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5514
5515             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5516                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5517         }
5518     }
5519 #endif
5520
5521     // Get the exact descriptor for the static callsite
5522     GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5523     if (exactTypeDesc == nullptr)
5524     { // compDonotInline()
5525         return nullptr;
5526     }
5527
5528     GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
5529     if (exactMethodDesc == nullptr)
5530     { // compDonotInline()
5531         return nullptr;
5532     }
5533
5534     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5535
5536     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5537
5538     helpArgs = gtNewListNode(thisPtr, helpArgs);
5539
5540     // Call helper function.  This gets the target address of the final destination callsite.
5541
5542     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5543 }
5544
5545 //------------------------------------------------------------------------
5546 // impImportAndPushBox: build and import a value-type box
5547 //
5548 // Arguments:
5549 //   pResolvedToken - resolved token from the box operation
5550 //
5551 // Return Value:
5552 //   None.
5553 //
5554 // Side Effects:
5555 //   The value to be boxed is popped from the stack, and a tree for
5556 //   the boxed value is pushed. This method may create upstream
5557 //   statements, spill side effecting trees, and create new temps.
5558 //
5559 //   If importing an inlinee, we may also discover the inline must
5560 //   fail. If so there is no new value pushed on the stack. Callers
5561 //   should use compDonotInline() after calling this method to see if
5562 //   ongoing importation should be aborted.
5563 //
5564 // Notes:
5565 //   Boxing of ref classes results in the same value as the value on
5566 //   the top of the stack, so is handled inline in impImportBlockCode
5567 //   for the CEE_BOX case. Only value or primitive type boxes make it
5568 //   here.
5569 //
5570 //   Boxing for nullable types is done via a helper call; boxing
5571 //   of other value types is expanded inline or handled via helper
5572 //   call, depending on the jit's codegen mode.
5573 //
5574 //   When the jit is operating in size and time constrained modes,
5575 //   using a helper call here can save jit time and code size. But it
5576 //   may also inhibit cleanup optimizations that could have had an
5577 //   even greater effect on code size and jit time. An optimal
5578 //   strategy may need to peek ahead and see if it is easy to tell how
5579 //   the box is being used. For now, we defer.
5580
5581 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5582 {
5583     // Spill any special side effects
5584     impSpillSpecialSideEff();
5585
5586     // Get the expression to box from the stack.
5587     GenTree*             op1       = nullptr;
5588     GenTree*             op2       = nullptr;
5589     StackEntry           se        = impPopStack();
5590     CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
5591     GenTree*             exprToBox = se.val;
5592
5593     // Look at what helper we should use.
5594     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5595
5596     // Determine what expansion to prefer.
5597     //
5598     // In size/time/debuggable constrained modes, the helper call
5599     // expansion for box is generally smaller and is preferred, unless
5600     // the value to box is a struct that comes from a call. In that
5601     // case the call can construct its return value directly into the
5602     // box payload, saving possibly some up-front zeroing.
5603     //
5604     // Currently primitive type boxes always get inline expanded. We may
5605     // want to do the same for small structs if they don't come from
5606     // calls and don't have GC pointers, since explicitly copying such
5607     // structs is cheap.
5608     JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
5609     bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
5610     bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
5611     bool expandInline    = canExpandInline && !optForSize;
5612
5613     if (expandInline)
5614     {
5615         JITDUMP(" inline allocate/copy sequence\n");
5616
5617         // We are doing 'normal' boxing.  This means that we can inline the box operation:
5618         // Box(expr) gets morphed into
5619         // temp = new(clsHnd)
5620         // cpobj(temp + TARGET_POINTER_SIZE, expr, clsHnd)
5621         // push temp
5622         // The code paths differ slightly below for structs and primitives because
5623         // "cpobj" differs in these cases.  In one case you get
5624         //    impAssignStructPtr(temp + TARGET_POINTER_SIZE, expr, clsHnd)
5625         // and in the other you get
5626         //    *(temp + TARGET_POINTER_SIZE) = expr
5627
5628         if (opts.MinOpts() || opts.compDbgCode)
5629         {
5630             // For minopts/debug code, try and minimize the total number
5631             // of box temps by reusing an existing temp when possible.
5632             if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5633             {
5634                 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
5635             }
5636         }
5637         else
5638         {
5639             // When optimizing, use a new temp for each box operation
5640             // since we then know the exact class of the box temp.
5641             impBoxTemp                  = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
5642             lvaTable[impBoxTemp].lvType = TYP_REF;
5643             const bool isExact          = true;
5644             lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
5645         }
5646
5647         // The box temp needs to stay in use until this box expression is appended to
5648         // some other node.  We approximate this by keeping it alive until
5649         // the opcode stack becomes empty
5650         impBoxTempInUse = true;
5651
5652 #ifdef FEATURE_READYTORUN_COMPILER
5653         bool usingReadyToRunHelper = false;
5654
5655         if (opts.IsReadyToRun())
5656         {
5657             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5658             usingReadyToRunHelper = (op1 != nullptr);
5659         }
5660
5661         if (!usingReadyToRunHelper)
5662 #endif
5663         {
5664             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5665             // and the newfast call with a single call to a dynamic R2R cell that will:
5666             //      1) Load the context
5667             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5668             //      3) Allocate and return the new object for boxing
5669             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5670
5671             // Ensure that the value class is restored
5672             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5673             if (op2 == nullptr)
5674             {
5675                 // We must be backing out of an inline.
5676                 assert(compDonotInline());
5677                 return;
5678             }
5679
5680             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
5681                                     pResolvedToken->hClass, TYP_REF, op2);
5682         }
5683
5684         /* Remember that this basic block contains 'new' of an object, and so does this method */
5685         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5686         optMethodFlags |= OMF_HAS_NEWOBJ;
5687
5688         GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
5689
5690         GenTree* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5691
5692         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5693         op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
5694         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5695
5696         if (varTypeIsStruct(exprToBox))
5697         {
5698             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5699             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5700         }
5701         else
5702         {
5703             var_types lclTyp = exprToBox->TypeGet();
5704             if (lclTyp == TYP_BYREF)
5705             {
5706                 lclTyp = TYP_I_IMPL;
5707             }
5708             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5709             if (impIsPrimitive(jitType))
5710             {
5711                 lclTyp = JITtype2varType(jitType);
5712             }
5713             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5714                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5715             var_types srcTyp = exprToBox->TypeGet();
5716             var_types dstTyp = lclTyp;
5717
5718             if (srcTyp != dstTyp)
5719             {
5720                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5721                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5722                 exprToBox = gtNewCastNode(dstTyp, exprToBox, false, dstTyp);
5723             }
5724             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5725         }
5726
5727         // Spill eval stack to flush out any pending side effects.
5728         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
5729
5730         // Set up this copy as a second assignment.
5731         GenTree* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5732
5733         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5734
5735         // Record that this is a "box" node and keep track of the matching parts.
5736         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
5737
5738         // If it is a value class, mark the "box" node.  We can use this information
5739         // to optimise several cases:
5740         //    "box(x) == null" --> false
5741         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5742         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5743
5744         op1->gtFlags |= GTF_BOX_VALUE;
5745         assert(op1->IsBoxedValue());
5746         assert(asg->gtOper == GT_ASG);
5747     }
5748     else
5749     {
5750         // Don't optimize, just call the helper and be done with it.
5751         JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
5752         assert(operCls != nullptr);
5753
5754         // Ensure that the value class is restored
5755         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5756         if (op2 == nullptr)
5757         {
5758             // We must be backing out of an inline.
5759             assert(compDonotInline());
5760             return;
5761         }
5762
5763         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5764         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, args);
5765     }
5766
5767     /* Push the result back on the stack, */
5768     /* even if clsHnd is a value class we want the TI_REF */
5769     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5770     impPushOnStack(op1, tiRetVal);
5771 }
5772
5773 //------------------------------------------------------------------------
5774 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5775 //
5776 // Arguments:
5777 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5778 //                     by a call to CEEInfo::resolveToken().
5779 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5780 //                by a call to CEEInfo::getCallInfo().
5781 //
5782 // Assumptions:
5783 //    The multi-dimensional array constructor arguments (array dimensions) are
5784 //    pushed on the IL stack on entry to this method.
5785 //
5786 // Notes:
5787 //    Multi-dimensional array constructors are imported as calls to a JIT
5788 //    helper, not as regular calls.
5789
5790 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5791 {
5792     GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
5793     if (classHandle == nullptr)
5794     { // compDonotInline()
5795         return;
5796     }
5797
5798     assert(pCallInfo->sig.numArgs);
5799
5800     GenTree*        node;
5801     GenTreeArgList* args;
5802
5803     //
5804     // There are two different JIT helpers that can be used to allocate
5805     // multi-dimensional arrays:
5806     //
5807     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5808     //      This variant is deprecated. It should be eventually removed.
5809     //
5810     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5811     //      pointer to block of int32s. This variant is more portable.
5812     //
5813     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5814     // unconditionally would require ReadyToRun version bump.
5815     //
5816     CLANG_FORMAT_COMMENT_ANCHOR;
5817
5818     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5819     {
5820
5821         // Reuse the temp used to pass the array dimensions to avoid bloating
5822         // the stack frame in case there are multiple calls to multi-dim array
5823         // constructors within a single method.
5824         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5825         {
5826             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5827             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5828             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5829         }
5830
5831         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5832         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5833         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5834             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5835
5836         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5837         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5838         // to one allocation at a time.
5839         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5840
5841         //
5842         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5843         //  - Array class handle
5844         //  - Number of dimension arguments
5845         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5846         //
5847
5848         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5849         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5850
5851         // Pop dimension arguments from the stack one at a time and store it
5852         // into lvaNewObjArrayArgs temp.
5853         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5854         {
5855             GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5856
5857             GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5858             dest          = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5859             dest          = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5860                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5861             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5862
5863             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5864         }
5865
5866         args = gtNewArgList(node);
5867
5868         // pass number of arguments to the helper
5869         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5870
5871         args = gtNewListNode(classHandle, args);
5872
5873         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
5874     }
5875     else
5876     {
5877         //
5878         // The varargs helper needs the class handle and the dimension count as the last
5879         // and last-1 params (this is a cdecl call, so args will be
5880         // pushed in reverse order on the CPU stack)
5881         //
5882
5883         args = gtNewArgList(classHandle);
5884
5885         // pass number of arguments to the helper
5886         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5887
5888         unsigned argFlags = 0;
5889         args              = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
5890
5891         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
5892
5893         // varargs, so we pop the arguments
5894         node->gtFlags |= GTF_CALL_POP_ARGS;
5895
5896 #ifdef DEBUG
5897         // At the present time we don't track Caller pop arguments
5898         // that have GC references in them
5899         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5900         {
5901             assert(temp->Current()->gtType != TYP_REF);
5902         }
5903 #endif
5904     }
5905
5906     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5907     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5908
5909     // Remember that this basic block contains 'new' of a md array
5910     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5911
5912     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5913 }
5914
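//------------------------------------------------------------------------
// impTransformThis: apply the 'this' transform required for a constrained
//    callvirt: dereference the managed pointer (CORINFO_DEREF_THIS), box the
//    value type (CORINFO_BOX_THIS), or leave the pointer unchanged.
//
// Arguments:
//    thisPtr                   - tree for the 'this' argument
//    pConstrainedResolvedToken - resolved constraint type token
//    transform                 - the transform requested by the EE
//
// Return Value:
//    The transformed 'this' tree, or nullptr if boxing forced an inline
//    attempt to be abandoned.
//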
5915 GenTree* Compiler::impTransformThis(GenTree*                thisPtr,
5916                                     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5917                                     CORINFO_THIS_TRANSFORM  transform)
5918 {
5919     switch (transform)
5920     {
5921         case CORINFO_DEREF_THIS:
5922         {
5923             GenTree* obj = thisPtr;
5924
5925             // This does an LDIND on the obj, which should be a byref pointing to a ref.
5926             impBashVarAddrsToI(obj);
5927             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5928             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5929
5930             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5931             // ldind could point anywhere, example a boxed class static int
5932             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5933
5934             return obj;
5935         }
5936
5937         case CORINFO_BOX_THIS:
5938         {
5939             // Constraint calls where there might be no
5940             // unboxed entry point require us to implement the call via helper.
5941             // These only occur when a possible target of the call
5942             // may have inherited an implementation of an interface
5943             // method from System.Object or System.ValueType.  The EE does not provide us with
5944             // "unboxed" versions of these methods.
5945
5946             GenTree* obj = thisPtr;
5947
5948             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5949             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5950             obj->gtFlags |= GTF_EXCEPT;
5951
5952             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5953             var_types   objType = JITtype2varType(jitTyp);
5954             if (impIsPrimitive(jitTyp))
5955             {
5956                 if (obj->OperIsBlk())
5957                 {
5958                     obj->ChangeOperUnchecked(GT_IND);
5959
5960                     // Obj could point anywhere, example a boxed class static int
5961                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5962                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5963                 }
5964
5965                 obj->gtType = JITtype2varType(jitTyp);
5966                 assert(varTypeIsArithmetic(obj->gtType));
5967             }
5968
5969             // This pushes on the dereferenced byref
5970             // This is then used immediately to box.
5971             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5972
5973             // This pops off the byref-to-a-value-type remaining on the stack and
5974             // replaces it with a boxed object.
5975             // This is then used as the object to the virtual call immediately below.
5976             impImportAndPushBox(pConstrainedResolvedToken);
5977             if (compDonotInline())
5978             {
5979                 return nullptr;
5980             }
5981
5982             obj = impPopStack().val;
5983             return obj;
5984         }
5985         case CORINFO_NO_THIS_TRANSFORM:
5986         default:
5987             return thisPtr;
5988     }
5989 }
5990
5991 //------------------------------------------------------------------------
5992 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5993 //
5994 // Return Value:
5995 //    true if PInvoke inlining should be enabled in current method, false otherwise
5996 //
5997 // Notes:
5998 //    Checks a number of ambient conditions where we could pinvoke but choose not to
5999
6000 bool Compiler::impCanPInvokeInline()
6001 {
6002     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
6003            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
6004         ;
6005 }
6006
6007 //------------------------------------------------------------------------
6008 // impCanPInvokeInlineCallSite: basic legality checks using information
6009 // from a call to see if the call qualifies as an inline pinvoke.
6010 //
6011 // Arguments:
6012 //    block      - block containing the call, or for inlinees, block
6013 //                 containing the call being inlined
6014 //
6015 // Return Value:
6016 //    true if this call can legally qualify as an inline pinvoke, false otherwise
6017 //
6018 // Notes:
6019 //    For runtimes that support exception handling interop there are
6020 //    restrictions on using inline pinvoke in handler regions.
6021 //
6022 //    * We have to disable pinvoke inlining inside of filters because
6023 //    in case the main execution (i.e. in the try block) is inside
6024 //    unmanaged code, we cannot reuse the inlined stub (we still need
6025 //    the original state until we are in the catch handler)
6026 //
6027 //    * We disable pinvoke inlining inside handlers since the GSCookie
6028 //    is in the inlined Frame (see
6029 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
6030 //    this would not protect framelets/return-address of handlers.
6031 //
6032 //    These restrictions are currently also in place for CoreCLR but
6033 //    can be relaxed when coreclr/#8459 is addressed.
6034
6035 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
6036 {
6037     if (block->hasHndIndex())
6038     {
6039         return false;
6040     }
6041
6042     // The remaining limitations do not apply to CoreRT
6043     if (IsTargetAbi(CORINFO_CORERT_ABI))
6044     {
6045         return true;
6046     }
6047
6048 #ifdef _TARGET_AMD64_
6049     // On x64, we disable pinvoke inlining inside of try regions.
6050     // Here is the comment from JIT64 explaining why:
6051     //
6052     //   [VSWhidbey: 611015] - because the jitted code links in the
6053     //   Frame (instead of the stub) we rely on the Frame not being
6054     //   'active' until inside the stub.  This normally happens by the
6055     //   stub setting the return address pointer in the Frame object
6056     //   inside the stub.  On a normal return, the return address
6057     //   pointer is zeroed out so the Frame can be safely re-used, but
6058     //   if an exception occurs, nobody zeros out the return address
6059     //   pointer.  Thus if we re-used the Frame object, it would go
6060     //   'active' as soon as we link it into the Frame chain.
6061     //
6062     //   Technically we only need to disable PInvoke inlining if we're
6063     //   in a handler or if we're in a try body with a catch or
6064     //   filter/except where other non-handler code in this method
6065     //   might run and try to re-use the dirty Frame object.
6066     //
6067     //   A desktop test case where this seems to matter is
6068     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
6069     if (block->hasTryIndex())
6070     {
6071         return false;
6072     }
6073 #endif // _TARGET_AMD64_
6074
6075     return true;
6076 }
6077
6078 //------------------------------------------------------------------------
6079 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
6080 // whether it can be expressed as an inline pinvoke.
6081 //
6082 // Arguments:
6083 //    call       - tree for the call
6084 //    methHnd    - handle for the method being called (may be null)
6085 //    sig        - signature of the method being called
6086 //    mflags     - method flags for the method being called
6087 //    block      - block containing the call, or for inlinees, block
6088 //                 containing the call being inlined
6089 //
6090 // Notes:
6091 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
6092 //
6093 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
6094 //   call passes a combination of legality and profitability checks.
6095 //
6096 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
6097
6098 void Compiler::impCheckForPInvokeCall(
6099     GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
6100 {
6101     CorInfoUnmanagedCallConv unmanagedCallConv;
6102
6103     // If VM flagged it as Pinvoke, flag the call node accordingly
6104     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
6105     {
6106         call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
6107     }
6108
6109     if (methHnd)
6110     {
6111         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
6112         {
6113             return;
6114         }
6115
6116         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
6117     }
6118     else
6119     {
6120         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
6121         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
6122         {
6123             // Used by the IL Stubs.
6124             callConv = CORINFO_CALLCONV_C;
6125         }
6126         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
6127         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
6128         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
6129         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
6130
6131         assert(!call->gtCallCookie);
6132     }
6133
6134     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
6135         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
6136     {
6137         return;
6138     }
6139     optNativeCallCount++;
6140
6141     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
6142     {
6143         // PInvoke CALLI in IL stubs must be inlined
6144     }
6145     else
6146     {
6147         // Check legality
6148         if (!impCanPInvokeInlineCallSite(block))
6149         {
6150             return;
6151         }
6152
6153         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
6154         // profitability checks
6155         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
6156         {
6157             if (!impCanPInvokeInline())
6158             {
6159                 return;
6160             }
6161
6162             // Size-speed tradeoff: don't use inline pinvoke at rarely
6163             // executed call sites.  The non-inline version is more
6164             // compact.
6165             if (block->isRunRarely())
6166             {
6167                 return;
6168             }
6169         }
6170
6171         // The expensive check should be last
6172         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
6173         {
6174             return;
6175         }
6176     }
6177
6178     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
6179
6180     call->gtFlags |= GTF_CALL_UNMANAGED;
6181     info.compCallUnmanaged++;
6182
6183     // AMD64 convention is same for native and managed
6184     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
6185     {
6186         call->gtFlags |= GTF_CALL_POP_ARGS;
6187     }
6188
6189     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
6190     {
6191         call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
6192     }
6193 }
6194
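//------------------------------------------------------------------------
// impImportIndirectCall: create the GT_CALL node for an indirect call
//    (CEE_CALLI), popping the function pointer from the evaluation stack.
//    The pointer is spilled first unless it is already a simple local.
//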
6195 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
6196 {
6197     var_types callRetTyp = JITtype2varType(sig->retType);
6198
6199     /* The function pointer is on top of the stack - It may be a
6200      * complex expression. As it is evaluated after the args,
6201      * it may cause registered args to be spilled. Simply spill it.
6202      */
6203
6204     // Ignore this trivial case.
6205     if (impStackTop().val->gtOper != GT_LCL_VAR)
6206     {
6207         impSpillStackEntry(verCurrentState.esStackDepth - 1,
6208                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
6209     }
6210
6211     /* Get the function pointer */
6212
6213     GenTree* fptr = impPopStack().val;
6214
6215     // The function pointer is typically sized to match the target pointer size.
6216     // However, stubgen IL optimization can change LDC.I8 to LDC.I4
6217     // See ILCodeStream::LowerOpcode
6218     assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
6219
6220 #ifdef DEBUG
6221     // This temporary must never be converted to a double in stress mode,
6222     // because that can introduce a call to the cast helper after the
6223     // arguments have already been evaluated.
6224
6225     if (fptr->OperGet() == GT_LCL_VAR)
6226     {
6227         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
6228     }
6229 #endif
6230
6231     /* Create the call node */
6232
6233     GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6234
6235     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6236
6237     return call;
6238 }
6239
6240 /*****************************************************************************/
6241
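//------------------------------------------------------------------------
// impPopArgsForUnmanagedCall: pop the arguments for an inline pinvoke call
//    from the evaluation stack, spilling any out-of-order side effects so
//    that the right-to-left argument evaluation order is preserved.
//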
6242 void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
6243 {
6244     assert(call->gtFlags & GTF_CALL_UNMANAGED);
6245
6246     /* Since we push the arguments in reverse order (i.e. right -> left)
6247      * spill any side effects from the stack
6248      *
6249      * OBS: If there is only one side effect we do not need to spill it,
6250      *      thus we have to spill all side effects except the last one
6251      */
6252
6253     unsigned lastLevelWithSideEffects = UINT_MAX;
6254
6255     unsigned argsToReverse = sig->numArgs;
6256
6257     // For "thiscall", the first argument goes in a register. Since its
6258     // order does not need to be changed, we do not need to spill it
6259
6260     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6261     {
6262         assert(argsToReverse);
6263         argsToReverse--;
6264     }
6265
6266 #ifndef _TARGET_X86_
6267     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
6268     argsToReverse = 0;
6269 #endif
6270
6271     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
6272     {
6273         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
6274         {
6275             assert(lastLevelWithSideEffects == UINT_MAX);
6276
6277             impSpillStackEntry(level,
6278                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
6279         }
6280         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
6281         {
6282             if (lastLevelWithSideEffects != UINT_MAX)
6283             {
6284                 /* We had a previous side effect - must spill it */
6285                 impSpillStackEntry(lastLevelWithSideEffects,
6286                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
6287
6288                 /* Record the level for the current side effect in case we will spill it */
6289                 lastLevelWithSideEffects = level;
6290             }
6291             else
6292             {
6293                 /* This is the first side effect encountered - record its level */
6294
6295                 lastLevelWithSideEffects = level;
6296             }
6297         }
6298     }
6299
6300     /* The argument list is now "clean" - no out-of-order side effects
6301      * Pop the argument list in reverse order */
6302
6303     GenTree* args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
6304
6305     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6306     {
6307         GenTree* thisPtr = args->Current();
6308         impBashVarAddrsToI(thisPtr);
6309         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
6310     }
6311
6312     if (args)
6313     {
6314         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6315     }
6316 }
6317
6318 //------------------------------------------------------------------------
6319 // impInitClass: Build a node to initialize the class before accessing the
6320 //               field if necessary
6321 //
6322 // Arguments:
6323 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6324 //                     by a call to CEEInfo::resolveToken().
6325 //
6326 // Return Value: If needed, a pointer to the node that will perform the class
6327 //               initialization.  Otherwise, nullptr.
6328 //
6329
6330 GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6331 {
6332     CorInfoInitClassResult initClassResult =
6333         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
6334
6335     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6336     {
6337         return nullptr;
6338     }
6339     BOOL runtimeLookup;
6340
6341     GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6342
6343     if (node == nullptr)
6344     {
6345         assert(compDonotInline());
6346         return nullptr;
6347     }
6348
6349     if (runtimeLookup)
6350     {
6351         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6352     }
6353     else
6354     {
6355         // Call the shared non-gc static helper, as it's the fastest.
6356         node = fgGetSharedCCtor(pResolvedToken->hClass);
6357     }
6358
6359     return node;
6360 }
6361
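//------------------------------------------------------------------------
// impImportStaticReadOnlyField: materialize the value of an initialized
//    static read-only field as a constant node, reading it directly from
//    the field's address.
//
// Arguments:
//    fldAddr - address of the field's value
//    lclTyp  - type of the field
//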
6362 GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6363 {
6364     GenTree* op1 = nullptr;
6365
6366     switch (lclTyp)
6367     {
6368         int     ival;
6369         __int64 lval;
6370         double  dval;
6371
6372         case TYP_BOOL:
6373             ival = *((bool*)fldAddr);
6374             goto IVAL_COMMON;
6375
6376         case TYP_BYTE:
6377             ival = *((signed char*)fldAddr);
6378             goto IVAL_COMMON;
6379
6380         case TYP_UBYTE:
6381             ival = *((unsigned char*)fldAddr);
6382             goto IVAL_COMMON;
6383
6384         case TYP_SHORT:
6385             ival = *((short*)fldAddr);
6386             goto IVAL_COMMON;
6387
6388         case TYP_USHORT:
6389             ival = *((unsigned short*)fldAddr);
6390             goto IVAL_COMMON;
6391
6392         case TYP_UINT:
6393         case TYP_INT:
6394             ival = *((int*)fldAddr);
6395         IVAL_COMMON:
6396             op1 = gtNewIconNode(ival);
6397             break;
6398
6399         case TYP_LONG:
6400         case TYP_ULONG:
6401             lval = *((__int64*)fldAddr);
6402             op1  = gtNewLconNode(lval);
6403             break;
6404
6405         case TYP_FLOAT:
6406             dval = *((float*)fldAddr);
6407             op1  = gtNewDconNode(dval);
6408 #if !FEATURE_X87_DOUBLES
6409             // X87 stack doesn't differentiate between float/double
6410             // so R4 is treated as R8, but everybody else does
6411             op1->gtType = TYP_FLOAT;
6412 #endif // FEATURE_X87_DOUBLES
6413             break;
6414
6415         case TYP_DOUBLE:
6416             dval = *((double*)fldAddr);
6417             op1  = gtNewDconNode(dval);
6418             break;
6419
6420         default:
6421             assert(!"Unexpected lclTyp");
6422             break;
6423     }
6424
6425     return op1;
6426 }
6427
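//------------------------------------------------------------------------
// impImportStaticFieldAccess: build the tree for a static field access,
//    based on the field accessor kind reported by the EE: a generics
//    statics helper, a shared statics helper, a ReadyToRun helper, or a
//    direct field/address reference.
//
// Arguments:
//    pResolvedToken - resolved token for the field
//    access         - requested access kind (value or address)
//    pFieldInfo     - EE field info
//    lclTyp         - type of the field
//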
6428 GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6429                                               CORINFO_ACCESS_FLAGS    access,
6430                                               CORINFO_FIELD_INFO*     pFieldInfo,
6431                                               var_types               lclTyp)
6432 {
6433     GenTree* op1;
6434
6435     switch (pFieldInfo->fieldAccessor)
6436     {
6437         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6438         {
6439             assert(!compIsForInlining());
6440
6441             // We first call a special helper to get the statics base pointer
6442             op1 = impParentClassTokenToHandle(pResolvedToken);
6443
6444             // compIsForInlining() is false, so we should never get NULL here
6445             assert(op1 != nullptr);
6446
6447             var_types type = TYP_BYREF;
6448
6449             switch (pFieldInfo->helper)
6450             {
6451                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6452                     type = TYP_I_IMPL;
6453                     break;
6454                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6455                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6456                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6457                     break;
6458                 default:
6459                     assert(!"unknown generic statics helper");
6460                     break;
6461             }
6462
6463             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6464
6465             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6466             op1              = gtNewOperNode(GT_ADD, type, op1,
6467                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6468         }
6469         break;
6470
6471         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6472         {
6473 #ifdef FEATURE_READYTORUN_COMPILER
6474             if (opts.IsReadyToRun())
6475             {
6476                 unsigned callFlags = 0;
6477
6478                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6479                 {
6480                     callFlags |= GTF_CALL_HOISTABLE;
6481                 }
6482
6483                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6484                 op1->gtFlags |= callFlags;
6485
6486                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6487             }
6488             else
6489 #endif
6490             {
6491                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6492             }
6493
6494             {
6495                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6496                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6497                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6498             }
6499             break;
6500         }
6501
6502         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6503         {
6504 #ifdef FEATURE_READYTORUN_COMPILER
6505             noway_assert(opts.IsReadyToRun());
6506             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6507             assert(kind.needsRuntimeLookup);
6508
6509             GenTree*        ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6510             GenTreeArgList* args    = gtNewArgList(ctxTree);
6511
6512             unsigned callFlags = 0;
6513
6514             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6515             {
6516                 callFlags |= GTF_CALL_HOISTABLE;
6517             }
6518             var_types type = TYP_BYREF;
6519             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6520             op1->gtFlags |= callFlags;
6521
6522             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6523             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6524             op1              = gtNewOperNode(GT_ADD, type, op1,
6525                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6526 #else
6527             unreached();
6528 #endif // FEATURE_READYTORUN_COMPILER
6529         }
6530         break;
6531
6532         default:
6533         {
6534             if (!(access & CORINFO_ACCESS_ADDRESS))
6535             {
6536                 // In future, it may be better to just create the right tree here instead of folding it later.
6537                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6538
6539                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6540                 {
6541                     op1->gtFlags |= GTF_FLD_INITCLASS;
6542                 }
6543
6544                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6545                 {
6546                     op1->gtType = TYP_REF; // points at boxed object
6547                     FieldSeqNode* firstElemFldSeq =
6548                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6549                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6550                                         new (this, GT_CNS_INT)
6551                                             GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
6552
6553                     if (varTypeIsStruct(lclTyp))
6554                     {
6555                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
6556                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
6557                     }
6558                     else
6559                     {
6560                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6561                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6562                     }
6563                 }
6564
6565                 return op1;
6566             }
6567             else
6568             {
6569                 void** pFldAddr = nullptr;
6570                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6571
6572                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6573
6574                 /* Create the data member node */
6575                 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
6576                                           fldSeq);
6577
6578                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6579                 {
6580                     op1->gtFlags |= GTF_ICON_INITCLASS;
6581                 }
6582
6583                 if (pFldAddr != nullptr)
6584                 {
6585                     // There are two cases here: either the static is RVA-based,
6586                     // in which case the type of the FIELD node is not a GC type
6587                     // and the handle to the RVA is a TYP_I_IMPL; or the FIELD node is
6588                     // a GC type and the handle to it is a TYP_BYREF into the GC heap,
6589                     // because handles to statics now go into the large object heap.
6590
6591                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6592                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6593                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6594                 }
6595             }
6596             break;
6597         }
6598     }
6599
6600     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6601     {
6602         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6603
6604         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6605
6606         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6607                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
6608     }
6609
6610     if (!(access & CORINFO_ACCESS_ADDRESS))
6611     {
6612         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6613         op1->gtFlags |= GTF_GLOB_REF;
6614     }
6615
6616     return op1;
6617 }
6618
6619 // In general try to call this before most of the verification work.  Most people expect the access
6620 // exceptions before the verification exceptions.  If you do this after, that usually doesn't happen.  It turns
6621 // out that if you can't access something, we also think that you're unverifiable for other reasons.
6622 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6623 {
6624     if (result != CORINFO_ACCESS_ALLOWED)
6625     {
6626         impHandleAccessAllowedInternal(result, helperCall);
6627     }
6628 }
6629
6630 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6631 {
6632     switch (result)
6633     {
6634         case CORINFO_ACCESS_ALLOWED:
6635             break;
6636         case CORINFO_ACCESS_ILLEGAL:
6637             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6638             // method is verifiable.  Otherwise, delay the exception to runtime.
6639             if (compIsForImportOnly())
6640             {
6641                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6642             }
6643             else
6644             {
6645                 impInsertHelperCall(helperCall);
6646             }
6647             break;
6648         case CORINFO_ACCESS_RUNTIME_CHECK:
6649             impInsertHelperCall(helperCall);
6650             break;
6651     }
6652 }
6653
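//------------------------------------------------------------------------
// impInsertHelperCall: build the helper call described by a
//    CORINFO_HELPER_DESC (materializing its embedded arguments) and append
//    it to the current statement list.
//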
6654 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6655 {
6656     // Construct the argument list
6657     GenTreeArgList* args = nullptr;
6658     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6659     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6660     {
6661         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6662         GenTree*                  currentArg = nullptr;
6663         switch (helperArg.argType)
6664         {
6665             case CORINFO_HELPER_ARG_TYPE_Field:
6666                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6667                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6668                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6669                 break;
6670             case CORINFO_HELPER_ARG_TYPE_Method:
6671                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6672                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6673                 break;
6674             case CORINFO_HELPER_ARG_TYPE_Class:
6675                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6676                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6677                 break;
6678             case CORINFO_HELPER_ARG_TYPE_Module:
6679                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6680                 break;
6681             case CORINFO_HELPER_ARG_TYPE_Const:
6682                 currentArg = gtNewIconNode(helperArg.constant);
6683                 break;
6684             default:
6685                 NO_WAY("Illegal helper arg type");
6686         }
6687         args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6688     }
6689
6690     /* TODO-Review:
6691      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6692      * Also, consider sticking this in the first basic block.
6693      */
6694     GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
6695     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6696 }
6697
6698 // Checks whether the return types of caller and callee are compatible
6699 // so that callee can be tail called. Note that here we don't check
6700 // compatibility in IL Verifier sense, but on the lines of return type
6701 // sizes are equal and get returned in the same return register.
6702 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6703                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6704                                             var_types            calleeRetType,
6705                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6706 {
6707     // Note that we can not relax this condition with genActualType() as the
6708     // calling convention dictates that the caller of a function with a small
6709     // typed return value is responsible for normalizing the return val.
6710     if (callerRetType == calleeRetType)
6711     {
6712         return true;
6713     }
6714
6715     // If the class handles are the same and not null, the return types are compatible.
6716     if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
6717     {
6718         return true;
6719     }
6720
6721 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6722     // Jit64 compat:
6723     if (callerRetType == TYP_VOID)
6724     {
6725         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6726         //     tail.call
6727         //     pop
6728         //     ret
6729         //
6730         // Note that the above IL pattern is not valid as per IL verification rules.
6731         // Therefore, only full trust code can take advantage of this pattern.
6732         return true;
6733     }
6734
6735     // These checks return true if the return value type sizes are the same and
6736     // the values are returned in the same return register, i.e. the caller doesn't
6737     // need to normalize the return value. Some of the tail calls permitted by the
6738     // checks below would have been rejected by the IL Verifier before we reached
6739     // here.  Therefore, only full trust code can make those tail calls.
6740     unsigned callerRetTypeSize = 0;
6741     unsigned calleeRetTypeSize = 0;
6742     bool     isCallerRetTypMBEnreg =
6743         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6744     bool isCalleeRetTypMBEnreg =
6745         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6746
6747     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6748     {
6749         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6750     }
6751 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6752
6753     return false;
6754 }
6755
6756 // For prefixFlags
6757 enum
6758 {
6759     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6760     PREFIX_TAILCALL_IMPLICIT =
6761         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6762     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6763     PREFIX_VOLATILE    = 0x00000100,
6764     PREFIX_UNALIGNED   = 0x00001000,
6765     PREFIX_CONSTRAINED = 0x00010000,
6766     PREFIX_READONLY    = 0x00100000
6767 };
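// Illustrative example: an IL call site written with an explicit "tail." prefix
// (e.g. "tail. call void Foo::Bar()", where Foo::Bar is a hypothetical method)
// reaches impImportCall with PREFIX_TAILCALL_EXPLICIT set in prefixFlags, whereas a
// call that merely matches the call(+pop)+ret pattern while opts.compTailCallOpt is
// enabled is treated as PREFIX_TAILCALL_IMPLICIT instead.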
6768
6769 /********************************************************************************
6770  *
6771  * Returns true if the current opcode and the opcodes following it correspond
6772  * to a supported tail call IL pattern.
6773  *
6774  */
6775 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6776                                       OPCODE      curOpcode,
6777                                       const BYTE* codeAddrOfNextOpcode,
6778                                       const BYTE* codeEnd,
6779                                       bool        isRecursive,
6780                                       bool*       isCallPopAndRet /* = nullptr */)
6781 {
6782     // Bail out if the current opcode is not a call.
6783     if (!impOpcodeIsCallOpcode(curOpcode))
6784     {
6785         return false;
6786     }
6787
6788 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6789     // If shared ret tail opt is not enabled, we will enable
6790     // it for recursive methods.
6791     if (isRecursive)
6792 #endif
6793     {
6794         // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
6795         // part of the sequence. Make sure we don't go past the end of the IL, however.
6796         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6797     }
6798
6799     // Bail out if there is no next opcode after call
6800     if (codeAddrOfNextOpcode >= codeEnd)
6801     {
6802         return false;
6803     }
6804
6805     // Scan the opcodes to look for the following IL patterns if either
6806     //   i) the call is not tail prefixed (i.e. implicit tail call), or
6807     //  ii) the call is tail prefixed but IL verification is not needed for the method.
6808     //
6809     // Only in these two cases can we allow the tail call patterns below,
6810     // which violate the ECMA spec.
6811     //
6812     // Pattern1:
6813     //       call
6814     //       nop*
6815     //       ret
6816     //
6817     // Pattern2:
6818     //       call
6819     //       nop*
6820     //       pop
6821     //       nop*
6822     //       ret
6823     int    cntPop = 0;
6824     OPCODE nextOpcode;
6825
6826 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6827     do
6828     {
6829         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6830         codeAddrOfNextOpcode += sizeof(__int8);
6831     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6832              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6833              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6834                                                                                          // one pop seen so far.
6835 #else
6836     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6837 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6838
6839     if (isCallPopAndRet)
6840     {
6841         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6842         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6843     }
6844
6845 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6846     // Jit64 Compat:
6847     // Tail call IL pattern could be either of the following
6848     // 1) call/callvirt/calli + ret
6849     // 2) call/callvirt/calli + pop + ret in a method returning void.
6850     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6851 #else
6852     return (nextOpcode == CEE_RET) && (cntPop == 0);
6853 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6854 }
6855
6856 /*****************************************************************************
6857  *
6858  * Determine whether the call could be converted to an implicit tail call
6859  *
6860  */
6861 bool Compiler::impIsImplicitTailCallCandidate(
6862     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6863 {
6864
6865 #if FEATURE_TAILCALL_OPT
6866     if (!opts.compTailCallOpt)
6867     {
6868         return false;
6869     }
6870
6871     if (opts.compDbgCode || opts.MinOpts())
6872     {
6873         return false;
6874     }
6875
6876     // must not be tail prefixed
6877     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6878     {
6879         return false;
6880     }
6881
6882 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6883     // the block containing the call must be marked as BBJ_RETURN.
6884     // We allow shared ret tail call optimization on recursive calls even under
6885     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6886     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6887         return false;
6888 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6889
6890     // must be call+ret or call+pop+ret
6891     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6892     {
6893         return false;
6894     }
6895
6896     return true;
6897 #else
6898     return false;
6899 #endif // FEATURE_TAILCALL_OPT
6900 }
6901
6902 //------------------------------------------------------------------------
6903 // impImportCall: import a call-inspiring opcode
6904 //
6905 // Arguments:
6906 //    opcode                    - opcode that inspires the call
6907 //    pResolvedToken            - resolved token for the call target
6908 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6909 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6910 //    prefixFlags               - IL prefix flags for the call
6911 //    callInfo                  - EE supplied info for the call
6912 //    rawILOffset               - IL offset of the opcode
6913 //
6914 // Returns:
6915 //    Type of the call's return value.
6916 //    If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
6917 //    However we can't assert for this here yet because there are cases we miss. See issue #13272.
6918 //
6919 //
6920 // Notes:
6921 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6922 //
6923 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6924 //    uninitialized object.
6925
6926 #ifdef _PREFAST_
6927 #pragma warning(push)
6928 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6929 #endif
6930
6931 var_types Compiler::impImportCall(OPCODE                  opcode,
6932                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6933                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6934                                   GenTree*                newobjThis,
6935                                   int                     prefixFlags,
6936                                   CORINFO_CALL_INFO*      callInfo,
6937                                   IL_OFFSET               rawILOffset)
6938 {
6939     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6940
6941     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6942     var_types              callRetTyp                     = TYP_COUNT;
6943     CORINFO_SIG_INFO*      sig                            = nullptr;
6944     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6945     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6946     unsigned               clsFlags                       = 0;
6947     unsigned               mflags                         = 0;
6948     unsigned               argFlags                       = 0;
6949     GenTree*               call                           = nullptr;
6950     GenTreeArgList*        args                           = nullptr;
6951     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6952     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6953     bool                   exactContextNeedsRuntimeLookup = false;
6954     bool                   canTailCall                    = true;
6955     const char*            szCanTailCallFailReason        = nullptr;
6956     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6957     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6958
6959     CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
6960
6961     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6962     // do that before tailcalls, but that is probably not the intended
6963     // semantic. So just disallow tailcalls from synchronized methods.
6964     // Also, popping arguments in a varargs function is more work and is NYI.
6965     // If we have a security object, we have to keep our frame around for callers
6966     // to see any imperative security.
6967     if (info.compFlags & CORINFO_FLG_SYNCH)
6968     {
6969         canTailCall             = false;
6970         szCanTailCallFailReason = "Caller is synchronized";
6971     }
6972 #if !FEATURE_FIXED_OUT_ARGS
6973     else if (info.compIsVarArgs)
6974     {
6975         canTailCall             = false;
6976         szCanTailCallFailReason = "Caller is varargs";
6977     }
6978 #endif // FEATURE_FIXED_OUT_ARGS
6979     else if (opts.compNeedSecurityCheck)
6980     {
6981         canTailCall             = false;
6982         szCanTailCallFailReason = "Caller requires a security check.";
6983     }
6984
6985     // We only need to cast the return value of pinvoke inlined calls that return small types
6986
6987     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6988     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6989     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6990     // the time being that the callee might be compiled by the other JIT and thus the return
6991     // value will need to be widened by us (or not widened at all...)
6992
6993     // ReadyToRun code sticks with default calling convention that does not widen small return types.
6994
6995     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6996     bool bIntrinsicImported = false;
6997
6998     CORINFO_SIG_INFO calliSig;
6999     GenTreeArgList*  extraArg = nullptr;
7000
7001     /*-------------------------------------------------------------------------
7002      * First create the call node
7003      */
7004
7005     if (opcode == CEE_CALLI)
7006     {
7007         /* Get the call site sig */
7008         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
7009
7010         callRetTyp = JITtype2varType(calliSig.retType);
7011
7012         call = impImportIndirectCall(&calliSig, ilOffset);
7013
7014         // We don't know the target method, so we have to infer the flags, or
7015         // assume the worst-case.
7016         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
7017
7018 #ifdef DEBUG
7019         if (verbose)
7020         {
7021             unsigned structSize =
7022                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
7023             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7024                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7025         }
7026 #endif
7027         // This should be checked in impImportBlockCode.
7028         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
7029
7030         sig = &calliSig;
7031
7032 #ifdef DEBUG
7033         // We cannot lazily obtain the signature of a CALLI call because it has no method
7034         // handle that we can use, so we need to save its full call signature here.
7035         assert(call->gtCall.callSig == nullptr);
7036         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7037         *call->gtCall.callSig = calliSig;
7038 #endif // DEBUG
7039
7040         if (IsTargetAbi(CORINFO_CORERT_ABI))
7041         {
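            // Note (assumption about the CoreRT ABI): a calli through a managed calling
            // convention may dispatch through a "fat" function pointer that carries an
            // extra generic instantiation argument, so such calls are flagged as
            // fat-pointer candidates below; unmanaged conventions (stdcall, cdecl,
            // thiscall, fastcall) are excluded.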
7042             bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
7043                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
7044                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
7045                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
7046             if (managedCall)
7047             {
7048                 addFatPointerCandidate(call->AsCall());
7049             }
7050         }
7051     }
7052     else // (opcode != CEE_CALLI)
7053     {
7054         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
7055
7056         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
7057         // supply the instantiation parameters necessary to make direct calls to underlying
7058         // shared generic code, rather than calling through instantiating stubs.  If the
7059         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
7060         // must indeed pass an instantiation parameter.
7061
7062         methHnd = callInfo->hMethod;
7063
7064         sig        = &(callInfo->sig);
7065         callRetTyp = JITtype2varType(sig->retType);
7066
7067         mflags = callInfo->methodFlags;
7068
7069 #ifdef DEBUG
7070         if (verbose)
7071         {
7072             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
7073             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7074                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7075         }
7076 #endif
7077         if (compIsForInlining())
7078         {
7079             /* Does this call site have security boundary restrictions? */
7080
7081             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
7082             {
7083                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
7084                 return TYP_UNDEF;
7085             }
7086
7087             /* Does the inlinee need a security check token on the frame */
7088
7089             if (mflags & CORINFO_FLG_SECURITYCHECK)
7090             {
7091                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7092                 return TYP_UNDEF;
7093             }
7094
7095             /* Does the inlinee use StackCrawlMark */
7096
7097             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
7098             {
7099                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
7100                 return TYP_UNDEF;
7101             }
7102
7103             /* For now ignore delegate invoke */
7104
7105             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7106             {
7107                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
7108                 return TYP_UNDEF;
7109             }
7110
7111             /* For now ignore varargs */
7112             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7113             {
7114                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
7115                 return TYP_UNDEF;
7116             }
7117
7118             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7119             {
7120                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
7121                 return TYP_UNDEF;
7122             }
7123
7124             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
7125             {
7126                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
7127                 return TYP_UNDEF;
7128             }
7129         }
7130
7131         clsHnd = pResolvedToken->hClass;
7132
7133         clsFlags = callInfo->classFlags;
7134
7135 #ifdef DEBUG
7136         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
7137
7138         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
7139         // These should be in mscorlib.h, and available through a JIT/EE interface call.
7140         const char* modName;
7141         const char* className;
7142         const char* methodName;
7143         if ((className = eeGetClassName(clsHnd)) != nullptr &&
7144             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
7145             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
7146         {
7147             return impImportJitTestLabelMark(sig->numArgs);
7148         }
7149 #endif // DEBUG
7150
7151         // <NICE> Factor this into getCallInfo </NICE>
7152         bool isSpecialIntrinsic = false;
7153         if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
7154         {
7155             const bool isTail = canTailCall && (tailCall != 0);
7156
7157             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, readonlyCall, isTail,
7158                                 pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID, &isSpecialIntrinsic);
7159
7160             if (compDonotInline())
7161             {
7162                 return TYP_UNDEF;
7163             }
7164
7165             if (call != nullptr)
7166             {
7167                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
7168                        (clsFlags & CORINFO_FLG_FINAL));
7169
7170 #ifdef FEATURE_READYTORUN_COMPILER
7171                 if (call->OperGet() == GT_INTRINSIC)
7172                 {
7173                     if (opts.IsReadyToRun())
7174                     {
7175                         noway_assert(callInfo->kind == CORINFO_CALL);
7176                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
7177                     }
7178                     else
7179                     {
7180                         call->gtIntrinsic.gtEntryPoint.addr       = nullptr;
7181                         call->gtIntrinsic.gtEntryPoint.accessType = IAT_VALUE;
7182                     }
7183                 }
7184 #endif
7185
7186                 bIntrinsicImported = true;
7187                 goto DONE_CALL;
7188             }
7189         }
7190
7191 #ifdef FEATURE_SIMD
7192         if (featureSIMD)
7193         {
7194             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
7195             if (call != nullptr)
7196             {
7197                 bIntrinsicImported = true;
7198                 goto DONE_CALL;
7199             }
7200         }
7201 #endif // FEATURE_SIMD
7202
7203         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
7204         {
7205             NO_WAY("Virtual call to a function added via EnC is not supported");
7206         }
7207
7208         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
7209             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7210             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
7211         {
7212             BADCODE("Bad calling convention");
7213         }
7214
7215         //-------------------------------------------------------------------------
7216         //  Construct the call node
7217         //
7218         // Work out what sort of call we're making.
7219         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
7220
7221         constraintCallThisTransform    = callInfo->thisTransform;
7222         exactContextHnd                = callInfo->contextHandle;
7223         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
7224
7225         // A recursive call is treated as a loop back to the beginning of the method.
7226         if (gtIsRecursiveCall(methHnd))
7227         {
7228 #ifdef DEBUG
7229             if (verbose)
7230             {
7231                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
7232                         fgFirstBB->bbNum, compCurBB->bbNum);
7233             }
7234 #endif
7235             fgMarkBackwardJump(fgFirstBB, compCurBB);
7236         }
7237
7238         switch (callInfo->kind)
7239         {
7240
7241             case CORINFO_VIRTUALCALL_STUB:
7242             {
7243                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7244                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7245                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
7246                 {
7247
7248                     if (compIsForInlining())
7249                     {
7250                         // Don't import runtime lookups when inlining
7251                         // Inlining has to be aborted in such a case
7252                         /* XXX Fri 3/20/2009
7253                          * By the way, this would never succeed.  If the handle lookup is into the generic
7254                          * dictionary for a candidate, you'll generate different dictionary offsets and the
7255                          * inlined code will crash.
7256                          *
7257                          * To anyone reviewing this code: when could this ever succeed in the future?  It'll
7258                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
7259                          * failing here.
7260                          */
7261                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
7262                         return TYP_UNDEF;
7263                     }
7264
7265                     GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
7266                     assert(!compDonotInline());
7267
7268                     // This is the rough code to set up an indirect stub call
7269                     assert(stubAddr != nullptr);
7270
7271                     // The stubAddr may be a complex expression. As it is evaluated
7272                     // after the args, it may cause registered args to be spilled.
7273                     // Simply spill it.
7274
7275                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
7276                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
7277                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7278
7279                     // Create the actual call node
7280
7281                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7282                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7283
7284                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
7285
7286                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
7287                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7288
7289 #ifdef _TARGET_X86_
7290                     // No tailcalls allowed for these yet...
7291                     canTailCall             = false;
7292                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
7293 #endif
7294                 }
7295                 else
7296                 {
7297                     // OK, the stub is available at compile time.
7298
7299                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7300                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
7301                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7302                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
7303                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
7304                     {
7305                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
7306                     }
7307                 }
7308
7309 #ifdef FEATURE_READYTORUN_COMPILER
7310                 if (opts.IsReadyToRun())
7311                 {
7312                     // Null check is sometimes needed for ready to run to handle
7313                     // non-virtual <-> virtual changes between versions
7314                     if (callInfo->nullInstanceCheck)
7315                     {
7316                         call->gtFlags |= GTF_CALL_NULLCHECK;
7317                     }
7318                 }
7319 #endif
7320
7321                 break;
7322             }
7323
7324             case CORINFO_VIRTUALCALL_VTABLE:
7325             {
7326                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7327                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7328                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7329                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
7330                 break;
7331             }
7332
7333             case CORINFO_VIRTUALCALL_LDVIRTFTN:
7334             {
7335                 if (compIsForInlining())
7336                 {
7337                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
7338                     return TYP_UNDEF;
7339                 }
7340
7341                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7342                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7343                 // OK, we've been told to call via LDVIRTFTN, so just
7344                 // take the call now....
7345
7346                 args = impPopList(sig->numArgs, sig);
7347
7348                 GenTree* thisPtr = impPopStack().val;
7349                 thisPtr          = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7350                 assert(thisPtr != nullptr);
7351
7352                 // Clone the (possibly transformed) "this" pointer
7353                 GenTree* thisPtrCopy;
7354                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7355                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
7356
7357                 GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7358                 assert(fptr != nullptr);
7359
7360                 thisPtr = nullptr; // can't reuse it
7361
7362                 // Now make an indirect call through the function pointer
7363
7364                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7365                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7366                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7367
7368                 // Create the actual call node
7369
7370                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7371                 call->gtCall.gtCallObjp = thisPtrCopy;
7372                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7373
7374                 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7375                 {
7376                     // CoreRT generic virtual method: need to handle potential fat function pointers
7377                     addFatPointerCandidate(call->AsCall());
7378                 }
7379 #ifdef FEATURE_READYTORUN_COMPILER
7380                 if (opts.IsReadyToRun())
7381                 {
7382                     // Null check is needed for ready to run to handle
7383                     // non-virtual <-> virtual changes between versions
7384                     call->gtFlags |= GTF_CALL_NULLCHECK;
7385                 }
7386 #endif
7387
7388                 // Since we are jumping over some code, check that it's OK to skip that code
7389                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7390                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7391                 goto DONE;
7392             }
7393
7394             case CORINFO_CALL:
7395             {
7396                 // This is for a non-virtual, non-interface etc. call
7397                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7398
7399                 // We remove the nullcheck for the GetType call intrinsic.
7400                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7401                 // and intrinsics.
7402                 if (callInfo->nullInstanceCheck &&
7403                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7404                 {
7405                     call->gtFlags |= GTF_CALL_NULLCHECK;
7406                 }
7407
7408 #ifdef FEATURE_READYTORUN_COMPILER
7409                 if (opts.IsReadyToRun())
7410                 {
7411                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7412                 }
7413 #endif
7414                 break;
7415             }
7416
7417             case CORINFO_CALL_CODE_POINTER:
7418             {
7419                 // The EE has asked us to call by computing a code pointer and then doing an
7420                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
7421
7422                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7423                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7424
7425                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7426                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7427
7428                 GenTree* fptr =
7429                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7430
7431                 if (compDonotInline())
7432                 {
7433                     return TYP_UNDEF;
7434                 }
7435
7436                 // Now make an indirect call through the function pointer
7437
7438                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7439                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7440                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7441
7442                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7443                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7444                 if (callInfo->nullInstanceCheck)
7445                 {
7446                     call->gtFlags |= GTF_CALL_NULLCHECK;
7447                 }
7448
7449                 break;
7450             }
7451
7452             default:
7453                 assert(!"unknown call kind");
7454                 break;
7455         }
7456
7457         //-------------------------------------------------------------------------
7458         // Set more flags
7459
7460         PREFIX_ASSUME(call != nullptr);
7461
7462         if (mflags & CORINFO_FLG_NOGCCHECK)
7463         {
7464             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7465         }
7466
7467         // Mark call if it's one of the ones we will maybe treat as an intrinsic
7468         if (isSpecialIntrinsic)
7469         {
7470             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7471         }
7472     }
7473     assert(sig);
7474     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7475
7476     /* Some sanity checks */
7477
7478     // CALL_VIRT and NEWOBJ must have a THIS pointer
7479     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7480     // static bit and hasThis are negations of one another
7481     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7482     assert(call != nullptr);
7483
7484     /*-------------------------------------------------------------------------
7485      * Check special-cases etc
7486      */
7487
7488     /* Special case - Check if it is a call to Delegate.Invoke(). */
7489
7490     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7491     {
7492         assert(!compIsForInlining());
7493         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7494         assert(mflags & CORINFO_FLG_FINAL);
7495
7496         /* Set the delegate flag */
7497         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7498
7499         if (callInfo->secureDelegateInvoke)
7500         {
7501             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7502         }
7503
7504         if (opcode == CEE_CALLVIRT)
7505         {
7506             assert(mflags & CORINFO_FLG_FINAL);
7507
7508             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7509             assert(call->gtFlags & GTF_CALL_NULLCHECK);
7510             call->gtFlags &= ~GTF_CALL_NULLCHECK;
7511         }
7512     }
7513
7514     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7515     actualMethodRetTypeSigClass = sig->retTypeSigClass;
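    // If the call returns a struct, normalize its type below; impNormStructType may
    // retype certain structs (e.g. recognized SIMD vector types) to a more specific
    // var_types, and the call node is retyped to match.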
7516     if (varTypeIsStruct(callRetTyp))
7517     {
7518         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
7519         call->gtType = callRetTyp;
7520     }
7521
7522 #if !FEATURE_VARARG
7523     /* Check for varargs */
7524     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7525         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7526     {
7527         BADCODE("Varargs not supported.");
7528     }
7529 #endif // !FEATURE_VARARG
7530
7531 #ifdef UNIX_X86_ABI
7532     if (call->gtCall.callSig == nullptr)
7533     {
7534         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7535         *call->gtCall.callSig = *sig;
7536     }
7537 #endif // UNIX_X86_ABI
7538
7539     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7540         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7541     {
7542         assert(!compIsForInlining());
7543
7544         /* Set the right flags */
7545
7546         call->gtFlags |= GTF_CALL_POP_ARGS;
7547         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7548
7549         /* Can't allow tailcall for varargs as it is caller-pop. The caller
7550            will be expecting to pop a certain number of arguments, but if we
7551            tailcall to a function with a different number of arguments, we
7552            are hosed. There are ways around this (caller remembers esp value,
7553            varargs is not caller-pop, etc), but not worth it. */
7554         CLANG_FORMAT_COMMENT_ANCHOR;
7555
7556 #ifdef _TARGET_X86_
7557         if (canTailCall)
7558         {
7559             canTailCall             = false;
7560             szCanTailCallFailReason = "Callee is varargs";
7561         }
7562 #endif
7563
7564         /* Get the total number of arguments - this is already correct
7565          * for CALLI - for methods we have to get it from the call site */
7566
7567         if (opcode != CEE_CALLI)
7568         {
7569 #ifdef DEBUG
7570             unsigned numArgsDef = sig->numArgs;
7571 #endif
7572             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7573
7574 #ifdef DEBUG
7575             // We cannot lazily obtain the signature of a vararg call because using its method
7576             // handle will give us only the declared argument list, not the full argument list.
7577             assert(call->gtCall.callSig == nullptr);
7578             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7579             *call->gtCall.callSig = *sig;
7580 #endif
7581
7582             // For vararg calls we must be sure to load the return type of the
7583             // method actually being called, as well as the return types
7584             // specified in the vararg signature. With type equivalency, these types
7585             // may not be the same.
7586             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7587             {
7588                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7589                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7590                     sig->retType != CORINFO_TYPE_VAR)
7591                 {
7592                     // Make sure that all valuetypes (including enums) that we push are loaded.
7593                     // This is to guarantee that if a GC is triggered from the prestub of this method,
7594                     // all valuetypes in the method signature are already loaded.
7595                     // We need to be able to find the size of the valuetypes, but we cannot
7596                     // do a class-load from within GC.
7597                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7598                 }
7599             }
7600
7601             assert(numArgsDef <= sig->numArgs);
7602         }
7603
7604         /* We will have "cookie" as the last argument but we cannot push
7605          * it on the operand stack because we may overflow, so we append it
7606          * to the arg list after we pop the other arguments */
7607     }
7608
7609     if (mflags & CORINFO_FLG_SECURITYCHECK)
7610     {
7611         assert(!compIsForInlining());
7612
7613         // Need security prolog/epilog callouts when there is
7614         // imperative security in the method. This is to give security a
7615         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7616
7617         if (compIsForInlining())
7618         {
7619             // Cannot handle this if the method being imported is itself an inlinee,
7620             // because an inlinee method does not have its own frame.
7621
7622             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7623             return TYP_UNDEF;
7624         }
7625         else
7626         {
7627             tiSecurityCalloutNeeded = true;
7628
7629             // If the current method calls a method which needs a security check,
7630             // (i.e. the method being compiled has imperative security)
7631             // we need to reserve a slot for the security object in
7632             // the current method's stack frame
7633             opts.compNeedSecurityCheck = true;
7634         }
7635     }
7636
7637     //--------------------------- Inline NDirect ------------------------------
7638
7639     // For inline cases we technically should look at both the current
7640     // block and the call site block (or just the latter if we've
7641     // fused the EH trees). However the block-related checks pertain to
7642     // EH and we currently won't inline a method with EH. So for
7643     // inlinees, just checking the call site block is sufficient.
7644     {
7645         // New lexical block here to avoid compilation errors because of GOTOs.
7646         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7647         impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
7648     }
7649
7650     if (call->gtFlags & GTF_CALL_UNMANAGED)
7651     {
7652         // We set up the unmanaged call by linking the frame, disabling GC, etc
7653         // This needs to be cleaned up on return
7654         if (canTailCall)
7655         {
7656             canTailCall             = false;
7657             szCanTailCallFailReason = "Callee is native";
7658         }
7659
7660         checkForSmallType = true;
7661
7662         impPopArgsForUnmanagedCall(call, sig);
7663
7664         goto DONE;
7665     }
7666     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7667                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7668                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7669                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7670     {
7671         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7672         {
7673             // Normally this only happens with inlining.
7674             // However, a generic method (or type) being NGENd into another module
7675             // can run into this issue as well.  There's no easy fall-back for NGEN,
7676             // so instead we fall back to the JIT.
7677             if (compIsForInlining())
7678             {
7679                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7680             }
7681             else
7682             {
7683                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7684             }
7685
7686             return TYP_UNDEF;
7687         }
7688
7689         GenTree* cookie = eeGetPInvokeCookie(sig);
7690
7691         // This cookie is required to be either a simple GT_CNS_INT or
7692         // an indirection of a GT_CNS_INT
7693         //
7694         GenTree* cookieConst = cookie;
7695         if (cookie->gtOper == GT_IND)
7696         {
7697             cookieConst = cookie->gtOp.gtOp1;
7698         }
7699         assert(cookieConst->gtOper == GT_CNS_INT);
7700
7701         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7702         // we won't allow this tree to participate in any CSE logic
7703         //
7704         cookie->gtFlags |= GTF_DONT_CSE;
7705         cookieConst->gtFlags |= GTF_DONT_CSE;
7706
7707         call->gtCall.gtCallCookie = cookie;
7708
7709         if (canTailCall)
7710         {
7711             canTailCall             = false;
7712             szCanTailCallFailReason = "PInvoke calli";
7713         }
7714     }
7715
7716     /*-------------------------------------------------------------------------
7717      * Create the argument list
7718      */
7719
7720     //-------------------------------------------------------------------------
7721     // Special case - for varargs we have an implicit last argument
7722
7723     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7724     {
7725         assert(!compIsForInlining());
7726
7727         void *varCookie, *pVarCookie;
7728         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7729         {
7730             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7731             return TYP_UNDEF;
7732         }
7733
7734         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
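        // Exactly one of varCookie (the handle itself) and pVarCookie (an indirection
        // to be resolved at run time) is expected to be non-null, as the assert below checks.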
7735         assert((!varCookie) != (!pVarCookie));
7736         GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
7737
7738         assert(extraArg == nullptr);
7739         extraArg = gtNewArgList(cookie);
7740     }
7741
7742     //-------------------------------------------------------------------------
7743     // Extra arg for shared generic code and array methods
7744     //
7745     // Extra argument containing instantiation information is passed in the
7746     // following circumstances:
7747     // (a) To the "Address" method on array classes; the extra parameter is
7748     //     the array's type handle (a TypeDesc)
7749     // (b) To shared-code instance methods in generic structs; the extra parameter
7750     //     is the struct's type handle (a vtable ptr)
7751     // (c) To shared-code per-instantiation non-generic static methods in generic
7752     //     classes and structs; the extra parameter is the type handle
7753     // (d) To shared-code generic methods; the extra parameter is an
7754     //     exact-instantiation MethodDesc
7755     //
7756     // We also set the exact type context associated with the call so we can
7757     // inline the call correctly later on.
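    // For example (illustrative, hypothetical types): a shared-code call to a static
    // method on a generic class, such as Helper<T>.Create(), passes the type handle of
    // the exact instantiation (e.g. Helper<string>) as this hidden extra argument (case (c)).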
7758
7759     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7760     {
7761         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7762         if (clsHnd == nullptr)
7763         {
7764             NO_WAY("CALLI on parameterized type");
7765         }
7766
7767         assert(opcode != CEE_CALLI);
7768
7769         GenTree* instParam;
7770         BOOL     runtimeLookup;
7771
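        // The flag bits of exactContextHnd (CORINFO_CONTEXTFLAGS_MASK) encode whether the
        // exact context is a method handle (an instantiated generic method) or a class
        // handle; the handle itself is recovered by masking those bits off.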
7772         // Instantiated generic method
7773         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7774         {
7775             CORINFO_METHOD_HANDLE exactMethodHandle =
7776                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7777
7778             if (!exactContextNeedsRuntimeLookup)
7779             {
7780 #ifdef FEATURE_READYTORUN_COMPILER
7781                 if (opts.IsReadyToRun())
7782                 {
7783                     instParam =
7784                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7785                     if (instParam == nullptr)
7786                     {
7787                         assert(compDonotInline());
7788                         return TYP_UNDEF;
7789                     }
7790                 }
7791                 else
7792 #endif
7793                 {
7794                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7795                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7796                 }
7797             }
7798             else
7799             {
7800                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7801                 if (instParam == nullptr)
7802                 {
7803                     assert(compDonotInline());
7804                     return TYP_UNDEF;
7805                 }
7806             }
7807         }
7808
7809         // otherwise must be an instance method in a generic struct,
7810         // a static method in a generic type, or a runtime-generated array method
7811         else
7812         {
7813             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7814             CORINFO_CLASS_HANDLE exactClassHandle =
7815                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7816
7817             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7818             {
7819                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7820                 return TYP_UNDEF;
7821             }
7822
7823             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7824             {
7825                 // We indicate "readonly" to the Address operation by using a null
7826                 // instParam.
7827                 instParam = gtNewIconNode(0, TYP_REF);
7828             }
7829             else if (!exactContextNeedsRuntimeLookup)
7830             {
7831 #ifdef FEATURE_READYTORUN_COMPILER
7832                 if (opts.IsReadyToRun())
7833                 {
7834                     instParam =
7835                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7836                     if (instParam == nullptr)
7837                     {
7838                         assert(compDonotInline());
7839                         return TYP_UNDEF;
7840                     }
7841                 }
7842                 else
7843 #endif
7844                 {
7845                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7846                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7847                 }
7848             }
7849             else
7850             {
7851                 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
7852                 // with which the call was constrained. We embed pConstrainedResolvedToken as the extra argument
7853                 // because pResolvedToken is an interface method and interface types make a poor generic context.
7854                 if (pConstrainedResolvedToken)
7855                 {
7856                     instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
7857                                                  FALSE /* importParent */);
7858                 }
7859                 else
7860                 {
7861                     instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7862                 }
7863
7864                 if (instParam == nullptr)
7865                 {
7866                     assert(compDonotInline());
7867                     return TYP_UNDEF;
7868                 }
7869             }
7870         }
7871
7872         assert(extraArg == nullptr);
7873         extraArg = gtNewArgList(instParam);
7874     }
7875
7876     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7877     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7878     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7879     // exactContextHnd is not currently required when inlining shared generic code into shared
7880     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7881     // (e.g. anything marked needsRuntimeLookup)
7882     if (exactContextNeedsRuntimeLookup)
7883     {
7884         exactContextHnd = nullptr;
7885     }
7886
7887     if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
7888     {
7889         // Only verifiable cases are supported.
7890         // dup; ldvirtftn; newobj; or ldftn; newobj.
7891         // An IL test could contain an unverifiable sequence; in that case the optimization should not be done.
7892         if (impStackHeight() > 0)
7893         {
7894             typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
7895             if (delegateTypeInfo.IsToken())
7896             {
7897                 ldftnToken = delegateTypeInfo.GetToken();
7898             }
7899         }
7900     }
7901
7902     //-------------------------------------------------------------------------
7903     // The main group of arguments
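    // Pop the declared arguments off the IL operand stack and combine them with any
    // extra cookie / instantiation argument built above to form the call's argument list.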
7904
7905     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
7906
7907     if (args)
7908     {
7909         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7910     }
7911
7912     //-------------------------------------------------------------------------
7913     // The "this" pointer
7914
7915     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7916     {
7917         GenTree* obj;
7918
7919         if (opcode == CEE_NEWOBJ)
7920         {
7921             obj = newobjThis;
7922         }
7923         else
7924         {
7925             obj = impPopStack().val;
7926             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7927             if (compDonotInline())
7928             {
7929                 return TYP_UNDEF;
7930             }
7931         }
7932
7933         // Store the "this" value in the call
7934         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7935         call->gtCall.gtCallObjp = obj;
7936
7937         // Is this a virtual or interface call?
7938         if (call->gtCall.IsVirtual())
7939         {
7940             // only true object pointers can be virtual
7941             assert(obj->gtType == TYP_REF);
7942
7943             // See if we can devirtualize.
7944             impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
7945                                 &exactContextHnd);
7946         }
7947
7948         if (impIsThis(obj))
7949         {
7950             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7951         }
7952     }
7953
7954     //-------------------------------------------------------------------------
7955     // The "this" pointer for "newobj"
7956
7957     if (opcode == CEE_NEWOBJ)
7958     {
7959         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7960         {
7961             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7962             // This is a 'new' of a variable sized object, where
7963             // the constructor is to return the object.  In this case
7964             // the constructor claims to return VOID but we know it
7965             // actually returns the new object
7966             assert(callRetTyp == TYP_VOID);
7967             callRetTyp   = TYP_REF;
7968             call->gtType = TYP_REF;
7969             impSpillSpecialSideEff();
7970
7971             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7972         }
7973         else
7974         {
7975             if (clsFlags & CORINFO_FLG_DELEGATE)
7976             {
7977                 // The new inliner morphs it here in impImportCall.
7978                 // This will allow us to inline the call to the delegate constructor.
7979                 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
7980             }
7981
7982             if (!bIntrinsicImported)
7983             {
7984
7985 #if defined(DEBUG) || defined(INLINE_DATA)
7986
7987                 // Keep track of the raw IL offset of the call
7988                 call->gtCall.gtRawILOffset = rawILOffset;
7989
7990 #endif // defined(DEBUG) || defined(INLINE_DATA)
7991
7992                 // Is it an inline candidate?
7993                 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7994             }
7995
7996             // append the call node.
7997             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7998
7999             // Now push the value of the 'new' onto the stack
8000
8001             // This is a 'new' of a non-variable sized object.
8002             // Append the new node (op1) to the statement list,
8003             // and then push the local holding the value of this
8004             // new instruction on the stack.
8005
8006             if (clsFlags & CORINFO_FLG_VALUECLASS)
8007             {
8008                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
8009
8010                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
8011                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
8012             }
8013             else
8014             {
8015                 if (newobjThis->gtOper == GT_COMMA)
8016                 {
8017                     // In coreclr the callout can be inserted even if verification is disabled
8018                     // so we cannot rely on tiVerificationNeeded alone
8019
8020                     // We must have inserted the callout. Get the real newobj.
8021                     newobjThis = newobjThis->gtOp.gtOp2;
8022                 }
8023
8024                 assert(newobjThis->gtOper == GT_LCL_VAR);
8025                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
8026             }
8027         }
8028         return callRetTyp;
8029     }
8030
8031 DONE:
8032
8033     if (tailCall)
8034     {
8035         // This check cannot be performed for implicit tail calls, because
8036         // impIsImplicitTailCallCandidate() does not check whether return
8037         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
8038         // As a result, in a case like the following, the type stack can be
8039         // non-empty when Callee() is considered for implicit
8040         // tail calling.
8041         //      int Caller(..) { .... void Callee(); ret val; ... }
8042         //
8043         // Note that we cannot check return type compatibility before impImportCall()
8044         // as we don't have the required info, or we would need to duplicate some of
8045         // the logic of impImportCall().
8046         //
8047         // For implicit tail calls, we perform this check after return types are
8048         // known to be compatible.
8049         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
8050         {
8051             BADCODE("Stack should be empty after tailcall");
8052         }
8053
8054         // Note that we cannot relax this condition with genActualType() as
8055         // the calling convention dictates that the caller of a function with
8056         // a small-typed return value is responsible for normalizing the return value.
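             //
             // Illustrative example (a sketch, not an exhaustive rule): if the caller is
             // declared to return 'int' but the callee returns 'short', the caller would
             // normally widen the small result after a regular call; a tail call would skip
             // that widening, so impTailCallRetTypeCompatible rejects the tail call below.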
8057
8058         if (canTailCall &&
8059             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
8060                                           callInfo->sig.retTypeClass))
8061         {
8062             canTailCall             = false;
8063             szCanTailCallFailReason = "Return types are not tail call compatible";
8064         }
8065
8066         // Stack empty check for implicit tail calls.
8067         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
8068         {
8069 #ifdef _TARGET_AMD64_
8070             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
8071             // in JIT64, not an InvalidProgramException.
8072             Verify(false, "Stack should be empty after tailcall");
8073 #else  // !_TARGET_AMD64_
8074             BADCODE("Stack should be empty after tailcall");
8075 #endif // !_TARGET_AMD64_
8076         }
8077
8078         // assert(compCurBB is not a catch, finally or filter block);
8079         // assert(compCurBB is not a try block protected by a finally block);
8080
8081         // Check for permission to tailcall
8082         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
8083
8084         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
8085
8086         if (canTailCall)
8087         {
8088             // For true virtual or indirect calls, don't pass in a callee handle.
8089             CORINFO_METHOD_HANDLE exactCalleeHnd =
8090                 ((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
8091             GenTree* thisArg = call->gtCall.gtCallObjp;
8092
8093             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
8094             {
8095                 canTailCall = true;
8096                 if (explicitTailCall)
8097                 {
8098                     // In case of explicit tail calls, mark it so that it is not considered
8099                     // for in-lining.
8100                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
8101 #ifdef DEBUG
8102                     if (verbose)
8103                     {
8104                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
8105                         printTreeID(call);
8106                         printf("\n");
8107                     }
8108 #endif
8109                 }
8110                 else
8111                 {
8112 #if FEATURE_TAILCALL_OPT
8113                     // Must be an implicit tail call.
8114                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
8115
8116                     // It is possible that a call node is both an inline candidate and marked
8117                     // for opportunistic tail calling.  In-lining happens before morphing of
8118                     // trees.  If in-lining of an in-line candidate gets aborted for whatever
8119                     // reason, it will survive to the morphing stage at which point it will be
8120                     // transformed into a tail call after performing additional checks.
8121
8122                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
8123 #ifdef DEBUG
8124                     if (verbose)
8125                     {
8126                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
8127                         printTreeID(call);
8128                         printf("\n");
8129                     }
8130 #endif
8131
8132 #else //! FEATURE_TAILCALL_OPT
8133                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
8134
8135 #endif // FEATURE_TAILCALL_OPT
8136                 }
8137
8138                 // we can't report success just yet...
8139             }
8140             else
8141             {
8142                 canTailCall = false;
8143 // canTailCall reported its reasons already
8144 #ifdef DEBUG
8145                 if (verbose)
8146                 {
8147                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
8148                     printTreeID(call);
8149                     printf("\n");
8150                 }
8151 #endif
8152             }
8153         }
8154         else
8155         {
8156             // If this assert fires it means that canTailCall was set to false without setting a reason!
8157             assert(szCanTailCallFailReason != nullptr);
8158
8159 #ifdef DEBUG
8160             if (verbose)
8161             {
8162                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
8163                 printTreeID(call);
8164                 printf(": %s\n", szCanTailCallFailReason);
8165             }
8166 #endif
8167             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
8168                                                      szCanTailCallFailReason);
8169         }
8170     }
8171
8172     // Note: we assume that small return types are already normalized by the managed callee
8173     // or by the pinvoke stub for calls to unmanaged code.
8174
8175     if (!bIntrinsicImported)
8176     {
8177         //
8178         // Things that need to be checked when bIntrinsicImported is false.
8179         //
8180
8181         assert(call->gtOper == GT_CALL);
8182         assert(sig != nullptr);
8183
8184         // Tail calls require us to save the call site's sig info so we can obtain an argument
8185         // copying thunk from the EE later on.
8186         if (call->gtCall.callSig == nullptr)
8187         {
8188             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8189             *call->gtCall.callSig = *sig;
8190         }
8191
8192         if (compIsForInlining() && opcode == CEE_CALLVIRT)
8193         {
8194             GenTree* callObj = call->gtCall.gtCallObjp;
8195             assert(callObj != nullptr);
8196
8197             if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
8198                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
8199                                                                    impInlineInfo->inlArgInfo))
8200             {
8201                 impInlineInfo->thisDereferencedFirst = true;
8202             }
8203         }
8204
8205 #if defined(DEBUG) || defined(INLINE_DATA)
8206
8207         // Keep track of the raw IL offset of the call
8208         call->gtCall.gtRawILOffset = rawILOffset;
8209
8210 #endif // defined(DEBUG) || defined(INLINE_DATA)
8211
8212         // Is it an inline candidate?
8213         impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8214     }
8215
8216 DONE_CALL:
8217     // Push or append the result of the call
8218     if (callRetTyp == TYP_VOID)
8219     {
8220         if (opcode == CEE_NEWOBJ)
8221         {
8222             // we actually did push something, so don't spill the thing we just pushed.
8223             assert(verCurrentState.esStackDepth > 0);
8224             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
8225         }
8226         else
8227         {
8228             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8229         }
8230     }
8231     else
8232     {
8233         impSpillSpecialSideEff();
8234
8235         if (clsFlags & CORINFO_FLG_ARRAY)
8236         {
8237             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8238         }
8239
8240         // Find the return type used for verification by interpreting the method signature.
8241         // NB: we are clobbering the already established sig.
8242         if (tiVerificationNeeded)
8243         {
8244             // Actually, we never get the sig for the original method.
8245             sig = &(callInfo->verSig);
8246         }
8247
8248         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
8249         tiRetVal.NormaliseForStack();
8250
8251         // The CEE_READONLY prefix modifies the verification semantics of an Address
8252         // operation on an array type.
8253         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
8254         {
8255             tiRetVal.SetIsReadonlyByRef();
8256         }
8257
8258         if (tiVerificationNeeded)
8259         {
8260             // We assume all calls return permanent home byrefs. If they
8261             // didn't they wouldn't be verifiable. This is also covering
8262             // the Address() helper for multidimensional arrays.
8263             if (tiRetVal.IsByRef())
8264             {
8265                 tiRetVal.SetIsPermanentHomeByRef();
8266             }
8267         }
8268
8269         if (call->IsCall())
8270         {
8271             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
8272
8273             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
8274             if (varTypeIsStruct(callRetTyp))
8275             {
8276                 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
8277             }
8278
8279             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
8280             {
8281                 assert(opts.OptEnabled(CLFLG_INLINING));
8282                 assert(!fatPointerCandidate); // We should not try to inline calli.
8283
8284                 // Make the call its own tree (spill the stack if needed).
8285                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8286
8287                 // TODO: Still using the widened type.
8288                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
8289             }
8290             else
8291             {
8292                 if (fatPointerCandidate)
8293                 {
8294                     // fatPointer candidates should be in statements of the form call() or var = call().
8295                     // This form lets us find statements with fat calls without walking whole trees,
8296                     // and avoids problems with splitting trees.
8297                     assert(!bIntrinsicImported);
8298                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
8299                     if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
8300                     {
8301                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
8302                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
8303                         varDsc->lvVerTypeInfo = tiRetVal;
8304                         impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
8305                         // impAssignTempGen can change the src arg list and return type for a call that returns a struct.
8306                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
8307                         call           = gtNewLclvNode(calliSlot, type);
8308                     }
8309                 }
8310
8311                 // For non-candidates we must also spill, since we
8312                 // might have locals live on the eval stack that this
8313                 // call can modify.
8314                 //
8315                 // Suppress this for certain well-known call targets
8316                 // that we know won't modify locals, e.g. calls that are
8317                 // recognized in gtCanOptimizeTypeEquality. Otherwise
8318                 // we may break key fragile pattern matches later on.
8319                 bool spillStack = true;
8320                 if (call->IsCall())
8321                 {
8322                     GenTreeCall* callNode = call->AsCall();
8323                     if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
8324                     {
8325                         spillStack = false;
8326                     }
8327                     else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8328                     {
8329                         spillStack = false;
8330                     }
8331                 }
8332
8333                 if (spillStack)
8334                 {
8335                     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8336                 }
8337             }
8338         }
8339
8340         if (!bIntrinsicImported)
8341         {
8342             //-------------------------------------------------------------------------
8343             //
8344             /* If the call is of a small type and the callee is managed, the callee will normalize the result
8345                 before returning.
8346                 However, we need to normalize small type values returned by unmanaged
8347                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8348                 if we use the shorter inlined pinvoke stub. */
8349
8350             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8351             {
8352                 call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
8353             }
8354         }
8355
8356         impPushOnStack(call, tiRetVal);
8357     }
8358
8359     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
8360     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8361     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8362     //  callInfoCache.uncacheCallInfo();
8363
8364     return callRetTyp;
8365 }
8366 #ifdef _PREFAST_
8367 #pragma warning(pop)
8368 #endif
8369
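     //------------------------------------------------------------------------
     // impMethodInfo_hasRetBuffArg: Check whether a method's struct return value
     //    is returned via a hidden return buffer argument.
     //
     //  Arguments:
     //    methInfo - method info for the callee
     //
     //  Return Value:
     //    true if the return type is a value class (or refany) that the ABI
     //    returns by reference (SPK_ByReference); false otherwise.
     //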
8370 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8371 {
8372     CorInfoType corType = methInfo->args.retType;
8373
8374     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8375     {
8376         // We have some kind of STRUCT being returned
8377
8378         structPassingKind howToReturnStruct = SPK_Unknown;
8379
8380         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8381
8382         if (howToReturnStruct == SPK_ByReference)
8383         {
8384             return true;
8385         }
8386     }
8387
8388     return false;
8389 }
8390
8391 #ifdef DEBUG
8392 //
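     // impImportJitTestLabelMark: (DEBUG-only test infrastructure) Import a call
     // that attaches a test label to a node. Pops the label arguments (a TestLabel
     // constant and, for the three-argument form, an extra number), pops the
     // expression being annotated, records the annotation in the node test data,
     // and pushes the expression back on the stack. Returns the type of the
     // annotated expression.
     //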
8393 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8394 {
8395     TestLabelAndNum tlAndN;
8396     if (numArgs == 2)
8397     {
8398         tlAndN.m_num  = 0;
8399         StackEntry se = impPopStack();
8400         assert(se.seTypeInfo.GetType() == TI_INT);
8401         GenTree* val = se.val;
8402         assert(val->IsCnsIntOrI());
8403         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8404     }
8405     else if (numArgs == 3)
8406     {
8407         StackEntry se = impPopStack();
8408         assert(se.seTypeInfo.GetType() == TI_INT);
8409         GenTree* val = se.val;
8410         assert(val->IsCnsIntOrI());
8411         tlAndN.m_num = val->AsIntConCommon()->IconValue();
8412         se           = impPopStack();
8413         assert(se.seTypeInfo.GetType() == TI_INT);
8414         val = se.val;
8415         assert(val->IsCnsIntOrI());
8416         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8417     }
8418     else
8419     {
8420         assert(false);
8421     }
8422
8423     StackEntry expSe = impPopStack();
8424     GenTree*   node  = expSe.val;
8425
8426     // There are a small number of special cases, where we actually put the annotation on a subnode.
8427     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8428     {
8429         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8430         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8431         // offset within the static field block whose address is returned by the helper call.
8432         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
8433         GenTree* helperCall = nullptr;
8434         assert(node->OperGet() == GT_IND);
8435         tlAndN.m_num -= 100;
8436         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8437         GetNodeTestData()->Remove(node);
8438     }
8439     else
8440     {
8441         GetNodeTestData()->Set(node, tlAndN);
8442     }
8443
8444     impPushOnStack(node, expSe.seTypeInfo);
8445     return node->TypeGet();
8446 }
8447 #endif // DEBUG
8448
8449 //-----------------------------------------------------------------------------------
8450 //  impFixupCallStructReturn: For a call node that returns a struct type either
8451 //  adjust the return type to an enregisterable type, or set the flag to indicate
8452 //  struct return via retbuf arg.
8453 //
8454 //  Arguments:
8455 //    call       -  GT_CALL GenTree node
8456 //    retClsHnd  -  Class handle of return type of the call
8457 //
8458 //  Return Value:
8459 //    Returns new GenTree node after fixing struct return of call node
8460 //
8461 GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8462 {
8463     if (!varTypeIsStruct(call))
8464     {
8465         return call;
8466     }
8467
8468     call->gtRetClsHnd = retClsHnd;
8469
8470 #if FEATURE_MULTIREG_RET
8471     // Initialize Return type descriptor of call node
8472     ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8473     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8474 #endif // FEATURE_MULTIREG_RET
8475
8476 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8477
8478     // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
8479     assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8480
8481     // The return type will remain as the incoming struct type unless normalized to a
8482     // single eightbyte return type below.
8483     call->gtReturnType = call->gtType;
8484
8485     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8486     if (retRegCount != 0)
8487     {
8488         if (retRegCount == 1)
8489         {
8490             // struct returned in a single register
8491             call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8492         }
8493         else
8494         {
8495             // must be a struct returned in two registers
8496             assert(retRegCount == 2);
8497
8498             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8499             {
8500                 // Force a call that returns a multi-reg struct to always have the IR form
8501                 //   tmp = call
8502                 //
8503                 // There is no need to assign a multi-reg struct to a local var if:
8504                 //  - It is a tail call, or
8505                 //  - The call is marked for in-lining later
8506                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8507             }
8508         }
8509     }
8510     else
8511     {
8512         // Struct not returned in registers, i.e., returned via a hidden retbuf arg.
8513         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8514     }
8515
8516 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8517
8518     // Check for TYP_STRUCT type that wraps a primitive type
8519     // Such structs are returned using a single register
8520     // and we change the return type on those calls here.
8521     //
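         // Illustrative example (an assumption for clarity, not taken from this code):
         // a call returning 'struct S { int x; }' typically yields returnType == TYP_INT
         // (and not SPK_ByReference), so call->gtReturnType is changed to TYP_INT below.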
8522     structPassingKind howToReturnStruct;
8523     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8524
8525     if (howToReturnStruct == SPK_ByReference)
8526     {
8527         assert(returnType == TYP_UNKNOWN);
8528         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8529     }
8530     else
8531     {
8532         assert(returnType != TYP_UNKNOWN);
8533         call->gtReturnType = returnType;
8534
8535         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
8536         if ((returnType == TYP_LONG) && (compLongUsed == false))
8537         {
8538             compLongUsed = true;
8539         }
8540         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
8541         {
8542             compFloatingPointUsed = true;
8543         }
8544
8545 #if FEATURE_MULTIREG_RET
8546         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8547         assert(retRegCount != 0);
8548
8549         if (retRegCount >= 2)
8550         {
8551             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8552             {
8553                 // Force a call that returns a multi-reg struct to always have the IR form
8554                 //   tmp = call
8555                 //
8556                 // There is no need to assign a multi-reg struct to a local var if:
8557                 //  - It is a tail call, or
8558                 //  - The call is marked for in-lining later
8559                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8560             }
8561         }
8562 #endif // FEATURE_MULTIREG_RET
8563     }
8564
8565 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8566
8567     return call;
8568 }
8569
8570 /*****************************************************************************
8571    For struct return values, re-type the operand in the case where the ABI
8572    does not use a struct return buffer
8573    Note that this method is only called for !_TARGET_X86_
8574  */
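     /* Illustrative sketch (hedged; shapes vary by target): for a method declared to
        return a 4-byte struct by value, info.compRetNativeType is typically TYP_INT,
        and a returned GT_LCL_VAR of the struct local is retyped below into a
        GT_LCL_FLD of that integral type so it can be returned in a register. */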
8575
8576 GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
8577 {
8578     assert(varTypeIsStruct(info.compRetType));
8579     assert(info.compRetBuffArg == BAD_VAR_NUM);
8580
8581 #if defined(_TARGET_XARCH_)
8582
8583 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8584     // No VarArgs for CoreCLR on x64 Unix
8585     assert(!info.compIsVarArgs);
8586
8587     // Is method returning a multi-reg struct?
8588     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8589     {
8590         // In case of multi-reg struct return, we force IR to be one of the following:
8591         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
8592         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
8593
8594         if (op->gtOper == GT_LCL_VAR)
8595         {
8596             // Make sure that this struct stays in memory and doesn't get promoted.
8597             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
8598             lvaTable[lclNum].lvIsMultiRegRet = true;
8599
8600             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8601             op->gtFlags |= GTF_DONT_CSE;
8602
8603             return op;
8604         }
8605
8606         if (op->gtOper == GT_CALL)
8607         {
8608             return op;
8609         }
8610
8611         return impAssignMultiRegTypeToVar(op, retClsHnd);
8612     }
8613 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8614     assert(info.compRetNativeType != TYP_STRUCT);
8615 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8616
8617 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8618
8619     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8620     {
8621         if (op->gtOper == GT_LCL_VAR)
8622         {
8623             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8624             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8625             // Make sure this struct type stays as struct so that we can return it as an HFA
8626             lvaTable[lclNum].lvIsMultiRegRet = true;
8627
8628             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8629             op->gtFlags |= GTF_DONT_CSE;
8630
8631             return op;
8632         }
8633
8634         if (op->gtOper == GT_CALL)
8635         {
8636             if (op->gtCall.IsVarargs())
8637             {
8638                 // We cannot tail call because control needs to return to fix up the calling
8639                 // convention for the result return.
8640                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8641                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8642             }
8643             else
8644             {
8645                 return op;
8646             }
8647         }
8648         return impAssignMultiRegTypeToVar(op, retClsHnd);
8649     }
8650
8651 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8652
8653     // Is method returning a multi-reg struct?
8654     if (IsMultiRegReturnedType(retClsHnd))
8655     {
8656         if (op->gtOper == GT_LCL_VAR)
8657         {
8658             // This LCL_VAR stays as a TYP_STRUCT
8659             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8660
8661             // Make sure this struct type is not struct promoted
8662             lvaTable[lclNum].lvIsMultiRegRet = true;
8663
8664             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8665             op->gtFlags |= GTF_DONT_CSE;
8666
8667             return op;
8668         }
8669
8670         if (op->gtOper == GT_CALL)
8671         {
8672             if (op->gtCall.IsVarargs())
8673             {
8674                 // We cannot tail call because control needs to return to fix up the calling
8675                 // convention for the result return.
8676                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8677                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8678             }
8679             else
8680             {
8681                 return op;
8682             }
8683         }
8684         return impAssignMultiRegTypeToVar(op, retClsHnd);
8685     }
8686
8687 #endif // _TARGET_XARCH_ || (FEATURE_MULTIREG_RET && (_TARGET_ARM_ || _TARGET_ARM64_))
8688
8689 REDO_RETURN_NODE:
8690     // Adjust the type away from struct to integral,
8691     // with no normalizing.
8692     if (op->gtOper == GT_LCL_VAR)
8693     {
8694         op->ChangeOper(GT_LCL_FLD);
8695     }
8696     else if (op->gtOper == GT_OBJ)
8697     {
8698         GenTree* op1 = op->AsObj()->Addr();
8699
8700         // We will fold away OBJ/ADDR
8701         // except for OBJ/ADDR/INDEX
8702         //     as the array type influences the array element's offset
8703         //     Later in this method we change op->gtType to info.compRetNativeType
8704         //     This is not correct when op is a GT_INDEX as the starting offset
8705         //     for the array elements 'elemOffs' is different for an array of
8706         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8707         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8708         //
8709         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8710         {
8711             // Change '*(&X)' to 'X' and see if we can do better
8712             op = op1->gtOp.gtOp1;
8713             goto REDO_RETURN_NODE;
8714         }
8715         op->gtObj.gtClass = NO_CLASS_HANDLE;
8716         op->ChangeOperUnchecked(GT_IND);
8717         op->gtFlags |= GTF_IND_TGTANYWHERE;
8718     }
8719     else if (op->gtOper == GT_CALL)
8720     {
8721         if (op->AsCall()->TreatAsHasRetBufArg(this))
8722         {
8723             // This must be one of those 'special' helpers that don't
8724             // really have a return buffer, but instead use it as a way
8725             // to keep the trees cleaner with fewer address-taken temps.
8726             //
8727             // Well, now we have to materialize the return buffer as
8728             // an address-taken temp. Then we can return the temp.
8729             //
8730             // NOTE: this code assumes that since the call directly
8731             // feeds the return, then the call must be returning the
8732             // same structure/class/type.
8733             //
8734             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8735
8736             // No need to spill anything as we're about to return.
8737             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8738
8739             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8740             // jump directly to a GT_LCL_FLD.
8741             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8742             op->ChangeOper(GT_LCL_FLD);
8743         }
8744         else
8745         {
8746             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8747
8748             // Don't change the gtType of the node just yet, it will get changed later.
8749             return op;
8750         }
8751     }
8752 #if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_ARM64_)
8753     else if ((op->gtOper == GT_HWIntrinsic) && varTypeIsSIMD(op->gtType))
8754     {
8755         // TODO-ARM64-FIXME Implement ARM64 ABI for Short Vectors properly
8756         // assert(op->gtType == info.compRetNativeType)
8757         if (op->gtType != info.compRetNativeType)
8758         {
8759             // Insert a register move to keep target type of SIMD intrinsic intact
8760             op = gtNewScalarHWIntrinsicNode(info.compRetNativeType, op, NI_ARM64_NONE_MOV);
8761         }
8762     }
8763 #endif
8764     else if (op->gtOper == GT_COMMA)
8765     {
8766         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8767     }
8768
8769     op->gtType = info.compRetNativeType;
8770
8771     return op;
8772 }
8773
8774 /*****************************************************************************
8775    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8776    finally-protected try. We find the finally blocks protecting the current
8777    offset (in order) by walking over the complete exception table and
8778    finding enclosing clauses. This assumes that the table is sorted.
8779    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8780
8781    If we are leaving a catch handler, we need to attach the
8782    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8783
8784    After this function, the BBJ_LEAVE block has been converted to a different type.
8785  */
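     /* Illustrative sketch (an assumed typical shape, not a guarantee): a CEE_LEAVE
        that exits two nested finally-protected 'try' regions ends up roughly as

           BBJ_CALLFINALLY (call inner finally)
             -> BBJ_CALLFINALLY (call outer finally)
               -> BBJ_ALWAYS (jumps to the original leave target)

        with, in the non-funclet model below, GT_END_LFIN statements appended to mark
        the end of each locally-invoked finally. */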
8786
8787 #if !FEATURE_EH_FUNCLETS
8788
8789 void Compiler::impImportLeave(BasicBlock* block)
8790 {
8791 #ifdef DEBUG
8792     if (verbose)
8793     {
8794         printf("\nBefore import CEE_LEAVE:\n");
8795         fgDispBasicBlocks();
8796         fgDispHandlerTab();
8797     }
8798 #endif // DEBUG
8799
8800     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8801     unsigned    blkAddr         = block->bbCodeOffs;
8802     BasicBlock* leaveTarget     = block->bbJumpDest;
8803     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8804
8805     // LEAVE clears the stack: spill side effects and set the stack depth to 0
8806
8807     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8808     verCurrentState.esStackDepth = 0;
8809
8810     assert(block->bbJumpKind == BBJ_LEAVE);
8811     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8812
8813     BasicBlock* step         = DUMMY_INIT(NULL);
8814     unsigned    encFinallies = 0; // Number of enclosing finallies.
8815     GenTree*    endCatches   = NULL;
8816     GenTree*    endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8817
8818     unsigned  XTnum;
8819     EHblkDsc* HBtab;
8820
8821     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8822     {
8823         // Grab the handler offsets
8824
8825         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8826         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8827         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8828         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8829
8830         /* Is this a catch-handler we are CEE_LEAVEing out of?
8831          * If so, we need to call CORINFO_HELP_ENDCATCH.
8832          */
8833
8834         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8835         {
8836             // Can't CEE_LEAVE out of a finally/fault handler
8837             if (HBtab->HasFinallyOrFaultHandler())
8838                 BADCODE("leave out of fault/finally block");
8839
8840             // Create the call to CORINFO_HELP_ENDCATCH
8841             GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8842
8843             // Make a list of all the currently pending endCatches
8844             if (endCatches)
8845                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8846             else
8847                 endCatches = endCatch;
8848
8849 #ifdef DEBUG
8850             if (verbose)
8851             {
8852                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8853                        "CORINFO_HELP_ENDCATCH\n",
8854                        block->bbNum, XTnum);
8855             }
8856 #endif
8857         }
8858         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8859                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8860         {
8861             /* This is a finally-protected try we are jumping out of */
8862
8863             /* If there are any pending endCatches, and we have already
8864                jumped out of a finally-protected try, then the endCatches
8865                have to be put in a block in an outer try for async
8866                exceptions to work correctly.
8867                Otherwise, just append them to the original block */
8868
8869             BasicBlock* callBlock;
8870
8871             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8872
8873             if (encFinallies == 0)
8874             {
8875                 assert(step == DUMMY_INIT(NULL));
8876                 callBlock             = block;
8877                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8878
8879                 if (endCatches)
8880                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8881
8882 #ifdef DEBUG
8883                 if (verbose)
8884                 {
8885                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8886                            "block %s\n",
8887                            callBlock->dspToString());
8888                 }
8889 #endif
8890             }
8891             else
8892             {
8893                 assert(step != DUMMY_INIT(NULL));
8894
8895                 /* Calling the finally block */
8896                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8897                 assert(step->bbJumpKind == BBJ_ALWAYS);
8898                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8899                                               // finally in the chain)
8900                 step->bbJumpDest->bbRefs++;
8901
8902                 /* The new block will inherit this block's weight */
8903                 callBlock->setBBWeight(block->bbWeight);
8904                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8905
8906 #ifdef DEBUG
8907                 if (verbose)
8908                 {
8909                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
8910                            callBlock->dspToString());
8911                 }
8912 #endif
8913
8914                 GenTree* lastStmt;
8915
8916                 if (endCatches)
8917                 {
8918                     lastStmt         = gtNewStmt(endCatches);
8919                     endLFin->gtNext  = lastStmt;
8920                     lastStmt->gtPrev = endLFin;
8921                 }
8922                 else
8923                 {
8924                     lastStmt = endLFin;
8925                 }
8926
8927                 // note that this sets BBF_IMPORTED on the block
8928                 impEndTreeList(callBlock, endLFin, lastStmt);
8929             }
8930
8931             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8932             /* The new block will inherit this block's weight */
8933             step->setBBWeight(block->bbWeight);
8934             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8935
8936 #ifdef DEBUG
8937             if (verbose)
8938             {
8939                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
8940                        step->dspToString());
8941             }
8942 #endif
8943
8944             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8945             assert(finallyNesting <= compHndBBtabCount);
8946
8947             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8948             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8949             endLFin               = gtNewStmt(endLFin);
8950             endCatches            = NULL;
8951
8952             encFinallies++;
8953
8954             invalidatePreds = true;
8955         }
8956     }
8957
8958     /* Append any remaining endCatches, if any */
8959
8960     assert(!encFinallies == !endLFin);
8961
8962     if (encFinallies == 0)
8963     {
8964         assert(step == DUMMY_INIT(NULL));
8965         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8966
8967         if (endCatches)
8968             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8969
8970 #ifdef DEBUG
8971         if (verbose)
8972         {
8973             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8974                    "block %s\n",
8975                    block->dspToString());
8976         }
8977 #endif
8978     }
8979     else
8980     {
8981         // If leaveTarget is the start of another try block, we want to make sure that
8982         // we do not insert finalStep into that try block. Hence, we find the enclosing
8983         // try block.
8984         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8985
8986         // Insert a new BB either in the try region indicated by tryIndex or
8987         // the handler region indicated by leaveTarget->bbHndIndex,
8988         // depending on which is the inner region.
8989         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8990         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8991         step->bbJumpDest = finalStep;
8992
8993         /* The new block will inherit this block's weight */
8994         finalStep->setBBWeight(block->bbWeight);
8995         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8996
8997 #ifdef DEBUG
8998         if (verbose)
8999         {
9000             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
9001                    finalStep->dspToString());
9002         }
9003 #endif
9004
9005         GenTree* lastStmt;
9006
9007         if (endCatches)
9008         {
9009             lastStmt         = gtNewStmt(endCatches);
9010             endLFin->gtNext  = lastStmt;
9011             lastStmt->gtPrev = endLFin;
9012         }
9013         else
9014         {
9015             lastStmt = endLFin;
9016         }
9017
9018         impEndTreeList(finalStep, endLFin, lastStmt);
9019
9020         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9021
9022         // Queue up the jump target for importing
9023
9024         impImportBlockPending(leaveTarget);
9025
9026         invalidatePreds = true;
9027     }
9028
9029     if (invalidatePreds && fgComputePredsDone)
9030     {
9031         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9032         fgRemovePreds();
9033     }
9034
9035 #ifdef DEBUG
9036     fgVerifyHandlerTab();
9037
9038     if (verbose)
9039     {
9040         printf("\nAfter import CEE_LEAVE:\n");
9041         fgDispBasicBlocks();
9042         fgDispHandlerTab();
9043     }
9044 #endif // DEBUG
9045 }
9046
9047 #else // FEATURE_EH_FUNCLETS
9048
9049 void Compiler::impImportLeave(BasicBlock* block)
9050 {
9051 #ifdef DEBUG
9052     if (verbose)
9053     {
9054         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
9055         fgDispBasicBlocks();
9056         fgDispHandlerTab();
9057     }
9058 #endif // DEBUG
9059
9060     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9061     unsigned    blkAddr         = block->bbCodeOffs;
9062     BasicBlock* leaveTarget     = block->bbJumpDest;
9063     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
9064
9065     // LEAVE clears the stack: spill side effects and set the stack depth to 0
9066
9067     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9068     verCurrentState.esStackDepth = 0;
9069
9070     assert(block->bbJumpKind == BBJ_LEAVE);
9071     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
9072
9073     BasicBlock* step = nullptr;
9074
9075     enum StepType
9076     {
9077         // No step type; step == NULL.
9078         ST_None,
9079
9080         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
9081         // That is, is step->bbJumpDest where a finally will return to?
9082         ST_FinallyReturn,
9083
9084         // The step block is a catch return.
9085         ST_Catch,
9086
9087         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
9088         ST_Try
9089     };
9090     StepType stepType = ST_None;
9091
9092     unsigned  XTnum;
9093     EHblkDsc* HBtab;
9094
9095     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9096     {
9097         // Grab the handler offsets
9098
9099         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9100         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9101         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9102         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9103
9104         /* Is this a catch-handler we are CEE_LEAVEing out of?
9105          */
9106
9107         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9108         {
9109             // Can't CEE_LEAVE out of a finally/fault handler
9110             if (HBtab->HasFinallyOrFaultHandler())
9111             {
9112                 BADCODE("leave out of fault/finally block");
9113             }
9114
9115             /* We are jumping out of a catch */
9116
9117             if (step == nullptr)
9118             {
9119                 step             = block;
9120                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
9121                 stepType         = ST_Catch;
9122
9123 #ifdef DEBUG
9124                 if (verbose)
9125                 {
9126                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
9127                            "block\n",
9128                            XTnum, step->bbNum);
9129                 }
9130 #endif
9131             }
9132             else
9133             {
9134                 BasicBlock* exitBlock;
9135
9136                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
9137                  * scope */
9138                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
9139
9140                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9141                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
9142                                               // exit) returns to this block
9143                 step->bbJumpDest->bbRefs++;
9144
9145 #if defined(_TARGET_ARM_)
9146                 if (stepType == ST_FinallyReturn)
9147                 {
9148                     assert(step->bbJumpKind == BBJ_ALWAYS);
9149                     // Mark the target of a finally return
9150                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9151                 }
9152 #endif // defined(_TARGET_ARM_)
9153
9154                 /* The new block will inherit this block's weight */
9155                 exitBlock->setBBWeight(block->bbWeight);
9156                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9157
9158                 /* This exit block is the new step */
9159                 step     = exitBlock;
9160                 stepType = ST_Catch;
9161
9162                 invalidatePreds = true;
9163
9164 #ifdef DEBUG
9165                 if (verbose)
9166                 {
9167                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
9168                            exitBlock->bbNum);
9169                 }
9170 #endif
9171             }
9172         }
9173         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9174                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9175         {
9176             /* We are jumping out of a finally-protected try */
9177
9178             BasicBlock* callBlock;
9179
9180             if (step == nullptr)
9181             {
9182 #if FEATURE_EH_CALLFINALLY_THUNKS
9183
9184                 // Put the call to the finally in the enclosing region.
9185                 unsigned callFinallyTryIndex =
9186                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9187                 unsigned callFinallyHndIndex =
9188                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9189                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
9190
9191                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
9192                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
9193                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
9194                 // next block, and flow optimizations will remove it.
9195                 block->bbJumpKind = BBJ_ALWAYS;
9196                 block->bbJumpDest = callBlock;
9197                 block->bbJumpDest->bbRefs++;
9198
9199                 /* The new block will inherit this block's weight */
9200                 callBlock->setBBWeight(block->bbWeight);
9201                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9202
9203 #ifdef DEBUG
9204                 if (verbose)
9205                 {
9206                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9207                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
9208                            XTnum, block->bbNum, callBlock->bbNum);
9209                 }
9210 #endif
9211
9212 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9213
9214                 callBlock             = block;
9215                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9216
9217 #ifdef DEBUG
9218                 if (verbose)
9219                 {
9220                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9221                            "BBJ_CALLFINALLY block\n",
9222                            XTnum, callBlock->bbNum);
9223                 }
9224 #endif
9225
9226 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9227             }
9228             else
9229             {
9230                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
9231                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
9232                 // a 'finally'), or the step block is the return from a catch.
9233                 //
9234                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
9235                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
9236                 // automatically re-raise the exception, using the return address of the catch (that is, the target
9237                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
9238                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
9239                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
9240                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
9241                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
9242                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
9243                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
9244                 // stack walks.)
9245
9246                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9247
9248 #if FEATURE_EH_CALLFINALLY_THUNKS
9249                 if (step->bbJumpKind == BBJ_EHCATCHRET)
9250                 {
9251                     // Need to create another step block in the 'try' region that will actually branch to the
9252                     // call-to-finally thunk.
9253                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9254                     step->bbJumpDest  = step2;
9255                     step->bbJumpDest->bbRefs++;
9256                     step2->setBBWeight(block->bbWeight);
9257                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9258
9259 #ifdef DEBUG
9260                     if (verbose)
9261                     {
9262                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
9263                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
9264                                XTnum, step->bbNum, step2->bbNum);
9265                     }
9266 #endif
9267
9268                     step = step2;
9269                     assert(stepType == ST_Catch); // Leave it as catch type for now.
9270                 }
9271 #endif // FEATURE_EH_CALLFINALLY_THUNKS
9272
9273 #if FEATURE_EH_CALLFINALLY_THUNKS
9274                 unsigned callFinallyTryIndex =
9275                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9276                 unsigned callFinallyHndIndex =
9277                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9278 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
9279                 unsigned callFinallyTryIndex = XTnum + 1;
9280                 unsigned callFinallyHndIndex = 0; // don't care
9281 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9282
9283                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
9284                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9285                                               // finally in the chain)
9286                 step->bbJumpDest->bbRefs++;
9287
9288 #if defined(_TARGET_ARM_)
9289                 if (stepType == ST_FinallyReturn)
9290                 {
9291                     assert(step->bbJumpKind == BBJ_ALWAYS);
9292                     // Mark the target of a finally return
9293                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9294                 }
9295 #endif // defined(_TARGET_ARM_)
9296
9297                 /* The new block will inherit this block's weight */
9298                 callBlock->setBBWeight(block->bbWeight);
9299                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9300
9301 #ifdef DEBUG
9302                 if (verbose)
9303                 {
9304                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
9305                            "BB%02u\n",
9306                            XTnum, callBlock->bbNum);
9307                 }
9308 #endif
9309             }
9310
9311             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9312             stepType = ST_FinallyReturn;
9313
9314             /* The new block will inherit this block's weight */
9315             step->setBBWeight(block->bbWeight);
9316             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9317
9318 #ifdef DEBUG
9319             if (verbose)
9320             {
9321                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9322                        "block BB%02u\n",
9323                        XTnum, step->bbNum);
9324             }
9325 #endif
9326
9327             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9328
9329             invalidatePreds = true;
9330         }
9331         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9332                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9333         {
9334             // We are jumping out of a catch-protected try.
9335             //
9336             // If we are returning from a call to a finally, then we must have a step block within a try
9337             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
9338             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9339             // and invoke the appropriate catch.
9340             //
9341             // We also need to handle a special case for ThreadAbortException. If a try/catch
9342             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9343             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9344             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9345             // address of the catch return as the new exception address. That is, the re-raised exception appears to
9346             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9347             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, even though it should.
9348             // For example:
9349             //
9350             // try {
9351             //    try {
9352             //       // something here raises ThreadAbortException
9353             //       LEAVE LABEL_1; // no need to stop at LABEL_2
9354             //    } catch (Exception) {
9355             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9356             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9357             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9358             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9359             //       // need to do this transformation if the current EH block is a try/catch that catches
9360             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
9361             //       // information, so currently we do it for all catch types.
9362             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
9363             //    }
9364             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9365             // } catch (ThreadAbortException) {
9366             // }
9367             // LABEL_1:
9368             //
9369             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
9370             // compiler.
9371
9372             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9373             {
9374                 BasicBlock* catchStep;
9375
9376                 assert(step);
9377
9378                 if (stepType == ST_FinallyReturn)
9379                 {
9380                     assert(step->bbJumpKind == BBJ_ALWAYS);
9381                 }
9382                 else
9383                 {
9384                     assert(stepType == ST_Catch);
9385                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
9386                 }
9387
9388                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9389                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9390                 step->bbJumpDest = catchStep;
9391                 step->bbJumpDest->bbRefs++;
9392
9393 #if defined(_TARGET_ARM_)
9394                 if (stepType == ST_FinallyReturn)
9395                 {
9396                     // Mark the target of a finally return
9397                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9398                 }
9399 #endif // defined(_TARGET_ARM_)
9400
9401                 /* The new block will inherit this block's weight */
9402                 catchStep->setBBWeight(block->bbWeight);
9403                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9404
9405 #ifdef DEBUG
9406                 if (verbose)
9407                 {
9408                     if (stepType == ST_FinallyReturn)
9409                     {
9410                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9411                                "BBJ_ALWAYS block BB%02u\n",
9412                                XTnum, catchStep->bbNum);
9413                     }
9414                     else
9415                     {
9416                         assert(stepType == ST_Catch);
9417                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9418                                "BBJ_ALWAYS block BB%02u\n",
9419                                XTnum, catchStep->bbNum);
9420                     }
9421                 }
9422 #endif // DEBUG
9423
9424                 /* This block is the new step */
9425                 step     = catchStep;
9426                 stepType = ST_Try;
9427
9428                 invalidatePreds = true;
9429             }
9430         }
9431     }
9432
9433     if (step == nullptr)
9434     {
9435         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9436
9437 #ifdef DEBUG
9438         if (verbose)
9439         {
9440             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9441                    "block BB%02u to BBJ_ALWAYS\n",
9442                    block->bbNum);
9443         }
9444 #endif
9445     }
9446     else
9447     {
9448         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9449
9450 #if defined(_TARGET_ARM_)
9451         if (stepType == ST_FinallyReturn)
9452         {
9453             assert(step->bbJumpKind == BBJ_ALWAYS);
9454             // Mark the target of a finally return
9455             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9456         }
9457 #endif // defined(_TARGET_ARM_)
9458
9459 #ifdef DEBUG
9460         if (verbose)
9461         {
9462             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
9463         }
9464 #endif
9465
9466         // Queue up the jump target for importing
9467
9468         impImportBlockPending(leaveTarget);
9469     }
9470
9471     if (invalidatePreds && fgComputePredsDone)
9472     {
9473         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9474         fgRemovePreds();
9475     }
9476
9477 #ifdef DEBUG
9478     fgVerifyHandlerTab();
9479
9480     if (verbose)
9481     {
9482         printf("\nAfter import CEE_LEAVE:\n");
9483         fgDispBasicBlocks();
9484         fgDispHandlerTab();
9485     }
9486 #endif // DEBUG
9487 }
9488
9489 #endif // FEATURE_EH_FUNCLETS
9490
9491 /*****************************************************************************/
9492 // This is called when reimporting a leave block. It resets the JumpKind,
9493 // JumpDest, and bbNext to the original values
9494
9495 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9496 {
9497 #if FEATURE_EH_FUNCLETS
9498     // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
9499     // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY.   Say for some reason we reimport B0,
9500     // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
9501     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
9502     // only predecessor are also considered orphans and attempted to be deleted.
9503     //
9504     //  try  {
9505     //     ....
9506     //     try
9507     //     {
9508     //         ....
9509     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
9510     //     } finally { }
9511     //  } finally { }
9512     //  OUTSIDE:
9513     //
9514     // In the above nested try-finally example, we create a step block (call it Bstep) which in turn branches to a
9515     // block that the finally will branch to (and such a block is marked as a finally target). Block B1 branches to
9516     // the step block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it
9517     // cannot be removed. To work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked
9518     // as BBJ_CALLFINALLY and only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block
9519     // deletion B0Dup and B1 will be treated as a pair and handled correctly.
9520     if (block->bbJumpKind == BBJ_CALLFINALLY)
9521     {
9522         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
9523         dupBlock->bbFlags    = block->bbFlags;
9524         dupBlock->bbJumpDest = block->bbJumpDest;
9525         dupBlock->copyEHRegion(block);
9526         dupBlock->bbCatchTyp = block->bbCatchTyp;
9527
9528         // Mark this block so that it is:
9529         //  a) not referenced by any other block, to make sure that it gets deleted
9530         //  b) zero weight
9531         //  c) prevented from being imported
9532         //  d) internal
9533         //  e) rarely run
9534         dupBlock->bbRefs   = 0;
9535         dupBlock->bbWeight = 0;
9536         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
9537
9538         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
9539         // will be next to each other.
9540         fgInsertBBafter(block, dupBlock);
9541
9542 #ifdef DEBUG
9543         if (verbose)
9544         {
9545             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
9546         }
9547 #endif
9548     }
9549 #endif // FEATURE_EH_FUNCLETS
9550
9551     block->bbJumpKind = BBJ_LEAVE;
9552     fgInitBBLookup();
9553     block->bbJumpDest = fgLookupBB(jmpAddr);
9554
9555     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
9556     // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
9557     // reason we don't want to remove the block at this point is that if we call
9558     // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
9559     // added and the linked list length will be different than fgBBcount.
9560 }
9561
9562 /*****************************************************************************/
9563 // Get the first non-prefix opcode. Used for verification of valid combinations
9564 // of prefixes and actual opcodes.
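// For example, given the IL sequence "volatile. unaligned. 1 ldind.i4", the loop below walks past both
// prefixes (skipping each prefix's operand bytes via opcodeSizes[]) and returns CEE_LDIND_I4; if the code
// stream ends before a non-prefix opcode is found, CEE_ILLEGAL is returned.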
9565
9566 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9567 {
9568     while (codeAddr < codeEndp)
9569     {
9570         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9571         codeAddr += sizeof(__int8);
9572
9573         if (opcode == CEE_PREFIX1)
9574         {
9575             if (codeAddr >= codeEndp)
9576             {
9577                 break;
9578             }
9579             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9580             codeAddr += sizeof(__int8);
9581         }
9582
9583         switch (opcode)
9584         {
9585             case CEE_UNALIGNED:
9586             case CEE_VOLATILE:
9587             case CEE_TAILCALL:
9588             case CEE_CONSTRAINED:
9589             case CEE_READONLY:
9590                 break;
9591             default:
9592                 return opcode;
9593         }
9594
9595         codeAddr += opcodeSizes[opcode];
9596     }
9597
9598     return CEE_ILLEGAL;
9599 }
9600
9601 /*****************************************************************************/
9602 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
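// For example, "volatile. ldsfld" or "unaligned. 4 ldind.i4" are accepted, whereas applying one of these
// prefixes to something like an add opcode trips the BADCODE path below.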
9603
9604 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9605 {
9606     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9607
9608     if (!(
9609             // Opcodes of all the ldind and stind instructions happen to be contiguous, except stind.i.
9610             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9611             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9612             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9613             // volatile. prefix is allowed with the ldsfld and stsfld
9614             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9615     {
9616         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9617     }
9618 }
9619
9620 /*****************************************************************************/
9621
9622 #ifdef DEBUG
9623
9624 #undef RETURN // undef contracts RETURN macro
9625
9626 enum controlFlow_t
9627 {
9628     NEXT,
9629     CALL,
9630     RETURN,
9631     THROW,
9632     BRANCH,
9633     COND_BRANCH,
9634     BREAK,
9635     PHI,
9636     META,
9637 };
9638
9639 const static controlFlow_t controlFlow[] = {
9640 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9641 #include "opcode.def"
9642 #undef OPDEF
9643 };
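// The OPDEF X-macro above expands each entry of opcode.def to just its control-flow kind, so controlFlow[]
// becomes a table of flow kinds indexed by opcode (e.g. the entry for CEE_RET is RETURN).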
9644
9645 #endif // DEBUG
9646
9647 /*****************************************************************************
9648  *  Determine the result type of an arithmetic operation
9649  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
9650  */
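// Summary of the rules implemented below:
//   byref - byref            => native int
//   byref +/- [native] int   => byref    ([native] int + byref likewise gives a byref)
//   [native] int - byref     => native int
//   On 64-bit targets, mixing int32 with native int upcasts the int32 operand to native int.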
9651 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
9652 {
9653     var_types type = TYP_UNDEF;
9654     GenTree*  op1  = *pOp1;
9655     GenTree*  op2  = *pOp2;
9656
9657     // Arithmetic operations are generally only allowed with
9658     // primitive types, but certain operations are allowed
9659     // with byrefs
9660
9661     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9662     {
9663         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9664         {
9665             // byref1-byref2 => gives a native int
9666             type = TYP_I_IMPL;
9667         }
9668         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9669         {
9670             // [native] int - byref => gives a native int
9671
9672             //
9673             // The reason is that it is possible, in managed C++,
9674             // to have a tree like this:
9675             //
9676             //              -
9677             //             / \
9678             //            /   \
9679             //           /     \
9680             //          /       \
9681             // const(h) int     addr byref
9682             //
9683             // <BUGNUM> VSW 318822 </BUGNUM>
9684             //
9685             // So here we decide to make the resulting type to be a native int.
9686             CLANG_FORMAT_COMMENT_ANCHOR;
9687
9688 #ifdef _TARGET_64BIT_
9689             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9690             {
9691                 // insert an explicit upcast
9692                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9693             }
9694 #endif // _TARGET_64BIT_
9695
9696             type = TYP_I_IMPL;
9697         }
9698         else
9699         {
9700             // byref - [native] int => gives a byref
9701             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9702
9703 #ifdef _TARGET_64BIT_
9704             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9705             {
9706                 // insert an explicit upcast
9707                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9708             }
9709 #endif // _TARGET_64BIT_
9710
9711             type = TYP_BYREF;
9712         }
9713     }
9714     else if ((oper == GT_ADD) &&
9715              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9716     {
9717         // byref + [native] int => gives a byref
9718         // (or)
9719         // [native] int + byref => gives a byref
9720
9721         // only one can be a byref : byref op byref not allowed
9722         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9723         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9724
9725 #ifdef _TARGET_64BIT_
9726         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9727         {
9728             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9729             {
9730                 // insert an explicit upcast
9731                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9732             }
9733         }
9734         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9735         {
9736             // insert an explicit upcast
9737             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9738         }
9739 #endif // _TARGET_64BIT_
9740
9741         type = TYP_BYREF;
9742     }
9743 #ifdef _TARGET_64BIT_
9744     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9745     {
9746         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9747
9748         // int + long => gives long
9749         // long + int => gives long
9750         // We get this mixing because in IL the 'long' here isn't an Int64, it's really a native int (IntPtr)
9751
9752         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9753         {
9754             // insert an explicit upcast
9755             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9756         }
9757         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9758         {
9759             // insert an explicit upcast
9760             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9761         }
9762
9763         type = TYP_I_IMPL;
9764     }
9765 #else  // 32-bit TARGET
9766     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9767     {
9768         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9769
9770         // int + long => gives long
9771         // long + int => gives long
9772
9773         type = TYP_LONG;
9774     }
9775 #endif // _TARGET_64BIT_
9776     else
9777     {
9778         // int + int => gives an int
9779         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9780
9781         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9782                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9783
9784         type = genActualType(op1->gtType);
9785
9786 #if FEATURE_X87_DOUBLES
9787
9788         // For x87, since we only have 1 size of registers, prefer double
9789         // For everybody else, be more precise
9790         if (type == TYP_FLOAT)
9791             type = TYP_DOUBLE;
9792
9793 #else // !FEATURE_X87_DOUBLES
9794
9795         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9796         // Otherwise, turn floats into doubles
9797         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9798         {
9799             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9800             type = TYP_DOUBLE;
9801         }
9802
9803 #endif // FEATURE_X87_DOUBLES
9804     }
9805
9806 #if FEATURE_X87_DOUBLES
9807     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9808 #else  // FEATURE_X87_DOUBLES
9809     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9810 #endif // FEATURE_X87_DOUBLES
9811
9812     return type;
9813 }
9814
9815 //------------------------------------------------------------------------
9816 // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
9817 //
9818 // Arguments:
9819 //   op1 - value to cast
9820 //   pResolvedToken - resolved token for type to cast to
9821 //   isCastClass - true if this is a castclass, false if isinst
9822 //
9823 // Return Value:
9824 //   tree representing optimized cast, or null if no optimization possible
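//
// Notes:
//   For example, when op1 comes straight from a ldstr, gtGetClassHandle reports an exact System.String, so a
//   castclass to System.String can fold to simply returning op1, and an isinst against an unrelated class can
//   fold to a null constant.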
9825
9826 GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
9827 {
9828     assert(op1->TypeGet() == TYP_REF);
9829
9830     // Don't optimize for minopts or debug codegen.
9831     if (opts.compDbgCode || opts.MinOpts())
9832     {
9833         return nullptr;
9834     }
9835
9836     // See what we know about the type of the object being cast.
9837     bool                 isExact   = false;
9838     bool                 isNonNull = false;
9839     CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
9840     GenTree*             optResult = nullptr;
9841
9842     if (fromClass != nullptr)
9843     {
9844         CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
9845         JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
9846                 isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
9847                 info.compCompHnd->getClassName(toClass));
9848
9849         // Perhaps we know if the cast will succeed or fail.
9850         TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
9851
9852         if (castResult == TypeCompareState::Must)
9853         {
9854             // Cast will succeed, result is simply op1.
9855             JITDUMP("Cast will succeed, optimizing to simply return input\n");
9856             return op1;
9857         }
9858         else if (castResult == TypeCompareState::MustNot)
9859         {
9860             // See if we can sharpen exactness by looking for final classes
9861             if (!isExact)
9862             {
9863                 DWORD flags     = info.compCompHnd->getClassAttribs(fromClass);
9864                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL |
9865                                   CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
9866                 isExact = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9867             }
9868
9869             // Cast to exact type will fail. Handle case where we have
9870             // an exact type (that is, fromClass is not a subtype)
9871             // and we're not going to throw on failure.
9872             if (isExact && !isCastClass)
9873             {
9874                 JITDUMP("Cast will fail, optimizing to return null\n");
9875                 GenTree* result = gtNewIconNode(0, TYP_REF);
9876
9877                 // If the cast was fed by a box, we can remove that too.
9878                 if (op1->IsBoxedValue())
9879                 {
9880                     JITDUMP("Also removing upstream box\n");
9881                     gtTryRemoveBoxUpstreamEffects(op1);
9882                 }
9883
9884                 return result;
9885             }
9886             else if (isExact)
9887             {
9888                 JITDUMP("Not optimizing failing castclass (yet)\n");
9889             }
9890             else
9891             {
9892                 JITDUMP("Can't optimize since fromClass is inexact\n");
9893             }
9894         }
9895         else
9896         {
9897             JITDUMP("Result of cast unknown, must generate runtime test\n");
9898         }
9899     }
9900     else
9901     {
9902         JITDUMP("\nCan't optimize since fromClass is unknown\n");
9903     }
9904
9905     return nullptr;
9906 }
9907
9908 //------------------------------------------------------------------------
9909 // impCastClassOrIsInstToTree: build and import castclass/isinst
9910 //
9911 // Arguments:
9912 //   op1 - value to cast
9913 //   op2 - type handle for type to cast to
9914 //   pResolvedToken - resolved token from the cast operation
9915 //   isCastClass - true if this is castclass, false means isinst
9916 //
9917 // Return Value:
9918 //   Tree representing the cast
9919 //
9920 // Notes:
9921 //   May expand into a series of runtime checks or a helper call.
9922
9923 GenTree* Compiler::impCastClassOrIsInstToTree(GenTree*                op1,
9924                                               GenTree*                op2,
9925                                               CORINFO_RESOLVED_TOKEN* pResolvedToken,
9926                                               bool                    isCastClass)
9927 {
9928     assert(op1->TypeGet() == TYP_REF);
9929
9930     // Optimistically assume the jit should expand this as an inline test
9931     bool shouldExpandInline = true;
9932
9933     // Profitability check.
9934     //
9935     // Don't bother with inline expansion when jit is trying to
9936     // generate code quickly, or the cast is in code that won't run very
9937     // often, or the method already is pretty big.
9938     if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
9939     {
9940         // not worth the code expansion if jitting fast or in a rarely run block
9941         shouldExpandInline = false;
9942     }
9943     else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9944     {
9945         // not worth creating an untracked local variable
9946         shouldExpandInline = false;
9947     }
9948
9949     // Pessimistically assume the jit cannot expand this as an inline test
9950     bool                  canExpandInline = false;
9951     const CorInfoHelpFunc helper          = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9952
9953     // Legality check.
9954     //
9955     // Not all castclass/isinst operations can be inline expanded.
9956     // Check legality only if an inline expansion is desirable.
9957     if (shouldExpandInline)
9958     {
9959         if (isCastClass)
9960         {
9961             // Jit can only inline expand the normal CHKCASTCLASS helper.
9962             canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9963         }
9964         else
9965         {
9966             if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9967             {
9968                 // Check the class attributes.
9969                 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9970
9971                 // If the class is final and is not marshal byref or
9972                 // contextful, the jit can expand the IsInst check inline.
9973                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
9974                 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9975             }
9976         }
9977     }
9978
9979     const bool expandInline = canExpandInline && shouldExpandInline;
9980
9981     if (!expandInline)
9982     {
9983         JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
9984                 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
9985
9986         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9987         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9988         //
9989         op2->gtFlags |= GTF_DONT_CSE;
9990
9991         return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
9992     }
9993
9994     JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
9995
9996     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9997
9998     GenTree* temp;
9999     GenTree* condMT;
10000     //
10001     // expand the methodtable match:
10002     //
10003     //  condMT ==>   GT_NE
10004     //               /    \
10005     //           GT_IND   op2 (typically CNS_INT)
10006     //              |
10007     //           op1Copy
10008     //
10009
10010     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
10011     //
10012     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
10013     //
10014     // op1 is now known to be a non-complex tree
10015     // thus we can use gtClone(op1) from now on
10016     //
10017
10018     GenTree* op2Var = op2;
10019     if (isCastClass)
10020     {
10021         op2Var                                                  = fgInsertCommaFormTemp(&op2);
10022         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
10023     }
10024     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
10025     temp->gtFlags |= GTF_EXCEPT;
10026     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
10027
10028     GenTree* condNull;
10029     //
10030     // expand the null check:
10031     //
10032     //  condNull ==>   GT_EQ
10033     //                 /    \
10034     //             op1Copy CNS_INT
10035     //                      null
10036     //
10037     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
10038
10039     //
10040     // expand the true and false trees for the condMT
10041     //
10042     GenTree* condFalse = gtClone(op1);
10043     GenTree* condTrue;
10044     if (isCastClass)
10045     {
10046         //
10047         // use the special helper that skips the cases checked by our inlined cast
10048         //
10049         const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
10050
10051         condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
10052     }
10053     else
10054     {
10055         condTrue = gtNewIconNode(0, TYP_REF);
10056     }
10057
10058 #define USE_QMARK_TREES
10059
10060 #ifdef USE_QMARK_TREES
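    // Taken together, the two QMARK/COLON trees built below compute (roughly):
    //
    //   result = (op1 == null) ? op1
    //                          : (op1->methodTable != op2) ? (isCastClass ? CHKCASTCLASS_SPECIAL(op2, op1) : null)
    //                                                      : op1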
10061     GenTree* qmarkMT;
10062     //
10063     // Generate first QMARK - COLON tree
10064     //
10065     //  qmarkMT ==>   GT_QMARK
10066     //                 /     \
10067     //            condMT   GT_COLON
10068     //                      /     \
10069     //                condFalse  condTrue
10070     //
10071     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
10072     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
10073     condMT->gtFlags |= GTF_RELOP_QMARK;
10074
10075     GenTree* qmarkNull;
10076     //
10077     // Generate second QMARK - COLON tree
10078     //
10079     //  qmarkNull ==>  GT_QMARK
10080     //                 /     \
10081     //           condNull  GT_COLON
10082     //                      /     \
10083     //                qmarkMT   op1Copy
10084     //
10085     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
10086     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
10087     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
10088     condNull->gtFlags |= GTF_RELOP_QMARK;
10089
10090     // Make QMark node a top level node by spilling it.
10091     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
10092     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
10093
10094     // TODO: Is it possible op1 has a better type?
10095     lvaSetClass(tmp, pResolvedToken->hClass);
10096     return gtNewLclvNode(tmp, TYP_REF);
10097 #endif
10098 }
10099
10100 #ifndef DEBUG
10101 #define assertImp(cond) ((void)0)
10102 #else
10103 #define assertImp(cond)                                                                                                \
10104     do                                                                                                                 \
10105     {                                                                                                                  \
10106         if (!(cond))                                                                                                   \
10107         {                                                                                                              \
10108             const int cchAssertImpBuf = 600;                                                                           \
10109             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
10110             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
10111                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
10112                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
10113                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
10114             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
10115         }                                                                                                              \
10116     } while (0)
10117 #endif // DEBUG
10118
10119 #ifdef _PREFAST_
10120 #pragma warning(push)
10121 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
10122 #endif
10123 /*****************************************************************************
10124  *  Import the instr for the given basic block
10125  */
10126 void Compiler::impImportBlockCode(BasicBlock* block)
10127 {
10128 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
10129
10130 #ifdef DEBUG
10131
10132     if (verbose)
10133     {
10134         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
10135     }
10136 #endif
10137
10138     unsigned  nxtStmtIndex = impInitBlockLineInfo();
10139     IL_OFFSET nxtStmtOffs;
10140
10141     GenTree*                     arrayNodeFrom;
10142     GenTree*                     arrayNodeTo;
10143     GenTree*                     arrayNodeToIndex;
10144     CorInfoHelpFunc              helper;
10145     CorInfoIsAccessAllowedResult accessAllowedResult;
10146     CORINFO_HELPER_DESC          calloutHelper;
10147     const BYTE*                  lastLoadToken = nullptr;
10148
10149     // reject cyclic constraints
10150     if (tiVerificationNeeded)
10151     {
10152         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
10153         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
10154     }
10155
10156     /* Get the tree list started */
10157
10158     impBeginTreeList();
10159
10160     /* Walk the opcodes that comprise the basic block */
10161
10162     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
10163     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
10164
10165     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
10166     IL_OFFSET lastSpillOffs = opcodeOffs;
10167
10168     signed jmpDist;
10169
10170     /* remember the start of the delegate creation sequence (used for verification) */
10171     const BYTE* delegateCreateStart = nullptr;
10172
10173     int  prefixFlags = 0;
10174     bool explicitTailCall, constraintCall, readonlyCall;
10175
10176     typeInfo tiRetVal;
10177
10178     unsigned numArgs = info.compArgsCount;
10179
10180     /* Now process all the opcodes in the block */
10181
10182     var_types callTyp    = TYP_COUNT;
10183     OPCODE    prevOpcode = CEE_ILLEGAL;
10184
10185     if (block->bbCatchTyp)
10186     {
10187         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
10188         {
10189             impCurStmtOffsSet(block->bbCodeOffs);
10190         }
10191
10192         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
10193         // to a temp. This is a trade off for code simplicity
10194         impSpillSpecialSideEff();
10195     }
10196
10197     while (codeAddr < codeEndp)
10198     {
10199         bool                   usingReadyToRunHelper = false;
10200         CORINFO_RESOLVED_TOKEN resolvedToken;
10201         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
10202         CORINFO_CALL_INFO      callInfo;
10203         CORINFO_FIELD_INFO     fieldInfo;
10204
10205         tiRetVal = typeInfo(); // Default type info
10206
10207         //---------------------------------------------------------------------
10208
10209         /* We need to restrict the max tree depth as many of the Compiler
10210            functions are recursive. We do this by spilling the stack */
10211
10212         if (verCurrentState.esStackDepth)
10213         {
10214             /* Has it been a while since we last saw a non-empty stack (which
10215                guarantees that the tree depth isn't accumulating)? */
10216
10217             if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
10218             {
10219                 impSpillStackEnsure();
10220                 lastSpillOffs = opcodeOffs;
10221             }
10222         }
10223         else
10224         {
10225             lastSpillOffs   = opcodeOffs;
10226             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
10227         }
10228
10229         /* Compute the current instr offset */
10230
10231         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10232
10233 #ifndef DEBUG
10234         if (opts.compDbgInfo)
10235 #endif
10236         {
10237             if (!compIsForInlining())
10238             {
10239                 nxtStmtOffs =
10240                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
10241
10242                 /* Have we reached the next stmt boundary ? */
10243
10244                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
10245                 {
10246                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
10247
10248                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
10249                     {
10250                         /* We need to provide accurate IP-mapping at this point.
10251                            So spill anything on the stack so that it will form
10252                            gtStmts with the correct stmt offset noted */
10253
10254                         impSpillStackEnsure(true);
10255                     }
10256
10257                     // Has impCurStmtOffs been reported in any tree?
10258
10259                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
10260                     {
10261                         GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
10262                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10263
10264                         assert(impCurStmtOffs == BAD_IL_OFFSET);
10265                     }
10266
10267                     if (impCurStmtOffs == BAD_IL_OFFSET)
10268                     {
10269                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
10270                            If opcodeOffs has gone past nxtStmtIndex, catch up */
10271
10272                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
10273                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
10274                         {
10275                             nxtStmtIndex++;
10276                         }
10277
10278                         /* Go to the new stmt */
10279
10280                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
10281
10282                         /* Update the stmt boundary index */
10283
10284                         nxtStmtIndex++;
10285                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
10286
10287                         /* Are there any more line# entries after this one? */
10288
10289                         if (nxtStmtIndex < info.compStmtOffsetsCount)
10290                         {
10291                             /* Remember where the next line# starts */
10292
10293                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
10294                         }
10295                         else
10296                         {
10297                             /* No more line# entries */
10298
10299                             nxtStmtOffs = BAD_IL_OFFSET;
10300                         }
10301                     }
10302                 }
10303                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
10304                          (verCurrentState.esStackDepth == 0))
10305                 {
10306                     /* At stack-empty locations, we have already added the tree to
10307                        the stmt list with the last offset. We just need to update
10308                        impCurStmtOffs
10309                      */
10310
10311                     impCurStmtOffsSet(opcodeOffs);
10312                 }
10313                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
10314                          impOpcodeIsCallSiteBoundary(prevOpcode))
10315                 {
10316                     /* Make sure we have a type cached */
10317                     assert(callTyp != TYP_COUNT);
10318
10319                     if (callTyp == TYP_VOID)
10320                     {
10321                         impCurStmtOffsSet(opcodeOffs);
10322                     }
10323                     else if (opts.compDbgCode)
10324                     {
10325                         impSpillStackEnsure(true);
10326                         impCurStmtOffsSet(opcodeOffs);
10327                     }
10328                 }
10329                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
10330                 {
10331                     if (opts.compDbgCode)
10332                     {
10333                         impSpillStackEnsure(true);
10334                     }
10335
10336                     impCurStmtOffsSet(opcodeOffs);
10337                 }
10338
10339                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
10340                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
10341             }
10342         }
10343
10344         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
10345         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
10346         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
10347
10348         var_types       lclTyp, ovflType = TYP_UNKNOWN;
10349         GenTree*        op1           = DUMMY_INIT(NULL);
10350         GenTree*        op2           = DUMMY_INIT(NULL);
10351         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
10352         GenTree*        newObjThisPtr = DUMMY_INIT(NULL);
10353         bool            uns           = DUMMY_INIT(false);
10354         bool            isLocal       = false;
10355
10356         /* Get the next opcode and the size of its parameters */
10357
10358         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10359         codeAddr += sizeof(__int8);
10360
10361 #ifdef DEBUG
10362         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10363         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
10364 #endif
10365
10366     DECODE_OPCODE:
10367
10368         // Return if any previous code has caused inline to fail.
10369         if (compDonotInline())
10370         {
10371             return;
10372         }
10373
10374         /* Get the size of additional parameters */
10375
10376         signed int sz = opcodeSizes[opcode];
10377
10378 #ifdef DEBUG
10379         clsHnd  = NO_CLASS_HANDLE;
10380         lclTyp  = TYP_COUNT;
10381         callTyp = TYP_COUNT;
10382
10383         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10384         impCurOpcName = opcodeNames[opcode];
10385
10386         if (verbose && (opcode != CEE_PREFIX1))
10387         {
10388             printf("%s", impCurOpcName);
10389         }
10390
10391         /* Use assertImp() to display the opcode */
10392
10393         op1 = op2 = nullptr;
10394 #endif
10395
10396         /* See what kind of an opcode we have, then */
10397
10398         unsigned mflags   = 0;
10399         unsigned clsFlags = 0;
10400
10401         switch (opcode)
10402         {
10403             unsigned  lclNum;
10404             var_types type;
10405
10406             GenTree*   op3;
10407             genTreeOps oper;
10408             unsigned   size;
10409
10410             int val;
10411
10412             CORINFO_SIG_INFO     sig;
10413             IL_OFFSET            jmpAddr;
10414             bool                 ovfl, unordered, callNode;
10415             bool                 ldstruct;
10416             CORINFO_CLASS_HANDLE tokenType;
10417
10418             union {
10419                 int     intVal;
10420                 float   fltVal;
10421                 __int64 lngVal;
10422                 double  dblVal;
10423             } cval;
10424
10425             case CEE_PREFIX1:
10426                 opcode     = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10427                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10428                 codeAddr += sizeof(__int8);
10429                 goto DECODE_OPCODE;
10430
10431             SPILL_APPEND:
10432
10433                 // We need to call impSpillLclRefs() for a struct type lclVar.
10434                 // This is done for non-block assignments in the handling of stloc.
10435                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10436                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10437                 {
10438                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10439                 }
10440
10441                 /* Append 'op1' to the list of statements */
10442                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10443                 goto DONE_APPEND;
10444
10445             APPEND:
10446
10447                 /* Append 'op1' to the list of statements */
10448
10449                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10450                 goto DONE_APPEND;
10451
10452             DONE_APPEND:
10453
10454 #ifdef DEBUG
10455                 // Remember at which BC offset the tree was finished
10456                 impNoteLastILoffs();
10457 #endif
10458                 break;
10459
10460             case CEE_LDNULL:
10461                 impPushNullObjRefOnStack();
10462                 break;
10463
10464             case CEE_LDC_I4_M1:
10465             case CEE_LDC_I4_0:
10466             case CEE_LDC_I4_1:
10467             case CEE_LDC_I4_2:
10468             case CEE_LDC_I4_3:
10469             case CEE_LDC_I4_4:
10470             case CEE_LDC_I4_5:
10471             case CEE_LDC_I4_6:
10472             case CEE_LDC_I4_7:
10473             case CEE_LDC_I4_8:
10474                 cval.intVal = (opcode - CEE_LDC_I4_0);
10475                 assert(-1 <= cval.intVal && cval.intVal <= 8);
10476                 goto PUSH_I4CON;
10477
10478             case CEE_LDC_I4_S:
10479                 cval.intVal = getI1LittleEndian(codeAddr);
10480                 goto PUSH_I4CON;
10481             case CEE_LDC_I4:
10482                 cval.intVal = getI4LittleEndian(codeAddr);
10483                 goto PUSH_I4CON;
10484             PUSH_I4CON:
10485                 JITDUMP(" %d", cval.intVal);
10486                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10487                 break;
10488
10489             case CEE_LDC_I8:
10490                 cval.lngVal = getI8LittleEndian(codeAddr);
10491                 JITDUMP(" 0x%016llx", cval.lngVal);
10492                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10493                 break;
10494
10495             case CEE_LDC_R8:
10496                 cval.dblVal = getR8LittleEndian(codeAddr);
10497                 JITDUMP(" %#.17g", cval.dblVal);
10498                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10499                 break;
10500
10501             case CEE_LDC_R4:
10502                 cval.dblVal = getR4LittleEndian(codeAddr);
10503                 JITDUMP(" %#.17g", cval.dblVal);
10504                 {
10505                     GenTree* cnsOp = gtNewDconNode(cval.dblVal);
10506 #if !FEATURE_X87_DOUBLES
10507                     // X87 stack doesn't differentiate between float/double
10508                     // so R4 is treated as R8, but everybody else does
10509                     cnsOp->gtType = TYP_FLOAT;
10510 #endif // FEATURE_X87_DOUBLES
10511                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10512                 }
10513                 break;
10514
10515             case CEE_LDSTR:
10516
10517                 if (compIsForInlining())
10518                 {
10519                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10520                     {
10521                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
10522                         return;
10523                     }
10524                 }
10525
10526                 val = getU4LittleEndian(codeAddr);
10527                 JITDUMP(" %08X", val);
10528                 if (tiVerificationNeeded)
10529                 {
10530                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
10531                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
10532                 }
10533                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
10534
10535                 break;
10536
10537             case CEE_LDARG:
10538                 lclNum = getU2LittleEndian(codeAddr);
10539                 JITDUMP(" %u", lclNum);
10540                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10541                 break;
10542
10543             case CEE_LDARG_S:
10544                 lclNum = getU1LittleEndian(codeAddr);
10545                 JITDUMP(" %u", lclNum);
10546                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10547                 break;
10548
10549             case CEE_LDARG_0:
10550             case CEE_LDARG_1:
10551             case CEE_LDARG_2:
10552             case CEE_LDARG_3:
10553                 lclNum = (opcode - CEE_LDARG_0);
10554                 assert(lclNum >= 0 && lclNum < 4);
10555                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10556                 break;
10557
10558             case CEE_LDLOC:
10559                 lclNum = getU2LittleEndian(codeAddr);
10560                 JITDUMP(" %u", lclNum);
10561                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10562                 break;
10563
10564             case CEE_LDLOC_S:
10565                 lclNum = getU1LittleEndian(codeAddr);
10566                 JITDUMP(" %u", lclNum);
10567                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10568                 break;
10569
10570             case CEE_LDLOC_0:
10571             case CEE_LDLOC_1:
10572             case CEE_LDLOC_2:
10573             case CEE_LDLOC_3:
10574                 lclNum = (opcode - CEE_LDLOC_0);
10575                 assert(lclNum >= 0 && lclNum < 4);
10576                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10577                 break;
10578
10579             case CEE_STARG:
10580                 lclNum = getU2LittleEndian(codeAddr);
10581                 goto STARG;
10582
10583             case CEE_STARG_S:
10584                 lclNum = getU1LittleEndian(codeAddr);
10585             STARG:
10586                 JITDUMP(" %u", lclNum);
10587
10588                 if (tiVerificationNeeded)
10589                 {
10590                     Verify(lclNum < info.compILargsCount, "bad arg num");
10591                 }
10592
10593                 if (compIsForInlining())
10594                 {
10595                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10596                     noway_assert(op1->gtOper == GT_LCL_VAR);
10597                     lclNum = op1->AsLclVar()->gtLclNum;
10598
10599                     goto VAR_ST_VALID;
10600                 }
10601
10602                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10603                 assertImp(lclNum < numArgs);
10604
10605                 if (lclNum == info.compThisArg)
10606                 {
10607                     lclNum = lvaArg0Var;
10608                 }
10609
10610                 // We should have seen this arg write in the prescan
10611                 assert(lvaTable[lclNum].lvHasILStoreOp);
10612
10613                 if (tiVerificationNeeded)
10614                 {
10615                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
10616                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
10617                            "type mismatch");
10618
10619                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10620                     {
10621                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
10622                     }
10623                 }
10624
10625                 goto VAR_ST;
10626
10627             case CEE_STLOC:
10628                 lclNum  = getU2LittleEndian(codeAddr);
10629                 isLocal = true;
10630                 JITDUMP(" %u", lclNum);
10631                 goto LOC_ST;
10632
10633             case CEE_STLOC_S:
10634                 lclNum  = getU1LittleEndian(codeAddr);
10635                 isLocal = true;
10636                 JITDUMP(" %u", lclNum);
10637                 goto LOC_ST;
10638
10639             case CEE_STLOC_0:
10640             case CEE_STLOC_1:
10641             case CEE_STLOC_2:
10642             case CEE_STLOC_3:
10643                 isLocal = true;
10644                 lclNum  = (opcode - CEE_STLOC_0);
10645                 assert(lclNum >= 0 && lclNum < 4);
10646
10647             LOC_ST:
10648                 if (tiVerificationNeeded)
10649                 {
10650                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10651                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
10652                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
10653                            "type mismatch");
10654                 }
10655
10656                 if (compIsForInlining())
10657                 {
10658                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10659
10660                     /* Have we allocated a temp for this local? */
10661
10662                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
10663
10664                     goto _PopValue;
10665                 }
10666
10667                 lclNum += numArgs;
10668
10669             VAR_ST:
10670
10671                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10672                 {
10673                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10674                     BADCODE("Bad IL");
10675                 }
10676
10677             VAR_ST_VALID:
10678
10679                 /* if it is a struct assignment, make certain we don't overflow the buffer */
10680                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10681
10682                 if (lvaTable[lclNum].lvNormalizeOnLoad())
10683                 {
10684                     lclTyp = lvaGetRealType(lclNum);
10685                 }
10686                 else
10687                 {
10688                     lclTyp = lvaGetActualType(lclNum);
10689                 }
10690
10691             _PopValue:
10692                 /* Pop the value being assigned */
10693
10694                 {
10695                     StackEntry se = impPopStack();
10696                     clsHnd        = se.seTypeInfo.GetClassHandle();
10697                     op1           = se.val;
10698                     tiRetVal      = se.seTypeInfo;
10699                 }
10700
10701 #ifdef FEATURE_SIMD
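                      // The popped value may still be typed TYP_STRUCT even though the destination local is a SIMD
                      // type; retype the value to the local's SIMD type.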
10702                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10703                 {
10704                     assert(op1->TypeGet() == TYP_STRUCT);
10705                     op1->gtType = lclTyp;
10706                 }
10707 #endif // FEATURE_SIMD
10708
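                      // If the stack value and the destination differ only as int32 vs. native int, insert the
                      // implicit conversion that the ECMA spec permits for stores.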
10709                 op1 = impImplicitIorI4Cast(op1, lclTyp);
10710
10711 #ifdef _TARGET_64BIT_
10712                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10713                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10714                 {
10715                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10716                     op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
10717                 }
10718 #endif // _TARGET_64BIT_
10719
10720                 // We had better assign it a value of the correct type
10721                 assertImp(
10722                     genActualType(lclTyp) == genActualType(op1->gtType) ||
10723                     (genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr()) ||
10724                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10725                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10726                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10727                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10728
10729                 /* If op1 is "&var" then its type is the transient "*" and it can
10730                    be used either as TYP_BYREF or TYP_I_IMPL */
10731
10732                 if (op1->IsVarAddr())
10733                 {
10734                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10735
10736                     /* When "&var" is created, we assume it is a byref. If it is
10737                        being assigned to a TYP_I_IMPL var, change the type to
10738                        prevent unnecessary GC info */
10739
10740                     if (genActualType(lclTyp) == TYP_I_IMPL)
10741                     {
10742                         op1->gtType = TYP_I_IMPL;
10743                     }
10744                 }
10745
10746                 // If this is a local and the local is a ref type, see
10747                 // if we can improve type information based on the
10748                 // value being assigned.
10749                 if (isLocal && (lclTyp == TYP_REF))
10750                 {
10751                     // We should have seen a stloc in our IL prescan.
10752                     assert(lvaTable[lclNum].lvHasILStoreOp);
10753
10754                     const bool isSingleILStoreLocal =
10755                         !lvaTable[lclNum].lvHasMultipleILStoreOp && !lvaTable[lclNum].lvHasLdAddrOp;
10756
10757                     // Conservative check that there is just one
10758                     // definition that reaches this store.
10759                     const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
10760
10761                     if (isSingleILStoreLocal && hasSingleReachingDef)
10762                     {
10763                         lvaUpdateClass(lclNum, op1, clsHnd);
10764                     }
10765                 }
10766
10767                 /* Filter out simple assignments to itself */
10768
10769                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10770                 {
10771                     if (opts.compDbgCode)
10772                     {
10773                         op1 = gtNewNothingNode();
10774                         goto SPILL_APPEND;
10775                     }
10776                     else
10777                     {
10778                         break;
10779                     }
10780                 }
10781
10782                 /* Create the assignment node */
10783
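                      // op2 is the destination local (the LHS of the assignment); opcodeOffs + sz + 1 is the IL
                      // offset recorded on the node.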
10784                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10785
10786                 /* If the local is aliased or pinned, we need to spill calls and
10787                    indirections from the stack. */
10788
10789                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
10790                     (verCurrentState.esStackDepth > 0))
10791                 {
10792                     impSpillSideEffects(false,
10793                                         (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
10794                 }
10795
10796                 /* Spill any refs to the local from the stack */
10797
10798                 impSpillLclRefs(lclNum);
10799
10800 #if !FEATURE_X87_DOUBLES
10801                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10802                 // We insert a cast to the dest 'op2' type
10803                 //
10804                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10805                     varTypeIsFloating(op2->gtType))
10806                 {
10807                     op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
10808                 }
10809 #endif // !FEATURE_X87_DOUBLES
10810
10811                 if (varTypeIsStruct(lclTyp))
10812                 {
10813                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10814                 }
10815                 else
10816                 {
10817                     // The code generator generates GC tracking information
10818                     // based on the RHS of the assignment.  Later the LHS (which is
10819                     // a BYREF) gets used and the emitter checks that that variable
10820                     // is being tracked.  It is not (since the RHS was an int and did
10821                     // not need tracking).  To keep this assert happy, we change the RHS
10822                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10823                     {
10824                         op1->gtType = TYP_BYREF;
10825                     }
10826                     op1 = gtNewAssignNode(op2, op1);
10827                 }
10828
10829                 goto SPILL_APPEND;
10830
10831             case CEE_LDLOCA:
10832                 lclNum = getU2LittleEndian(codeAddr);
10833                 goto LDLOCA;
10834
10835             case CEE_LDLOCA_S:
10836                 lclNum = getU1LittleEndian(codeAddr);
10837             LDLOCA:
10838                 JITDUMP(" %u", lclNum);
10839                 if (tiVerificationNeeded)
10840                 {
10841                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10842                     Verify(info.compInitMem, "initLocals not set");
10843                 }
10844
10845                 if (compIsForInlining())
10846                 {
10847                     // Get the local type
10848                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10849
10850                     /* Have we allocated a temp for this local? */
10851
10852                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10853
10854                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10855
10856                     goto _PUSH_ADRVAR;
10857                 }
10858
10859                 lclNum += numArgs;
10860                 assertImp(lclNum < info.compLocalsCount);
10861                 goto ADRVAR;
10862
10863             case CEE_LDARGA:
10864                 lclNum = getU2LittleEndian(codeAddr);
10865                 goto LDARGA;
10866
10867             case CEE_LDARGA_S:
10868                 lclNum = getU1LittleEndian(codeAddr);
10869             LDARGA:
10870                 JITDUMP(" %u", lclNum);
10871                 Verify(lclNum < info.compILargsCount, "bad arg num");
10872
10873                 if (compIsForInlining())
10874                 {
10875                     // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10876                     // followed by a ldfld to load the field.
10877
10878                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10879                     if (op1->gtOper != GT_LCL_VAR)
10880                     {
10881                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10882                         return;
10883                     }
10884
10885                     assert(op1->gtOper == GT_LCL_VAR);
10886
10887                     goto _PUSH_ADRVAR;
10888                 }
10889
10890                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10891                 assertImp(lclNum < numArgs);
10892
10893                 if (lclNum == info.compThisArg)
10894                 {
10895                     lclNum = lvaArg0Var;
10896                 }
10897
10898                 goto ADRVAR;
10899
10900             ADRVAR:
10901
10902                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10903
10904             _PUSH_ADRVAR:
10905                 assert(op1->gtOper == GT_LCL_VAR);
10906
10907                 /* Note that this is supposed to create the transient type "*"
10908                    which may be used as a TYP_I_IMPL. However we catch places
10909                    where it is used as a TYP_I_IMPL and change the node if needed.
10910                    Thus we are pessimistic and may report byrefs in the GC info
10911                    where it was not absolutely needed, but it is safer this way.
10912                  */
10913                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10914
10915                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10916                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10917
10918                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10919                 if (tiVerificationNeeded)
10920                 {
10921                     // Don't allow taking address of uninit this ptr.
10922                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10923                     {
10924                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10925                     }
10926
10927                     if (!tiRetVal.IsByRef())
10928                     {
10929                         tiRetVal.MakeByRef();
10930                     }
10931                     else
10932                     {
10933                         Verify(false, "byref to byref");
10934                     }
10935                 }
10936
10937                 impPushOnStack(op1, tiRetVal);
10938                 break;
10939
10940             case CEE_ARGLIST:
10941
10942                 if (!info.compIsVarArgs)
10943                 {
10944                     BADCODE("arglist in non-vararg method");
10945                 }
10946
10947                 if (tiVerificationNeeded)
10948                 {
10949                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10950                 }
10951                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10952
10953                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10954                    adjusted the arg count because this is like fetching the last param */
10955                 assertImp(0 < numArgs);
10956                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
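                      // CEE_ARGLIST evaluates to the address of the hidden vararg cookie argument.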
10957                 lclNum = lvaVarargsHandleArg;
10958                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10959                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10960                 impPushOnStack(op1, tiRetVal);
10961                 break;
10962
10963             case CEE_ENDFINALLY:
10964
10965                 if (compIsForInlining())
10966                 {
10967                     assert(!"Shouldn't have exception handlers in the inliner!");
10968                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10969                     return;
10970                 }
10971
10972                 if (verCurrentState.esStackDepth > 0)
10973                 {
10974                     impEvalSideEffects();
10975                 }
10976
10977                 if (info.compXcptnsCount == 0)
10978                 {
10979                     BADCODE("endfinally outside finally");
10980                 }
10981
10982                 assert(verCurrentState.esStackDepth == 0);
10983
10984                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10985                 goto APPEND;
10986
10987             case CEE_ENDFILTER:
10988
10989                 if (compIsForInlining())
10990                 {
10991                     assert(!"Shouldn't have exception handlers in the inliner!");
10992                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10993                     return;
10994                 }
10995
10996                 block->bbSetRunRarely(); // filters are rare
10997
10998                 if (info.compXcptnsCount == 0)
10999                 {
11000                     BADCODE("endfilter outside filter");
11001                 }
11002
11003                 if (tiVerificationNeeded)
11004                 {
11005                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
11006                 }
11007
11008                 op1 = impPopStack().val;
11009                 assertImp(op1->gtType == TYP_INT);
11010                 if (!bbInFilterILRange(block))
11011                 {
11012                     BADCODE("EndFilter outside a filter handler");
11013                 }
11014
11015                 /* Mark current bb as end of filter */
11016
11017                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
11018                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
11019
11020                 /* Mark catch handler as successor */
11021
11022                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
11023                 if (verCurrentState.esStackDepth != 0)
11024                 {
11025                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
11026                                                 DEBUGARG(__LINE__));
11027                 }
11028                 goto APPEND;
11029
11030             case CEE_RET:
11031                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
11032             RET:
11033                 if (!impReturnInstruction(block, prefixFlags, opcode))
11034                 {
11035                     return; // abort
11036                 }
11037                 else
11038                 {
11039                     break;
11040                 }
11041
11042             case CEE_JMP:
11043
11044                 assert(!compIsForInlining());
11045
11046                 if (tiVerificationNeeded)
11047                 {
11048                     Verify(false, "Invalid opcode: CEE_JMP");
11049                 }
11050
11051                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
11052                 {
11053                     /* CEE_JMP does not make sense in some "protected" regions. */
11054
11055                     BADCODE("Jmp not allowed in protected region");
11056                 }
11057
11058                 if (verCurrentState.esStackDepth != 0)
11059                 {
11060                     BADCODE("Stack must be empty after CEE_JMPs");
11061                 }
11062
11063                 _impResolveToken(CORINFO_TOKENKIND_Method);
11064
11065                 JITDUMP(" %08X", resolvedToken.token);
11066
11067                 /* The signature of the target has to be identical to ours.
11068                    At least check that the arg count, return type, and calling convention match */
11069
11070                 eeGetMethodSig(resolvedToken.hMethod, &sig);
11071                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
11072                     sig.retType != info.compMethodInfo->args.retType ||
11073                     sig.callConv != info.compMethodInfo->args.callConv)
11074                 {
11075                     BADCODE("Incompatible target for CEE_JMPs");
11076                 }
11077
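                      // GT_JMP carries the target method handle; instead of returning, the method will jump to
                      // the target in its epilog.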
11078                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
11079
11080                 /* Mark the basic block as being a JUMP instead of RETURN */
11081
11082                 block->bbFlags |= BBF_HAS_JMP;
11083
11084                 /* Set this flag to make sure register arguments have a location assigned
11085                  * even if we don't use them inside the method */
11086
11087                 compJmpOpUsed = true;
11088
11089                 fgNoStructPromotion = true;
11090
11091                 goto APPEND;
11092
11093             case CEE_LDELEMA:
11094                 assertImp(sz == sizeof(unsigned));
11095
11096                 _impResolveToken(CORINFO_TOKENKIND_Class);
11097
11098                 JITDUMP(" %08X", resolvedToken.token);
11099
11100                 ldelemClsHnd = resolvedToken.hClass;
11101
11102                 if (tiVerificationNeeded)
11103                 {
11104                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11105                     typeInfo tiIndex = impStackTop().seTypeInfo;
11106
11107                     // As per ECMA, the 'index' specified can be either int32 or native int.
11108                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11109
11110                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
11111                     Verify(tiArray.IsNullObjRef() ||
11112                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
11113                            "bad array");
11114
11115                     tiRetVal = arrayElemType;
11116                     tiRetVal.MakeByRef();
11117                     if (prefixFlags & PREFIX_READONLY)
11118                     {
11119                         tiRetVal.SetIsReadonlyByRef();
11120                     }
11121
11122                     // an array interior pointer is always in the heap
11123                     tiRetVal.SetIsPermanentHomeByRef();
11124                 }
11125
11126                 // If it's a value class array we just do a simple address-of
11127                 if (eeIsValueClass(ldelemClsHnd))
11128                 {
11129                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
11130                     if (cit == CORINFO_TYPE_UNDEF)
11131                     {
11132                         lclTyp = TYP_STRUCT;
11133                     }
11134                     else
11135                     {
11136                         lclTyp = JITtype2varType(cit);
11137                     }
11138                     goto ARR_LD_POST_VERIFY;
11139                 }
11140
11141                 // Similarly, if it's a readonly access, we can do a simple address-of
11142                 // without doing a runtime type-check
11143                 if (prefixFlags & PREFIX_READONLY)
11144                 {
11145                     lclTyp = TYP_REF;
11146                     goto ARR_LD_POST_VERIFY;
11147                 }
11148
11149                 // Otherwise we need the full helper function with run-time type check
11150                 op1 = impTokenToHandle(&resolvedToken);
11151                 if (op1 == nullptr)
11152                 { // compDonotInline()
11153                     return;
11154                 }
11155
11156                 args = gtNewArgList(op1);                      // Type
11157                 args = gtNewListNode(impPopStack().val, args); // index
11158                 args = gtNewListNode(impPopStack().val, args); // array
11159                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
11160
11161                 impPushOnStack(op1, tiRetVal);
11162                 break;
11163
11164             // ldelem for reference and value types
11165             case CEE_LDELEM:
11166                 assertImp(sz == sizeof(unsigned));
11167
11168                 _impResolveToken(CORINFO_TOKENKIND_Class);
11169
11170                 JITDUMP(" %08X", resolvedToken.token);
11171
11172                 ldelemClsHnd = resolvedToken.hClass;
11173
11174                 if (tiVerificationNeeded)
11175                 {
11176                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11177                     typeInfo tiIndex = impStackTop().seTypeInfo;
11178
11179                     // As per ECMA, the 'index' specified can be either int32 or native int.
11180                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11181                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
11182
11183                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
11184                            "type of array incompatible with type operand");
11185                     tiRetVal.NormaliseForStack();
11186                 }
11187
11188                 // If it's a reference type or generic variable type
11189                 // then just generate code as though it's a ldelem.ref instruction
11190                 if (!eeIsValueClass(ldelemClsHnd))
11191                 {
11192                     lclTyp = TYP_REF;
11193                     opcode = CEE_LDELEM_REF;
11194                 }
11195                 else
11196                 {
11197                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
11198                     lclTyp             = JITtype2varType(jitTyp);
11199                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
11200                     tiRetVal.NormaliseForStack();
11201                 }
11202                 goto ARR_LD_POST_VERIFY;
11203
11204             case CEE_LDELEM_I1:
11205                 lclTyp = TYP_BYTE;
11206                 goto ARR_LD;
11207             case CEE_LDELEM_I2:
11208                 lclTyp = TYP_SHORT;
11209                 goto ARR_LD;
11210             case CEE_LDELEM_I:
11211                 lclTyp = TYP_I_IMPL;
11212                 goto ARR_LD;
11213
11214             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
11215             // and treating it as TYP_INT avoids other asserts.
11216             case CEE_LDELEM_U4:
11217                 lclTyp = TYP_INT;
11218                 goto ARR_LD;
11219
11220             case CEE_LDELEM_I4:
11221                 lclTyp = TYP_INT;
11222                 goto ARR_LD;
11223             case CEE_LDELEM_I8:
11224                 lclTyp = TYP_LONG;
11225                 goto ARR_LD;
11226             case CEE_LDELEM_REF:
11227                 lclTyp = TYP_REF;
11228                 goto ARR_LD;
11229             case CEE_LDELEM_R4:
11230                 lclTyp = TYP_FLOAT;
11231                 goto ARR_LD;
11232             case CEE_LDELEM_R8:
11233                 lclTyp = TYP_DOUBLE;
11234                 goto ARR_LD;
11235             case CEE_LDELEM_U1:
11236                 lclTyp = TYP_UBYTE;
11237                 goto ARR_LD;
11238             case CEE_LDELEM_U2:
11239                 lclTyp = TYP_USHORT;
11240                 goto ARR_LD;
11241
11242             ARR_LD:
11243
11244                 if (tiVerificationNeeded)
11245                 {
11246                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11247                     typeInfo tiIndex = impStackTop().seTypeInfo;
11248
11249                     // As per ECMA, the 'index' specified can be either int32 or native int.
11250                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11251                     if (tiArray.IsNullObjRef())
11252                     {
11253                         if (lclTyp == TYP_REF)
11254                         { // we will say a deref of a null array yields a null ref
11255                             tiRetVal = typeInfo(TI_NULL);
11256                         }
11257                         else
11258                         {
11259                             tiRetVal = typeInfo(lclTyp);
11260                         }
11261                     }
11262                     else
11263                     {
11264                         tiRetVal             = verGetArrayElemType(tiArray);
11265                         typeInfo arrayElemTi = typeInfo(lclTyp);
11266 #ifdef _TARGET_64BIT_
11267                         if (opcode == CEE_LDELEM_I)
11268                         {
11269                             arrayElemTi = typeInfo::nativeInt();
11270                         }
11271
11272                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
11273                         {
11274                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
11275                         }
11276                         else
11277 #endif // _TARGET_64BIT_
11278                         {
11279                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
11280                         }
11281                     }
11282                     tiRetVal.NormaliseForStack();
11283                 }
11284             ARR_LD_POST_VERIFY:
11285
11286                 /* Pull the index value and array address */
11287                 op2 = impPopStack().val;
11288                 op1 = impPopStack().val;
11289                 assertImp(op1->gtType == TYP_REF);
11290
11291                 /* Check for null pointer - in the inliner case we simply abort */
11292
11293                 if (compIsForInlining())
11294                 {
11295                     if (op1->gtOper == GT_CNS_INT)
11296                     {
11297                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
11298                         return;
11299                     }
11300                 }
11301
11302                 op1 = impCheckForNullPointer(op1);
11303
11304                 /* Mark the block as containing an index expression */
11305
11306                 if (op1->gtOper == GT_LCL_VAR)
11307                 {
11308                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
11309                     {
11310                         block->bbFlags |= BBF_HAS_IDX_LEN;
11311                         optMethodFlags |= OMF_HAS_ARRAYREF;
11312                     }
11313                 }
11314
11315                 /* Create the index node and push it on the stack */
11316
11317                 op1 = gtNewIndexRef(lclTyp, op1, op2);
11318
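                      // ldstruct is true for a CEE_LDELEM whose element is a struct; such a load is materialized
                      // as an OBJ of the element address below.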
11319                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
11320
11321                 if ((opcode == CEE_LDELEMA) || ldstruct ||
11322                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
11323                 {
11324                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
11325
11326                     // remember the element size
11327                     if (lclTyp == TYP_REF)
11328                     {
11329                         op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
11330                     }
11331                     else
11332                     {
11333                         // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
11334                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
11335                         {
11336                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
11337                         }
11338                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
11339                         if (lclTyp == TYP_STRUCT)
11340                         {
11341                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
11342                             op1->gtIndex.gtIndElemSize = size;
11343                             op1->gtType                = lclTyp;
11344                         }
11345                     }
11346
11347                     if ((opcode == CEE_LDELEMA) || ldstruct)
11348                     {
11349                         // wrap it in a &
11350                         lclTyp = TYP_BYREF;
11351
11352                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
11353                     }
11354                     else
11355                     {
11356                         assert(lclTyp != TYP_STRUCT);
11357                     }
11358                 }
11359
11360                 if (ldstruct)
11361                 {
11362                     // Create an OBJ for the result
11363                     op1 = gtNewObjNode(ldelemClsHnd, op1);
11364                     op1->gtFlags |= GTF_EXCEPT;
11365                 }
11366                 impPushOnStack(op1, tiRetVal);
11367                 break;
11368
11369             // stelem for reference and value types
11370             case CEE_STELEM:
11371
11372                 assertImp(sz == sizeof(unsigned));
11373
11374                 _impResolveToken(CORINFO_TOKENKIND_Class);
11375
11376                 JITDUMP(" %08X", resolvedToken.token);
11377
11378                 stelemClsHnd = resolvedToken.hClass;
11379
11380                 if (tiVerificationNeeded)
11381                 {
11382                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11383                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11384                     typeInfo tiValue = impStackTop().seTypeInfo;
11385
11386                     // As per ECMA, the 'index' specified can be either int32 or native int.
11387                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11388                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11389
11390                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11391                            "type operand incompatible with array element type");
11392                     arrayElem.NormaliseForStack();
11393                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11394                 }
11395
11396                 // If it's a reference type just behave as though it's a stelem.ref instruction
11397                 if (!eeIsValueClass(stelemClsHnd))
11398                 {
11399                     goto STELEM_REF_POST_VERIFY;
11400                 }
11401
11402                 // Otherwise extract the type
11403                 {
11404                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11405                     lclTyp             = JITtype2varType(jitTyp);
11406                     goto ARR_ST_POST_VERIFY;
11407                 }
11408
11409             case CEE_STELEM_REF:
11410
11411                 if (tiVerificationNeeded)
11412                 {
11413                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11414                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11415                     typeInfo tiValue = impStackTop().seTypeInfo;
11416
11417                     // As per ECMA, the 'index' specified can be either int32 or native int.
11418                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11419                     Verify(tiValue.IsObjRef(), "bad value");
11420
11421                     // We only check that it is an object reference; the helper does additional checks
11422                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11423                 }
11424
11425             STELEM_REF_POST_VERIFY:
11426
11427                 arrayNodeTo      = impStackTop(2).val;
11428                 arrayNodeToIndex = impStackTop(1).val;
11429                 arrayNodeFrom    = impStackTop().val;
11430
11431                 //
11432                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11433                 // lot of cases because of covariance, e.g. foo[] can be cast to object[].
11434                 //
11435
11436                 // Check for assignment to the same array, e.g. arrLcl[i] = arrLcl[j]
11437                 // This does not need CORINFO_HELP_ARRADDR_ST
11438                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11439                     arrayNodeTo->gtOper == GT_LCL_VAR &&
11440                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11441                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11442                 {
11443                     JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
11444                     lclTyp = TYP_REF;
11445                     goto ARR_ST_POST_VERIFY;
11446                 }
11447
11448                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11449                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11450                 {
11451                     JITDUMP("\nstelem of null: skipping covariant store check\n");
11452                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11453                     lclTyp = TYP_REF;
11454                     goto ARR_ST_POST_VERIFY;
11455                 }
11456
11457                 /* Call a helper function to do the assignment */
11458                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11459
11460                 goto SPILL_APPEND;
11461
11462             case CEE_STELEM_I1:
11463                 lclTyp = TYP_BYTE;
11464                 goto ARR_ST;
11465             case CEE_STELEM_I2:
11466                 lclTyp = TYP_SHORT;
11467                 goto ARR_ST;
11468             case CEE_STELEM_I:
11469                 lclTyp = TYP_I_IMPL;
11470                 goto ARR_ST;
11471             case CEE_STELEM_I4:
11472                 lclTyp = TYP_INT;
11473                 goto ARR_ST;
11474             case CEE_STELEM_I8:
11475                 lclTyp = TYP_LONG;
11476                 goto ARR_ST;
11477             case CEE_STELEM_R4:
11478                 lclTyp = TYP_FLOAT;
11479                 goto ARR_ST;
11480             case CEE_STELEM_R8:
11481                 lclTyp = TYP_DOUBLE;
11482                 goto ARR_ST;
11483
11484             ARR_ST:
11485
11486                 if (tiVerificationNeeded)
11487                 {
11488                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11489                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11490                     typeInfo tiValue = impStackTop().seTypeInfo;
11491
11492                     // As per ECMA, the 'index' specified can be either int32 or native int.
11493                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11494                     typeInfo arrayElem = typeInfo(lclTyp);
11495 #ifdef _TARGET_64BIT_
11496                     if (opcode == CEE_STELEM_I)
11497                     {
11498                         arrayElem = typeInfo::nativeInt();
11499                     }
11500 #endif // _TARGET_64BIT_
11501                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11502                            "bad array");
11503
11504                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11505                            "bad value");
11506                 }
11507
11508             ARR_ST_POST_VERIFY:
11509                 /* The strict order of evaluation is LHS-operands, RHS-operands,
11510                    range-check, and then assignment. However, codegen currently
11511                    does the range-check before evaluating the RHS-operands. So to
11512                    maintain strict ordering, we spill the stack. */
11513
11514                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11515                 {
11516                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11517                                                    "Strict ordering of exceptions for Array store"));
11518                 }
11519
11520                 /* Pull the new value from the stack */
11521                 op2 = impPopStack().val;
11522
11523                 /* Pull the index value */
11524                 op1 = impPopStack().val;
11525
11526                 /* Pull the array address */
11527                 op3 = impPopStack().val;
11528
11529                 assertImp(op3->gtType == TYP_REF);
11530                 if (op2->IsVarAddr())
11531                 {
11532                     op2->gtType = TYP_I_IMPL;
11533                 }
11534
11535                 op3 = impCheckForNullPointer(op3);
11536
11537                 // Mark the block as containing an index expression
11538
11539                 if (op3->gtOper == GT_LCL_VAR)
11540                 {
11541                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
11542                     {
11543                         block->bbFlags |= BBF_HAS_IDX_LEN;
11544                         optMethodFlags |= OMF_HAS_ARRAYREF;
11545                     }
11546                 }
11547
11548                 /* Create the index node */
11549
11550                 op1 = gtNewIndexRef(lclTyp, op3, op1);
11551
11552                 /* Create the assignment node and append it */
11553
11554                 if (lclTyp == TYP_STRUCT)
11555                 {
11556                     assert(stelemClsHnd != DUMMY_INIT(NULL));
11557
11558                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
11559                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
11560                 }
11561                 if (varTypeIsStruct(op1))
11562                 {
11563                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
11564                 }
11565                 else
11566                 {
11567                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
11568                     op1 = gtNewAssignNode(op1, op2);
11569                 }
11570
11571                 /* Mark the expression as containing an assignment */
11572
11573                 op1->gtFlags |= GTF_ASG;
11574
11575                 goto SPILL_APPEND;
11576
11577             case CEE_ADD:
11578                 oper = GT_ADD;
11579                 goto MATH_OP2;
11580
11581             case CEE_ADD_OVF:
11582                 uns = false;
11583                 goto ADD_OVF;
11584             case CEE_ADD_OVF_UN:
11585                 uns = true;
11586                 goto ADD_OVF;
11587
11588             ADD_OVF:
11589                 ovfl     = true;
11590                 callNode = false;
11591                 oper     = GT_ADD;
11592                 goto MATH_OP2_FLAGS;
11593
11594             case CEE_SUB:
11595                 oper = GT_SUB;
11596                 goto MATH_OP2;
11597
11598             case CEE_SUB_OVF:
11599                 uns = false;
11600                 goto SUB_OVF;
11601             case CEE_SUB_OVF_UN:
11602                 uns = true;
11603                 goto SUB_OVF;
11604
11605             SUB_OVF:
11606                 ovfl     = true;
11607                 callNode = false;
11608                 oper     = GT_SUB;
11609                 goto MATH_OP2_FLAGS;
11610
11611             case CEE_MUL:
11612                 oper = GT_MUL;
11613                 goto MATH_MAYBE_CALL_NO_OVF;
11614
11615             case CEE_MUL_OVF:
11616                 uns = false;
11617                 goto MUL_OVF;
11618             case CEE_MUL_OVF_UN:
11619                 uns = true;
11620                 goto MUL_OVF;
11621
11622             MUL_OVF:
11623                 ovfl = true;
11624                 oper = GT_MUL;
11625                 goto MATH_MAYBE_CALL_OVF;
11626
11627             // Other binary math operations
11628
11629             case CEE_DIV:
11630                 oper = GT_DIV;
11631                 goto MATH_MAYBE_CALL_NO_OVF;
11632
11633             case CEE_DIV_UN:
11634                 oper = GT_UDIV;
11635                 goto MATH_MAYBE_CALL_NO_OVF;
11636
11637             case CEE_REM:
11638                 oper = GT_MOD;
11639                 goto MATH_MAYBE_CALL_NO_OVF;
11640
11641             case CEE_REM_UN:
11642                 oper = GT_UMOD;
11643                 goto MATH_MAYBE_CALL_NO_OVF;
11644
11645             MATH_MAYBE_CALL_NO_OVF:
11646                 ovfl = false;
11647             MATH_MAYBE_CALL_OVF:
11648                 // Morpher has some complex logic about when to turn different
11649                 // typed nodes on different platforms into helper calls. We
11650                 // need to either duplicate that logic here, or just
11651                 // pessimistically make all the nodes large enough to become
11652                 // call nodes.  Since call nodes aren't that much larger and
11653                 // these opcodes are infrequent enough I chose the latter.
11654                 callNode = true;
11655                 goto MATH_OP2_FLAGS;
11656
11657             case CEE_AND:
11658                 oper = GT_AND;
11659                 goto MATH_OP2;
11660             case CEE_OR:
11661                 oper = GT_OR;
11662                 goto MATH_OP2;
11663             case CEE_XOR:
11664                 oper = GT_XOR;
11665                 goto MATH_OP2;
11666
11667             MATH_OP2: // For default values of 'ovfl' and 'callNode'
11668
11669                 ovfl     = false;
11670                 callNode = false;
11671
11672             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11673
11674                 /* Pull two values and push back the result */
11675
11676                 if (tiVerificationNeeded)
11677                 {
11678                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11679                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11680
11681                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11682                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11683                     {
11684                         Verify(tiOp1.IsNumberType(), "not number");
11685                     }
11686                     else
11687                     {
11688                         Verify(tiOp1.IsIntegerType(), "not integer");
11689                     }
11690
11691                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11692
11693                     tiRetVal = tiOp1;
11694
11695 #ifdef _TARGET_64BIT_
11696                     if (tiOp2.IsNativeIntType())
11697                     {
11698                         tiRetVal = tiOp2;
11699                     }
11700 #endif // _TARGET_64BIT_
11701                 }
11702
11703                 op2 = impPopStack().val;
11704                 op1 = impPopStack().val;
11705
11706 #if !CPU_HAS_FP_SUPPORT
11707                 if (varTypeIsFloating(op1->gtType))
11708                 {
11709                     callNode = true;
11710                 }
11711 #endif
11712                 /* Can't do arithmetic with references */
11713                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11714
11715                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if it's a true byref, only
11716                 // if it is in the stack)
11717                 impBashVarAddrsToI(op1, op2);
11718
11719                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11720
11721                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11722
11723                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11724
11725                 if (op2->gtOper == GT_CNS_INT)
11726                 {
11727                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11728                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11729
11730                     {
11731                         impPushOnStack(op1, tiRetVal);
11732                         break;
11733                     }
11734                 }
11735
11736 #if !FEATURE_X87_DOUBLES
11737                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11738                 //
11739                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11740                 {
11741                     if (op1->TypeGet() != type)
11742                     {
11743                         // We insert a cast of op1 to 'type'
11744                         op1 = gtNewCastNode(type, op1, false, type);
11745                     }
11746                     if (op2->TypeGet() != type)
11747                     {
11748                         // We insert a cast of op2 to 'type'
11749                         op2 = gtNewCastNode(type, op2, false, type);
11750                     }
11751                 }
11752 #endif // !FEATURE_X87_DOUBLES
11753
11754 #if SMALL_TREE_NODES
11755                 if (callNode)
11756                 {
11757                     /* These operators can later be transformed into 'GT_CALL' */
11758
11759                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11760 #ifndef _TARGET_ARM_
11761                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11762                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11763                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11764                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11765 #endif
11766                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11767                     // that we'll need to transform into a general large node, but rather specifically
11768                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11769                     // and a CALL is no longer the largest.
11770                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11771                     // than an "if".
11772                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11773                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11774                 }
11775                 else
11776 #endif // SMALL_TREE_NODES
11777                 {
11778                     op1 = gtNewOperNode(oper, type, op1, op2);
11779                 }
11780
11781                 /* Special case: integer/long division may throw an exception */
11782
11783                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
11784                 {
11785                     op1->gtFlags |= GTF_EXCEPT;
11786                 }
11787
11788                 if (ovfl)
11789                 {
11790                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11791                     if (ovflType != TYP_UNKNOWN)
11792                     {
11793                         op1->gtType = ovflType;
11794                     }
11795                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11796                     if (uns)
11797                     {
11798                         op1->gtFlags |= GTF_UNSIGNED;
11799                     }
11800                 }
11801
11802                 impPushOnStack(op1, tiRetVal);
11803                 break;
11804
11805             case CEE_SHL:
11806                 oper = GT_LSH;
11807                 goto CEE_SH_OP2;
11808
11809             case CEE_SHR:
11810                 oper = GT_RSH;
11811                 goto CEE_SH_OP2;
11812             case CEE_SHR_UN:
11813                 oper = GT_RSZ;
11814                 goto CEE_SH_OP2;
11815
11816             CEE_SH_OP2:
11817                 if (tiVerificationNeeded)
11818                 {
11819                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11820                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11821                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11822                     tiRetVal = tiVal;
11823                 }
11824                 op2 = impPopStack().val;
11825                 op1 = impPopStack().val; // operand to be shifted
11826                 impBashVarAddrsToI(op1, op2);
11827
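                      // The result type of a shift follows the value being shifted; the type of the shift count
                      // does not affect it.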
11828                 type = genActualType(op1->TypeGet());
11829                 op1  = gtNewOperNode(oper, type, op1, op2);
11830
11831                 impPushOnStack(op1, tiRetVal);
11832                 break;
11833
11834             case CEE_NOT:
11835                 if (tiVerificationNeeded)
11836                 {
11837                     tiRetVal = impStackTop().seTypeInfo;
11838                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11839                 }
11840
11841                 op1 = impPopStack().val;
11842                 impBashVarAddrsToI(op1, nullptr);
11843                 type = genActualType(op1->TypeGet());
11844                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11845                 break;
11846
11847             case CEE_CKFINITE:
11848                 if (tiVerificationNeeded)
11849                 {
11850                     tiRetVal = impStackTop().seTypeInfo;
11851                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11852                 }
11853                 op1  = impPopStack().val;
11854                 type = op1->TypeGet();
11855                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11856                 op1->gtFlags |= GTF_EXCEPT;
11857
11858                 impPushOnStack(op1, tiRetVal);
11859                 break;
11860
11861             case CEE_LEAVE:
11862
11863                 val     = getI4LittleEndian(codeAddr); // jump distance
11864                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11865                 goto LEAVE;
11866
11867             case CEE_LEAVE_S:
11868                 val     = getI1LittleEndian(codeAddr); // jump distance
11869                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11870
11871             LEAVE:
11872
11873                 if (compIsForInlining())
11874                 {
11875                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11876                     return;
11877                 }
11878
11879                 JITDUMP(" %04X", jmpAddr);
11880                 if (block->bbJumpKind != BBJ_LEAVE)
11881                 {
11882                     impResetLeaveBlock(block, jmpAddr);
11883                 }
11884
11885                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11886                 impImportLeave(block);
11887                 impNoteBranchOffs();
11888
11889                 break;
11890
11891             case CEE_BR:
11892             case CEE_BR_S:
11893                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11894
11895                 if (compIsForInlining() && jmpDist == 0)
11896                 {
11897                     break; /* NOP */
11898                 }
11899
11900                 impNoteBranchOffs();
11901                 break;
11902
11903             case CEE_BRTRUE:
11904             case CEE_BRTRUE_S:
11905             case CEE_BRFALSE:
11906             case CEE_BRFALSE_S:
11907
11908                 /* Pop the comparand (now there's a neat term) from the stack */
11909                 if (tiVerificationNeeded)
11910                 {
11911                     typeInfo& tiVal = impStackTop().seTypeInfo;
11912                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11913                            "bad value");
11914                 }
11915
11916                 op1  = impPopStack().val;
11917                 type = op1->TypeGet();
11918
11919                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
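                      // A branch to the lexically next block is a no-op: make the block fall through and keep
                      // only the side effects of the condition.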
11920                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11921                 {
11922                     block->bbJumpKind = BBJ_NONE;
11923
11924                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11925                     {
11926                         op1 = gtUnusedValNode(op1);
11927                         goto SPILL_APPEND;
11928                     }
11929                     else
11930                     {
11931                         break;
11932                     }
11933                 }
11934
11935                 if (op1->OperIsCompare())
11936                 {
11937                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11938                     {
11939                         // Flip the sense of the compare
11940
11941                         op1 = gtReverseCond(op1);
11942                     }
11943                 }
11944                 else
11945                 {
11946                     /* We'll compare against an equally-sized integer 0 */
11947                     /* For small types, we always compare against int   */
11948                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11949
11950                     /* Create the comparison operator and try to fold it */
11951
11952                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11953                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11954                 }
11955
11956             // fall through
11957
11958             COND_JUMP:
11959
11960                 /* Fold comparison if we can */
11961
11962                 op1 = gtFoldExpr(op1);
11963
11964                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq' */
11965                 /* Don't make any blocks unreachable in import only mode */
11966
11967                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11968                 {
11969                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11970                        unreachable under compDbgCode */
11971                     assert(!opts.compDbgCode);
11972
11973                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11974                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11975                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11976                                                                          // block for the second time
11977
11978                     block->bbJumpKind = foldedJumpKind;
11979 #ifdef DEBUG
11980                     if (verbose)
11981                     {
11982                         if (op1->gtIntCon.gtIconVal)
11983                         {
11984                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11985                                    block->bbJumpDest->bbNum);
11986                         }
11987                         else
11988                         {
11989                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11990                         }
11991                     }
11992 #endif
11993                     break;
11994                 }
11995
11996                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11997
11998                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11999                    in impImportBlock(block). For correct line numbers, spill stack. */
12000
12001                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
12002                 {
12003                     impSpillStackEnsure(true);
12004                 }
12005
12006                 goto SPILL_APPEND;
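                // Example (illustrative sketch, assuming an int local V00):
                //     ldloc.0
                //     brtrue.s TARGET
                // pops V00, compares it against a zero constant of the same actual type,
                // and appends GT_JTRUE(GT_NE(V00, 0)); a sequence like "clt; brfalse TARGET"
                // instead reverses the relop (GT_LT becomes GT_GE via gtReverseCond) rather
                // than comparing the 0/1 result against zero again.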
12007
12008             case CEE_CEQ:
12009                 oper = GT_EQ;
12010                 uns  = false;
12011                 goto CMP_2_OPs;
12012             case CEE_CGT_UN:
12013                 oper = GT_GT;
12014                 uns  = true;
12015                 goto CMP_2_OPs;
12016             case CEE_CGT:
12017                 oper = GT_GT;
12018                 uns  = false;
12019                 goto CMP_2_OPs;
12020             case CEE_CLT_UN:
12021                 oper = GT_LT;
12022                 uns  = true;
12023                 goto CMP_2_OPs;
12024             case CEE_CLT:
12025                 oper = GT_LT;
12026                 uns  = false;
12027                 goto CMP_2_OPs;
12028
12029             CMP_2_OPs:
12030                 if (tiVerificationNeeded)
12031                 {
12032                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12033                     tiRetVal = typeInfo(TI_INT);
12034                 }
12035
12036                 op2 = impPopStack().val;
12037                 op1 = impPopStack().val;
12038
12039 #ifdef _TARGET_64BIT_
12040                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
12041                 {
12042                     op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12043                 }
12044                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
12045                 {
12046                     op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12047                 }
12048 #endif // _TARGET_64BIT_
12049
12050                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12051                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12052                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12053
12054                 /* Create the comparison node */
12055
12056                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12057
12058                 /* TODO: setting both flags when only one is appropriate */
12059                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
12060                 {
12061                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
12062                 }
12063
12064                 // Fold result, if possible.
12065                 op1 = gtFoldExpr(op1);
12066
12067                 impPushOnStack(op1, tiRetVal);
12068                 break;
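                // Example (illustrative sketch): "ldloc.0; ldloc.1; cgt.un" pops both
                // operands, builds GT_GT(op1, op2) with GTF_UNSIGNED | GTF_RELOP_NAN_UN set,
                // folds the relop if possible, and pushes the TYP_INT 0/1 result.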
12069
12070             case CEE_BEQ_S:
12071             case CEE_BEQ:
12072                 oper = GT_EQ;
12073                 goto CMP_2_OPs_AND_BR;
12074
12075             case CEE_BGE_S:
12076             case CEE_BGE:
12077                 oper = GT_GE;
12078                 goto CMP_2_OPs_AND_BR;
12079
12080             case CEE_BGE_UN_S:
12081             case CEE_BGE_UN:
12082                 oper = GT_GE;
12083                 goto CMP_2_OPs_AND_BR_UN;
12084
12085             case CEE_BGT_S:
12086             case CEE_BGT:
12087                 oper = GT_GT;
12088                 goto CMP_2_OPs_AND_BR;
12089
12090             case CEE_BGT_UN_S:
12091             case CEE_BGT_UN:
12092                 oper = GT_GT;
12093                 goto CMP_2_OPs_AND_BR_UN;
12094
12095             case CEE_BLE_S:
12096             case CEE_BLE:
12097                 oper = GT_LE;
12098                 goto CMP_2_OPs_AND_BR;
12099
12100             case CEE_BLE_UN_S:
12101             case CEE_BLE_UN:
12102                 oper = GT_LE;
12103                 goto CMP_2_OPs_AND_BR_UN;
12104
12105             case CEE_BLT_S:
12106             case CEE_BLT:
12107                 oper = GT_LT;
12108                 goto CMP_2_OPs_AND_BR;
12109
12110             case CEE_BLT_UN_S:
12111             case CEE_BLT_UN:
12112                 oper = GT_LT;
12113                 goto CMP_2_OPs_AND_BR_UN;
12114
12115             case CEE_BNE_UN_S:
12116             case CEE_BNE_UN:
12117                 oper = GT_NE;
12118                 goto CMP_2_OPs_AND_BR_UN;
12119
12120             CMP_2_OPs_AND_BR_UN:
12121                 uns       = true;
12122                 unordered = true;
12123                 goto CMP_2_OPs_AND_BR_ALL;
12124             CMP_2_OPs_AND_BR:
12125                 uns       = false;
12126                 unordered = false;
12127                 goto CMP_2_OPs_AND_BR_ALL;
12128             CMP_2_OPs_AND_BR_ALL:
12129
12130                 if (tiVerificationNeeded)
12131                 {
12132                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12133                 }
12134
12135                 /* Pull two values */
12136                 op2 = impPopStack().val;
12137                 op1 = impPopStack().val;
12138
12139 #ifdef _TARGET_64BIT_
12140                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
12141                 {
12142                     op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12143                 }
12144                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
12145                 {
12146                     op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12147                 }
12148 #endif // _TARGET_64BIT_
12149
12150                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12151                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12152                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12153
12154                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12155                 {
12156                     block->bbJumpKind = BBJ_NONE;
12157
12158                     if (op1->gtFlags & GTF_GLOB_EFFECT)
12159                     {
12160                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12161                                                        "Branch to next Optimization, op1 side effect"));
12162                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12163                     }
12164                     if (op2->gtFlags & GTF_GLOB_EFFECT)
12165                     {
12166                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12167                                                        "Branch to next Optimization, op2 side effect"));
12168                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12169                     }
12170
12171 #ifdef DEBUG
12172                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
12173                     {
12174                         impNoteLastILoffs();
12175                     }
12176 #endif
12177                     break;
12178                 }
12179 #if !FEATURE_X87_DOUBLES
12180                 // We can generate a compare of differently sized floating point op1 and op2;
12181                 // we insert a cast to make the operand types match.
12182                 //
12183                 if (varTypeIsFloating(op1->TypeGet()))
12184                 {
12185                     if (op1->TypeGet() != op2->TypeGet())
12186                     {
12187                         assert(varTypeIsFloating(op2->TypeGet()));
12188
12189                         // Say op1 = double and op2 = float. To avoid loss of precision
12190                         // while comparing, op2 is converted to double and a double
12191                         // comparison is done.
12192                         if (op1->TypeGet() == TYP_DOUBLE)
12193                         {
12194                             // We insert a cast of op2 to TYP_DOUBLE
12195                             op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
12196                         }
12197                         else if (op2->TypeGet() == TYP_DOUBLE)
12198                         {
12199                             // We insert a cast of op1 to TYP_DOUBLE
12200                             op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
12201                         }
12202                     }
12203                 }
12204 #endif // !FEATURE_X87_DOUBLES
12205
12206                 /* Create and append the operator */
12207
12208                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12209
12210                 if (uns)
12211                 {
12212                     op1->gtFlags |= GTF_UNSIGNED;
12213                 }
12214
12215                 if (unordered)
12216                 {
12217                     op1->gtFlags |= GTF_RELOP_NAN_UN;
12218                 }
12219
12220                 goto COND_JUMP;
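                // Example (illustrative sketch): "ldloc.0; ldloc.1; bge.un.s TARGET" builds
                // GT_GE(op1, op2) with GTF_UNSIGNED and GTF_RELOP_NAN_UN set and then shares
                // the COND_JUMP path above, so it is folded and wrapped in GT_JTRUE exactly
                // like brtrue/brfalse; on 64-bit targets a TYP_INT operand is first widened
                // to TYP_I_IMPL when the other operand is native-int sized.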
12221
12222             case CEE_SWITCH:
12223                 assert(!compIsForInlining());
12224
12225                 if (tiVerificationNeeded)
12226                 {
12227                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
12228                 }
12229                 /* Pop the switch value off the stack */
12230                 op1 = impPopStack().val;
12231                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
12232
12233                 /* We can create a switch node */
12234
12235                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
12236
12237                 val = (int)getU4LittleEndian(codeAddr);
12238                 codeAddr += 4 + val * 4; // skip over the switch-table
12239
12240                 goto SPILL_APPEND;
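                // Example (illustrative sketch): a switch with 3 targets reads the case
                // count (val == 3), advances codeAddr by 4 + 3 * 4 bytes to step over the
                // jump table, and appends GT_SWITCH(selector) for the popped selector value.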
12241
12242             /************************** Casting OPCODES ***************************/
12243
12244             case CEE_CONV_OVF_I1:
12245                 lclTyp = TYP_BYTE;
12246                 goto CONV_OVF;
12247             case CEE_CONV_OVF_I2:
12248                 lclTyp = TYP_SHORT;
12249                 goto CONV_OVF;
12250             case CEE_CONV_OVF_I:
12251                 lclTyp = TYP_I_IMPL;
12252                 goto CONV_OVF;
12253             case CEE_CONV_OVF_I4:
12254                 lclTyp = TYP_INT;
12255                 goto CONV_OVF;
12256             case CEE_CONV_OVF_I8:
12257                 lclTyp = TYP_LONG;
12258                 goto CONV_OVF;
12259
12260             case CEE_CONV_OVF_U1:
12261                 lclTyp = TYP_UBYTE;
12262                 goto CONV_OVF;
12263             case CEE_CONV_OVF_U2:
12264                 lclTyp = TYP_USHORT;
12265                 goto CONV_OVF;
12266             case CEE_CONV_OVF_U:
12267                 lclTyp = TYP_U_IMPL;
12268                 goto CONV_OVF;
12269             case CEE_CONV_OVF_U4:
12270                 lclTyp = TYP_UINT;
12271                 goto CONV_OVF;
12272             case CEE_CONV_OVF_U8:
12273                 lclTyp = TYP_ULONG;
12274                 goto CONV_OVF;
12275
12276             case CEE_CONV_OVF_I1_UN:
12277                 lclTyp = TYP_BYTE;
12278                 goto CONV_OVF_UN;
12279             case CEE_CONV_OVF_I2_UN:
12280                 lclTyp = TYP_SHORT;
12281                 goto CONV_OVF_UN;
12282             case CEE_CONV_OVF_I_UN:
12283                 lclTyp = TYP_I_IMPL;
12284                 goto CONV_OVF_UN;
12285             case CEE_CONV_OVF_I4_UN:
12286                 lclTyp = TYP_INT;
12287                 goto CONV_OVF_UN;
12288             case CEE_CONV_OVF_I8_UN:
12289                 lclTyp = TYP_LONG;
12290                 goto CONV_OVF_UN;
12291
12292             case CEE_CONV_OVF_U1_UN:
12293                 lclTyp = TYP_UBYTE;
12294                 goto CONV_OVF_UN;
12295             case CEE_CONV_OVF_U2_UN:
12296                 lclTyp = TYP_USHORT;
12297                 goto CONV_OVF_UN;
12298             case CEE_CONV_OVF_U_UN:
12299                 lclTyp = TYP_U_IMPL;
12300                 goto CONV_OVF_UN;
12301             case CEE_CONV_OVF_U4_UN:
12302                 lclTyp = TYP_UINT;
12303                 goto CONV_OVF_UN;
12304             case CEE_CONV_OVF_U8_UN:
12305                 lclTyp = TYP_ULONG;
12306                 goto CONV_OVF_UN;
12307
12308             CONV_OVF_UN:
12309                 uns = true;
12310                 goto CONV_OVF_COMMON;
12311             CONV_OVF:
12312                 uns = false;
12313                 goto CONV_OVF_COMMON;
12314
12315             CONV_OVF_COMMON:
12316                 ovfl = true;
12317                 goto _CONV;
12318
12319             case CEE_CONV_I1:
12320                 lclTyp = TYP_BYTE;
12321                 goto CONV;
12322             case CEE_CONV_I2:
12323                 lclTyp = TYP_SHORT;
12324                 goto CONV;
12325             case CEE_CONV_I:
12326                 lclTyp = TYP_I_IMPL;
12327                 goto CONV;
12328             case CEE_CONV_I4:
12329                 lclTyp = TYP_INT;
12330                 goto CONV;
12331             case CEE_CONV_I8:
12332                 lclTyp = TYP_LONG;
12333                 goto CONV;
12334
12335             case CEE_CONV_U1:
12336                 lclTyp = TYP_UBYTE;
12337                 goto CONV;
12338             case CEE_CONV_U2:
12339                 lclTyp = TYP_USHORT;
12340                 goto CONV;
12341 #if (REGSIZE_BYTES == 8)
12342             case CEE_CONV_U:
12343                 lclTyp = TYP_U_IMPL;
12344                 goto CONV_UN;
12345 #else
12346             case CEE_CONV_U:
12347                 lclTyp = TYP_U_IMPL;
12348                 goto CONV;
12349 #endif
12350             case CEE_CONV_U4:
12351                 lclTyp = TYP_UINT;
12352                 goto CONV;
12353             case CEE_CONV_U8:
12354                 lclTyp = TYP_ULONG;
12355                 goto CONV_UN;
12356
12357             case CEE_CONV_R4:
12358                 lclTyp = TYP_FLOAT;
12359                 goto CONV;
12360             case CEE_CONV_R8:
12361                 lclTyp = TYP_DOUBLE;
12362                 goto CONV;
12363
12364             case CEE_CONV_R_UN:
12365                 lclTyp = TYP_DOUBLE;
12366                 goto CONV_UN;
12367
12368             CONV_UN:
12369                 uns  = true;
12370                 ovfl = false;
12371                 goto _CONV;
12372
12373             CONV:
12374                 uns  = false;
12375                 ovfl = false;
12376                 goto _CONV;
12377
12378             _CONV:
12379                 // just check that we have a number on the stack
12380                 if (tiVerificationNeeded)
12381                 {
12382                     const typeInfo& tiVal = impStackTop().seTypeInfo;
12383                     Verify(tiVal.IsNumberType(), "bad arg");
12384
12385 #ifdef _TARGET_64BIT_
12386                     bool isNative = false;
12387
12388                     switch (opcode)
12389                     {
12390                         case CEE_CONV_OVF_I:
12391                         case CEE_CONV_OVF_I_UN:
12392                         case CEE_CONV_I:
12393                         case CEE_CONV_OVF_U:
12394                         case CEE_CONV_OVF_U_UN:
12395                         case CEE_CONV_U:
12396                             isNative = true;
12397                         default:
12398                             // leave 'isNative' = false;
12399                             break;
12400                     }
12401                     if (isNative)
12402                     {
12403                         tiRetVal = typeInfo::nativeInt();
12404                     }
12405                     else
12406 #endif // _TARGET_64BIT_
12407                     {
12408                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12409                     }
12410                 }
12411
12412                 // Only conversions from FLOAT or DOUBLE to an integer type,
12413                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed into helper calls.
12414
12415                 if (varTypeIsFloating(lclTyp))
12416                 {
12417                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12418 #ifdef _TARGET_64BIT_
12419                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12420                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
12421                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12422                                // and generate SSE2 code instead of going through helper calls.
12423                                || (impStackTop().val->TypeGet() == TYP_BYREF)
12424 #endif
12425                         ;
12426                 }
12427                 else
12428                 {
12429                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12430                 }
12431
12432                 // At this point uns, ovfl, and callNode are all set
12433
12434                 op1 = impPopStack().val;
12435                 impBashVarAddrsToI(op1);
12436
12437                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12438                 {
12439                     op2 = op1->gtOp.gtOp2;
12440
12441                     if (op2->gtOper == GT_CNS_INT)
12442                     {
12443                         ssize_t ival = op2->gtIntCon.gtIconVal;
12444                         ssize_t mask, umask;
12445
12446                         switch (lclTyp)
12447                         {
12448                             case TYP_BYTE:
12449                             case TYP_UBYTE:
12450                                 mask  = 0x00FF;
12451                                 umask = 0x007F;
12452                                 break;
12453                             case TYP_USHORT:
12454                             case TYP_SHORT:
12455                                 mask  = 0xFFFF;
12456                                 umask = 0x7FFF;
12457                                 break;
12458
12459                             default:
12460                                 assert(!"unexpected type");
12461                                 return;
12462                         }
12463
12464                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12465                         {
12466                             /* Toss the cast, it's a waste of time */
12467
12468                             impPushOnStack(op1, tiRetVal);
12469                             break;
12470                         }
12471                         else if (ival == mask)
12472                         {
12473                             /* Toss the masking, it's a waste of time, since
12474                                we sign-extend from the small value anyway */
12475
12476                             op1 = op1->gtOp.gtOp1;
12477                         }
12478                     }
12479                 }
12480
12481                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
12482                     since the result of a cast to one of the 'small' integer
12483                     types is an integer.
12484                  */
12485
12486                 type = genActualType(lclTyp);
12487
12488 #if SMALL_TREE_NODES
12489                 if (callNode)
12490                 {
12491                     op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
12492                 }
12493                 else
12494 #endif // SMALL_TREE_NODES
12495                 {
12496                     op1 = gtNewCastNode(type, op1, uns, lclTyp);
12497                 }
12498
12499                 if (ovfl)
12500                 {
12501                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12502                 }
12503                 impPushOnStack(op1, tiRetVal);
12504                 break;
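                // Example (illustrative sketch, assuming an int32 value on the stack):
                // "conv.ovf.u1" becomes gtNewCastNode(TYP_INT, op1, false, TYP_UBYTE) with
                // GTF_OVERFLOW | GTF_EXCEPT set, while "conv.i1" applied to a value already
                // masked with "& 0x7F" is recognized by the peephole above and the cast is
                // dropped entirely.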
12505
12506             case CEE_NEG:
12507                 if (tiVerificationNeeded)
12508                 {
12509                     tiRetVal = impStackTop().seTypeInfo;
12510                     Verify(tiRetVal.IsNumberType(), "Bad arg");
12511                 }
12512
12513                 op1 = impPopStack().val;
12514                 impBashVarAddrsToI(op1, nullptr);
12515                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12516                 break;
12517
12518             case CEE_POP:
12519             {
12520                 /* Pull the top value from the stack */
12521
12522                 StackEntry se = impPopStack();
12523                 clsHnd        = se.seTypeInfo.GetClassHandle();
12524                 op1           = se.val;
12525
12526                 /* Get hold of the type of the value being duplicated */
12527
12528                 lclTyp = genActualType(op1->gtType);
12529
12530                 /* Does the value have any side effects? */
12531
12532                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
12533                 {
12534                     // Since we are throwing away the value, just normalize
12535                     // it to its address.  This is more efficient.
12536
12537                     if (varTypeIsStruct(op1))
12538                     {
12539 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
12540                         // Non-calls, such as obj or ret_expr, have to go through this.
12541                         // Calls with large struct return value have to go through this.
12542                         // Helper calls with small struct return values also have to go
12543                         // through this since they do not follow the Unix calling convention.
12544                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
12545                             op1->AsCall()->gtCallType == CT_HELPER)
12546 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
12547                         {
12548                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
12549                         }
12550                     }
12551
12552                     // If op1 is a non-overflow cast, throw it away since it is useless.
12553                     // Another reason for throwing away the useless cast is in the context of
12554                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
12555                     // The cast gets added as part of importing GT_CALL, which gets in the way
12556                     // of fgMorphCall() on the forms of tail call nodes that we assert.
12557                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
12558                     {
12559                         op1 = op1->gtOp.gtOp1;
12560                     }
12561
12562                     // If 'op1' is an expression, create an assignment node.
12563                     // This helps analyses (like CSE) work correctly.
12564
12565                     if (op1->gtOper != GT_CALL)
12566                     {
12567                         op1 = gtUnusedValNode(op1);
12568                     }
12569
12570                     /* Append the value to the tree list */
12571                     goto SPILL_APPEND;
12572                 }
12573
12574                 /* No side effects - just throw the <BEEP> thing away */
12575             }
12576             break;
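            // Example (illustrative sketch): popping a call result keeps its side effect by
            // appending the call as its own statement (non-call expressions are wrapped via
            // gtUnusedValNode first), while "ldloc.0; pop" has no side effects and the value
            // is simply discarded.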
12577
12578             case CEE_DUP:
12579             {
12580                 if (tiVerificationNeeded)
12581                 {
12582                     // Dup could mark the beginning of a delegate creation sequence; remember that.
12583                     delegateCreateStart = codeAddr - 1;
12584                     impStackTop(0);
12585                 }
12586
12587                 // If the expression to dup is simple, just clone it.
12588                 // Otherwise spill it to a temp, and reload the temp
12589                 // twice.
12590                 StackEntry se   = impPopStack();
12591                 GenTree*   tree = se.val;
12592                 tiRetVal        = se.seTypeInfo;
12593                 op1             = tree;
12594
12595                 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
12596                 {
12597                     const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
12598                     impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
12599                     var_types type = genActualType(lvaTable[tmpNum].TypeGet());
12600                     op1            = gtNewLclvNode(tmpNum, type);
12601
12602                     // Propagate type info to the temp from the stack and the original tree
12603                     if (type == TYP_REF)
12604                     {
12605                         lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
12606                     }
12607                 }
12608
12609                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12610                                    nullptr DEBUGARG("DUP instruction"));
12611
12612                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12613                 impPushOnStack(op1, tiRetVal);
12614                 impPushOnStack(op2, tiRetVal);
12615             }
12616             break;
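            // Example (illustrative sketch): dup of a non-trivial value (anything other than
            // a local, integral 0, or FP zero) first spills it to a fresh temp via
            // impAssignTempGen, then impCloneExpr produces two side-effect-free loads of that
            // temp, and both copies are pushed back on the stack.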
12617
12618             case CEE_STIND_I1:
12619                 lclTyp = TYP_BYTE;
12620                 goto STIND;
12621             case CEE_STIND_I2:
12622                 lclTyp = TYP_SHORT;
12623                 goto STIND;
12624             case CEE_STIND_I4:
12625                 lclTyp = TYP_INT;
12626                 goto STIND;
12627             case CEE_STIND_I8:
12628                 lclTyp = TYP_LONG;
12629                 goto STIND;
12630             case CEE_STIND_I:
12631                 lclTyp = TYP_I_IMPL;
12632                 goto STIND;
12633             case CEE_STIND_REF:
12634                 lclTyp = TYP_REF;
12635                 goto STIND;
12636             case CEE_STIND_R4:
12637                 lclTyp = TYP_FLOAT;
12638                 goto STIND;
12639             case CEE_STIND_R8:
12640                 lclTyp = TYP_DOUBLE;
12641                 goto STIND;
12642             STIND:
12643
12644                 if (tiVerificationNeeded)
12645                 {
12646                     typeInfo instrType(lclTyp);
12647 #ifdef _TARGET_64BIT_
12648                     if (opcode == CEE_STIND_I)
12649                     {
12650                         instrType = typeInfo::nativeInt();
12651                     }
12652 #endif // _TARGET_64BIT_
12653                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12654                 }
12655                 else
12656                 {
12657                     compUnsafeCastUsed = true; // Have to go conservative
12658                 }
12659
12660             STIND_POST_VERIFY:
12661
12662                 op2 = impPopStack().val; // value to store
12663                 op1 = impPopStack().val; // address to store to
12664
12665                 // We can only indirect off of a TYP_I_IMPL (e.g. in C-style unsafe code) or a TYP_BYREF
12666                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12667
12668                 impBashVarAddrsToI(op1, op2);
12669
12670                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12671
12672 #ifdef _TARGET_64BIT_
12673                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12674                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12675                 {
12676                     op2->gtType = TYP_I_IMPL;
12677                 }
12678                 else
12679                 {
12680                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12681                     //
12682                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12683                     {
12684                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12685                         op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
12686                     }
12687                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12688                     //
12689                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12690                     {
12691                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12692                         op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
12693                     }
12694                 }
12695 #endif // _TARGET_64BIT_
12696
12697                 if (opcode == CEE_STIND_REF)
12698                 {
12699                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12700                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12701                     lclTyp = genActualType(op2->TypeGet());
12702                 }
12703
12704 // Check target type.
12705 #ifdef DEBUG
12706                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12707                 {
12708                     if (op2->gtType == TYP_BYREF)
12709                     {
12710                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12711                     }
12712                     else if (lclTyp == TYP_BYREF)
12713                     {
12714                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12715                     }
12716                 }
12717                 else
12718                 {
12719                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12720                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12721                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12722                 }
12723 #endif
12724
12725                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12726
12727                 // stind could point anywhere, for example to a boxed class static int
12728                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12729
12730                 if (prefixFlags & PREFIX_VOLATILE)
12731                 {
12732                     assert(op1->OperGet() == GT_IND);
12733                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12734                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12735                     op1->gtFlags |= GTF_IND_VOLATILE;
12736                 }
12737
12738                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12739                 {
12740                     assert(op1->OperGet() == GT_IND);
12741                     op1->gtFlags |= GTF_IND_UNALIGNED;
12742                 }
12743
12744                 op1 = gtNewAssignNode(op1, op2);
12745                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12746
12747                 // Spill side-effects AND global-data-accesses
12748                 if (verCurrentState.esStackDepth > 0)
12749                 {
12750                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12751                 }
12752
12753                 goto APPEND;
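                // Example (illustrative sketch, assuming V00 holds the destination address as
                // a native int or byref): "ldloc.0; ldc.i4 42; stind.i4" pops the value and
                // the address and appends GT_ASG(GT_IND.int(addr), 42); the indirection is
                // marked GTF_IND_TGTANYWHERE, and a preceding volatile. prefix additionally
                // sets GTF_IND_VOLATILE / GTF_ORDER_SIDEEFF / GTF_DONT_CSE on it.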
12754
12755             case CEE_LDIND_I1:
12756                 lclTyp = TYP_BYTE;
12757                 goto LDIND;
12758             case CEE_LDIND_I2:
12759                 lclTyp = TYP_SHORT;
12760                 goto LDIND;
12761             case CEE_LDIND_U4:
12762             case CEE_LDIND_I4:
12763                 lclTyp = TYP_INT;
12764                 goto LDIND;
12765             case CEE_LDIND_I8:
12766                 lclTyp = TYP_LONG;
12767                 goto LDIND;
12768             case CEE_LDIND_REF:
12769                 lclTyp = TYP_REF;
12770                 goto LDIND;
12771             case CEE_LDIND_I:
12772                 lclTyp = TYP_I_IMPL;
12773                 goto LDIND;
12774             case CEE_LDIND_R4:
12775                 lclTyp = TYP_FLOAT;
12776                 goto LDIND;
12777             case CEE_LDIND_R8:
12778                 lclTyp = TYP_DOUBLE;
12779                 goto LDIND;
12780             case CEE_LDIND_U1:
12781                 lclTyp = TYP_UBYTE;
12782                 goto LDIND;
12783             case CEE_LDIND_U2:
12784                 lclTyp = TYP_USHORT;
12785                 goto LDIND;
12786             LDIND:
12787
12788                 if (tiVerificationNeeded)
12789                 {
12790                     typeInfo lclTiType(lclTyp);
12791 #ifdef _TARGET_64BIT_
12792                     if (opcode == CEE_LDIND_I)
12793                     {
12794                         lclTiType = typeInfo::nativeInt();
12795                     }
12796 #endif // _TARGET_64BIT_
12797                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12798                     tiRetVal.NormaliseForStack();
12799                 }
12800                 else
12801                 {
12802                     compUnsafeCastUsed = true; // Have to go conservative
12803                 }
12804
12805             LDIND_POST_VERIFY:
12806
12807                 op1 = impPopStack().val; // address to load from
12808                 impBashVarAddrsToI(op1);
12809
12810 #ifdef _TARGET_64BIT_
12811                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12812                 //
12813                 if (genActualType(op1->gtType) == TYP_INT)
12814                 {
12815                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12816                     op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
12817                 }
12818 #endif
12819
12820                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12821
12822                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12823
12824                 // ldind could point anywhere, for example to a boxed class static int
12825                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12826
12827                 if (prefixFlags & PREFIX_VOLATILE)
12828                 {
12829                     assert(op1->OperGet() == GT_IND);
12830                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12831                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12832                     op1->gtFlags |= GTF_IND_VOLATILE;
12833                 }
12834
12835                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12836                 {
12837                     assert(op1->OperGet() == GT_IND);
12838                     op1->gtFlags |= GTF_IND_UNALIGNED;
12839                 }
12840
12841                 impPushOnStack(op1, tiRetVal);
12842
12843                 break;
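                // Example (illustrative sketch, assuming the popped address is a byref or
                // native int): "ldind.i4" becomes GT_IND(TYP_INT, addr) with
                // GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE set, and the loaded value
                // is pushed back on the stack; unaligned./volatile. prefixes add the
                // corresponding GTF_IND_* flags just as they do for stind.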
12844
12845             case CEE_UNALIGNED:
12846
12847                 assert(sz == 1);
12848                 val = getU1LittleEndian(codeAddr);
12849                 ++codeAddr;
12850                 JITDUMP(" %u", val);
12851                 if ((val != 1) && (val != 2) && (val != 4))
12852                 {
12853                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12854                 }
12855
12856                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12857                 prefixFlags |= PREFIX_UNALIGNED;
12858
12859                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12860
12861             PREFIX:
12862                 opcode     = (OPCODE)getU1LittleEndian(codeAddr);
12863                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12864                 codeAddr += sizeof(__int8);
12865                 goto DECODE_OPCODE;
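            // Example (illustrative sketch): "unaligned. 1; ldind.i4" records
            // PREFIX_UNALIGNED, validates that a memory-access opcode follows, and then
            // re-enters the decode loop at DECODE_OPCODE so the prefixed ldind sees the
            // flag in prefixFlags.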
12866
12867             case CEE_VOLATILE:
12868
12869                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12870                 prefixFlags |= PREFIX_VOLATILE;
12871
12872                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12873
12874                 assert(sz == 0);
12875                 goto PREFIX;
12876
12877             case CEE_LDFTN:
12878             {
12879                 // Need to do a lookup here so that we perform an access check
12880                 // and do a NOWAY if protections are violated
12881                 _impResolveToken(CORINFO_TOKENKIND_Method);
12882
12883                 JITDUMP(" %08X", resolvedToken.token);
12884
12885                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12886                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12887                               &callInfo);
12888
12889                 // This check really only applies to intrinsic Array.Address methods
12890                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12891                 {
12892                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12893                 }
12894
12895                 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12896                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12897
12898                 if (tiVerificationNeeded)
12899                 {
12900                     // LDFTN could mark the beginning of a delegate creation sequence; remember that.
12901                     delegateCreateStart = codeAddr - 2;
12902
12903                     // check any constraints on the callee's class and type parameters
12904                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12905                                    "method has unsatisfied class constraints");
12906                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12907                                                                                 resolvedToken.hMethod),
12908                                    "method has unsatisfied method constraints");
12909
12910                     mflags = callInfo.verMethodFlags;
12911                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12912                 }
12913
12914             DO_LDFTN:
12915                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12916                 if (compDonotInline())
12917                 {
12918                     return;
12919                 }
12920
12921                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12922                 impPushOnStack(op1, typeInfo(heapToken));
12923
12924                 break;
12925             }
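            // Example (illustrative sketch, placeholder names): "ldftn SomeClass::SomeMethod"
            // resolves the method token, builds a method-pointer tree via impMethodPointer,
            // and pushes it with a typeInfo that wraps the heap-allocated resolved token;
            // that token is consumed later if the ldftn feeds a delegate constructor.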
12926
12927             case CEE_LDVIRTFTN:
12928             {
12929                 /* Get the method token */
12930
12931                 _impResolveToken(CORINFO_TOKENKIND_Method);
12932
12933                 JITDUMP(" %08X", resolvedToken.token);
12934
12935                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12936                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12937                                                     CORINFO_CALLINFO_CALLVIRT)),
12938                               &callInfo);
12939
12940                 // This check really only applies to intrinsic Array.Address methods
12941                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12942                 {
12943                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12944                 }
12945
12946                 mflags = callInfo.methodFlags;
12947
12948                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12949
12950                 if (compIsForInlining())
12951                 {
12952                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12953                     {
12954                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12955                         return;
12956                     }
12957                 }
12958
12959                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12960
12961                 if (tiVerificationNeeded)
12962                 {
12963
12964                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12965                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12966
12967                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12968                     typeInfo declType =
12969                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12970
12971                     typeInfo arg = impStackTop().seTypeInfo;
12972                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12973                            "bad ldvirtftn");
12974
12975                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12976                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12977                     {
12978                         instanceClassHnd = arg.GetClassHandleForObjRef();
12979                     }
12980
12981                     // check any constraints on the method's class and type parameters
12982                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12983                                    "method has unsatisfied class constraints");
12984                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12985                                                                                 resolvedToken.hMethod),
12986                                    "method has unsatisfied method constraints");
12987
12988                     if (mflags & CORINFO_FLG_PROTECTED)
12989                     {
12990                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12991                                "Accessing protected method through wrong type.");
12992                     }
12993                 }
12994
12995                 /* Get the object-ref */
12996                 op1 = impPopStack().val;
12997                 assertImp(op1->gtType == TYP_REF);
12998
12999                 if (opts.IsReadyToRun())
13000                 {
13001                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
13002                     {
13003                         if (op1->gtFlags & GTF_SIDE_EFFECT)
13004                         {
13005                             op1 = gtUnusedValNode(op1);
13006                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13007                         }
13008                         goto DO_LDFTN;
13009                     }
13010                 }
13011                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13012                 {
13013                     if (op1->gtFlags & GTF_SIDE_EFFECT)
13014                     {
13015                         op1 = gtUnusedValNode(op1);
13016                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13017                     }
13018                     goto DO_LDFTN;
13019                 }
13020
13021                 GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
13022                 if (compDonotInline())
13023                 {
13024                     return;
13025                 }
13026
13027                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13028                 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
13029                 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
13030                 impPushOnStack(fptr, typeInfo(heapToken));
13031
13032                 break;
13033             }
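            // Example (illustrative sketch, placeholder names): "ldvirtftn SomeClass::M" pops
            // the object reference and normally builds the virtual lookup via
            // impImportLdvirtftn; when the target is final, static, or non-virtual (or
            // ReadyToRun resolves it directly), any side effects of the object ref are
            // spilled and the code falls back to the plain DO_LDFTN path above.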
13034
13035             case CEE_CONSTRAINED:
13036
13037                 assertImp(sz == sizeof(unsigned));
13038                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
13039                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
13040                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
13041
13042                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
13043                 prefixFlags |= PREFIX_CONSTRAINED;
13044
13045                 {
13046                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13047                     if (actualOpcode != CEE_CALLVIRT)
13048                     {
13049                         BADCODE("constrained. has to be followed by callvirt");
13050                     }
13051                 }
13052
13053                 goto PREFIX;
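            // Example (illustrative sketch): "constrained. SomeType; callvirt M" stores the
            // type token in constrainedResolvedToken, sets PREFIX_CONSTRAINED, and requires
            // that the next non-prefix opcode be callvirt (anything else is BADCODE).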
13054
13055             case CEE_READONLY:
13056                 JITDUMP(" readonly.");
13057
13058                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
13059                 prefixFlags |= PREFIX_READONLY;
13060
13061                 {
13062                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13063                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
13064                     {
13065                         BADCODE("readonly. has to be followed by ldelema or call");
13066                     }
13067                 }
13068
13069                 assert(sz == 0);
13070                 goto PREFIX;
13071
13072             case CEE_TAILCALL:
13073                 JITDUMP(" tail.");
13074
13075                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
13076                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13077
13078                 {
13079                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13080                     if (!impOpcodeIsCallOpcode(actualOpcode))
13081                     {
13082                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
13083                     }
13084                 }
13085                 assert(sz == 0);
13086                 goto PREFIX;
13087
13088             case CEE_NEWOBJ:
13089
13090                 /* Since we will implicitly insert newObjThisPtr at the start of the
13091                    argument list, spill any GTF_ORDER_SIDEEFF */
13092                 impSpillSpecialSideEff();
13093
13094                 /* NEWOBJ does not respond to TAIL */
13095                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
13096
13097                 /* NEWOBJ does not respond to CONSTRAINED */
13098                 prefixFlags &= ~PREFIX_CONSTRAINED;
13099
13100                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
13101
13102                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13103                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
13104                               &callInfo);
13105
13106                 if (compIsForInlining())
13107                 {
13108                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13109                     {
13110                         // Check to see if this call violates the boundary.
13111                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
13112                         return;
13113                     }
13114                 }
13115
13116                 mflags = callInfo.methodFlags;
13117
13118                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
13119                 {
13120                     BADCODE("newobj on static or abstract method");
13121                 }
13122
13123                 // Insert the security callout before any actual code is generated
13124                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13125
13126                 // There are three different cases for newobj:
13127                 //      1) Object is an array (arrays are treated specially by the EE)
13128                 //      2) Object is some other variable-sized object (e.g. String)
13129                 //      3) Class size can be determined beforehand (the normal case)
13130                 // In the first two cases the object size is variable (it depends on the arguments).
13131                 // In the first case we need to call a NEWOBJ helper (multinewarray),
13132                 // in the second case we call the constructor with a null 'this' pointer,
13133                 // and in the third case we allocate the memory and then call the constructor.
13134
13135                 clsFlags = callInfo.classFlags;
13136                 if (clsFlags & CORINFO_FLG_ARRAY)
13137                 {
13138                     if (tiVerificationNeeded)
13139                     {
13140                         CORINFO_CLASS_HANDLE elemTypeHnd;
13141                         INDEBUG(CorInfoType corType =)
13142                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13143                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
13144                         Verify(elemTypeHnd == nullptr ||
13145                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13146                                "newarr of byref-like objects");
13147                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
13148                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
13149                                       &callInfo DEBUGARG(info.compFullName));
13150                     }
13151                     // Arrays need to call the NEWOBJ helper.
13152                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
13153
13154                     impImportNewObjArray(&resolvedToken, &callInfo);
13155                     if (compDonotInline())
13156                     {
13157                         return;
13158                     }
13159
13160                     callTyp = TYP_REF;
13161                     break;
13162                 }
13163                 // At present this can only be String
13164                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
13165                 {
13166                     if (IsTargetAbi(CORINFO_CORERT_ABI))
13167                     {
13168                         // The dummy argument does not exist in CoreRT
13169                         newObjThisPtr = nullptr;
13170                     }
13171                     else
13172                     {
13173                         // This is the case for variable-sized objects that are not
13174                         // arrays.  In this case, call the constructor with a null 'this'
13175                         // pointer
13176                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
13177                     }
13178
13179                     /* Remember that this basic block contains 'new' of an object */
13180                     block->bbFlags |= BBF_HAS_NEWOBJ;
13181                     optMethodFlags |= OMF_HAS_NEWOBJ;
13182                 }
13183                 else
13184                 {
13185                     // This is the normal case where the size of the object is
13186                     // fixed.  Allocate the memory and call the constructor.
13187
13188                     // Note: We cannot add a peephole to avoid use of a temp here
13189                     // because we don't have enough interference info to detect when
13190                     // the source and destination interfere, for example: s = new S(ref);
13191
13192                     // TODO: Find the correct place to introduce a general
13193                     // reverse copy prop for struct return values from newobj or
13194                     // any function returning structs.
13195
13196                     /* get a temporary for the new object */
13197                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
13198                     if (compDonotInline())
13199                     {
13200                         // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
13201                         assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
13202                         return;
13203                     }
13204
13205                     // In the value class case we only need clsHnd for size calcs.
13206                     //
13207                     // The lookup of the code pointer will be handled by CALL in this case
13208                     if (clsFlags & CORINFO_FLG_VALUECLASS)
13209                     {
13210                         if (compIsForInlining())
13211                         {
13212                             // If value class has GC fields, inform the inliner. It may choose to
13213                             // bail out on the inline.
13214                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13215                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
13216                             {
13217                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
13218                                 if (compInlineResult->IsFailure())
13219                                 {
13220                                     return;
13221                                 }
13222
13223                                 // Do further notification in the case where the call site is rare;
13224                                 // some policies do not track the relative hotness of call sites for
13225                                 // "always" inline cases.
13226                                 if (impInlineInfo->iciBlock->isRunRarely())
13227                                 {
13228                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
13229                                     if (compInlineResult->IsFailure())
13230                                     {
13231                                         return;
13232                                     }
13233                                 }
13234                             }
13235                         }
13236
13237                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
13238                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
13239
13240                         if (impIsPrimitive(jitTyp))
13241                         {
13242                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
13243                         }
13244                         else
13245                         {
13246                             // The local variable itself is the allocated space.
13247                             // Here we need the unsafe value cls check, since the address of the struct is taken
13248                             // for further use and is potentially exploitable.
13249                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
13250                         }
13251                         if (compIsForInlining() || fgStructTempNeedsExplicitZeroInit(lvaTable + lclNum, block))
13252                         {
13253                             // Append a tree to zero-out the temp
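                                  // (gtNewBlkOpNode with a zero source and copyBlock==false yields an init-block
                                  // assignment that clears 'size' bytes of the temp before the constructor runs.)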
13254                             newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
13255
13256                             newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
13257                                                            gtNewIconNode(0), // Value
13258                                                            size,             // Size
13259                                                            false,            // isVolatile
13260                                                            false);           // not copyBlock
13261                             impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
13262                         }
13263
13264                         // Obtain the address of the temp
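                              // The byref to this stack temp is what gets passed as the 'this' argument to the
                              // value-class constructor call below (see the CALL label).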
13265                         newObjThisPtr =
13266                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
13267                     }
13268                     else
13269                     {
13270 #ifdef FEATURE_READYTORUN_COMPILER
13271                         if (opts.IsReadyToRun())
13272                         {
13273                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
13274                             usingReadyToRunHelper = (op1 != nullptr);
13275                         }
13276
13277                         if (!usingReadyToRunHelper)
13278 #endif
13279                         {
13280                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
13281                             if (op1 == nullptr)
13282                             { // compDonotInline()
13283                                 return;
13284                             }
13285
13286                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13287                             // and the newfast call with a single call to a dynamic R2R cell that will:
13288                             //      1) Load the context
13289                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
13290                             //      stub
13291                             //      3) Allocate and return the new object
13292                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13293
13294                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
13295                                                     resolvedToken.hClass, TYP_REF, op1);
13296                         }
13297
13298                         // Remember that this basic block contains 'new' of an object
13299                         block->bbFlags |= BBF_HAS_NEWOBJ;
13300                         optMethodFlags |= OMF_HAS_NEWOBJ;
13301
13302                         // Append the assignment to the temp/local. Don't need to spill
13303                         // at all as we are just calling an EE-Jit helper which can only
13304                         // cause an (async) OutOfMemoryException.
13305
13306                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
13307                         // to a temp. Note that the pattern "temp = allocObj" is required
13308                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
13309                         // without exhaustive walk over all expressions.
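                              // A rough sketch of the IR appended here:
                              //     ASG(LCL_VAR<ref> tmp, ALLOCOBJ(newHelper, clsHnd))
                              // with 'tmp' then used as the 'this' pointer for the constructor call.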
13310
13311                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
13312                         lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
13313
13314                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
13315                     }
13316                 }
13317                 goto CALL;
13318
13319             case CEE_CALLI:
13320
13321                 /* CALLI does not respond to CONSTRAINED */
13322                 prefixFlags &= ~PREFIX_CONSTRAINED;
13323
13324                 if (compIsForInlining())
13325                 {
13326                     // CALLI doesn't have a method handle, so assume the worst.
13327                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13328                     {
13329                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
13330                         return;
13331                     }
13332                 }
13333
13334             // fall through
13335
13336             case CEE_CALLVIRT:
13337             case CEE_CALL:
13338
13339                 // We can't call getCallInfo on the token from a CALLI, but we need it in
13340                 // many other places.  We unfortunately embed that knowledge here.
13341                 if (opcode != CEE_CALLI)
13342                 {
13343                     _impResolveToken(CORINFO_TOKENKIND_Method);
13344
13345                     eeGetCallInfo(&resolvedToken,
13346                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
13347                                   // this is how impImportCall invokes getCallInfo
13348                                   addVerifyFlag(
13349                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
13350                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
13351                                                                        : CORINFO_CALLINFO_NONE)),
13352                                   &callInfo);
13353                 }
13354                 else
13355                 {
13356                     // Suppress uninitialized use warning.
13357                     memset(&resolvedToken, 0, sizeof(resolvedToken));
13358                     memset(&callInfo, 0, sizeof(callInfo));
13359
13360                     resolvedToken.token = getU4LittleEndian(codeAddr);
13361                 }
13362
13363             CALL: // memberRef should be set.
13364                 // newObjThisPtr should be set for CEE_NEWOBJ
13365
13366                 JITDUMP(" %08X", resolvedToken.token);
13367                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13368
13369                 bool newBBcreatedForTailcallStress;
13370
13371                 newBBcreatedForTailcallStress = false;
13372
13373                 if (compIsForInlining())
13374                 {
13375                     if (compDonotInline())
13376                     {
13377                         return;
13378                     }
13379                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13380                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13381                 }
13382                 else
13383                 {
13384                     if (compTailCallStress())
13385                     {
13386                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13387                         // Tail call stress only recognizes call+ret patterns and forces them to be
13388                         // explicit tail prefixed calls.  Also, under tail call stress, fgMakeBasicBlocks()
13389                         // doesn't import the 'ret' opcode following the call into the basic block containing
13390                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
13391                         // is already checking that there is an opcode following call and hence it is
13392                         // safe here to read next opcode without bounds check.
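                              // In effect, an IL sequence of 'call <target>; ret' is treated as if it had been
                              // written 'tail. call <target>; ret', provided the tail-call constraints are met.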
13393                         newBBcreatedForTailcallStress =
13394                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
13395                                                              // make it jump to RET.
13396                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
13397
13398                         if (newBBcreatedForTailcallStress &&
13399                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
13400                             verCheckTailCallConstraint(opcode, &resolvedToken,
13401                                                        constraintCall ? &constrainedResolvedToken : nullptr,
13402                                                        true) // Is it legal to do tailcall?
13403                             )
13404                         {
13405                             // Stress the tailcall.
13406                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13407                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13408                         }
13409                     }
13410                 }
13411
13412                 // This is split up to avoid goto flow warnings.
13413                 bool isRecursive;
13414                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13415
13416                 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed,
13417                 // and hence will not be considered for implicit tail calling.
13418                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13419                 {
13420                     if (compIsForInlining())
13421                     {
13422 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13423                         // Are we inlining at an implicit tail call site? If so then we can flag
13424                         // implicit tail call sites in the inline body. These call sites
13425                         // often end up in non BBJ_RETURN blocks, so only flag them when
13426                         // we're able to handle shared returns.
13427                         if (impInlineInfo->iciCall->IsImplicitTailCall())
13428                         {
13429                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13430                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13431                         }
13432 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13433                     }
13434                     else
13435                     {
13436                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13437                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13438                     }
13439                 }
13440
13441                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13442                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13443                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
13444
13445                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13446                 {
13447                     // All calls and delegates need a security callout.
13448                     // For delegates, this is the call to the delegate constructor, not the access check on the
13449                     // LD(virt)FTN.
13450                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13451
13452 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
13453
13454                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
13455                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
13456                 // ldtoken <field token>, and we now check accessibility
13457                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
13458                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
13459                 {
13460                     if (prevOpcode != CEE_LDTOKEN)
13461                     {
13462                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
13463                     }
13464                     else
13465                     {
13466                         assert(lastLoadToken != NULL);
13467                         // Now that we know we have a token, verify that it is accessible for loading
13468                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
13469                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
13470                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
13471                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13472                     }
13473                 }
13474
13475 #endif // DevDiv 410397
13476                 }
13477
13478                 if (tiVerificationNeeded)
13479                 {
13480                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13481                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13482                                   &callInfo DEBUGARG(info.compFullName));
13483                 }
13484
13485                 // Insert delegate callout here.
13486                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
13487                 {
13488 #ifdef DEBUG
13489                     // We should do this only if verification is enabled
13490                     // If verification is disabled, delegateCreateStart will not be initialized correctly
13491                     if (tiVerificationNeeded)
13492                     {
13493                         mdMemberRef delegateMethodRef = mdMemberRefNil;
13494                         // We should get here only for well formed delegate creation.
13495                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
13496                     }
13497 #endif
13498                 }
13499
13500                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13501                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13502                 if (compDonotInline())
13503                 {
13504                     // We do not check failures after lvaGrabTemp. It is covered by the CoreCLR_13272 issue.
13505                     assert((callTyp == TYP_UNDEF) ||
13506                            (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
13507                     return;
13508                 }
13509
13510                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
13511                                                                        // have created a new BB after the "call"
13512                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
13513                 {
13514                     assert(!compIsForInlining());
13515                     goto RET;
13516                 }
13517
13518                 break;
13519
13520             case CEE_LDFLD:
13521             case CEE_LDSFLD:
13522             case CEE_LDFLDA:
13523             case CEE_LDSFLDA:
13524             {
13525
13526                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
13527                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
13528
13529                 /* Get the CP_Fieldref index */
13530                 assertImp(sz == sizeof(unsigned));
13531
13532                 _impResolveToken(CORINFO_TOKENKIND_Field);
13533
13534                 JITDUMP(" %08X", resolvedToken.token);
13535
13536                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
13537
13538                 GenTree*             obj     = nullptr;
13539                 typeInfo*            tiObj   = nullptr;
13540                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
13541
13542                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
13543                 {
13544                     tiObj         = &impStackTop().seTypeInfo;
13545                     StackEntry se = impPopStack();
13546                     objType       = se.seTypeInfo.GetClassHandle();
13547                     obj           = se.val;
13548
13549                     if (impIsThis(obj))
13550                     {
13551                         aflags |= CORINFO_ACCESS_THIS;
13552
13553                         // An optimization for Contextful classes:
13554                         // we unwrap the proxy when we have a 'this reference'
13555
13556                         if (info.compUnwrapContextful)
13557                         {
13558                             aflags |= CORINFO_ACCESS_UNWRAP;
13559                         }
13560                     }
13561                 }
13562
13563                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13564
13565                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13566                 // handle
13567                 CorInfoType ciType = fieldInfo.fieldType;
13568                 clsHnd             = fieldInfo.structType;
13569
13570                 lclTyp = JITtype2varType(ciType);
13571
13572 #ifdef _TARGET_AMD64_
13573                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
13574 #endif // _TARGET_AMD64_
13575
13576                 if (compIsForInlining())
13577                 {
13578                     switch (fieldInfo.fieldAccessor)
13579                     {
13580                         case CORINFO_FIELD_INSTANCE_HELPER:
13581                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13582                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13583                         case CORINFO_FIELD_STATIC_TLS:
13584
13585                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
13586                             return;
13587
13588                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13589                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13590                             /* We may be able to inline the field accessors in specific instantiations of generic
13591                              * methods */
13592                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
13593                             return;
13594
13595                         default:
13596                             break;
13597                     }
13598
13599                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
13600                         clsHnd)
13601                     {
13602                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
13603                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
13604                         {
13605                             // Loading a static valuetype field usually will cause a JitHelper to be called
13606                             // for the static base. This will bloat the code.
13607                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
13608
13609                             if (compInlineResult->IsFailure())
13610                             {
13611                                 return;
13612                             }
13613                         }
13614                     }
13615                 }
13616
13617                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
13618                 if (isLoadAddress)
13619                 {
13620                     tiRetVal.MakeByRef();
13621                 }
13622                 else
13623                 {
13624                     tiRetVal.NormaliseForStack();
13625                 }
13626
13627                 // Perform this check always to ensure that we get field access exceptions even with
13628                 // SkipVerification.
13629                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13630
13631                 if (tiVerificationNeeded)
13632                 {
13633                     // You can also pass the unboxed struct to  LDFLD
13634                     BOOL bAllowPlainValueTypeAsThis = FALSE;
13635                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13636                     {
13637                         bAllowPlainValueTypeAsThis = TRUE;
13638                     }
13639
13640                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13641
13642                     // If we're doing this on a heap object or from a 'safe' byref
13643                     // then the result is a safe byref too
13644                     if (isLoadAddress) // load address
13645                     {
13646                         if (fieldInfo.fieldFlags &
13647                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
13648                         {
13649                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13650                             {
13651                                 tiRetVal.SetIsPermanentHomeByRef();
13652                             }
13653                         }
13654                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13655                         {
13656                             // ldflda of byref is safe if done on a gc object or on  a
13657                             // safe byref
13658                             tiRetVal.SetIsPermanentHomeByRef();
13659                         }
13660                     }
13661                 }
13662                 else
13663                 {
13664                     // tiVerificationNeeded is false.
13665                     // Raise InvalidProgramException if static load accesses non-static field
13666                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13667                     {
13668                         BADCODE("static access on an instance field");
13669                     }
13670                 }
13671
13672                 // We are using ldfld/a on a static field. We allow it, but need to evaluate any side-effects from obj.
13673                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13674                 {
13675                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13676                     {
13677                         obj = gtUnusedValNode(obj);
13678                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13679                     }
13680                     obj = nullptr;
13681                 }
13682
13683                 /* Preserve 'small' int types */
13684                 if (!varTypeIsSmall(lclTyp))
13685                 {
13686                     lclTyp = genActualType(lclTyp);
13687                 }
13688
13689                 bool usesHelper = false;
13690
13691                 switch (fieldInfo.fieldAccessor)
13692                 {
13693                     case CORINFO_FIELD_INSTANCE:
13694 #ifdef FEATURE_READYTORUN_COMPILER
13695                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13696 #endif
13697                     {
13698                         bool nullcheckNeeded = false;
13699
13700                         obj = impCheckForNullPointer(obj);
13701
13702                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13703                         {
13704                             nullcheckNeeded = true;
13705                         }
13706
13707                         // If the object is a struct, what we really want is
13708                         // for the field to operate on the address of the struct.
13709                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13710                         {
13711                             assert(opcode == CEE_LDFLD && objType != nullptr);
13712
13713                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13714                         }
13715
13716                         /* Create the data member node */
13717                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13718
13719 #ifdef FEATURE_READYTORUN_COMPILER
13720                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13721                         {
13722                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13723                         }
13724 #endif
13725
13726                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13727
13728                         if (fgAddrCouldBeNull(obj))
13729                         {
13730                             op1->gtFlags |= GTF_EXCEPT;
13731                         }
13732
13733                         // If gtFldObj is a BYREF then our target is a value class and
13734                         // it could point anywhere, for example a boxed class static int
13735                         if (obj->gtType == TYP_BYREF)
13736                         {
13737                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13738                         }
13739
13740                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13741                         if (StructHasOverlappingFields(typeFlags))
13742                         {
13743                             op1->gtField.gtFldMayOverlap = true;
13744                         }
13745
13746                         // wrap it in an address-of operator if necessary
13747                         if (isLoadAddress)
13748                         {
13749                             op1 = gtNewOperNode(GT_ADDR,
13750                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13751                         }
13752                         else
13753                         {
13754                             if (compIsForInlining() &&
13755                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13756                                                                                    impInlineInfo->inlArgInfo))
13757                             {
13758                                 impInlineInfo->thisDereferencedFirst = true;
13759                             }
13760                         }
13761                     }
13762                     break;
13763
13764                     case CORINFO_FIELD_STATIC_TLS:
13765 #ifdef _TARGET_X86_
13766                         // Legacy TLS access is implemented as intrinsic on x86 only
13767
13768                         /* Create the data member node */
13769                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13770                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13771
13772                         if (isLoadAddress)
13773                         {
13774                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13775                         }
13776                         break;
13777 #else
13778                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13779
13780                         __fallthrough;
13781 #endif
13782
13783                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13784                     case CORINFO_FIELD_INSTANCE_HELPER:
13785                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13786                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13787                                                clsHnd, nullptr);
13788                         usesHelper = true;
13789                         break;
13790
13791                     case CORINFO_FIELD_STATIC_ADDRESS:
13792                         // Replace static read-only fields with constant if possible
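                              // For example, a 'static readonly' int whose declaring class has already run its
                              // class constructor can be imported as the constant currently stored at fldAddr
                              // (a sketch of the effect; impImportStaticReadOnlyField builds the constant node).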
13793                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13794                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13795                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13796                         {
13797                             CorInfoInitClassResult initClassResult =
13798                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13799                                                             impTokenLookupContextHandle);
13800
13801                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13802                             {
13803                                 void** pFldAddr = nullptr;
13804                                 void*  fldAddr =
13805                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13806
13807                                 // We should always be able to access this static's address directly
13808                                 assert(pFldAddr == nullptr);
13809
13810                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13811                                 goto FIELD_DONE;
13812                             }
13813                         }
13814
13815                         __fallthrough;
13816
13817                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13818                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13819                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13820                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13821                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13822                                                          lclTyp);
13823                         break;
13824
13825                     case CORINFO_FIELD_INTRINSIC_ZERO:
13826                     {
13827                         assert(aflags & CORINFO_ACCESS_GET);
13828                         op1 = gtNewIconNode(0, lclTyp);
13829                         goto FIELD_DONE;
13830                     }
13831                     break;
13832
13833                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13834                     {
13835                         assert(aflags & CORINFO_ACCESS_GET);
13836
13837                         LPVOID         pValue;
13838                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13839                         op1                = gtNewStringLiteralNode(iat, pValue);
13840                         goto FIELD_DONE;
13841                     }
13842                     break;
13843
13844                     case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
13845                     {
13846                         assert(aflags & CORINFO_ACCESS_GET);
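                              // This folds a field such as BitConverter.IsLittleEndian to a jit-time constant
                              // matching the target's endianness (which field maps here is decided by the VM).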
13847 #if BIGENDIAN
13848                         op1 = gtNewIconNode(0, lclTyp);
13849 #else
13850                         op1                     = gtNewIconNode(1, lclTyp);
13851 #endif
13852                         goto FIELD_DONE;
13853                     }
13854                     break;
13855
13856                     default:
13857                         assert(!"Unexpected fieldAccessor");
13858                 }
13859
13860                 if (!isLoadAddress)
13861                 {
13862
13863                     if (prefixFlags & PREFIX_VOLATILE)
13864                     {
13865                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13866                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13867
13868                         if (!usesHelper)
13869                         {
13870                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13871                                    (op1->OperGet() == GT_OBJ));
13872                             op1->gtFlags |= GTF_IND_VOLATILE;
13873                         }
13874                     }
13875
13876                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13877                     {
13878                         if (!usesHelper)
13879                         {
13880                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13881                                    (op1->OperGet() == GT_OBJ));
13882                             op1->gtFlags |= GTF_IND_UNALIGNED;
13883                         }
13884                     }
13885                 }
13886
13887                 /* Check if the class needs explicit initialization */
13888
13889                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13890                 {
13891                     GenTree* helperNode = impInitClass(&resolvedToken);
13892                     if (compDonotInline())
13893                     {
13894                         return;
13895                     }
13896                     if (helperNode != nullptr)
13897                     {
13898                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13899                     }
13900                 }
13901
13902             FIELD_DONE:
13903                 impPushOnStack(op1, tiRetVal);
13904             }
13905             break;
13906
13907             case CEE_STFLD:
13908             case CEE_STSFLD:
13909             {
13910
13911                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13912
13913                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13914
13915                 /* Get the CP_Fieldref index */
13916
13917                 assertImp(sz == sizeof(unsigned));
13918
13919                 _impResolveToken(CORINFO_TOKENKIND_Field);
13920
13921                 JITDUMP(" %08X", resolvedToken.token);
13922
13923                 int       aflags = CORINFO_ACCESS_SET;
13924                 GenTree*  obj    = nullptr;
13925                 typeInfo* tiObj  = nullptr;
13926                 typeInfo  tiVal;
13927
13928                 /* Pull the value from the stack */
13929                 StackEntry se = impPopStack();
13930                 op2           = se.val;
13931                 tiVal         = se.seTypeInfo;
13932                 clsHnd        = tiVal.GetClassHandle();
13933
13934                 if (opcode == CEE_STFLD)
13935                 {
13936                     tiObj = &impStackTop().seTypeInfo;
13937                     obj   = impPopStack().val;
13938
13939                     if (impIsThis(obj))
13940                     {
13941                         aflags |= CORINFO_ACCESS_THIS;
13942
13943                         // An optimization for Contextful classes:
13944                         // we unwrap the proxy when we have a 'this reference'
13945
13946                         if (info.compUnwrapContextful)
13947                         {
13948                             aflags |= CORINFO_ACCESS_UNWRAP;
13949                         }
13950                     }
13951                 }
13952
13953                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13954
13955                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13956                 // handle
13957                 CorInfoType ciType = fieldInfo.fieldType;
13958                 fieldClsHnd        = fieldInfo.structType;
13959
13960                 lclTyp = JITtype2varType(ciType);
13961
13962                 if (compIsForInlining())
13963                 {
13964                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or a
13965                      * per-inst static? */
13966
13967                     switch (fieldInfo.fieldAccessor)
13968                     {
13969                         case CORINFO_FIELD_INSTANCE_HELPER:
13970                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13971                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13972                         case CORINFO_FIELD_STATIC_TLS:
13973
13974                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13975                             return;
13976
13977                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13978                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13979                             /* We may be able to inline the field accessors in specific instantiations of generic
13980                              * methods */
13981                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13982                             return;
13983
13984                         default:
13985                             break;
13986                     }
13987                 }
13988
13989                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13990
13991                 if (tiVerificationNeeded)
13992                 {
13993                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13994                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13995                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13996                 }
13997                 else
13998                 {
13999                     // tiVerificationNeeded is false.
14000                     // Raise InvalidProgramException if static store accesses non-static field
14001                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14002                     {
14003                         BADCODE("static access on an instance field");
14004                     }
14005                 }
14006
14007                 // We are using stfld on a static field.
14008                 // We allow it, but need to eval any side-effects for obj
14009                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14010                 {
14011                     if (obj->gtFlags & GTF_SIDE_EFFECT)
14012                     {
14013                         obj = gtUnusedValNode(obj);
14014                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14015                     }
14016                     obj = nullptr;
14017                 }
14018
14019                 /* Preserve 'small' int types */
14020                 if (!varTypeIsSmall(lclTyp))
14021                 {
14022                     lclTyp = genActualType(lclTyp);
14023                 }
14024
14025                 switch (fieldInfo.fieldAccessor)
14026                 {
14027                     case CORINFO_FIELD_INSTANCE:
14028 #ifdef FEATURE_READYTORUN_COMPILER
14029                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
14030 #endif
14031                     {
14032                         obj = impCheckForNullPointer(obj);
14033
14034                         /* Create the data member node */
14035                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14036                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14037                         if (StructHasOverlappingFields(typeFlags))
14038                         {
14039                             op1->gtField.gtFldMayOverlap = true;
14040                         }
14041
14042 #ifdef FEATURE_READYTORUN_COMPILER
14043                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14044                         {
14045                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14046                         }
14047 #endif
14048
14049                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14050
14051                         if (fgAddrCouldBeNull(obj))
14052                         {
14053                             op1->gtFlags |= GTF_EXCEPT;
14054                         }
14055
14056                         // If gtFldObj is a BYREF then our target is a value class and
14057                         // it could point anywhere, for example a boxed class static int
14058                         if (obj->gtType == TYP_BYREF)
14059                         {
14060                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
14061                         }
14062
14063                         if (compIsForInlining() &&
14064                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
14065                         {
14066                             impInlineInfo->thisDereferencedFirst = true;
14067                         }
14068                     }
14069                     break;
14070
14071                     case CORINFO_FIELD_STATIC_TLS:
14072 #ifdef _TARGET_X86_
14073                         // Legacy TLS access is implemented as intrinsic on x86 only
14074
14075                         /* Create the data member node */
14076                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14077                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14078
14079                         break;
14080 #else
14081                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14082
14083                         __fallthrough;
14084 #endif
14085
14086                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
14087                     case CORINFO_FIELD_INSTANCE_HELPER:
14088                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14089                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14090                                                clsHnd, op2);
14091                         goto SPILL_APPEND;
14092
14093                     case CORINFO_FIELD_STATIC_ADDRESS:
14094                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14095                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14096                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14097                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14098                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14099                                                          lclTyp);
14100                         break;
14101
14102                     default:
14103                         assert(!"Unexpected fieldAccessor");
14104                 }
14105
14106                 // Create the member assignment, unless we have a struct.
14107                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
14108                 bool deferStructAssign = varTypeIsStruct(lclTyp);
14109
14110                 if (!deferStructAssign)
14111                 {
14112                     if (prefixFlags & PREFIX_VOLATILE)
14113                     {
14114                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14115                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
14116                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14117                         op1->gtFlags |= GTF_IND_VOLATILE;
14118                     }
14119                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14120                     {
14121                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14122                         op1->gtFlags |= GTF_IND_UNALIGNED;
14123                     }
14124
14125                     /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
14126                        trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during
14127                        importation and reads from the union as if it were a long during code generation. Though this
14128                        can potentially read garbage, one can get lucky and have this work correctly.
14129
14130                        This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
14131                        /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
14132                        dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
14133                        it works correctly always.
14134
14135                        Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
14136                        for V4.0.
14137                     */
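                          // An illustrative (hypothetical) IL sequence for such a store:
                          //     ldc.i4  2
                          //     stfld   int64 SomeClass::someLongField   // note: no conv.i8 before the store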
14138                     CLANG_FORMAT_COMMENT_ANCHOR;
14139
14140 #ifndef _TARGET_64BIT_
14141                     // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
14142                     // generated for ARM as well as x86, so the following IR will be accepted:
14143                     //     *  STMT      void
14144                     //         |  /--*  CNS_INT   int    2
14145                     //         \--*  ASG       long
14146                     //            \--*  CLS_VAR   long
14147
14148                     if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
14149                         varTypeIsLong(op1->TypeGet()))
14150                     {
14151                         op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14152                     }
14153 #endif
14154
14155 #ifdef _TARGET_64BIT_
14156                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
14157                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
14158                     {
14159                         op2->gtType = TYP_I_IMPL;
14160                     }
14161                     else
14162                     {
14163                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
14164                         //
14165                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
14166                         {
14167                             op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
14168                         }
14169                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
14170                         //
14171                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
14172                         {
14173                             op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
14174                         }
14175                     }
14176 #endif
14177
14178 #if !FEATURE_X87_DOUBLES
14179                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
14180                     // We insert a cast to the dest 'op1' type
14181                     //
14182                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
14183                         varTypeIsFloating(op2->gtType))
14184                     {
14185                         op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14186                     }
14187 #endif // !FEATURE_X87_DOUBLES
14188
14189                     op1 = gtNewAssignNode(op1, op2);
14190
14191                     /* Mark the expression as containing an assignment */
14192
14193                     op1->gtFlags |= GTF_ASG;
14194                 }
14195
14196                 /* Check if the class needs explicit initialization */
14197
14198                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14199                 {
14200                     GenTree* helperNode = impInitClass(&resolvedToken);
14201                     if (compDonotInline())
14202                     {
14203                         return;
14204                     }
14205                     if (helperNode != nullptr)
14206                     {
14207                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14208                     }
14209                 }
14210
14211                 /* stfld can interfere with value classes (consider the sequence
14212                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
14213                    spill all value class references from the stack. */
14214
14215                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
14216                 {
14217                     assert(tiObj);
14218
14219                     if (impIsValueType(tiObj))
14220                     {
14221                         impSpillEvalStack();
14222                     }
14223                     else
14224                     {
14225                         impSpillValueClasses();
14226                     }
14227                 }
14228
14229                 /* Spill any refs to the same member from the stack */
14230
14231                 impSpillLclRefs((ssize_t)resolvedToken.hField);
14232
14233                 /* stsfld also interferes with indirect accesses (for aliased
14234                    statics) and calls. But don't need to spill other statics
14235                    as we have explicitly spilled this particular static field. */
14236
14237                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
14238
14239                 if (deferStructAssign)
14240                 {
14241                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
14242                 }
14243             }
14244                 goto APPEND;
14245
14246             case CEE_NEWARR:
14247             {
14248
14249                 /* Get the class type index operand */
14250
14251                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
14252
14253                 JITDUMP(" %08X", resolvedToken.token);
14254
14255                 if (!opts.IsReadyToRun())
14256                 {
14257                     // Need to restore array classes before creating array objects on the heap
14258                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14259                     if (op1 == nullptr)
14260                     { // compDonotInline()
14261                         return;
14262                     }
14263                 }
14264
14265                 if (tiVerificationNeeded)
14266                 {
14267                     // As per ECMA, the 'numElems' specified can be either int32 or native int.
14268                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
14269
14270                     CORINFO_CLASS_HANDLE elemTypeHnd;
14271                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
14272                     Verify(elemTypeHnd == nullptr ||
14273                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
14274                            "array of byref-like type");
14275                 }
14276
14277                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14278
14279                 accessAllowedResult =
14280                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14281                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14282
14283                 /* Form the arglist: array class handle, size */
14284                 op2 = impPopStack().val;
14285                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14286
14287 #ifdef FEATURE_READYTORUN_COMPILER
14288                 if (opts.IsReadyToRun())
14289                 {
14290                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
14291                                                     gtNewArgList(op2));
14292                     usingReadyToRunHelper = (op1 != nullptr);
14293
14294                     if (!usingReadyToRunHelper)
14295                     {
14296                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14297                         // and the newarr call with a single call to a dynamic R2R cell that will:
14298                         //      1) Load the context
14299                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14300                         //      3) Allocate the new array
14301                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14302
14303                         // Need to restore array classes before creating array objects on the heap
14304                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14305                         if (op1 == nullptr)
14306                         { // compDonotInline()
14307                             return;
14308                         }
14309                     }
14310                 }
14311
14312                 if (!usingReadyToRunHelper)
14313 #endif
14314                 {
14315                     args = gtNewArgList(op1, op2);
14316
14317                     /* Create a call to 'new' */
14318
14319                     // Note that this only works for shared generic code because the same helper is used for all
14320                     // reference array types
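                          //
                          // The argument list built above is (array class handle, element count); the helper
                          // returns the newly allocated array as a TYP_REF.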
14321                     op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
14322                 }
14323
14324                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
14325
14326                 /* Remember that this basic block contains 'new' of a single-dimension (SD) array */
14327
14328                 block->bbFlags |= BBF_HAS_NEWARRAY;
14329                 optMethodFlags |= OMF_HAS_NEWARRAY;
14330
14331                 /* Push the result of the call on the stack */
14332
14333                 impPushOnStack(op1, tiRetVal);
14334
14335                 callTyp = TYP_REF;
14336             }
14337             break;
14338
14339             case CEE_LOCALLOC:
14340                 if (tiVerificationNeeded)
14341                 {
14342                     Verify(false, "bad opcode");
14343                 }
14344
14345                 // We don't allow locallocs inside handlers
14346                 if (block->hasHndIndex())
14347                 {
14348                     BADCODE("Localloc can't be inside handler");
14349                 }
14350
14351                 setNeedsGSSecurityCookie();
14352
14353                 // Get the size to allocate
14354
14355                 op2 = impPopStack().val;
14356                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14357
14358                 if (verCurrentState.esStackDepth != 0)
14359                 {
14360                     BADCODE("Localloc can only be used when the stack is empty");
14361                 }
14362
14363                 // If the localloc is not in a loop and its size is a small constant,
14364                 // create a new local var of TYP_BLK and return its address.
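                // For example (illustrative C# only), "byte* p = stackalloc byte[32];" in a method body with
                // no backward jumps can be converted to the address of a 32-byte TYP_BLK local, avoiding a
                // GT_LCLHEAP at run time.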
14365                 {
14366                     bool convertedToLocal = false;
14367
14368                     // Need to aggressively fold here, as even fixed-size locallocs
14369                     // will have casts in the way.
14370                     op2 = gtFoldExpr(op2);
14371
14372                     if (op2->IsIntegralConst())
14373                     {
14374                         const ssize_t allocSize = op2->AsIntCon()->IconValue();
14375
14376                         if (allocSize == 0)
14377                         {
14378                             // Result is nullptr
14379                             JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
14380                             op1              = gtNewIconNode(0, TYP_I_IMPL);
14381                             convertedToLocal = true;
14382                         }
14383                         else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
14384                         {
14385                             // Get the size threshold for local conversion
14386                             ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
14387
14388 #ifdef DEBUG
14389                             // Optionally allow this to be modified
14390                             maxSize = JitConfig.JitStackAllocToLocalSize();
14391 #endif // DEBUG
14392
14393                             if (allocSize <= maxSize)
14394                             {
14395                                 const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
14396                                 JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
14397                                         stackallocAsLocal);
14398                                 lvaTable[stackallocAsLocal].lvType           = TYP_BLK;
14399                                 lvaTable[stackallocAsLocal].lvExactSize      = (unsigned)allocSize;
14400                                 lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
14401                                 op1                      = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
14402                                 op1                      = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
14403                                 convertedToLocal         = true;
14404                                 compGSReorderStackLayout = true;
14405                             }
14406                         }
14407                     }
14408
14409                     if (!convertedToLocal)
14410                     {
14411                         // Bail out if inlining and the localloc was not converted.
14412                         //
14413                         // Note we might consider allowing the inline, if the call
14414                         // site is not in a loop.
14415                         if (compIsForInlining())
14416                         {
14417                             InlineObservation obs = op2->IsIntegralConst()
14418                                                         ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
14419                                                         : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
14420                             compInlineResult->NoteFatal(obs);
14421                             return;
14422                         }
14423
14424                         op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14425                         // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14426                         op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14427
14428                         /* The FP register may not be back to the original value at the end
14429                            of the method, even if the frame size is 0, as localloc may
14430                            have modified it. So we will HAVE to reset it */
14431                         compLocallocUsed = true;
14432                     }
14433                     else
14434                     {
14435                         compLocallocOptimized = true;
14436                     }
14437                 }
14438
14439                 impPushOnStack(op1, tiRetVal);
14440                 break;
14441
14442             case CEE_ISINST:
14443             {
14444                 /* Get the type token */
14445                 assertImp(sz == sizeof(unsigned));
14446
14447                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14448
14449                 JITDUMP(" %08X", resolvedToken.token);
14450
14451                 if (!opts.IsReadyToRun())
14452                 {
14453                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14454                     if (op2 == nullptr)
14455                     { // compDonotInline()
14456                         return;
14457                     }
14458                 }
14459
14460                 if (tiVerificationNeeded)
14461                 {
14462                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14463                     // Even if this is a value class, we know it is boxed.
14464                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14465                 }
14466                 accessAllowedResult =
14467                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14468                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14469
14470                 op1 = impPopStack().val;
14471
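                // impOptimizeCastClassOrIsInst may resolve the isinst statically (for example when the
                // object's type is known exactly at jit time); if so it returns a tree we can push directly
                // instead of emitting a runtime type check.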
14472                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
14473
14474                 if (optTree != nullptr)
14475                 {
14476                     impPushOnStack(optTree, tiRetVal);
14477                 }
14478                 else
14479                 {
14480
14481 #ifdef FEATURE_READYTORUN_COMPILER
14482                     if (opts.IsReadyToRun())
14483                     {
14484                         GenTreeCall* opLookup =
14485                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14486                                                       gtNewArgList(op1));
14487                         usingReadyToRunHelper = (opLookup != nullptr);
14488                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
14489
14490                         if (!usingReadyToRunHelper)
14491                         {
14492                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14493                             // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14494                             //      1) Load the context
14495                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
14496                             //      stub
14497                             //      3) Perform the 'is instance' check on the input object
14498                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14499
14500                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14501                             if (op2 == nullptr)
14502                             { // compDonotInline()
14503                                 return;
14504                             }
14505                         }
14506                     }
14507
14508                     if (!usingReadyToRunHelper)
14509 #endif
14510                     {
14511                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
14512                     }
14513                     if (compDonotInline())
14514                     {
14515                         return;
14516                     }
14517
14518                     impPushOnStack(op1, tiRetVal);
14519                 }
14520                 break;
14521             }
14522
14523             case CEE_REFANYVAL:
14524
14525                 // get the class handle and make a ICON node out of it
14526
14527                 _impResolveToken(CORINFO_TOKENKIND_Class);
14528
14529                 JITDUMP(" %08X", resolvedToken.token);
14530
14531                 op2 = impTokenToHandle(&resolvedToken);
14532                 if (op2 == nullptr)
14533                 { // compDonotInline()
14534                     return;
14535                 }
14536
14537                 if (tiVerificationNeeded)
14538                 {
14539                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14540                            "need refany");
14541                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
14542                 }
14543
14544                 op1 = impPopStack().val;
14545                 // make certain it is normalized;
14546                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14547
14548                 // Call helper GETREFANY(classHandle, op1);
14549                 args = gtNewArgList(op2, op1);
14550                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
14551
14552                 impPushOnStack(op1, tiRetVal);
14553                 break;
14554
14555             case CEE_REFANYTYPE:
14556
14557                 if (tiVerificationNeeded)
14558                 {
14559                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14560                            "need refany");
14561                 }
14562
14563                 op1 = impPopStack().val;
14564
14565                 // make certain it is normalized;
14566                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14567
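                // After normalization the refany (TypedReference) is either a GT_OBJ over its address, in
                // which case we load the 'type' field from the proper slot, or a GT_MKREFANY we built
                // earlier, in which case its second operand already holds the type handle.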
14568                 if (op1->gtOper == GT_OBJ)
14569                 {
14570                     // Get the address of the refany
14571                     op1 = op1->gtOp.gtOp1;
14572
14573                     // Fetch the type from the correct slot
14574                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14575                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
14576                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
14577                 }
14578                 else
14579                 {
14580                     assertImp(op1->gtOper == GT_MKREFANY);
14581
14582                     // The pointer may have side-effects
14583                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
14584                     {
14585                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14586 #ifdef DEBUG
14587                         impNoteLastILoffs();
14588 #endif
14589                     }
14590
14591                     // We already have the class handle
14592                     op1 = op1->gtOp.gtOp2;
14593                 }
14594
14595                 // convert native TypeHandle to RuntimeTypeHandle
14596                 {
14597                     GenTreeArgList* helperArgs = gtNewArgList(op1);
14598
14599                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, helperArgs);
14600
14601                     // The handle struct is returned in a register
14602                     op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14603
14604                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
14605                 }
14606
14607                 impPushOnStack(op1, tiRetVal);
14608                 break;
14609
14610             case CEE_LDTOKEN:
14611             {
14612                 /* Get the Class index */
14613                 assertImp(sz == sizeof(unsigned));
14614                 lastLoadToken = codeAddr;
14615                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
14616
14617                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
14618
14619                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14620                 if (op1 == nullptr)
14621                 { // compDonotInline()
14622                     return;
14623                 }
14624
14625                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
14626                 assert(resolvedToken.hClass != nullptr);
14627
14628                 if (resolvedToken.hMethod != nullptr)
14629                 {
14630                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
14631                 }
14632                 else if (resolvedToken.hField != nullptr)
14633                 {
14634                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
14635                 }
14636
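                // Wrap the raw handle in the helper that produces the corresponding RuntimeTypeHandle,
                // RuntimeMethodHandle, or RuntimeFieldHandle struct for ldtoken.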
14637                 GenTreeArgList* helperArgs = gtNewArgList(op1);
14638
14639                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
14640
14641                 // The handle struct is returned in a register
14642                 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14643
14644                 tiRetVal = verMakeTypeInfo(tokenType);
14645                 impPushOnStack(op1, tiRetVal);
14646             }
14647             break;
14648
14649             case CEE_UNBOX:
14650             case CEE_UNBOX_ANY:
14651             {
14652                 /* Get the Class index */
14653                 assertImp(sz == sizeof(unsigned));
14654
14655                 _impResolveToken(CORINFO_TOKENKIND_Class);
14656
14657                 JITDUMP(" %08X", resolvedToken.token);
14658
14659                 BOOL runtimeLookup;
14660                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
14661                 if (op2 == nullptr)
14662                 {
14663                     assert(compDonotInline());
14664                     return;
14665                 }
14666
14667                 // Run this always so we can get access exceptions even with SkipVerification.
14668                 accessAllowedResult =
14669                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14670                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14671
14672                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
14673                 {
14674                     if (tiVerificationNeeded)
14675                     {
14676                         typeInfo tiUnbox = impStackTop().seTypeInfo;
14677                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
14678                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14679                         tiRetVal.NormaliseForStack();
14680                     }
14681                     JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
14682                     op1 = impPopStack().val;
14683                     goto CASTCLASS;
14684                 }
14685
14686                 /* Pop the object and create the unbox helper call */
14687                 /* You might think that for UNBOX_ANY we need to push a different */
14688                 /* (non-byref) type, but here we're making the tiRetVal that is used */
14689                 /* for the intermediate pointer which we then transfer onto the OBJ */
14690                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
14691                 if (tiVerificationNeeded)
14692                 {
14693                     typeInfo tiUnbox = impStackTop().seTypeInfo;
14694                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
14695
14696                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14697                     Verify(tiRetVal.IsValueClass(), "not value class");
14698                     tiRetVal.MakeByRef();
14699
14700                     // We always come from an objref, so this is a safe byref
14701                     tiRetVal.SetIsPermanentHomeByRef();
14702                     tiRetVal.SetIsReadonlyByRef();
14703                 }
14704
14705                 op1 = impPopStack().val;
14706                 assertImp(op1->gtType == TYP_REF);
14707
14708                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
14709                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
14710
14711                 // Check legality and profitability of inline expansion for unboxing.
14712                 const bool canExpandInline    = (helper == CORINFO_HELP_UNBOX);
14713                 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
14714
14715                 if (canExpandInline && shouldExpandInline)
14716                 {
14717                     JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
14718                     // we are doing normal unboxing
14719                     // inline the common case of the unbox helper
14720                     // UNBOX(exp) morphs into
14721                     // clone = pop(exp);
14722                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14723                     // push(clone + TARGET_POINTER_SIZE)
14724                     //
14725                     GenTree* cloneOperand;
14726                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14727                                        nullptr DEBUGARG("inline UNBOX clone1"));
14728                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14729
14730                     GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14731
14732                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14733                                        nullptr DEBUGARG("inline UNBOX clone2"));
14734                     op2 = impTokenToHandle(&resolvedToken);
14735                     if (op2 == nullptr)
14736                     { // compDonotInline()
14737                         return;
14738                     }
14739                     args = gtNewArgList(op2, op1);
14740                     op1  = gtNewHelperCallNode(helper, TYP_VOID, args);
14741
14742                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14743                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14744                     condBox->gtFlags |= GTF_RELOP_QMARK;
14745
14746                     // QMARK nodes cannot reside on the evaluation stack. Because there
14747                     // may be other trees on the evaluation stack that side-effect the
14748                     // sources of the UNBOX operation we must spill the stack.
14749
14750                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14751
14752                     // Create the address-expression to reference past the object header
14753                     // to the beginning of the value-type. Today this means adjusting
14754                     // past the base of the object's vtable field, which is pointer sized.
14755
14756                     op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
14757                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14758                 }
14759                 else
14760                 {
14761                     JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
14762                             canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
14763
14764                     // Don't optimize, just call the helper and be done with it
14765                     args = gtNewArgList(op2, op1);
14766                     op1 =
14767                         gtNewHelperCallNode(helper,
14768                                             (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
14769                 }
14770
14771                 assert((helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF) || // Unbox helper returns a byref.
14772                        (helper == CORINFO_HELP_UNBOX_NULLABLE &&
14773                         varTypeIsStruct(op1)) // UnboxNullable helper returns a struct.
14774                        );
14775
14776                 /*
14777                   ----------------------------------------------------------------------
14778                   | \ helper  |                         |                              |
14779                   |   \       |                         |                              |
14780                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14781                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14782                   | opcode  \ |                         |                              |
14783                   |---------------------------------------------------------------------
14784                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14785                   |           |                         | push the BYREF to this local |
14786                   |---------------------------------------------------------------------
14787                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14788                   |           | the BYREF               | For Linux when the           |
14789                   |           |                         |  struct is returned in two   |
14790                   |           |                         |  registers create a temp     |
14791                   |           |                         |  which address is passed to  |
14792                   |           |                         |  the unbox_nullable helper.  |
14793                   |---------------------------------------------------------------------
14794                 */
14795
14796                 if (opcode == CEE_UNBOX)
14797                 {
14798                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14799                     {
14800                         // Unbox nullable helper returns a struct type.
14801                         // We need to spill it to a temp so that we can take its address.
14802                         // Here we need the unsafe value cls check, since the address of the struct is taken to be used
14803                         // further along and could potentially be exploited.
14804
14805                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14806                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14807
14808                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14809                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14810                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14811
14812                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14813                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14814                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14815                     }
14816
14817                     assert(op1->gtType == TYP_BYREF);
14818                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14819                 }
14820                 else
14821                 {
14822                     assert(opcode == CEE_UNBOX_ANY);
14823
14824                     if (helper == CORINFO_HELP_UNBOX)
14825                     {
14826                         // Normal unbox helper returns a TYP_BYREF.
14827                         impPushOnStack(op1, tiRetVal);
14828                         oper = GT_OBJ;
14829                         goto OBJ;
14830                     }
14831
14832                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14833
14834 #if FEATURE_MULTIREG_RET
14835
14836                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14837                     {
14838                         // Unbox nullable helper returns a TYP_STRUCT.
14839                         // For the multi-reg case we need to spill it to a temp so that
14840                         // we can pass the address to the unbox_nullable jit helper.
14841
14842                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14843                         lvaTable[tmp].lvIsMultiRegArg = true;
14844                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14845
14846                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14847                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14848                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14849
14850                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14851                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14852                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14853
14854                         // In this case the return value of the unbox helper is TYP_BYREF.
14855                         // Make sure the right type is placed on the operand type stack.
14856                         impPushOnStack(op1, tiRetVal);
14857
14858                         // Load the struct.
14859                         oper = GT_OBJ;
14860
14861                         assert(op1->gtType == TYP_BYREF);
14862                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14863
14864                         goto OBJ;
14865                     }
14866                     else
14867
14868 #endif // FEATURE_MULTIREG_RET
14869
14870                     {
14871                         // If the struct is not returned in registers, we have it materialized in the RetBuf.
14872                         assert(op1->gtType == TYP_STRUCT);
14873                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14874                         assert(tiRetVal.IsValueClass());
14875                     }
14876                 }
14877
14878                 impPushOnStack(op1, tiRetVal);
14879             }
14880             break;
14881
14882             case CEE_BOX:
14883             {
14884                 /* Get the Class index */
14885                 assertImp(sz == sizeof(unsigned));
14886
14887                 _impResolveToken(CORINFO_TOKENKIND_Box);
14888
14889                 JITDUMP(" %08X", resolvedToken.token);
14890
14891                 if (tiVerificationNeeded)
14892                 {
14893                     typeInfo tiActual = impStackTop().seTypeInfo;
14894                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14895
14896                     Verify(verIsBoxable(tiBox), "boxable type expected");
14897
14898                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14899                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14900                            "boxed type has unsatisfied class constraints");
14901
14902                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14903
14904                     // Observation: the following code introduces a boxed value class on the stack, but,
14905                     // according to the ECMA spec, one would simply expect: tiRetVal =
14906                     // typeInfo(TI_REF,impGetObjectClass());
14907
14908                     // Push the result back on the stack:
14909                     // even if clsHnd is a value class we want the TI_REF.
14910                     // We call back to the EE to find out what type we should push (for Nullable<T> we push T).
14911                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14912                 }
14913
14914                 accessAllowedResult =
14915                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14916                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14917
14918                 // Note BOX can be used on things that are not value classes, in which
14919                 // case we get a NOP.  However the verifier's view of the type on the
14920                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14921                 if (!eeIsValueClass(resolvedToken.hClass))
14922                 {
14923                     JITDUMP("\n Importing BOX(refClass) as NOP\n");
14924                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14925                     break;
14926                 }
14927
14928                 // Look ahead for unbox.any
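                // A 'box' immediately followed by 'unbox.any' of the same type is a no-op pair; shared
                // generic code such as "(int)(object)value" (illustrative C# only) commonly produces it,
                // so peephole it away here rather than allocating a box.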
14929                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14930                 {
14931                     CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14932
14933                     impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14934
14935                     // See if the resolved tokens describe types that are equal.
14936                     const TypeCompareState compare =
14937                         info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
14938
14939                     // If so, box/unbox.any is a nop.
14940                     if (compare == TypeCompareState::Must)
14941                     {
14942                         JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
14943                         // Skip the next unbox.any instruction
14944                         sz += sizeof(mdToken) + 1;
14945                         break;
14946                     }
14947                 }
14948
14949                 impImportAndPushBox(&resolvedToken);
14950                 if (compDonotInline())
14951                 {
14952                     return;
14953                 }
14954             }
14955             break;
14956
14957             case CEE_SIZEOF:
14958
14959                 /* Get the Class index */
14960                 assertImp(sz == sizeof(unsigned));
14961
14962                 _impResolveToken(CORINFO_TOKENKIND_Class);
14963
14964                 JITDUMP(" %08X", resolvedToken.token);
14965
14966                 if (tiVerificationNeeded)
14967                 {
14968                     tiRetVal = typeInfo(TI_INT);
14969                 }
14970
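                // sizeof is folded to a compile-time integer constant; no runtime work is needed.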
14971                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14972                 impPushOnStack(op1, tiRetVal);
14973                 break;
14974
14975             case CEE_CASTCLASS:
14976
14977                 /* Get the Class index */
14978
14979                 assertImp(sz == sizeof(unsigned));
14980
14981                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14982
14983                 JITDUMP(" %08X", resolvedToken.token);
14984
14985                 if (!opts.IsReadyToRun())
14986                 {
14987                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14988                     if (op2 == nullptr)
14989                     { // compDonotInline()
14990                         return;
14991                     }
14992                 }
14993
14994                 if (tiVerificationNeeded)
14995                 {
14996                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14997                     // box it
14998                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14999                 }
15000
15001                 accessAllowedResult =
15002                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15003                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15004
15005                 op1 = impPopStack().val;
15006
15007             /* Pop the address and create the 'checked cast' helper call */
15008
15009             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
15010             // and op2 to contain code that creates the type handle corresponding to typeRef
15011             CASTCLASS:
15012             {
15013                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
15014
15015                 if (optTree != nullptr)
15016                 {
15017                     impPushOnStack(optTree, tiRetVal);
15018                 }
15019                 else
15020                 {
15021
15022 #ifdef FEATURE_READYTORUN_COMPILER
15023                     if (opts.IsReadyToRun())
15024                     {
15025                         GenTreeCall* opLookup =
15026                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
15027                                                       gtNewArgList(op1));
15028                         usingReadyToRunHelper = (opLookup != nullptr);
15029                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
15030
15031                         if (!usingReadyToRunHelper)
15032                         {
15033                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
15034                             // and the chkcastany call with a single call to a dynamic R2R cell that will:
15035                             //      1) Load the context
15036                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
15037                             //      stub
15038                             //      3) Check the object on the stack for the type-cast
15039                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15040
15041                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15042                             if (op2 == nullptr)
15043                             { // compDonotInline()
15044                                 return;
15045                             }
15046                         }
15047                     }
15048
15049                     if (!usingReadyToRunHelper)
15050 #endif
15051                     {
15052                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
15053                     }
15054                     if (compDonotInline())
15055                     {
15056                         return;
15057                     }
15058
15059                     /* Push the result back on the stack */
15060                     impPushOnStack(op1, tiRetVal);
15061                 }
15062             }
15063             break;
15064
15065             case CEE_THROW:
15066
15067                 if (compIsForInlining())
15068                 {
15069                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15070                     // TODO: Will this be too strict, given that we will inline many basic blocks?
15071                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15072
15073                     /* Do we have just the exception on the stack? */
15074
15075                     if (verCurrentState.esStackDepth != 1)
15076                     {
15077                         /* if not, just don't inline the method */
15078
15079                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
15080                         return;
15081                     }
15082                 }
15083
15084                 if (tiVerificationNeeded)
15085                 {
15086                     tiRetVal = impStackTop().seTypeInfo;
15087                     Verify(tiRetVal.IsObjRef(), "object ref expected");
15088                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15089                     {
15090                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
15091                     }
15092                 }
15093
15094                 block->bbSetRunRarely(); // any block with a throw is rare
15095                 /* Pop the exception object and create the 'throw' helper call */
15096
15097                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
15098
15099             EVAL_APPEND:
15100                 if (verCurrentState.esStackDepth > 0)
15101                 {
15102                     impEvalSideEffects();
15103                 }
15104
15105                 assert(verCurrentState.esStackDepth == 0);
15106
15107                 goto APPEND;
15108
15109             case CEE_RETHROW:
15110
15111                 assert(!compIsForInlining());
15112
15113                 if (info.compXcptnsCount == 0)
15114                 {
15115                     BADCODE("rethrow outside catch");
15116                 }
15117
15118                 if (tiVerificationNeeded)
15119                 {
15120                     Verify(block->hasHndIndex(), "rethrow outside catch");
15121                     if (block->hasHndIndex())
15122                     {
15123                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
15124                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
15125                         if (HBtab->HasFilter())
15126                         {
15127                             // we'd better be in the handler clause part, not the filter part
15128                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
15129                                    "rethrow in filter");
15130                         }
15131                     }
15132                 }
15133
15134                 /* Create the 'rethrow' helper call */
15135
15136                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
15137
15138                 goto EVAL_APPEND;
15139
15140             case CEE_INITOBJ:
15141
15142                 assertImp(sz == sizeof(unsigned));
15143
15144                 _impResolveToken(CORINFO_TOKENKIND_Class);
15145
15146                 JITDUMP(" %08X", resolvedToken.token);
15147
15148                 if (tiVerificationNeeded)
15149                 {
15150                     typeInfo tiTo    = impStackTop().seTypeInfo;
15151                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15152
15153                     Verify(tiTo.IsByRef(), "byref expected");
15154                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15155
15156                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15157                            "type operand incompatible with type of address");
15158                 }
15159
15160                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
15161                 op2  = gtNewIconNode(0);                                     // Value
15162                 op1  = impPopStack().val;                                    // Dest
15163                 op1  = gtNewBlockVal(op1, size);
15164                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15165                 goto SPILL_APPEND;
15166
15167             case CEE_INITBLK:
15168
15169                 if (tiVerificationNeeded)
15170                 {
15171                     Verify(false, "bad opcode");
15172                 }
15173
15174                 op3 = impPopStack().val; // Size
15175                 op2 = impPopStack().val; // Value
15176                 op1 = impPopStack().val; // Dest
15177
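                // A constant size lets us build a fixed-size GT_BLK; otherwise build a GT_DYN_BLK whose
                // size operand is evaluated at run time (size is recorded as 0 in that case).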
15178                 if (op3->IsCnsIntOrI())
15179                 {
15180                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15181                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15182                 }
15183                 else
15184                 {
15185                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15186                     size = 0;
15187                 }
15188                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15189
15190                 goto SPILL_APPEND;
15191
15192             case CEE_CPBLK:
15193
15194                 if (tiVerificationNeeded)
15195                 {
15196                     Verify(false, "bad opcode");
15197                 }
15198                 op3 = impPopStack().val; // Size
15199                 op2 = impPopStack().val; // Src
15200                 op1 = impPopStack().val; // Dest
15201
15202                 if (op3->IsCnsIntOrI())
15203                 {
15204                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15205                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15206                 }
15207                 else
15208                 {
15209                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15210                     size = 0;
15211                 }
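                // The copy source must be a struct location, not an address: strip a GT_ADDR if the
                // source is already an address of a location, otherwise dereference it with GT_IND.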
15212                 if (op2->OperGet() == GT_ADDR)
15213                 {
15214                     op2 = op2->gtOp.gtOp1;
15215                 }
15216                 else
15217                 {
15218                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
15219                 }
15220
15221                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
15222                 goto SPILL_APPEND;
15223
15224             case CEE_CPOBJ:
15225
15226                 assertImp(sz == sizeof(unsigned));
15227
15228                 _impResolveToken(CORINFO_TOKENKIND_Class);
15229
15230                 JITDUMP(" %08X", resolvedToken.token);
15231
15232                 if (tiVerificationNeeded)
15233                 {
15234                     typeInfo tiFrom  = impStackTop().seTypeInfo;
15235                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
15236                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15237
15238                     Verify(tiFrom.IsByRef(), "expected byref source");
15239                     Verify(tiTo.IsByRef(), "expected byref destination");
15240
15241                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
15242                            "type of source address incompatible with type operand");
15243                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15244                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15245                            "type operand incompatible with type of destination address");
15246                 }
15247
15248                 if (!eeIsValueClass(resolvedToken.hClass))
15249                 {
15250                     op1 = impPopStack().val; // address to load from
15251
15252                     impBashVarAddrsToI(op1);
15253
15254                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
15255
15256                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
15257                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
15258
15259                     impPushOnStack(op1, typeInfo());
15260                     opcode = CEE_STIND_REF;
15261                     lclTyp = TYP_REF;
15262                     goto STIND_POST_VERIFY;
15263                 }
15264
15265                 op2 = impPopStack().val; // Src
15266                 op1 = impPopStack().val; // Dest
15267                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
15268                 goto SPILL_APPEND;
15269
15270             case CEE_STOBJ:
15271             {
15272                 assertImp(sz == sizeof(unsigned));
15273
15274                 _impResolveToken(CORINFO_TOKENKIND_Class);
15275
15276                 JITDUMP(" %08X", resolvedToken.token);
15277
15278                 if (eeIsValueClass(resolvedToken.hClass))
15279                 {
15280                     lclTyp = TYP_STRUCT;
15281                 }
15282                 else
15283                 {
15284                     lclTyp = TYP_REF;
15285                 }
15286
15287                 if (tiVerificationNeeded)
15288                 {
15289
15290                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
15291
15292                     // Make sure we have a good looking byref
15293                     Verify(tiPtr.IsByRef(), "pointer not byref");
15294                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
15295                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
15296                     {
15297                         compUnsafeCastUsed = true;
15298                     }
15299
15300                     typeInfo ptrVal = DereferenceByRef(tiPtr);
15301                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
15302
15303                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
15304                     {
15305                         Verify(false, "type of value incompatible with type operand");
15306                         compUnsafeCastUsed = true;
15307                     }
15308
15309                     if (!tiCompatibleWith(argVal, ptrVal, false))
15310                     {
15311                         Verify(false, "type operand incompatible with type of address");
15312                         compUnsafeCastUsed = true;
15313                     }
15314                 }
15315                 else
15316                 {
15317                     compUnsafeCastUsed = true;
15318                 }
15319
15320                 if (lclTyp == TYP_REF)
15321                 {
15322                     opcode = CEE_STIND_REF;
15323                     goto STIND_POST_VERIFY;
15324                 }
15325
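                // stobj of a primitive value type (e.g. int32) is imported as an ordinary indirect
                // store of that type rather than a struct copy.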
15326                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15327                 if (impIsPrimitive(jitTyp))
15328                 {
15329                     lclTyp = JITtype2varType(jitTyp);
15330                     goto STIND_POST_VERIFY;
15331                 }
15332
15333                 op2 = impPopStack().val; // Value
15334                 op1 = impPopStack().val; // Ptr
15335
15336                 assertImp(varTypeIsStruct(op2));
15337
15338                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15339
15340                 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
15341                 {
15342                     op1->gtFlags |= GTF_BLK_UNALIGNED;
15343                 }
15344                 goto SPILL_APPEND;
15345             }
15346
15347             case CEE_MKREFANY:
15348
15349                 assert(!compIsForInlining());
15350
15351                 // Being lazy here. Refanys are tricky in terms of gc tracking.
15352                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
15353
15354                 JITDUMP("disabling struct promotion because of mkrefany\n");
15355                 fgNoStructPromotion = true;
15356
15357                 oper = GT_MKREFANY;
15358                 assertImp(sz == sizeof(unsigned));
15359
15360                 _impResolveToken(CORINFO_TOKENKIND_Class);
15361
15362                 JITDUMP(" %08X", resolvedToken.token);
15363
15364                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15365                 if (op2 == nullptr)
15366                 { // compDonotInline()
15367                     return;
15368                 }
15369
15370                 if (tiVerificationNeeded)
15371                 {
15372                     typeInfo tiPtr   = impStackTop().seTypeInfo;
15373                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15374
15375                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
15376                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
15377                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
15378                 }
15379
15380                 accessAllowedResult =
15381                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15382                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15383
15384                 op1 = impPopStack().val;
15385
15386                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
15387                 // But JIT32 allowed it, so we continue to allow it.
15388                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
15389
15390                 // MKREFANY returns a struct.  op2 is the class token.
15391                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
15392
15393                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
15394                 break;
15395
15396             case CEE_LDOBJ:
15397             {
15398                 oper = GT_OBJ;
15399                 assertImp(sz == sizeof(unsigned));
15400
15401                 _impResolveToken(CORINFO_TOKENKIND_Class);
15402
15403                 JITDUMP(" %08X", resolvedToken.token);
15404
15405             OBJ:
15406
15407                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15408
15409                 if (tiVerificationNeeded)
15410                 {
15411                     typeInfo tiPtr = impStackTop().seTypeInfo;
15412
15413                     // Make sure we have a byref
15414                     if (!tiPtr.IsByRef())
15415                     {
15416                         Verify(false, "pointer not byref");
15417                         compUnsafeCastUsed = true;
15418                     }
15419                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
15420
15421                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
15422                     {
15423                         Verify(false, "type of address incompatible with type operand");
15424                         compUnsafeCastUsed = true;
15425                     }
15426                     tiRetVal.NormaliseForStack();
15427                 }
15428                 else
15429                 {
15430                     compUnsafeCastUsed = true;
15431                 }
15432
15433                 if (eeIsValueClass(resolvedToken.hClass))
15434                 {
15435                     lclTyp = TYP_STRUCT;
15436                 }
15437                 else
15438                 {
15439                     lclTyp = TYP_REF;
15440                     opcode = CEE_LDIND_REF;
15441                     goto LDIND_POST_VERIFY;
15442                 }
15443
15444                 op1 = impPopStack().val;
15445
15446                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
15447
15448                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15449                 if (impIsPrimitive(jitTyp))
15450                 {
15451                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
15452
15453                     // Could point anywhere, for example a boxed class static int
15454                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15455                     assertImp(varTypeIsArithmetic(op1->gtType));
15456                 }
15457                 else
15458                 {
15459                     // OBJ returns a struct,
15460                     // and carries an inline argument which is the class token of the loaded obj
15461                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
15462                 }
15463                 op1->gtFlags |= GTF_EXCEPT;
15464
15465                 if (prefixFlags & PREFIX_UNALIGNED)
15466                 {
15467                     op1->gtFlags |= GTF_IND_UNALIGNED;
15468                 }
15469
15470                 impPushOnStack(op1, tiRetVal);
15471                 break;
15472             }
15473
15474             case CEE_LDLEN:
15475                 if (tiVerificationNeeded)
15476                 {
15477                     typeInfo tiArray = impStackTop().seTypeInfo;
15478                     Verify(verIsSDArray(tiArray), "bad array");
15479                     tiRetVal = typeInfo(TI_INT);
15480                 }
15481
15482                 op1 = impPopStack().val;
15483                 if (!opts.MinOpts() && !opts.compDbgCode)
15484                 {
15485                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
15486                     GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
15487
15488                     /* Mark the block as containing a length expression */
15489
15490                     if (op1->gtOper == GT_LCL_VAR)
15491                     {
15492                         block->bbFlags |= BBF_HAS_IDX_LEN;
15493                     }
15494
15495                     op1 = arrLen;
15496                 }
15497                 else
15498                 {
15499                     /* Create the expression "*(array_addr + ArrLenOffs)" */
15500                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15501                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
15502                     op1 = gtNewIndir(TYP_INT, op1);
15503                     op1->gtFlags |= GTF_IND_ARR_LEN;
15504                 }
15505
15506                 /* Push the result back on the stack */
15507                 impPushOnStack(op1, tiRetVal);
15508                 break;
15509
15510             case CEE_BREAK:
15511                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
15512                 goto SPILL_APPEND;
15513
15514             case CEE_NOP:
15515                 if (opts.compDbgCode)
15516                 {
15517                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
15518                     goto SPILL_APPEND;
15519                 }
15520                 break;
15521
15522             /******************************** NYI *******************************/
15523
15524             case 0xCC:
15525                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
15526
15527             case CEE_ILLEGAL:
15528             case CEE_MACRO_END:
15529
15530             default:
15531                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
15532         }
15533
15534         codeAddr += sz;
15535         prevOpcode = opcode;
15536
15537         prefixFlags = 0;
15538     }
15539
15540     return;
15541 #undef _impResolveToken
15542 }
15543 #ifdef _PREFAST_
15544 #pragma warning(pop)
15545 #endif
15546
15547 // Push a local/argument tree on the operand stack
15548 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
15549 {
15550     tiRetVal.NormaliseForStack();
15551
15552     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
15553     {
15554         tiRetVal.SetUninitialisedObjRef();
15555     }
15556
15557     impPushOnStack(op, tiRetVal);
15558 }
15559
15560 // Load a local/argument on the operand stack
15561 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
15562 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
15563 {
15564     var_types lclTyp;
15565
15566     if (lvaTable[lclNum].lvNormalizeOnLoad())
15567     {
15568         lclTyp = lvaGetRealType(lclNum);
15569     }
15570     else
15571     {
15572         lclTyp = lvaGetActualType(lclNum);
15573     }
15574
15575     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
15576 }
15577
15578 // Load an argument on the operand stack
15579 // Shared by the various CEE_LDARG opcodes
15580 // ilArgNum is the argument index as specified in IL.
15581 // It will be mapped to the correct lvaTable index
15582 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
15583 {
15584     Verify(ilArgNum < info.compILargsCount, "bad arg num");
15585
15586     if (compIsForInlining())
15587     {
15588         if (ilArgNum >= info.compArgsCount)
15589         {
15590             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
15591             return;
15592         }
15593
15594         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
15595                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
15596     }
15597     else
15598     {
15599         if (ilArgNum >= info.compArgsCount)
15600         {
15601             BADCODE("Bad IL");
15602         }
15603
15604         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
15605
15606         if (lclNum == info.compThisArg)
15607         {
15608             lclNum = lvaArg0Var;
15609         }
15610
15611         impLoadVar(lclNum, offset);
15612     }
15613 }
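
// Illustrative example (hypothetical method, assumptions noted): for an instance method
//   int Foo(int a, int b)
// IL argument 0 is the 'this' pointer, so "ldarg.1" (i.e. 'a') reaches impLoadArg with ilArgNum == 1.
// compMapILargNum then accounts for any hidden parameters (such as a return buffer) to produce the
// lvaTable index, and if that index is info.compThisArg it is redirected to lvaArg0Var before the
// load is pushed.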
15614
15615 // Load a local on the operand stack
15616 // Shared by the various CEE_LDLOC opcodes
15617 // ilLclNum is the local index as specified in IL.
15618 // It will be mapped to the correct lvaTable index
15619 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
15620 {
15621     if (tiVerificationNeeded)
15622     {
15623         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
15624         Verify(info.compInitMem, "initLocals not set");
15625     }
15626
15627     if (compIsForInlining())
15628     {
15629         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15630         {
15631             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
15632             return;
15633         }
15634
15635         // Get the local type
15636         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
15637
15638         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
15639
15640         /* Have we allocated a temp for this local? */
15641
15642         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
15643
15644         // All vars of inlined methods should be !lvNormalizeOnLoad()
15645
15646         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
15647         lclTyp = genActualType(lclTyp);
15648
15649         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
15650     }
15651     else
15652     {
15653         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15654         {
15655             BADCODE("Bad IL");
15656         }
15657
15658         unsigned lclNum = info.compArgsCount + ilLclNum;
15659
15660         impLoadVar(lclNum, offset);
15661     }
15662 }
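
// Illustrative example (hypothetical IL, assumptions noted): in a non-inlined method where
// info.compArgsCount == 2, "ldloc.0" reaches impLoadLoc with ilLclNum == 0 and maps to
// lclNum = info.compArgsCount + ilLclNum == 2; locals simply follow the arguments in lvaTable.
// For an inlinee the local is instead fetched via impInlineFetchLocal, which may allocate a temp
// on first use.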
15663
15664 #ifdef _TARGET_ARM_
15665 /**************************************************************************************
15666  *
15667  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
15668  *  dst struct, because struct promotion will turn it into a float/double variable while
15669  *  the rhs will be an int/long variable. We don't generate code for assigning an int into
15670  *  a float, but there is nothing else that would prevent such a tree from being created. The tree, however,
15671  *  would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
15672  *
15673  *  tmpNum - the lcl dst variable num that is a struct.
15674  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
15675  *  hClass - the type handle for the struct variable.
15676  *
15677  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
15678  *        however, we could do a codegen of transferring from int to float registers
15679  *        (transfer, not a cast.)
15680  *
15681  */
15682 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
15683 {
15684     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15685     {
15686         int       hfaSlots = GetHfaCount(hClass);
15687         var_types hfaType  = GetHfaType(hClass);
15688
15689         // If we have varargs, we morph the method's return type to be "int" irrespective of its original
15690         // type (struct/float) at import time, because the ABI specifies that varargs calls return in integer registers.
15691         // We don't want struct promotion to replace an expression like this:
15692         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
15693         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
15694         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15695             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15696         {
15697             // Make sure this struct type stays as struct so we can receive the call in a struct.
15698             lvaTable[tmpNum].lvIsMultiRegRet = true;
15699         }
15700     }
15701 }
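
// Illustrative note (hypothetical C# shape, assumptions noted): an HFA is a struct composed solely
// of float fields or solely of double fields, e.g.
//   struct Hfa { public float A; public float B; }
// When such a struct is the destination of a varargs call result, the check above keeps the local
// unpromoted (lvIsMultiRegRet = true) so struct promotion never produces a float-typed field being
// assigned directly from an integer register value.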
15702 #endif // _TARGET_ARM_
15703
15704 #if FEATURE_MULTIREG_RET
15705 GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
15706 {
15707     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
15708     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
15709     GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
15710
15711     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15712     ret->gtFlags |= GTF_DONT_CSE;
15713
15714     assert(IsMultiRegReturnedType(hClass));
15715
15716     // Mark the var so that fields are not promoted and stay together.
15717     lvaTable[tmpNum].lvIsMultiRegRet = true;
15718
15719     return ret;
15720 }
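
// Illustrative sketch (hypothetical struct, assumptions noted): on System V AMD64 a struct such as
//   struct { long a; long b; }   // two integer eightbytes, returned in two registers
// is a multi-reg return. Passing a call producing such a value through impAssignMultiRegTypeToVar
// stores the call result to a temp and pushes a GT_LCL_VAR of that temp, marked GTF_DONT_CSE and
// lvIsMultiRegRet so its fields stay together.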
15721 #endif // FEATURE_MULTIREG_RET
15722
15723 // do import for a return
15724 // returns false if inlining was aborted
15725 // opcode can be ret or call in the case of a tail.call
15726 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15727 {
15728     if (tiVerificationNeeded)
15729     {
15730         verVerifyThisPtrInitialised();
15731
15732         unsigned expectedStack = 0;
15733         if (info.compRetType != TYP_VOID)
15734         {
15735             typeInfo tiVal = impStackTop().seTypeInfo;
15736             typeInfo tiDeclared =
15737                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15738
15739             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15740
15741             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15742             expectedStack = 1;
15743         }
15744         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15745     }
15746
15747 #ifdef DEBUG
15748     // If we are importing an inlinee and have GC ref locals we always
15749     // need to have a spill temp for the return value.  This temp
15750     // should have been set up in advance, over in fgFindBasicBlocks.
15751     if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
15752     {
15753         assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
15754     }
15755 #endif // DEBUG
15756
15757     GenTree*             op2       = nullptr;
15758     GenTree*             op1       = nullptr;
15759     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15760
15761     if (info.compRetType != TYP_VOID)
15762     {
15763         StackEntry se = impPopStack();
15764         retClsHnd     = se.seTypeInfo.GetClassHandle();
15765         op2           = se.val;
15766
15767         if (!compIsForInlining())
15768         {
15769             impBashVarAddrsToI(op2);
15770             op2 = impImplicitIorI4Cast(op2, info.compRetType);
15771             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15772             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15773                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15774                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15775                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15776                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15777
15778 #ifdef DEBUG
15779             if (opts.compGcChecks && info.compRetType == TYP_REF)
15780             {
15781                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
15782                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15783                 // one-return BB.
15784
15785                 assert(op2->gtType == TYP_REF);
15786
15787                 // confirm that the argument is a GC pointer (for debugging (GC stress))
15788                 GenTreeArgList* args = gtNewArgList(op2);
15789                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
15790
15791                 if (verbose)
15792                 {
15793                     printf("\ncompGcChecks tree:\n");
15794                     gtDispTree(op2);
15795                 }
15796             }
15797 #endif
15798         }
15799         else
15800         {
15801             // inlinee's stack should be empty now.
15802             assert(verCurrentState.esStackDepth == 0);
15803
15804 #ifdef DEBUG
15805             if (verbose)
15806             {
15807                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15808                 gtDispTree(op2);
15809             }
15810 #endif
15811
15812             // Make sure the type matches the original call.
15813
15814             var_types returnType       = genActualType(op2->gtType);
15815             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15816             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15817             {
15818                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15819             }
15820
15821             if (returnType != originalCallType)
15822             {
15823                 JITDUMP("Return type mismatch, have %s, needed %s\n", varTypeName(returnType),
15824                         varTypeName(originalCallType));
15825                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15826                 return false;
15827             }
15828
15829             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15830             // expression. At this point, retExpr could already be set if there are multiple
15831             // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
15832             // the other blocks already set it. If there is only a single return block,
15833             // retExpr shouldn't be set. However, this is not true if we reimport a block
15834             // with a return. In that case, retExpr will be set, then the block will be
15835             // reimported, but retExpr won't get cleared as part of setting the block to
15836             // be reimported. The reimported retExpr value should be the same, so even if
15837             // we don't unconditionally overwrite it, it shouldn't matter.
15838             if (info.compRetNativeType != TYP_STRUCT)
15839             {
15840                 // compRetNativeType is not TYP_STRUCT.
15841                 // This implies it could be either a scalar type or SIMD vector type or
15842                 // a struct type that can be normalized to a scalar type.
15843
15844                 if (varTypeIsStruct(info.compRetType))
15845                 {
15846                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15847                     // adjust the type away from struct to integral
15848                     // and no normalizing
15849                     op2 = impFixupStructReturnType(op2, retClsHnd);
15850                 }
15851                 else
15852                 {
15853                     // Do we have to normalize?
15854                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15855                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15856                         fgCastNeeded(op2, fncRealRetType))
15857                     {
15858                         // Small-typed return values are normalized by the callee
15859                         op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
15860                     }
15861                 }
15862
15863                 if (fgNeedReturnSpillTemp())
15864                 {
15865                     assert(info.compRetNativeType != TYP_VOID &&
15866                            (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
15867
15868                     // If this method returns a ref type, track the actual types seen
15869                     // in the returns.
15870                     if (info.compRetType == TYP_REF)
15871                     {
15872                         bool                 isExact      = false;
15873                         bool                 isNonNull    = false;
15874                         CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
15875
15876                         if (impInlineInfo->retExpr == nullptr)
15877                         {
15878                             // This is the first return, so best known type is the type
15879                             // of this return value.
15880                             impInlineInfo->retExprClassHnd        = returnClsHnd;
15881                             impInlineInfo->retExprClassHndIsExact = isExact;
15882                         }
15883                         else if (impInlineInfo->retExprClassHnd != returnClsHnd)
15884                         {
15885                             // This return site type differs from earlier seen sites,
15886                             // so reset the info and we'll fall back to using the method's
15887                             // declared return type for the return spill temp.
15888                             impInlineInfo->retExprClassHnd        = nullptr;
15889                             impInlineInfo->retExprClassHndIsExact = false;
15890                         }
15891                     }
15892
15893                     // This is a bit of a workaround...
15894                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15895                     // not a struct (for example, the struct is composed of exactly one int, and the native
15896                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15897                     // fgNeedReturnSpillTemp() == true, and lvaInlineeReturnSpillTemp is the index of a local var that is set
15898                     // to the *native* return type), and at least one of the return blocks is the result of
15899                     // a call, then we have a problem. The situation is like this (from a failed test case):
15900                     //
15901                     // inliner:
15902                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15903                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15904                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15905                     //
15906                     // inlinee:
15907                     //      ...
15908                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15909                     //      ret
15910                     //      ...
15911                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15912                     //      object&, class System.Func`1<!!0>)
15913                     //      ret
15914                     //
15915                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15916                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15917                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15918                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15919                     //
15920                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15921                     // native return type, which is what it will be set to eventually. We generate the
15922                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15923                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15924
15925                     bool restoreType = false;
15926                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15927                     {
15928                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15929                         op2->gtType = info.compRetNativeType;
15930                         restoreType = true;
15931                     }
15932
15933                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15934                                      (unsigned)CHECK_SPILL_ALL);
15935
15936                     GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15937
15938                     if (restoreType)
15939                     {
15940                         op2->gtType = TYP_STRUCT; // restore it to what it was
15941                     }
15942
15943                     op2 = tmpOp2;
15944
15945 #ifdef DEBUG
15946                     if (impInlineInfo->retExpr)
15947                     {
15948                         // Some other block(s) have seen the CEE_RET first.
15949                         // Better they spilled to the same temp.
15950                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15951                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15952                     }
15953 #endif
15954                 }
15955
15956 #ifdef DEBUG
15957                 if (verbose)
15958                 {
15959                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15960                     gtDispTree(op2);
15961                 }
15962 #endif
15963
15964                 // Report the return expression
15965                 impInlineInfo->retExpr = op2;
15966             }
15967             else
15968             {
15969                 // compRetNativeType is TYP_STRUCT.
15970                 // This implies that the struct is returned via a RetBuf arg or as a multi-reg struct return.
15971
15972                 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
15973
15974                 // Assign the inlinee return into a spill temp.
15975                 // spill temp only exists if there are multiple return points
15976                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15977                 {
15978                     // in this case we have to insert multiple struct copies to the temp
15979                     // and the retexpr is just the temp.
15980                     assert(info.compRetNativeType != TYP_VOID);
15981                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
15982
15983                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15984                                      (unsigned)CHECK_SPILL_ALL);
15985                 }
15986
15987 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15988 #if defined(_TARGET_ARM_)
15989                 // TODO-ARM64-NYI: HFA
15990                 // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
15991                 // next ifdefs could be refactored in a single method with the ifdef inside.
15992                 if (IsHfa(retClsHnd))
15993                 {
15994 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15995 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15996                 ReturnTypeDesc retTypeDesc;
15997                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15998                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15999
16000                 if (retRegCount != 0)
16001                 {
16002                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
16003                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
16004                     // max allowed.)
16005                     assert(retRegCount == MAX_RET_REG_COUNT);
16006                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
16007                     CLANG_FORMAT_COMMENT_ANCHOR;
16008 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16009
16010                     if (fgNeedReturnSpillTemp())
16011                     {
16012                         if (!impInlineInfo->retExpr)
16013                         {
16014 #if defined(_TARGET_ARM_)
16015                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
16016 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16017                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16018                             impInlineInfo->retExpr =
16019                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16020 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16021                         }
16022                     }
16023                     else
16024                     {
16025                         impInlineInfo->retExpr = op2;
16026                     }
16027                 }
16028                 else
16029 #elif defined(_TARGET_ARM64_)
16030                 ReturnTypeDesc retTypeDesc;
16031                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16032                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16033
16034                 if (retRegCount != 0)
16035                 {
16036                     assert(!iciCall->HasRetBufArg());
16037                     assert(retRegCount >= 2);
16038                     if (fgNeedReturnSpillTemp())
16039                     {
16040                         if (!impInlineInfo->retExpr)
16041                         {
16042                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16043                             impInlineInfo->retExpr =
16044                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16045                         }
16046                     }
16047                     else
16048                     {
16049                         impInlineInfo->retExpr = op2;
16050                     }
16051                 }
16052                 else
16053 #endif // defined(_TARGET_ARM64_)
16054                 {
16055                     assert(iciCall->HasRetBufArg());
16056                     GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
16057                     // spill temp only exists if there are multiple return points
16058                     if (fgNeedReturnSpillTemp())
16059                     {
16060                         // if this is the first return we have seen set the retExpr
16061                         if (!impInlineInfo->retExpr)
16062                         {
16063                             impInlineInfo->retExpr =
16064                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
16065                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
16066                         }
16067                     }
16068                     else
16069                     {
16070                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16071                     }
16072                 }
16073             }
16074         }
16075     }
16076
16077     if (compIsForInlining())
16078     {
16079         return true;
16080     }
16081
16082     if (info.compRetType == TYP_VOID)
16083     {
16084         // return void
16085         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16086     }
16087     else if (info.compRetBuffArg != BAD_VAR_NUM)
16088     {
16089         // Assign value to return buff (first param)
16090         GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
16091
16092         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16093         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16094
16095         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
16096         CLANG_FORMAT_COMMENT_ANCHOR;
16097
16098 #if defined(_TARGET_AMD64_)
16099
16100         // The x64 (System V and Win64) calling conventions require the address of
16101         // the implicit return buffer to be returned explicitly (in RAX).
16102         // Change the return type to be BYREF.
16103         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16104 #else  // !defined(_TARGET_AMD64_)
16105         // On non-AMD64 targets, the profiler hook requires the address of the implicit RetBuf to be returned explicitly (in the return register).
16106         // In that case the return type of the function is changed to BYREF.
16107         // If profiler hook is not needed the return type of the function is TYP_VOID.
16108         if (compIsProfilerHookNeeded())
16109         {
16110             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16111         }
16112         else
16113         {
16114             // return void
16115             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16116         }
16117 #endif // !defined(_TARGET_AMD64_)
16118     }
16119     else if (varTypeIsStruct(info.compRetType))
16120     {
16121 #if !FEATURE_MULTIREG_RET
16122         // For both ARM architectures the HFA native types are maintained as structs.
16123         // On System V AMD64 the multireg struct returns are likewise left as structs.
16124         noway_assert(info.compRetNativeType != TYP_STRUCT);
16125 #endif
16126         op2 = impFixupStructReturnType(op2, retClsHnd);
16127         // return op2
16128         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
16129     }
16130     else
16131     {
16132         // return op2
16133         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
16134     }
16135
16136     // We must have imported a tailcall and jumped to RET
16137     if (prefixFlags & PREFIX_TAILCALL)
16138     {
16139 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
16140         // Jit64 compat:
16141         // This cannot be asserted on Amd64 since we permit the following IL pattern:
16142         //      tail.call
16143         //      pop
16144         //      ret
16145         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
16146 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
16147
16148         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
16149
16150         // impImportCall() would have already appended TYP_VOID calls
16151         if (info.compRetType == TYP_VOID)
16152         {
16153             return true;
16154         }
16155     }
16156
16157     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16158 #ifdef DEBUG
16159     // Remember at which BC offset the tree was finished
16160     impNoteLastILoffs();
16161 #endif
16162     return true;
16163 }
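
// Illustrative summary of the GT_RETURN shapes built above for a non-inlined return (approximate,
// not a verbatim dump):
//   void return:           GT_RETURN(TYP_VOID)
//   return-buffer return:  assignment through the RetBuf arg is appended first, then
//                          GT_RETURN(TYP_BYREF, <retBuf arg>) on AMD64 (or when a profiler hook
//                          requires it), otherwise GT_RETURN(TYP_VOID)
//   struct return:         GT_RETURN(<actual native ret type>, <fixed-up op2>)
//   scalar return:         GT_RETURN(<actual ret type>, op2)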
16164
16165 /*****************************************************************************
16166  *  Mark the block as unimported.
16167  *  Note that the caller is responsible for calling impImportBlockPending(),
16168  *  with the appropriate stack-state
16169  */
16170
16171 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
16172 {
16173 #ifdef DEBUG
16174     if (verbose && (block->bbFlags & BBF_IMPORTED))
16175     {
16176         printf("\nBB%02u will be reimported\n", block->bbNum);
16177     }
16178 #endif
16179
16180     block->bbFlags &= ~BBF_IMPORTED;
16181 }
16182
16183 /*****************************************************************************
16184  *  Mark the successors of the given block as unimported.
16185  *  Note that the caller is responsible for calling impImportBlockPending()
16186  *  for all the successors, with the appropriate stack-state.
16187  */
16188
16189 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
16190 {
16191     const unsigned numSuccs = block->NumSucc();
16192     for (unsigned i = 0; i < numSuccs; i++)
16193     {
16194         impReimportMarkBlock(block->GetSucc(i));
16195     }
16196 }
16197
16198 /*****************************************************************************
16199  *
16200  *  Filter wrapper that handles only the passed-in exception code
16201  *  (SEH_VERIFICATION_EXCEPTION); all other exceptions continue the search.
16202  */
16203
16204 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
16205 {
16206     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
16207     {
16208         return EXCEPTION_EXECUTE_HANDLER;
16209     }
16210
16211     return EXCEPTION_CONTINUE_SEARCH;
16212 }
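
// Usage note: this filter is installed by impImportBlock (below) via
// PAL_EXCEPT_FILTER(FilterVerificationExceptions), so only SEH_VERIFICATION_EXCEPTION is handled
// there (routed to verHandleVerificationFailure); all other exceptions continue the search.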
16213
16214 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
16215 {
16216     assert(block->hasTryIndex());
16217     assert(!compIsForInlining());
16218
16219     unsigned  tryIndex = block->getTryIndex();
16220     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
16221
16222     if (isTryStart)
16223     {
16224         assert(block->bbFlags & BBF_TRY_BEG);
16225
16226         // The Stack must be empty
16227         //
16228         if (block->bbStkDepth != 0)
16229         {
16230             BADCODE("Evaluation stack must be empty on entry into a try block");
16231         }
16232     }
16233
16234     // Save the stack contents, we'll need to restore it later
16235     //
16236     SavedStack blockState;
16237     impSaveStackState(&blockState, false);
16238
16239     while (HBtab != nullptr)
16240     {
16241         if (isTryStart)
16242         {
16243             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
16244             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
16245             //
16246             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
16247             {
16248                 // We trigger an invalid program exception here unless we have a try/fault region.
16249                 //
16250                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
16251                 {
16252                     BADCODE(
16253                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
16254                 }
16255                 else
16256                 {
16257                     // Allow a try/fault region to proceed.
16258                     assert(HBtab->HasFaultHandler());
16259                 }
16260             }
16261
16262             /* Recursively process the handler block */
16263             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
16264
16265             //  Construct the proper verification stack state
16266             //   either empty or one that contains just
16267             //   the Exception Object that we are dealing with
16268             //
16269             verCurrentState.esStackDepth = 0;
16270
16271             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
16272             {
16273                 CORINFO_CLASS_HANDLE clsHnd;
16274
16275                 if (HBtab->HasFilter())
16276                 {
16277                     clsHnd = impGetObjectClass();
16278                 }
16279                 else
16280                 {
16281                     CORINFO_RESOLVED_TOKEN resolvedToken;
16282
16283                     resolvedToken.tokenContext = impTokenLookupContextHandle;
16284                     resolvedToken.tokenScope   = info.compScopeHnd;
16285                     resolvedToken.token        = HBtab->ebdTyp;
16286                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
16287                     info.compCompHnd->resolveToken(&resolvedToken);
16288
16289                     clsHnd = resolvedToken.hClass;
16290                 }
16291
16292                 // push the catch arg on the stack, spill to a temp if necessary
16293                 // Note: can update HBtab->ebdHndBeg!
16294                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
16295             }
16296
16297             // Queue up the handler for importing
16298             //
16299             impImportBlockPending(hndBegBB);
16300
16301             if (HBtab->HasFilter())
16302             {
16303                 /* @VERIFICATION : Ideally the end of filter state should get
16304                    propagated to the catch handler; this is an incompleteness,
16305                    but not a security/compliance issue, since the only
16306                    interesting state is the 'thisInit' state.
16307                    */
16308
16309                 verCurrentState.esStackDepth = 0;
16310
16311                 BasicBlock* filterBB = HBtab->ebdFilter;
16312
16313                 // push the catch arg on the stack, spill to a temp if necessary
16314                 // Note: can update HBtab->ebdFilter!
16315                 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
16316                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
16317
16318                 impImportBlockPending(filterBB);
16319             }
16320         }
16321         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
16322         {
16323             /* Recursively process the handler block */
16324
16325             verCurrentState.esStackDepth = 0;
16326
16327             // Queue up the fault handler for importing
16328             //
16329             impImportBlockPending(HBtab->ebdHndBeg);
16330         }
16331
16332         // Now process our enclosing try index (if any)
16333         //
16334         tryIndex = HBtab->ebdEnclosingTryIndex;
16335         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
16336         {
16337             HBtab = nullptr;
16338         }
16339         else
16340         {
16341             HBtab = ehGetDsc(tryIndex);
16342         }
16343     }
16344
16345     // Restore the stack contents
16346     impRestoreStackState(&blockState);
16347 }
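
// Illustrative example (hypothetical EH layout, assumptions noted): for nested regions
//   try { try { ... } catch { ... } } finally { ... }
// a block that starts the inner try causes the loop above to queue the inner catch handler for
// importing (pushing the caught object on its entry stack), then follow ebdEnclosingTryIndex to
// the outer region and queue its finally handler as well; the saved stack state is restored once
// the walk is done.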
16348
16349 //***************************************************************
16350 // Import the instructions for the given basic block.  Perform
16351 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
16352 // time, or whose verification pre-state is changed.
16353
16354 #ifdef _PREFAST_
16355 #pragma warning(push)
16356 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
16357 #endif
16358 void Compiler::impImportBlock(BasicBlock* block)
16359 {
16360     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
16361     // handle them specially. In particular, there is no IL to import for them, but we do need
16362     // to mark them as imported and put their successors on the pending import list.
16363     if (block->bbFlags & BBF_INTERNAL)
16364     {
16365         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
16366         block->bbFlags |= BBF_IMPORTED;
16367
16368         const unsigned numSuccs = block->NumSucc();
16369         for (unsigned i = 0; i < numSuccs; i++)
16370         {
16371             impImportBlockPending(block->GetSucc(i));
16372         }
16373
16374         return;
16375     }
16376
16377     bool markImport;
16378
16379     assert(block);
16380
16381     /* Make the block globally available */
16382
16383     compCurBB = block;
16384
16385 #ifdef DEBUG
16386     /* Initialize the debug variables */
16387     impCurOpcName = "unknown";
16388     impCurOpcOffs = block->bbCodeOffs;
16389 #endif
16390
16391     /* Set the current stack state to the merged result */
16392     verResetCurrentState(block, &verCurrentState);
16393
16394     /* Now walk the code and import the IL into GenTrees */
16395
16396     struct FilterVerificationExceptionsParam
16397     {
16398         Compiler*   pThis;
16399         BasicBlock* block;
16400     };
16401     FilterVerificationExceptionsParam param;
16402
16403     param.pThis = this;
16404     param.block = block;
16405
16406     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
16407     {
16408         /* @VERIFICATION : For now, the only state propagation from a try
16409            to its handler is the "thisInit" state (the stack is empty at the start of a try).
16410            In general, for state that we track in verification, we need to
16411            model the possibility that an exception might happen at any IL
16412            instruction, so we really need to merge all states that obtain
16413            between IL instructions in a try block into the start states of
16414            all handlers.
16415
16416            However we do not allow the 'this' pointer to be uninitialized when
16417            entering most kinds of try regions (only try/fault regions are allowed to have
16418            an uninitialized this pointer on entry to the try)
16419
16420            Fortunately, the stack is thrown away when an exception
16421            leads to a handler, so we don't have to worry about that.
16422            We DO, however, have to worry about the "thisInit" state.
16423            But only for the try/fault case.
16424
16425            The only allowed transition is from TIS_Uninit to TIS_Init.
16426
16427            So for a try/fault region for the fault handler block
16428            we will merge the start state of the try begin
16429            and the post-state of each block that is part of this try region
16430         */
16431
16432         // merge the start state of the try begin
16433         //
16434         if (pParam->block->bbFlags & BBF_TRY_BEG)
16435         {
16436             pParam->pThis->impVerifyEHBlock(pParam->block, true);
16437         }
16438
16439         pParam->pThis->impImportBlockCode(pParam->block);
16440
16441         // As discussed above:
16442         // merge the post-state of each block that is part of this try region
16443         //
16444         if (pParam->block->hasTryIndex())
16445         {
16446             pParam->pThis->impVerifyEHBlock(pParam->block, false);
16447         }
16448     }
16449     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
16450     {
16451         verHandleVerificationFailure(block DEBUGARG(false));
16452     }
16453     PAL_ENDTRY
16454
16455     if (compDonotInline())
16456     {
16457         return;
16458     }
16459
16460     assert(!compDonotInline());
16461
16462     markImport = false;
16463
16464 SPILLSTACK:
16465
16466     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
16467     bool        reimportSpillClique = false;
16468     BasicBlock* tgtBlock            = nullptr;
16469
16470     /* If the stack is non-empty, we might have to spill its contents */
16471
16472     if (verCurrentState.esStackDepth != 0)
16473     {
16474         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
16475                                   // on the stack, its lifetime is hard to determine, so simply
16476                                   // don't reuse such temps.
16477
16478         GenTree* addStmt = nullptr;
16479
16480         /* Do the successors of 'block' have any other predecessors ?
16481            We do not want to do some of the optimizations related to multiRef
16482            if we can reimport blocks */
16483
16484         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
16485
16486         switch (block->bbJumpKind)
16487         {
16488             case BBJ_COND:
16489
16490                 /* Temporarily remove the 'jtrue' from the end of the tree list */
16491
16492                 assert(impTreeLast);
16493                 assert(impTreeLast->gtOper == GT_STMT);
16494                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
16495
16496                 addStmt     = impTreeLast;
16497                 impTreeLast = impTreeLast->gtPrev;
16498
16499                 /* Note if the next block has more than one ancestor */
16500
16501                 multRef |= block->bbNext->bbRefs;
16502
16503                 /* Does the next block have temps assigned? */
16504
16505                 baseTmp  = block->bbNext->bbStkTempsIn;
16506                 tgtBlock = block->bbNext;
16507
16508                 if (baseTmp != NO_BASE_TMP)
16509                 {
16510                     break;
16511                 }
16512
16513                 /* Try the target of the jump then */
16514
16515                 multRef |= block->bbJumpDest->bbRefs;
16516                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16517                 tgtBlock = block->bbJumpDest;
16518                 break;
16519
16520             case BBJ_ALWAYS:
16521                 multRef |= block->bbJumpDest->bbRefs;
16522                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16523                 tgtBlock = block->bbJumpDest;
16524                 break;
16525
16526             case BBJ_NONE:
16527                 multRef |= block->bbNext->bbRefs;
16528                 baseTmp  = block->bbNext->bbStkTempsIn;
16529                 tgtBlock = block->bbNext;
16530                 break;
16531
16532             case BBJ_SWITCH:
16533
16534                 BasicBlock** jmpTab;
16535                 unsigned     jmpCnt;
16536
16537                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
16538
16539                 assert(impTreeLast);
16540                 assert(impTreeLast->gtOper == GT_STMT);
16541                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
16542
16543                 addStmt     = impTreeLast;
16544                 impTreeLast = impTreeLast->gtPrev;
16545
16546                 jmpCnt = block->bbJumpSwt->bbsCount;
16547                 jmpTab = block->bbJumpSwt->bbsDstTab;
16548
16549                 do
16550                 {
16551                     tgtBlock = (*jmpTab);
16552
16553                     multRef |= tgtBlock->bbRefs;
16554
16555                     // Thanks to spill cliques, we should have assigned all or none
16556                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
16557                     baseTmp = tgtBlock->bbStkTempsIn;
16558                     if (multRef > 1)
16559                     {
16560                         break;
16561                     }
16562                 } while (++jmpTab, --jmpCnt);
16563
16564                 break;
16565
16566             case BBJ_CALLFINALLY:
16567             case BBJ_EHCATCHRET:
16568             case BBJ_RETURN:
16569             case BBJ_EHFINALLYRET:
16570             case BBJ_EHFILTERRET:
16571             case BBJ_THROW:
16572                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
16573                 break;
16574
16575             default:
16576                 noway_assert(!"Unexpected bbJumpKind");
16577                 break;
16578         }
16579
16580         assert(multRef >= 1);
16581
16582         /* Do we have a base temp number? */
16583
16584         bool newTemps = (baseTmp == NO_BASE_TMP);
16585
16586         if (newTemps)
16587         {
16588             /* Grab enough temps for the whole stack */
16589             baseTmp = impGetSpillTmpBase(block);
16590         }
16591
16592         /* Spill all stack entries into temps */
16593         unsigned level, tempNum;
16594
16595         JITDUMP("\nSpilling stack entries into temps\n");
16596         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
16597         {
16598             GenTree* tree = verCurrentState.esStack[level].val;
16599
16600             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
16601                the other. This should merge to a byref in unverifiable code.
16602                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
16603                successor would be imported assuming there was a TYP_I_IMPL on
16604                the stack. Thus the value would not get GC-tracked. Hence,
16605                change the temp to TYP_BYREF and reimport the successors.
16606                Note: We should only allow this in unverifiable code.
16607             */
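
            // Illustrative IL (hypothetical, assumptions noted): one predecessor ends with
            //   ldloca.s 0      // pushes a byref
            //   br merge
            // and the other with
            //   ldc.i4.0        // pushes a zero that merges with the byref in unverifiable code
            //   br merge
            // If the non-byref branch is imported first, the shared spill temp gets typed TYP_I_IMPL;
            // the code below retypes it to TYP_BYREF and reimports the successors so the value is
            // GC-tracked.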
16608             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
16609             {
16610                 lvaTable[tempNum].lvType = TYP_BYREF;
16611                 impReimportMarkSuccessors(block);
16612                 markImport = true;
16613             }
16614
16615 #ifdef _TARGET_64BIT_
16616             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
16617             {
16618                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
16619                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
16620                 {
16621                     // Merge the current state into the entry state of block;
16622                     // the call to verMergeEntryStates must have changed
16623                     // the entry state of the block by merging the int local var
16624                     // and the native-int stack entry.
16625                     bool changed = false;
16626                     if (verMergeEntryStates(tgtBlock, &changed))
16627                     {
16628                         impRetypeEntryStateTemps(tgtBlock);
16629                         impReimportBlockPending(tgtBlock);
16630                         assert(changed);
16631                     }
16632                     else
16633                     {
16634                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
16635                         break;
16636                     }
16637                 }
16638
16639                 // Some other block in the spill clique set this to "int", but now we have "native int".
16640                 // Change the type and go back to re-import any blocks that used the wrong type.
16641                 lvaTable[tempNum].lvType = TYP_I_IMPL;
16642                 reimportSpillClique      = true;
16643             }
16644             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
16645             {
16646                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
16647                 // Insert a sign-extension to "native int" so we match the clique.
16648                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
16649             }
16650
16651             // Consider the case where one branch left a 'byref' on the stack and the other leaves
16652             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
16653             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
16654             // behavior instead of asserting and then generating bad code (where we save/restore the
16655             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
16656             // imported already, we need to change the type of the local and reimport the spill clique.
16657             // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
16658             // the 'byref' size.
16659             if (!tiVerificationNeeded)
16660             {
16661                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
16662                 {
16663                     // Some other block in the spill clique set this to "int", but now we have "byref".
16664                     // Change the type and go back to re-import any blocks that used the wrong type.
16665                     lvaTable[tempNum].lvType = TYP_BYREF;
16666                     reimportSpillClique      = true;
16667                 }
16668                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
16669                 {
16670                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
16671                     // Insert a sign-extension to "native int" so we match the clique size.
16672                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
16673                 }
16674             }
16675 #endif // _TARGET_64BIT_
16676
16677 #if FEATURE_X87_DOUBLES
16678             // X87 stack doesn't differentiate between float/double
16679             // so promoting is no big deal.
16680             // For everybody else, keep it as float until we have a collision and then promote,
16681             // just like x64's TYP_INT <-> TYP_I_IMPL.
16682
16683             if (multRef > 1 && tree->gtType == TYP_FLOAT)
16684             {
16685                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16686             }
16687
16688 #else // !FEATURE_X87_DOUBLES
16689
16690             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
16691             {
16692                 // Some other block in the spill clique set this to "float", but now we have "double".
16693                 // Change the type and go back to re-import any blocks that used the wrong type.
16694                 lvaTable[tempNum].lvType = TYP_DOUBLE;
16695                 reimportSpillClique      = true;
16696             }
16697             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
16698             {
16699                 // Spill clique has decided this should be "double", but this block only pushes a "float".
16700                 // Insert a cast to "double" so we match the clique.
16701                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
16702             }
16703
16704 #endif // FEATURE_X87_DOUBLES
16705
16706             /* If addStmt has a reference to tempNum (can only happen if we
16707                are spilling to the temps already used by a previous block),
16708                we need to spill addStmt */
16709
16710             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
16711             {
16712                 GenTree* addTree = addStmt->gtStmt.gtStmtExpr;
16713
16714                 if (addTree->gtOper == GT_JTRUE)
16715                 {
16716                     GenTree* relOp = addTree->gtOp.gtOp1;
16717                     assert(relOp->OperIsCompare());
16718
16719                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
16720
16721                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
16722                     {
16723                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
16724                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
16725                         type              = genActualType(lvaTable[temp].TypeGet());
16726                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
16727                     }
16728
16729                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
16730                     {
16731                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
16732                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
16733                         type              = genActualType(lvaTable[temp].TypeGet());
16734                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
16735                     }
16736                 }
16737                 else
16738                 {
16739                     assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
16740
16741                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
16742                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
16743                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
16744                 }
16745             }
16746
16747             /* Spill the stack entry, and replace with the temp */
16748
16749             if (!impSpillStackEntry(level, tempNum
16750 #ifdef DEBUG
16751                                     ,
16752                                     true, "Spill Stack Entry"
16753 #endif
16754                                     ))
16755             {
16756                 if (markImport)
16757                 {
16758                     BADCODE("bad stack state");
16759                 }
16760
16761                 // Oops. Something went wrong when spilling. Bad code.
16762                 verHandleVerificationFailure(block DEBUGARG(true));
16763
16764                 goto SPILLSTACK;
16765             }
16766         }
16767
16768         /* Put back the 'jtrue'/'switch' if we removed it earlier */
16769
16770         if (addStmt)
16771         {
16772             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16773         }
16774     }
16775
16776     // Some of the append/spill logic works on compCurBB
16777
16778     assert(compCurBB == block);
16779
16780     /* Save the tree list in the block */
16781     impEndTreeList(block);
16782
16783     // impEndTreeList sets BBF_IMPORTED on the block
16784     // We do *NOT* want to set it later than this because
16785     // impReimportSpillClique might clear it if this block is both a
16786     // predecessor and successor in the current spill clique
16787     assert(block->bbFlags & BBF_IMPORTED);
16788
16789     // If we had a int/native int, or float/double collision, we need to re-import
16790     if (reimportSpillClique)
16791     {
16792         // This will re-import all the successors of block (as well as each of their predecessors)
16793         impReimportSpillClique(block);
16794
16795         // For blocks that haven't been imported yet, we still need to mark them as pending import.
16796         const unsigned numSuccs = block->NumSucc();
16797         for (unsigned i = 0; i < numSuccs; i++)
16798         {
16799             BasicBlock* succ = block->GetSucc(i);
16800             if ((succ->bbFlags & BBF_IMPORTED) == 0)
16801             {
16802                 impImportBlockPending(succ);
16803             }
16804         }
16805     }
16806     else // the normal case
16807     {
16808         // otherwise just import the successors of block
16809
16810         /* Does this block jump to any other blocks? */
16811         const unsigned numSuccs = block->NumSucc();
16812         for (unsigned i = 0; i < numSuccs; i++)
16813         {
16814             impImportBlockPending(block->GetSucc(i));
16815         }
16816     }
16817 }
16818 #ifdef _PREFAST_
16819 #pragma warning(pop)
16820 #endif
16821
16822 /*****************************************************************************/
16823 //
16824 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16825 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16826 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16827 // (its "pre-state").
16828
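// In outline: a block is (re)added to impPendingList when it has not been imported yet, or when
// merging the current state into its pre-state changes that pre-state (or fails verification).
// Pending descriptors are recycled through impPendingFree, and impPendingBlockMembers tracks
// membership so a block is never queued twice.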
16829 void Compiler::impImportBlockPending(BasicBlock* block)
16830 {
16831 #ifdef DEBUG
16832     if (verbose)
16833     {
16834         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16835     }
16836 #endif
16837
16838     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16839     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16840     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16841
16842     // If the block has not been imported, add to pending set.
16843     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16844
16845     // Initialize bbEntryState just the first time we try to add this block to the pending list
16846     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set
16847     // We use NULL to indicate the 'common' state to avoid memory allocation
16848     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16849         (impGetPendingBlockMember(block) == 0))
16850     {
16851         verInitBBEntryState(block, &verCurrentState);
16852         assert(block->bbStkDepth == 0);
16853         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16854         assert(addToPending);
16855         assert(impGetPendingBlockMember(block) == 0);
16856     }
16857     else
16858     {
16859         // The stack should have the same height on entry to the block from all its predecessors.
16860         if (block->bbStkDepth != verCurrentState.esStackDepth)
16861         {
16862 #ifdef DEBUG
16863             char buffer[400];
16864             sprintf_s(buffer, sizeof(buffer),
16865                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16866                       "Previous depth was %d, current depth is %d",
16867                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16868                       verCurrentState.esStackDepth);
16869             buffer[400 - 1] = 0;
16870             NO_WAY(buffer);
16871 #else
16872             NO_WAY("Block entered with different stack depths");
16873 #endif
16874         }
16875
16876         // Additionally, if we need to verify, merge the verification state.
16877         if (tiVerificationNeeded)
16878         {
16879             // Merge the current state into the entry state of block; if this does not change the entry state
16880             // by merging, do not add the block to the pending-list.
16881             bool changed = false;
16882             if (!verMergeEntryStates(block, &changed))
16883             {
16884                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16885                 addToPending = true; // We will pop it off, and check the flag set above.
16886             }
16887             else if (changed)
16888             {
16889                 addToPending = true;
16890
16891                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16892             }
16893         }
16894
16895         if (!addToPending)
16896         {
16897             return;
16898         }
16899
16900         if (block->bbStkDepth > 0)
16901         {
16902             // We need to fix the types of any spill temps that might have changed:
16903             //   int->native int, float->double, int->byref, etc.
16904             impRetypeEntryStateTemps(block);
16905         }
16906
16907         // OK, we must add to the pending list, if it's not already in it.
16908         if (impGetPendingBlockMember(block) != 0)
16909         {
16910             return;
16911         }
16912     }
16913
16914     // Get an entry to add to the pending list
16915
16916     PendingDsc* dsc;
16917
16918     if (impPendingFree)
16919     {
16920         // We can reuse one of the freed up dscs.
16921         dsc            = impPendingFree;
16922         impPendingFree = dsc->pdNext;
16923     }
16924     else
16925     {
16926         // We have to create a new dsc
16927         dsc = new (this, CMK_Unknown) PendingDsc;
16928     }
16929
16930     dsc->pdBB                 = block;
16931     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16932     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16933
16934     // Save the stack trees for later
16935
16936     if (verCurrentState.esStackDepth)
16937     {
16938         impSaveStackState(&dsc->pdSavedStack, false);
16939     }
16940
16941     // Add the entry to the pending list
16942
16943     dsc->pdNext    = impPendingList;
16944     impPendingList = dsc;
16945     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16946
16947     // Various assertions require us to now consider the block as not imported (at least for
16948     // the final time...)
16949     block->bbFlags &= ~BBF_IMPORTED;
16950
16951 #ifdef DEBUG
16952     if (verbose && 0)
16953     {
16954         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16955     }
16956 #endif
16957 }
16958
16959 /*****************************************************************************/
16960 //
16961 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16962 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16963 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16964
16965 void Compiler::impReimportBlockPending(BasicBlock* block)
16966 {
16967     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16968
16969     assert(block->bbFlags & BBF_IMPORTED);
16970
16971     // OK, we must add to the pending list, if it's not already in it.
16972     if (impGetPendingBlockMember(block) != 0)
16973     {
16974         return;
16975     }
16976
16977     // Get an entry to add to the pending list
16978
16979     PendingDsc* dsc;
16980
16981     if (impPendingFree)
16982     {
16983         // We can reuse one of the freed up dscs.
16984         dsc            = impPendingFree;
16985         impPendingFree = dsc->pdNext;
16986     }
16987     else
16988     {
16989         // We have to create a new dsc
16990         dsc = new (this, CMK_ImpStack) PendingDsc;
16991     }
16992
16993     dsc->pdBB = block;
16994
16995     if (block->bbEntryState)
16996     {
16997         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16998         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16999         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
17000     }
17001     else
17002     {
17003         dsc->pdThisPtrInit        = TIS_Bottom;
17004         dsc->pdSavedStack.ssDepth = 0;
17005         dsc->pdSavedStack.ssTrees = nullptr;
17006     }
17007
17008     // Add the entry to the pending list
17009
17010     dsc->pdNext    = impPendingList;
17011     impPendingList = dsc;
17012     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17013
17014     // Various assertions require us to now consider the block as not imported (at least for
17015     // the final time...)
17016     block->bbFlags &= ~BBF_IMPORTED;
17017
17018 #ifdef DEBUG
17019     if (verbose && 0)
17020     {
17021         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
17022     }
17023 #endif
17024 }
17025
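// BlockListNode allocation: reuse a node from the per-compiler free list when one is available;
// otherwise carve a new node out of the compiler's arena. FreeBlockListNode below chains nodes
// back onto that free list so the spill-clique walk does not allocate on every visit.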
17026 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
17027 {
17028     if (comp->impBlockListNodeFreeList == nullptr)
17029     {
17030         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
17031     }
17032     else
17033     {
17034         BlockListNode* res             = comp->impBlockListNodeFreeList;
17035         comp->impBlockListNodeFreeList = res->m_next;
17036         return res;
17037     }
17038 }
17039
17040 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
17041 {
17042     node->m_next             = impBlockListNodeFreeList;
17043     impBlockListNodeFreeList = node;
17044 }
17045
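// Walk the spill clique containing "block", invoking "callback" once for every member.
// A spill clique is the set of blocks that must agree on the spill temps used for values that
// are live on the stack across block boundaries. Starting from "block" as a predecessor, the
// walk alternates between two worklists: the successors of every known predecessor and the
// (cheap) predecessors of every known successor, until no new members are discovered.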
17046 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
17047 {
17048     bool toDo = true;
17049
17050     noway_assert(!fgComputePredsDone);
17051     if (!fgCheapPredsValid)
17052     {
17053         fgComputeCheapPreds();
17054     }
17055
17056     BlockListNode* succCliqueToDo = nullptr;
17057     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
17058     while (toDo)
17059     {
17060         toDo = false;
17061         // Look at the successors of every member of the predecessor to-do list.
17062         while (predCliqueToDo != nullptr)
17063         {
17064             BlockListNode* node = predCliqueToDo;
17065             predCliqueToDo      = node->m_next;
17066             BasicBlock* blk     = node->m_blk;
17067             FreeBlockListNode(node);
17068
17069             const unsigned numSuccs = blk->NumSucc();
17070             for (unsigned succNum = 0; succNum < numSuccs; succNum++)
17071             {
17072                 BasicBlock* succ = blk->GetSucc(succNum);
17073                 // If it's not already in the clique, add it, and also add it
17074                 // as a member of the successor "toDo" set.
17075                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
17076                 {
17077                     callback->Visit(SpillCliqueSucc, succ);
17078                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
17079                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
17080                     toDo           = true;
17081                 }
17082             }
17083         }
17084         // Look at the predecessors of every member of the successor to-do list.
17085         while (succCliqueToDo != nullptr)
17086         {
17087             BlockListNode* node = succCliqueToDo;
17088             succCliqueToDo      = node->m_next;
17089             BasicBlock* blk     = node->m_blk;
17090             FreeBlockListNode(node);
17091
17092             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
17093             {
17094                 BasicBlock* predBlock = pred->block;
17095                 // If it's not already in the clique, add it, and also add it
17096                 // as a member of the predecessor "toDo" set.
17097                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
17098                 {
17099                     callback->Visit(SpillCliquePred, predBlock);
17100                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
17101                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
17102                     toDo           = true;
17103                 }
17104             }
17105         }
17106     }
17107
17108     // If this fails, it means we didn't walk the spill clique properly and somehow managed
17109     // to miss walking back to include the predecessor we started from.
17110     // The most likely cause: missing or out-of-date bbPreds.
17111     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
17112 }
17113
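// Record the chosen base temp on each clique member: successors read the spilled values through
// bbStkTempsIn, while predecessors write them through bbStkTempsOut.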
17114 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17115 {
17116     if (predOrSucc == SpillCliqueSucc)
17117     {
17118         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
17119         blk->bbStkTempsIn = m_baseTmp;
17120     }
17121     else
17122     {
17123         assert(predOrSucc == SpillCliquePred);
17124         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
17125         blk->bbStkTempsOut = m_baseTmp;
17126     }
17127 }
17128
17129 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17130 {
17131     // For Preds we could be a little smarter and just find the existing store
17132     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
17133     // just re-import the whole block (just like we do for successors)
17134
17135     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
17136     {
17137         // If we haven't imported this block and we're not going to (because it isn't on
17138         // the pending list) then just ignore it for now.
17139
17140         // This block has either never been imported (EntryState == NULL) or it failed
17141         // verification. Neither state requires us to force it to be imported now.
17142         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
17143         return;
17144     }
17145
17146     // For successors we have a valid verCurrentState, so just mark them for reimport
17147     // the 'normal' way
17148     // Unlike predecessors, we *DO* need to reimport the current block because the
17149     // initial import had the wrong entry state types.
17150     // Similarly, blocks that are currently on the pending list still need to call
17151     // impImportBlockPending to fix up their entry state.
17152     if (predOrSucc == SpillCliqueSucc)
17153     {
17154         m_pComp->impReimportMarkBlock(blk);
17155
17156         // Set the current stack state to that of the blk->bbEntryState
17157         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
17158         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
17159
17160         m_pComp->impImportBlockPending(blk);
17161     }
17162     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
17163     {
17164         // As described above, we are only visiting predecessors so they can
17165         // add the appropriate casts. Since we have already done that for the current
17166         // block, it does not need to be reimported.
17167         // Nor do we need to reimport blocks that are still pending, but not yet
17168         // imported.
17169         //
17170         // For predecessors, we have no state to seed the EntryState, so we just have
17171         // to assume the existing one is correct.
17172         // If the block is also a successor, it will get the EntryState properly
17173         // updated when it is visited as a successor in the above "if" block.
17174         assert(predOrSucc == SpillCliquePred);
17175         m_pComp->impReimportBlockPending(blk);
17176     }
17177 }
17178
17179 // Re-type the incoming lclVar nodes to match the varDsc.
17180 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
17181 {
17182     if (blk->bbEntryState != nullptr)
17183     {
17184         EntryState* es = blk->bbEntryState;
17185         for (unsigned level = 0; level < es->esStackDepth; level++)
17186         {
17187             GenTree* tree = es->esStack[level].val;
17188             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
17189             {
17190                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
17191                 noway_assert(lclNum < lvaCount);
17192                 LclVarDsc* varDsc              = lvaTable + lclNum;
17193                 es->esStack[level].val->gtType = varDsc->TypeGet();
17194             }
17195         }
17196     }
17197 }
17198
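// Return the base temp number to use for spilling this block's outgoing stack. If the block is
// already a member of a spill clique (bbStkTempsOut is set), reuse that base; otherwise grab
// enough new temps for the current stack depth and propagate the base to every member of the
// clique via SetSpillTempsBase.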
17199 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
17200 {
17201     if (block->bbStkTempsOut != NO_BASE_TMP)
17202     {
17203         return block->bbStkTempsOut;
17204     }
17205
17206 #ifdef DEBUG
17207     if (verbose)
17208     {
17209         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
17210     }
17211 #endif // DEBUG
17212
17213     // Otherwise, choose one, and propagate to all members of the spill clique.
17214     // Grab enough temps for the whole stack.
17215     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
17216     SetSpillTempsBase callback(baseTmp);
17217
17218     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
17219     // to one spill clique, and similarly can only be the successor to one spill clique
17220     impWalkSpillCliqueFromPred(block, &callback);
17221
17222     return baseTmp;
17223 }
17224
17225 void Compiler::impReimportSpillClique(BasicBlock* block)
17226 {
17227 #ifdef DEBUG
17228     if (verbose)
17229     {
17230         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
17231     }
17232 #endif // DEBUG
17233
17234     // If we get here, it is because this block is already part of a spill clique
17235     // and one predecessor had an outgoing live stack slot of type int, and this
17236     // block has an outgoing live stack slot of type native int.
17237     // We need to reset these before traversal because they have already been set
17238     // by the previous walk to determine all the members of the spill clique.
17239     impInlineRoot()->impSpillCliquePredMembers.Reset();
17240     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
17241
17242     ReimportSpillClique callback(this);
17243
17244     impWalkSpillCliqueFromPred(block, &callback);
17245 }
17246
17247 // Set the pre-state of "block" (which should not have a pre-state allocated) to
17248 // a copy of "srcState", cloning tree pointers as required.
17249 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
17250 {
17251     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
17252     {
17253         block->bbEntryState = nullptr;
17254         return;
17255     }
17256
17257     block->bbEntryState = (EntryState*)compGetMem(sizeof(EntryState));
17258
17259     // block->bbEntryState.esRefcount = 1;
17260
17261     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
17262     block->bbEntryState->thisInitialized = TIS_Bottom;
17263
17264     if (srcState->esStackDepth > 0)
17265     {
17266         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
17267         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
17268
17269         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
17270         for (unsigned level = 0; level < srcState->esStackDepth; level++)
17271         {
17272             GenTree* tree                           = srcState->esStack[level].val;
17273             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
17274         }
17275     }
17276
17277     if (verTrackObjCtorInitState)
17278     {
17279         verSetThisInit(block, srcState->thisInitialized);
17280     }
17281
17282     return;
17283 }
17284
17285 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
17286 {
17287     assert(tis != TIS_Bottom); // Precondition.
17288     if (block->bbEntryState == nullptr)
17289     {
17290         block->bbEntryState = new (this, CMK_Unknown) EntryState();
17291     }
17292
17293     block->bbEntryState->thisInitialized = tis;
17294 }
17295
17296 /*
17297  * Resets the current state to the state at the start of the basic block
17298  */
17299 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
17300 {
17301
17302     if (block->bbEntryState == nullptr)
17303     {
17304         destState->esStackDepth    = 0;
17305         destState->thisInitialized = TIS_Bottom;
17306         return;
17307     }
17308
17309     destState->esStackDepth = block->bbEntryState->esStackDepth;
17310
17311     if (destState->esStackDepth > 0)
17312     {
17313         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
17314
17315         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
17316     }
17317
17318     destState->thisInitialized = block->bbThisOnEntry();
17319
17320     return;
17321 }
17322
17323 ThisInitState BasicBlock::bbThisOnEntry()
17324 {
17325     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
17326 }
17327
17328 unsigned BasicBlock::bbStackDepthOnEntry()
17329 {
17330     return (bbEntryState ? bbEntryState->esStackDepth : 0);
17331 }
17332
17333 void BasicBlock::bbSetStack(void* stackBuffer)
17334 {
17335     assert(bbEntryState);
17336     assert(stackBuffer);
17337     bbEntryState->esStack = (StackEntry*)stackBuffer;
17338 }
17339
17340 StackEntry* BasicBlock::bbStackOnEntry()
17341 {
17342     assert(bbEntryState);
17343     return bbEntryState->esStack;
17344 }
17345
17346 void Compiler::verInitCurrentState()
17347 {
17348     verTrackObjCtorInitState        = FALSE;
17349     verCurrentState.thisInitialized = TIS_Bottom;
17350
17351     if (tiVerificationNeeded)
17352     {
17353         // Track this ptr initialization
17354         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
17355         {
17356             verTrackObjCtorInitState        = TRUE;
17357             verCurrentState.thisInitialized = TIS_Uninit;
17358         }
17359     }
17360
17361     // initialize stack info
17362
17363     verCurrentState.esStackDepth = 0;
17364     assert(verCurrentState.esStack != nullptr);
17365
17366     // copy current state to entry state of first BB
17367     verInitBBEntryState(fgFirstBB, &verCurrentState);
17368 }
17369
17370 Compiler* Compiler::impInlineRoot()
17371 {
17372     if (impInlineInfo == nullptr)
17373     {
17374         return this;
17375     }
17376     else
17377     {
17378         return impInlineInfo->InlineRoot;
17379     }
17380 }
17381
17382 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
17383 {
17384     if (predOrSucc == SpillCliquePred)
17385     {
17386         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
17387     }
17388     else
17389     {
17390         assert(predOrSucc == SpillCliqueSucc);
17391         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
17392     }
17393 }
17394
17395 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
17396 {
17397     if (predOrSucc == SpillCliquePred)
17398     {
17399         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
17400     }
17401     else
17402     {
17403         assert(predOrSucc == SpillCliqueSucc);
17404         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
17405     }
17406 }
17407
17408 /*****************************************************************************
17409  *
17410  *  Convert the instrs ("import") into our internal format (trees). The
17411  *  basic flowgraph has already been constructed and is passed in.
17412  */
17413
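// In outline: set up the evaluation stack and the initial verification state, mark any leading
// BBF_INTERNAL blocks as imported, then push the first real block onto the pending worklist.
// Blocks are popped from impPendingList, their saved entry stack state is restored, and they are
// imported one at a time (queueing their successors) until the worklist is empty.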
17414 void Compiler::impImport(BasicBlock* method)
17415 {
17416 #ifdef DEBUG
17417     if (verbose)
17418     {
17419         printf("*************** In impImport() for %s\n", info.compFullName);
17420     }
17421 #endif
17422
17423     /* Allocate the stack contents */
17424
17425     if (info.compMaxStack <= _countof(impSmallStack))
17426     {
17427         /* Use local variable, don't waste time allocating on the heap */
17428
17429         impStkSize              = _countof(impSmallStack);
17430         verCurrentState.esStack = impSmallStack;
17431     }
17432     else
17433     {
17434         impStkSize              = info.compMaxStack;
17435         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17436     }
17437
17438     // initialize the entry state at start of method
17439     verInitCurrentState();
17440
17441     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
17442     Compiler* inlineRoot = impInlineRoot();
17443     if (this == inlineRoot) // These are only used on the root of the inlining tree.
17444     {
17445         // We have initialized these previously, but to size 0.  Make them larger.
17446         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
17447         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
17448         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
17449     }
17450     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
17451     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
17452     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
17453     impBlockListNodeFreeList = nullptr;
17454
17455 #ifdef DEBUG
17456     impLastILoffsStmt   = nullptr;
17457     impNestedStackSpill = false;
17458 #endif
17459     impBoxTemp = BAD_VAR_NUM;
17460
17461     impPendingList = impPendingFree = nullptr;
17462
17463     /* Add the entry-point to the worker-list */
17464
17465     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
17466     // from EH normalization.
17467     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
17468     // out.
17469     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
17470     {
17471         // Treat these as imported.
17472         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
17473         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
17474         method->bbFlags |= BBF_IMPORTED;
17475     }
17476
17477     impImportBlockPending(method);
17478
17479     /* Import blocks in the worker-list until there are no more */
17480
17481     while (impPendingList)
17482     {
17483         /* Remove the entry at the front of the list */
17484
17485         PendingDsc* dsc = impPendingList;
17486         impPendingList  = impPendingList->pdNext;
17487         impSetPendingBlockMember(dsc->pdBB, 0);
17488
17489         /* Restore the stack state */
17490
17491         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
17492         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
17493         if (verCurrentState.esStackDepth)
17494         {
17495             impRestoreStackState(&dsc->pdSavedStack);
17496         }
17497
17498         /* Add the entry to the free list for reuse */
17499
17500         dsc->pdNext    = impPendingFree;
17501         impPendingFree = dsc;
17502
17503         /* Now import the block */
17504
17505         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
17506         {
17507
17508 #ifdef _TARGET_64BIT_
17509             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
17510             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
17511             // method for further explanation on why we raise this exception instead of making the jitted
17512             // code throw the verification exception during execution.
17513             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
17514             {
17515                 BADCODE("Basic block marked as not verifiable");
17516             }
17517             else
17518 #endif // _TARGET_64BIT_
17519             {
17520                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
17521                 impEndTreeList(dsc->pdBB);
17522             }
17523         }
17524         else
17525         {
17526             impImportBlock(dsc->pdBB);
17527
17528             if (compDonotInline())
17529             {
17530                 return;
17531             }
17532             if (compIsForImportOnly() && !tiVerificationNeeded)
17533             {
17534                 return;
17535             }
17536         }
17537     }
17538
17539 #ifdef DEBUG
17540     if (verbose && info.compXcptnsCount)
17541     {
17542         printf("\nAfter impImport() added block for try,catch,finally");
17543         fgDispBasicBlocks();
17544         printf("\n");
17545     }
17546
17547     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
17548     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
17549     {
17550         block->bbFlags &= ~BBF_VISITED;
17551     }
17552 #endif
17553
17554     assert(!compIsForInlining() || !tiVerificationNeeded);
17555 }
17556
17557 // Checks if a typeinfo (usually stored in the type stack) is a struct.
17558 // The invariant here is that if it's not a ref or a method and has a class handle
17559 // it's a valuetype
17560 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
17561 {
17562     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
17563     {
17564         return true;
17565     }
17566     else
17567     {
17568         return false;
17569     }
17570 }
17571
17572 /*****************************************************************************
17573  *  Check to see if the tree is the address of a local or
17574     the address of a field in a local.
17575
17576     *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
17577
17578  */
17579
17580 BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
17581 {
17582     if (tree->gtOper != GT_ADDR)
17583     {
17584         return FALSE;
17585     }
17586
17587     GenTree* op = tree->gtOp.gtOp1;
17588     while (op->gtOper == GT_FIELD)
17589     {
17590         op = op->gtField.gtFldObj;
17591         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
17592         {
17593             op = op->gtOp.gtOp1;
17594         }
17595         else
17596         {
17597             return false;
17598         }
17599     }
17600
17601     if (op->gtOper == GT_LCL_VAR)
17602     {
17603         *lclVarTreeOut = op;
17604         return TRUE;
17605     }
17606     else
17607     {
17608         return FALSE;
17609     }
17610 }
17611
17612 //------------------------------------------------------------------------
17613 // impMakeDiscretionaryInlineObservations: make observations that help
17614 // determine the profitability of a discretionary inline
17615 //
17616 // Arguments:
17617 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
17618 //    inlineResult -- InlineResult accumulating information about this inline
17619 //
17620 // Notes:
17621 //    If inlining or prejitting the root, this method also makes
17622 //    various observations about the method that factor into inline
17623 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
17624
17625 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
17626 {
17627     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
17628            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
17629            );
17630
17631     // If we're really inlining, we should just have one result in play.
17632     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
17633
17634     // If this is a "forceinline" method, the JIT probably shouldn't have gone
17635     // to the trouble of estimating the native code size. Even if it did, it
17636     // shouldn't be relying on the result of this method.
17637     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
17638
17639     // Note if the caller contains NEWOBJ or NEWARR.
17640     Compiler* rootCompiler = impInlineRoot();
17641
17642     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
17643     {
17644         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
17645     }
17646
17647     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
17648     {
17649         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
17650     }
17651
17652     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
17653     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
17654
17655     if (isSpecialMethod)
17656     {
17657         if (calleeIsStatic)
17658         {
17659             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
17660         }
17661         else
17662         {
17663             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
17664         }
17665     }
17666     else if (!calleeIsStatic)
17667     {
17668         // Callee is an instance method.
17669         //
17670         // Check if the callee has the same 'this' as the root.
17671         if (pInlineInfo != nullptr)
17672         {
17673             GenTree* thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
17674             assert(thisArg);
17675             bool isSameThis = impIsThis(thisArg);
17676             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
17677         }
17678     }
17679
17680     // Note if the callee's class is a promotable struct
17681     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
17682     {
17683         lvaStructPromotionInfo structPromotionInfo;
17684         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
17685         if (structPromotionInfo.canPromote)
17686         {
17687             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
17688         }
17689     }
17690
17691 #ifdef FEATURE_SIMD
17692
17693     // Note if this method has SIMD args or a SIMD return value
17694     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
17695     {
17696         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
17697     }
17698
17699 #endif // FEATURE_SIMD
17700
17701     // Roughly classify callsite frequency.
17702     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
17703
17704     // If this is a prejit root, or a maximally hot block...
17705     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
17706     {
17707         frequency = InlineCallsiteFrequency::HOT;
17708     }
17709     // No training data.  Look for loop-like things.
17710     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
17711     // However, give it to things nearby.
17712     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
17713              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
17714     {
17715         frequency = InlineCallsiteFrequency::LOOP;
17716     }
17717     else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
17718     {
17719         frequency = InlineCallsiteFrequency::WARM;
17720     }
17721     // Now modify the multiplier based on where we're called from.
17722     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
17723     {
17724         frequency = InlineCallsiteFrequency::RARE;
17725     }
17726     else
17727     {
17728         frequency = InlineCallsiteFrequency::BORING;
17729     }
17730
17731     // Also capture the block weight of the call site.  In the prejit
17732     // root case, assume there's some hot call site for this method.
17733     unsigned weight = 0;
17734
17735     if (pInlineInfo != nullptr)
17736     {
17737         weight = pInlineInfo->iciBlock->bbWeight;
17738     }
17739     else
17740     {
17741         weight = BB_MAX_WEIGHT;
17742     }
17743
17744     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
17745     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
17746 }
17747
17748 /*****************************************************************************
17749  This method makes the STATIC inlining decision based on the IL code.
17750  It should not make any inlining decision based on the context.
17751  If forceInline is true, then the inlining decision should not depend on
17752  performance heuristics (code size, etc.).
17753  */
17754
17755 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
17756                               CORINFO_METHOD_INFO*  methInfo,
17757                               bool                  forceInline,
17758                               InlineResult*         inlineResult)
17759 {
17760     unsigned codeSize = methInfo->ILCodeSize;
17761
17762     // We shouldn't have made up our minds yet...
17763     assert(!inlineResult->IsDecided());
17764
17765     if (methInfo->EHcount)
17766     {
17767         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
17768         return;
17769     }
17770
17771     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
17772     {
17773         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17774         return;
17775     }
17776
17777     // For now we don't inline varargs (import code can't handle it)
17778
17779     if (methInfo->args.isVarArg())
17780     {
17781         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17782         return;
17783     }
17784
17785     // Reject if it has too many locals.
17786     // This is currently an implementation limit due to fixed-size arrays in the
17787     // inline info, rather than a performance heuristic.
17788
17789     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17790
17791     if (methInfo->locals.numArgs > MAX_INL_LCLS)
17792     {
17793         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17794         return;
17795     }
17796
17797     // Make sure there aren't too many arguments.
17798     // This is currently an implementation limit due to fixed-size arrays in the
17799     // inline info, rather than a performance heuristic.
17800
17801     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17802
17803     if (methInfo->args.numArgs > MAX_INL_ARGS)
17804     {
17805         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17806         return;
17807     }
17808
17809     // Note force inline state
17810
17811     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17812
17813     // Note IL code size
17814
17815     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17816
17817     if (inlineResult->IsFailure())
17818     {
17819         return;
17820     }
17821
17822     // Make sure maxstack is not too big
17823
17824     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17825
17826     if (inlineResult->IsFailure())
17827     {
17828         return;
17829     }
17830 }
17831
17832 /*****************************************************************************
17833  */
17834
17835 void Compiler::impCheckCanInline(GenTree*               call,
17836                                  CORINFO_METHOD_HANDLE  fncHandle,
17837                                  unsigned               methAttr,
17838                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17839                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17840                                  InlineResult*          inlineResult)
17841 {
17842     // Either EE or JIT might throw exceptions below.
17843     // If that happens, just don't inline the method.
17844
17845     struct Param
17846     {
17847         Compiler*              pThis;
17848         GenTree*               call;
17849         CORINFO_METHOD_HANDLE  fncHandle;
17850         unsigned               methAttr;
17851         CORINFO_CONTEXT_HANDLE exactContextHnd;
17852         InlineResult*          result;
17853         InlineCandidateInfo**  ppInlineCandidateInfo;
17854     } param;
17855     memset(&param, 0, sizeof(param));
17856
17857     param.pThis                 = this;
17858     param.call                  = call;
17859     param.fncHandle             = fncHandle;
17860     param.methAttr              = methAttr;
17861     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17862     param.result                = inlineResult;
17863     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17864
17865     bool success = eeRunWithErrorTrap<Param>(
17866         [](Param* pParam) {
17867             DWORD                  dwRestrictions = 0;
17868             CorInfoInitClassResult initClassResult;
17869
17870 #ifdef DEBUG
17871             const char* methodName;
17872             const char* className;
17873             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17874
17875             if (JitConfig.JitNoInline())
17876             {
17877                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17878                 goto _exit;
17879             }
17880 #endif
17881
17882             /* Try to get the code address/size for the method */
17883
17884             CORINFO_METHOD_INFO methInfo;
17885             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17886             {
17887                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17888                 goto _exit;
17889             }
17890
17891             bool forceInline;
17892             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17893
17894             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17895
17896             if (pParam->result->IsFailure())
17897             {
17898                 assert(pParam->result->IsNever());
17899                 goto _exit;
17900             }
17901
17902             // Speculatively check if initClass() can be done.
17903             // If it can be done, we will try to inline the method. If inlining
17904             // succeeds, then we will do the non-speculative initClass() and commit it.
17905             // If this speculative call to initClass() fails, there is no point
17906             // trying to inline this method.
17907             initClassResult =
17908                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17909                                                            pParam->exactContextHnd /* context */,
17910                                                            TRUE /* speculative */);
17911
17912             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17913             {
17914                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17915                 goto _exit;
17916             }
17917
17918             // Give the EE the final say in whether to inline or not.
17919             // This should be last since for verifiable code, this can be expensive
17920
17921             /* VM Inline check also ensures that the method is verifiable if needed */
17922             CorInfoInline vmResult;
17923             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17924                                                                   &dwRestrictions);
17925
17926             if (vmResult == INLINE_FAIL)
17927             {
17928                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17929             }
17930             else if (vmResult == INLINE_NEVER)
17931             {
17932                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17933             }
17934
17935             if (pParam->result->IsFailure())
17936             {
17937                 // Make sure not to report this one.  It was already reported by the VM.
17938                 pParam->result->SetReported();
17939                 goto _exit;
17940             }
17941
17942             // check for unsupported inlining restrictions
17943             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17944
17945             if (dwRestrictions & INLINE_SAME_THIS)
17946             {
17947                 GenTree* thisArg = pParam->call->gtCall.gtCallObjp;
17948                 assert(thisArg);
17949
17950                 if (!pParam->pThis->impIsThis(thisArg))
17951                 {
17952                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17953                     goto _exit;
17954                 }
17955             }
17956
17957             /* Get the method properties */
17958
17959             CORINFO_CLASS_HANDLE clsHandle;
17960             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17961             unsigned clsAttr;
17962             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17963
17964             /* Get the return type */
17965
17966             var_types fncRetType;
17967             fncRetType = pParam->call->TypeGet();
17968
17969 #ifdef DEBUG
17970             var_types fncRealRetType;
17971             fncRealRetType = JITtype2varType(methInfo.args.retType);
17972
17973             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17974                    // <BUGNUM> VSW 288602 </BUGNUM>
17975                    // In case of IJW, we allow to assign a native pointer to a BYREF.
17976                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17977                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17978 #endif
17979
17980             //
17981             // Allocate an InlineCandidateInfo structure
17982             //
17983             InlineCandidateInfo* pInfo;
17984             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17985
17986             pInfo->dwRestrictions  = dwRestrictions;
17987             pInfo->methInfo        = methInfo;
17988             pInfo->methAttr        = pParam->methAttr;
17989             pInfo->clsHandle       = clsHandle;
17990             pInfo->clsAttr         = clsAttr;
17991             pInfo->fncRetType      = fncRetType;
17992             pInfo->exactContextHnd = pParam->exactContextHnd;
17993             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17994             pInfo->initClassResult = initClassResult;
17995
17996             *(pParam->ppInlineCandidateInfo) = pInfo;
17997
17998         _exit:;
17999         },
18000         &param);
18001     if (!success)
18002     {
18003         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
18004     }
18005 }
18006
18007 //------------------------------------------------------------------------
18008 // impInlineRecordArgInfo: record information about an inline candidate argument
18009 //
18010 // Arguments:
18011 //   pInlineInfo - inline info for the inline candidate
18012 //   curArgVal - tree for the caller actual argument value
18013 //   argNum - logical index of this argument
18014 //   inlineResult - result of ongoing inline evaluation
18015 //
18016 // Notes:
18017 //
18018 //   Checks for various inline blocking conditions and makes notes in
18019 //   the inline info arg table about the properties of the actual. These
18020 //   properties are used later by impFetchArg to determine how best to
18021 //   pass the argument into the inlinee.
18022
18023 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
18024                                       GenTree*      curArgVal,
18025                                       unsigned      argNum,
18026                                       InlineResult* inlineResult)
18027 {
18028     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
18029
18030     if (curArgVal->gtOper == GT_MKREFANY)
18031     {
18032         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
18033         return;
18034     }
18035
18036     inlCurArgInfo->argNode = curArgVal;
18037
18038     GenTree* lclVarTree;
18039     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
18040     {
18041         inlCurArgInfo->argIsByRefToStructLocal = true;
18042 #ifdef FEATURE_SIMD
18043         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
18044         {
18045             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
18046         }
18047 #endif // FEATURE_SIMD
18048     }
18049
18050     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
18051     {
18052         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
18053         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
18054     }
18055
18056     if (curArgVal->gtOper == GT_LCL_VAR)
18057     {
18058         inlCurArgInfo->argIsLclVar = true;
18059
18060         /* Remember the "original" argument number */
18061         curArgVal->gtLclVar.gtLclILoffs = argNum;
18062     }
18063
18064     if ((curArgVal->OperKind() & GTK_CONST) ||
18065         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
18066     {
18067         inlCurArgInfo->argIsInvariant = true;
18068         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
18069         {
18070             // Abort inlining at this call site
18071             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
18072             return;
18073         }
18074     }
18075
18076     // If the arg is a local that is address-taken, we can't safely
18077     // directly substitute it into the inlinee.
18078     //
18079     // Previously we'd accomplish this by setting "argHasLdargaOp" but
18080     // that has a stronger meaning: that the arg value can change in
18081     // the method body. Using that flag prevents type propagation,
18082     // which is safe in this case.
18083     //
18084     // Instead mark the arg as having a caller local ref.
18085     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
18086     {
18087         inlCurArgInfo->argHasCallerLocalRef = true;
18088     }
18089
18090 #ifdef DEBUG
18091     if (verbose)
18092     {
18093         if (inlCurArgInfo->argIsThis)
18094         {
18095             printf("thisArg:");
18096         }
18097         else
18098         {
18099             printf("\nArgument #%u:", argNum);
18100         }
18101         if (inlCurArgInfo->argIsLclVar)
18102         {
18103             printf(" is a local var");
18104         }
18105         if (inlCurArgInfo->argIsInvariant)
18106         {
18107             printf(" is a constant");
18108         }
18109         if (inlCurArgInfo->argHasGlobRef)
18110         {
18111             printf(" has global refs");
18112         }
18113         if (inlCurArgInfo->argHasCallerLocalRef)
18114         {
18115             printf(" has caller local ref");
18116         }
18117         if (inlCurArgInfo->argHasSideEff)
18118         {
18119             printf(" has side effects");
18120         }
18121         if (inlCurArgInfo->argHasLdargaOp)
18122         {
18123             printf(" has ldarga effect");
18124         }
18125         if (inlCurArgInfo->argHasStargOp)
18126         {
18127             printf(" has starg effect");
18128         }
18129         if (inlCurArgInfo->argIsByRefToStructLocal)
18130         {
18131             printf(" is byref to a struct local");
18132         }
18133
18134         printf("\n");
18135         gtDispTree(curArgVal);
18136         printf("\n");
18137     }
18138 #endif
18139 }
18140
18141 //------------------------------------------------------------------------
18142 // impInlineInitVars: setup inline information for inlinee args and locals
18143 //
18144 // Arguments:
18145 //    pInlineInfo - inline info for the inline candidate
18146 //
18147 // Notes:
18148 //    This method primarily adds caller-supplied info to the inlArgInfo
18149 //    and sets up the lclVarInfo table.
18150 //
18151 //    For args, the inlArgInfo records properties of the actual argument
18152 //    including the tree node that produces the arg value. This node is
18153 //    usually the tree node present at the call, but may also differ in
18154 //    various ways:
18155 //    - when the call arg is a GT_RET_EXPR, we search back through the ret
18156 //      expr chain for the actual node. Note this will either be the original
18157 //      call (which will be a failed inline by this point), or the return
18158 //      expression from some set of inlines.
18159 //    - when argument type casting is needed the necessary casts are added
18160 //      around the argument node.
18161 //    - if an argument can be simplified by folding then the node here is the
18162 //      folded value.
18163 //
18164 //   The method may make observations that lead to marking this candidate as
18165 //   a failed inline. If this happens the initialization is abandoned immediately
18166 //   to try and reduce the jit time cost for a failed inline.
18167
18168 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
18169 {
18170     assert(!compIsForInlining());
18171
18172     GenTree*             call         = pInlineInfo->iciCall;
18173     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
18174     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
18175     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
18176     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
18177     InlineResult*        inlineResult = pInlineInfo->inlineResult;
18178
18179     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
18180
18181     /* init the argument struct */
18182
18183     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
18184
18185     /* Get hold of the 'this' pointer and the argument list proper */
18186
18187     GenTree* thisArg = call->gtCall.gtCallObjp;
18188     GenTree* argList = call->gtCall.gtCallArgs;
18189     unsigned argCnt  = 0; // Count of the arguments
18190
18191     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
18192
18193     if (thisArg)
18194     {
18195         inlArgInfo[0].argIsThis = true;
18196         GenTree* actualThisArg  = thisArg->gtRetExprVal();
18197         impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, inlineResult);
18198
18199         if (inlineResult->IsFailure())
18200         {
18201             return;
18202         }
18203
18204         /* Increment the argument count */
18205         argCnt++;
18206     }
18207
18208     /* Record some information about each of the arguments */
18209     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
18210
18211 #if USER_ARGS_COME_LAST
18212     unsigned typeCtxtArg = thisArg ? 1 : 0;
18213 #else  // USER_ARGS_COME_LAST
18214     unsigned typeCtxtArg = methInfo->args.totalILArgs();
18215 #endif // USER_ARGS_COME_LAST
18216
18217     for (GenTree* argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
18218     {
18219         if (argTmp == argList && hasRetBuffArg)
18220         {
18221             continue;
18222         }
18223
18224         // Ignore the type context argument
18225         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
18226         {
18227             pInlineInfo->typeContextArg = typeCtxtArg;
18228             typeCtxtArg                 = 0xFFFFFFFF;
18229             continue;
18230         }
18231
18232         assert(argTmp->gtOper == GT_LIST);
18233         GenTree* arg       = argTmp->gtOp.gtOp1;
18234         GenTree* actualArg = arg->gtRetExprVal();
18235         impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
18236
18237         if (inlineResult->IsFailure())
18238         {
18239             return;
18240         }
18241
18242         /* Increment the argument count */
18243         argCnt++;
18244     }
18245
18246     /* Make sure we got the arg number right */
18247     assert(argCnt == methInfo->args.totalILArgs());
18248
18249 #ifdef FEATURE_SIMD
18250     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
18251 #endif // FEATURE_SIMD
18252
18253     /* We have typeless opcodes, get type information from the signature */
18254
18255     if (thisArg)
18256     {
18257         var_types sigType;
18258
18259         if (clsAttr & CORINFO_FLG_VALUECLASS)
18260         {
18261             sigType = TYP_BYREF;
18262         }
18263         else
18264         {
18265             sigType = TYP_REF;
18266         }
18267
18268         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
18269         lclVarInfo[0].lclHasLdlocaOp = false;
18270
18271 #ifdef FEATURE_SIMD
18272         // We always want to check isSIMDorHWSIMDClass, since we want to set foundSIMDType (to increase
18273         // the inlining multiplier) for anything in that assembly.
18274         // But we only need to normalize it if it is a TYP_STRUCT
18275         // (which we need to do even if we have already set foundSIMDType).
18276         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
18277         {
18278             if (sigType == TYP_STRUCT)
18279             {
18280                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
18281             }
18282             foundSIMDType = true;
18283         }
18284 #endif // FEATURE_SIMD
18285         lclVarInfo[0].lclTypeInfo = sigType;
18286
18287         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
18288                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
18289                 (clsAttr & CORINFO_FLG_VALUECLASS)));
18290
18291         if (genActualType(thisArg->gtType) != genActualType(sigType))
18292         {
18293             if (sigType == TYP_REF)
18294             {
18295                 /* The argument cannot be bashed into a ref (see bug 750871) */
18296                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
18297                 return;
18298             }
18299
18300             /* This can only happen with byrefs <-> ints/shorts */
18301
18302             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
18303             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
18304
18305             if (sigType == TYP_BYREF)
18306             {
18307                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18308             }
18309             else if (thisArg->gtType == TYP_BYREF)
18310             {
18311                 assert(sigType == TYP_I_IMPL);
18312
18313                 /* If possible change the BYREF to an int */
18314                 if (thisArg->IsVarAddr())
18315                 {
18316                     thisArg->gtType              = TYP_I_IMPL;
18317                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18318                 }
18319                 else
18320                 {
18321                     /* Arguments 'int <- byref' cannot be bashed */
18322                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18323                     return;
18324                 }
18325             }
18326         }
18327     }
18328
18329     /* Init the types of the arguments and make sure the types
18330      * from the trees match the types in the signature */
18331
18332     CORINFO_ARG_LIST_HANDLE argLst;
18333     argLst = methInfo->args.args;
18334
18335     unsigned i;
18336     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
18337     {
18338         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
18339
18340         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
18341
18342 #ifdef FEATURE_SIMD
18343         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
18344         {
18345             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
18346             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
18347             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
18348             foundSIMDType = true;
18349             if (sigType == TYP_STRUCT)
18350             {
18351                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
18352                 sigType              = structType;
18353             }
18354         }
18355 #endif // FEATURE_SIMD
18356
18357         lclVarInfo[i].lclTypeInfo    = sigType;
18358         lclVarInfo[i].lclHasLdlocaOp = false;
18359
18360         /* Does the tree type match the signature type? */
18361
18362         GenTree* inlArgNode = inlArgInfo[i].argNode;
18363
18364         if (sigType != inlArgNode->gtType)
18365         {
18366             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
18367                but in bad IL cases with caller-callee signature mismatches we can see other types.
18368                Intentionally reject cases with mismatches so the jit stays resilient when
18369                encountering bad IL. */
18370
18371             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
18372                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
18373                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
18374
18375             if (!isPlausibleTypeMatch)
18376             {
18377                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
18378                 return;
18379             }
18380
18381             /* Is it a narrowing or widening cast?
18382              * Widening casts are ok since the value computed is already
18383              * normalized to an int (on the IL stack) */
18384
18385             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
18386             {
18387                 if (sigType == TYP_BYREF)
18388                 {
18389                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18390                 }
18391                 else if (inlArgNode->gtType == TYP_BYREF)
18392                 {
18393                     assert(varTypeIsIntOrI(sigType));
18394
18395                     /* If possible bash the BYREF to an int */
18396                     if (inlArgNode->IsVarAddr())
18397                     {
18398                         inlArgNode->gtType           = TYP_I_IMPL;
18399                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18400                     }
18401                     else
18402                     {
18403                         /* Arguments 'int <- byref' cannot be changed */
18404                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18405                         return;
18406                     }
18407                 }
18408                 else if (genTypeSize(sigType) < EA_PTRSIZE)
18409                 {
18410                     /* Narrowing cast */
18411
18412                     if (inlArgNode->gtOper == GT_LCL_VAR &&
18413                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
18414                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
18415                     {
18416                         /* We don't need to insert a cast here as the variable
18417                            was assigned a normalized value of the right type */
18418
18419                         continue;
18420                     }
18421
18422                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
18423
18424                     inlArgInfo[i].argIsLclVar = false;
18425
18426                     /* Try to fold the node in case we have constant arguments */
18427
18428                     if (inlArgInfo[i].argIsInvariant)
18429                     {
18430                         inlArgNode            = gtFoldExprConst(inlArgNode);
18431                         inlArgInfo[i].argNode = inlArgNode;
18432                         assert(inlArgNode->OperIsConst());
18433                     }
18434                 }
18435 #ifdef _TARGET_64BIT_
18436                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
18437                 {
18438                     // This should only happen for int -> native int widening
18439                     inlArgNode = inlArgInfo[i].argNode =
18440                         gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
18441
18442                     inlArgInfo[i].argIsLclVar = false;
18443
18444                     /* Try to fold the node in case we have constant arguments */
18445
18446                     if (inlArgInfo[i].argIsInvariant)
18447                     {
18448                         inlArgNode            = gtFoldExprConst(inlArgNode);
18449                         inlArgInfo[i].argNode = inlArgNode;
18450                         assert(inlArgNode->OperIsConst());
18451                     }
18452                 }
18453 #endif // _TARGET_64BIT_
18454             }
18455         }
18456     }
18457
18458     /* Init the types of the local variables */
18459
18460     CORINFO_ARG_LIST_HANDLE localsSig;
18461     localsSig = methInfo->locals.args;
18462
18463     for (i = 0; i < methInfo->locals.numArgs; i++)
18464     {
18465         bool      isPinned;
18466         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
18467
18468         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
18469         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
18470         lclVarInfo[i + argCnt].lclTypeInfo    = type;
18471
18472         if (varTypeIsGC(type))
18473         {
18474             pInlineInfo->numberOfGcRefLocals++;
18475         }
18476
18477         if (isPinned)
18478         {
18479             // Pinned locals may cause inlines to fail.
18480             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
18481             if (inlineResult->IsFailure())
18482             {
18483                 return;
18484             }
18485         }
18486
18487         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
18488
18489         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
18490         // out on the inline.
18491         if (type == TYP_STRUCT)
18492         {
18493             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
18494             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
18495             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
18496             {
18497                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
18498                 if (inlineResult->IsFailure())
18499                 {
18500                     return;
18501                 }
18502
18503                 // Do further notification in the case where the call site is rare; some policies do
18504                 // not track the relative hotness of call sites for "always" inline cases.
18505                 if (pInlineInfo->iciBlock->isRunRarely())
18506                 {
18507                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
18508                     if (inlineResult->IsFailure())
18509                     {
18510
18511                         return;
18512                     }
18513                 }
18514             }
18515         }
18516
18517         localsSig = info.compCompHnd->getArgNext(localsSig);
18518
18519 #ifdef FEATURE_SIMD
18520         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
18521         {
18522             foundSIMDType = true;
18523             if (featureSIMD && type == TYP_STRUCT)
18524             {
18525                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
18526                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
18527             }
18528         }
18529 #endif // FEATURE_SIMD
18530     }
18531
18532 #ifdef FEATURE_SIMD
18533     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
18534     {
18535         foundSIMDType = true;
18536     }
18537     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
18538 #endif // FEATURE_SIMD
18539 }
18540
18541 //------------------------------------------------------------------------
18542 // impInlineFetchLocal: get a local var that represents an inlinee local
18543 //
18544 // Arguments:
18545 //    lclNum -- number of the inlinee local
18546 //    reason -- debug string describing purpose of the local var
18547 //
18548 // Returns:
18549 //    Number of the local to use
18550 //
18551 // Notes:
18552 //    This method is invoked only for locals actually used in the
18553 //    inlinee body.
18554 //
18555 //    Allocates a new temp if necessary, and copies key properties
18556 //    over from the inlinee local var info.
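//
//    Illustrative example (not from the source): the first time inlinee
//    local V_2 is referenced, a fresh compiler temp is grabbed and its type,
//    pinned flag, and ldloca/stloc properties are copied over from the
//    lclVarInfo entry; later references to V_2 return the cached temp number.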
18557
18558 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
18559 {
18560     assert(compIsForInlining());
18561
18562     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
18563
18564     if (tmpNum == BAD_VAR_NUM)
18565     {
18566         const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
18567         const var_types      lclTyp       = inlineeLocal.lclTypeInfo;
18568
18569         // The lifetime of this local might span multiple BBs.
18570         // So it is a long lifetime local.
18571         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
18572
18573         // Copy over key info
18574         lvaTable[tmpNum].lvType                 = lclTyp;
18575         lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
18576         lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
18577         lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
18578         lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
18579
18580         // Copy over class handle for ref types. Note this may be a
18581         // shared type -- someday perhaps we can get the exact
18582         // signature and pass in a more precise type.
18583         if (lclTyp == TYP_REF)
18584         {
18585             lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
18586         }
18587
18588         if (inlineeLocal.lclVerTypeInfo.IsStruct())
18589         {
18590             if (varTypeIsStruct(lclTyp))
18591             {
18592                 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18593             }
18594             else
18595             {
18596                 // This is a wrapped primitive.  Make sure the verstate knows that
18597                 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
18598             }
18599         }
18600
18601 #ifdef DEBUG
18602         // Sanity check that we're properly prepared for gc ref locals.
18603         if (varTypeIsGC(lclTyp))
18604         {
18605             // Since there are gc locals we should have seen them earlier
18606             // and if there was a return value, set up the spill temp.
18607             assert(impInlineInfo->HasGcRefLocals());
18608             assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
18609         }
18610         else
18611         {
18612             // Make sure all pinned locals count as gc refs.
18613             assert(!inlineeLocal.lclIsPinned);
18614         }
18615 #endif // DEBUG
18616     }
18617
18618     return tmpNum;
18619 }
18620
18621 //------------------------------------------------------------------------
18622 // impInlineFetchArg: return tree node for argument value in an inlinee
18623 //
18624 // Arguments:
18625 //    lclNum -- argument number in inlinee IL
18626 //    inlArgInfo -- argument info for inlinee
18627 //    lclVarInfo -- var info for inlinee
18628 //
18629 // Returns:
18630 //    Tree for the argument's value. Often an inlinee-scoped temp
18631 //    GT_LCL_VAR but can be other tree kinds, if the argument
18632 //    expression from the caller can be directly substituted into the
18633 //    inlinee body.
18634 //
18635 // Notes:
18636 //    Must be used only for arguments -- use impInlineFetchLocal for
18637 //    inlinee locals.
18638 //
18639 //    Direct substitution is performed when the formal argument cannot
18640 //    change value in the inlinee body (no starg or ldarga), and the
18641 //    actual argument expression's value cannot be changed if it is
18642 //    substituted into the inlinee body.
18643 //
18644 //    Even if an inlinee-scoped temp is returned here, it may later be
18645 //    "bashed" to a caller-supplied tree when arguments are actually
18646 //    passed (see fgInlinePrependStatements). Bashing can happen if
18647 //    the argument ends up being single use and other conditions are
18648 //    met. So the contents of the tree returned here may not end up
18649 //    being the ones ultimately used for the argument.
18650 //
18651 //    This method will side effect inlArgInfo. It should only be called
18652 //    for actual uses of the argument in the inlinee.
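//
//    Illustrative example (not from the source): for a call F(x) where the
//    callee never uses starg/ldarga on the parameter and x is an unaliased
//    caller local, uses of the parameter inside the inlinee can refer to x
//    directly instead of going through an inlinee temp.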
18653
18654 GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
18655 {
18656     // Cache the relevant arg and lcl info for this argument.
18657     // We will modify argInfo but not lclVarInfo.
18658     InlArgInfo&          argInfo          = inlArgInfo[lclNum];
18659     const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
18660     const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
18661     const var_types      lclTyp           = lclInfo.lclTypeInfo;
18662     GenTree*             op1              = nullptr;
18663
18664     if (argInfo.argIsInvariant && !argCanBeModified)
18665     {
18666         // Directly substitute constants or addresses of locals
18667         //
18668         // Clone the constant. Note that we cannot directly use
18669         // argNode in the trees even if !argInfo.argIsUsed as this
18670         // would introduce aliasing between inlArgInfo[].argNode and
18671         // impInlineExpr. Then gtFoldExpr() could change it, causing
18672         // further references to the argument working off of the
18673         // bashed copy.
18674         op1 = gtCloneExpr(argInfo.argNode);
18675         PREFIX_ASSUME(op1 != nullptr);
18676         argInfo.argTmpNum = BAD_VAR_NUM;
18677
18678         // We may need to retype to ensure we match the callee's view of the type.
18679         // Otherwise callee-pass throughs of arguments can create return type
18680         // mismatches that block inlining.
18681         //
18682         // Note argument type mismatches that prevent inlining should
18683         // have been caught in impInlineInitVars.
18684         if (op1->TypeGet() != lclTyp)
18685         {
18686             op1->gtType = genActualType(lclTyp);
18687         }
18688     }
18689     else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
18690     {
18691         // Directly substitute unaliased caller locals for args that cannot be modified
18692         //
18693         // Use the caller-supplied node if this is the first use.
18694         op1               = argInfo.argNode;
18695         argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
18696
18697         // Use an equivalent copy if this is the second or subsequent
18698         // use, or if we need to retype.
18699         //
18700         // Note argument type mismatches that prevent inlining should
18701         // have been caught in impInlineInitVars.
18702         if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
18703         {
18704             assert(op1->gtOper == GT_LCL_VAR);
18705             assert(lclNum == op1->gtLclVar.gtLclILoffs);
18706
18707             var_types newTyp = lclTyp;
18708
18709             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
18710             {
18711                 newTyp = genActualType(lclTyp);
18712             }
18713
18714             // Create a new lcl var node - remember the argument lclNum
18715             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
18716         }
18717     }
18718     else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
18719     {
18720         /* Argument is a by-ref address to a struct, a normed struct, or its field.
18721            In these cases, don't spill the byref to a local, simply clone the tree and use it.
18722            This way we will increase the chance for this byref to be optimized away by
18723            a subsequent "dereference" operation.
18724
18725            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
18726            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
18727            For example, if the caller is:
18728                 ldloca.s   V_1  // V_1 is a local struct
18729                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
18730            and the callee being inlined has:
18731                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
18732                     ldarga.s   ptrToInts
18733                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
18734            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
18735            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
18736         */
18737         assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
18738         op1 = gtCloneExpr(argInfo.argNode);
18739     }
18740     else
18741     {
18742         /* Argument is a complex expression - it must be evaluated into a temp */
18743
18744         if (argInfo.argHasTmp)
18745         {
18746             assert(argInfo.argIsUsed);
18747             assert(argInfo.argTmpNum < lvaCount);
18748
18749             /* Create a new lcl var node - remember the argument lclNum */
18750             op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
18751
18752             /* This is the second or later use of the this argument,
18753             so we have to use the temp (instead of the actual arg) */
18754             argInfo.argBashTmpNode = nullptr;
18755         }
18756         else
18757         {
18758             /* First time use */
18759             assert(!argInfo.argIsUsed);
18760
18761             /* Reserve a temp for the expression.
18762             * Use a large size node as we may change it later */
18763
18764             const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
18765
18766             lvaTable[tmpNum].lvType = lclTyp;
18767
18768             // For ref types, determine the type of the temp.
18769             if (lclTyp == TYP_REF)
18770             {
18771                 if (!argCanBeModified)
18772                 {
18773                     // If the arg can't be modified in the method
18774                     // body, use the type of the value, if
18775                     // known. Otherwise, use the declared type.
18776                     lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18777                 }
18778                 else
18779                 {
18780                     // Arg might be modified, use the declared type of
18781                     // the argument.
18782                     lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18783                 }
18784             }
18785
18786             assert(lvaTable[tmpNum].lvAddrExposed == 0);
18787             if (argInfo.argHasLdargaOp)
18788             {
18789                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
18790             }
18791
18792             if (lclInfo.lclVerTypeInfo.IsStruct())
18793             {
18794                 if (varTypeIsStruct(lclTyp))
18795                 {
18796                     lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18797                 }
18798                 else
18799                 {
18800                     // This is a wrapped primitive.  Make sure the verstate knows that
18801                     lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
18802                 }
18803             }
18804
18805             argInfo.argHasTmp = true;
18806             argInfo.argTmpNum = tmpNum;
18807
18808             // If we require strict exception order, then arguments must
18809             // be evaluated in sequence before the body of the inlined method.
18810             // So we need to evaluate them to a temp.
18811             // Also, if arguments have global or local references, we need to
18812             // evaluate them to a temp before the inlined body as the
18813             // inlined body may be modifying the global ref.
18814             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
18815             // if it is a struct, because it requires some additional handling.
18816
18817             if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
18818                 !argInfo.argHasCallerLocalRef)
18819             {
18820                 /* Get a *LARGE* LCL_VAR node */
18821                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
18822
18823                 /* Record op1 as the very first use of this argument.
18824                 If there are no further uses of the arg, we may be
18825                 able to use the actual arg node instead of the temp.
18826                 If we do see any further uses, we will clear this. */
18827                 argInfo.argBashTmpNode = op1;
18828             }
18829             else
18830             {
18831                 /* Get a small LCL_VAR node */
18832                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
18833                 /* No bashing of this argument */
18834                 argInfo.argBashTmpNode = nullptr;
18835             }
18836         }
18837     }
18838
18839     // Mark this argument as used.
18840     argInfo.argIsUsed = true;
18841
18842     return op1;
18843 }
18844
18845 /******************************************************************************
18846  Is this the original "this" argument to the call being inlined?
18847
18848  Note that we do not inline methods with "starg 0", and so we do not need to
18849  worry about it.
18850 */
18851
18852 BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
18853 {
18854     assert(compIsForInlining());
18855     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
18856 }
18857
18858 //-----------------------------------------------------------------------------
18859 // This function checks whether a dereference in the inlinee can guarantee that
18860 // the "this" pointer is non-NULL.
18861 // If we haven't hit a branch or a side effect, and we are dereferencing
18862 // 'this' to access a field or make a GTF_CALL_NULLCHECK call,
18863 // then we can avoid a separate null pointer check.
18864 //
18865 // "additionalTreesToBeEvaluatedBefore"
18866 // is the set of pending trees that have not yet been added to the statement list,
18867 // and which have been removed from verCurrentState.esStack[]
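//
// Illustrative example (not from the source): an inlinee body that begins
// with 'ldarg.0; ldfld ...' dereferences 'this' before any branch or side
// effect, so the usual explicit null check on 'this' can be elided.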
18868
18869 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree*    additionalTreesToBeEvaluatedBefore,
18870                                                                   GenTree*    variableBeingDereferenced,
18871                                                                   InlArgInfo* inlArgInfo)
18872 {
18873     assert(compIsForInlining());
18874     assert(opts.OptEnabled(CLFLG_INLINING));
18875
18876     BasicBlock* block = compCurBB;
18877
18878     GenTree* stmt;
18879     GenTree* expr;
18880
18881     if (block != fgFirstBB)
18882     {
18883         return FALSE;
18884     }
18885
18886     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
18887     {
18888         return FALSE;
18889     }
18890
18891     if (additionalTreesToBeEvaluatedBefore &&
18892         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
18893     {
18894         return FALSE;
18895     }
18896
18897     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
18898     {
18899         expr = stmt->gtStmt.gtStmtExpr;
18900
18901         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
18902         {
18903             return FALSE;
18904         }
18905     }
18906
18907     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
18908     {
18909         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
18910         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
18911         {
18912             return FALSE;
18913         }
18914     }
18915
18916     return TRUE;
18917 }
18918
18919 //------------------------------------------------------------------------
18920 // impMarkInlineCandidate: determine if this call can be subsequently inlined
18921 //
18922 // Arguments:
18923 //    callNode -- call under scrutiny
18924 //    exactContextHnd -- context handle for inlining
18925 //    exactContextNeedsRuntimeLookup -- true if context required runtime lookup
18926 //    callInfo -- call info from VM
18927 //
18928 // Notes:
18929 //    If callNode is an inline candidate, this method sets the flag
18930 //    GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
18931 //    filled in the associated InlineCandidateInfo.
18932 //
18933 //    If callNode is not an inline candidate, and the reason is
18934 //    something that is inherent to the method being called, the
18935 //    method may be marked as "noinline" to short-circuit any
18936 //    future assessments of calls to this method.
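//
//    Illustrative example (not from the source): a call to a synchronized
//    method is rejected below with CALLEE_IS_SYNCHRONIZED; because that
//    property is inherent to the callee, the failure may also lead to the
//    method being flagged noinline for future call sites.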
18937
18938 void Compiler::impMarkInlineCandidate(GenTree*               callNode,
18939                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
18940                                       bool                   exactContextNeedsRuntimeLookup,
18941                                       CORINFO_CALL_INFO*     callInfo)
18942 {
18943     // Let the strategy know there's another call
18944     impInlineRoot()->m_inlineStrategy->NoteCall();
18945
18946     if (!opts.OptEnabled(CLFLG_INLINING))
18947     {
18948         /* XXX Mon 8/18/2008
18949          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
18950          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
18951          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
18952          * figure out why we did not set MAXOPT for this compile.
18953          */
18954         assert(!compIsForInlining());
18955         return;
18956     }
18957
18958     if (compIsForImportOnly())
18959     {
18960         // Don't bother creating the inline candidate during verification.
18961         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18962         // that leads to the creation of multiple instances of Compiler.
18963         return;
18964     }
18965
18966     GenTreeCall* call = callNode->AsCall();
18967     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18968
18969     // Don't inline if not optimizing root method
18970     if (opts.compDbgCode)
18971     {
18972         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18973         return;
18974     }
18975
18976     // Don't inline if inlining into root method is disabled.
18977     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18978     {
18979         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18980         return;
18981     }
18982
18983     // Inlining candidate determination needs to honor only IL tail prefix.
18984     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18985     if (call->IsTailPrefixedCall())
18986     {
18987         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18988         return;
18989     }
18990
18991     // Tail recursion elimination takes precedence over inlining.
18992     // TODO: We may want to do some of the additional checks from fgMorphCall
18993     // here to reduce the chance we don't inline a call that won't be optimized
18994     // as a fast tail call or turned into a loop.
18995     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18996     {
18997         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18998         return;
18999     }
19000
19001     if (call->IsVirtual())
19002     {
19003         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
19004         return;
19005     }
19006
19007     /* Ignore helper calls */
19008
19009     if (call->gtCallType == CT_HELPER)
19010     {
19011         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
19012         return;
19013     }
19014
19015     /* Ignore indirect calls */
19016     if (call->gtCallType == CT_INDIRECT)
19017     {
19018         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
19019         return;
19020     }
19021
19022     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
19023      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
19024      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
19025
19026     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
19027     unsigned              methAttr;
19028
19029     // Reuse method flags from the original callInfo if possible
19030     if (fncHandle == callInfo->hMethod)
19031     {
19032         methAttr = callInfo->methodFlags;
19033     }
19034     else
19035     {
19036         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
19037     }
19038
19039 #ifdef DEBUG
19040     if (compStressCompile(STRESS_FORCE_INLINE, 0))
19041     {
19042         methAttr |= CORINFO_FLG_FORCEINLINE;
19043     }
19044 #endif
19045
19046     // Check for COMPlus_AggressiveInlining
19047     if (compDoAggressiveInlining)
19048     {
19049         methAttr |= CORINFO_FLG_FORCEINLINE;
19050     }
19051
19052     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
19053     {
19054         /* Don't bother inlining into blocks that are in the catch handler or filter regions */
19055         if (bbInCatchHandlerILRange(compCurBB))
19056         {
19057 #ifdef DEBUG
19058             if (verbose)
19059             {
19060                 printf("\nWill not inline blocks that are in the catch handler region\n");
19061             }
19062
19063 #endif
19064
19065             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
19066             return;
19067         }
19068
19069         if (bbInFilterILRange(compCurBB))
19070         {
19071 #ifdef DEBUG
19072             if (verbose)
19073             {
19074                 printf("\nWill not inline blocks that are in the filter region\n");
19075             }
19076 #endif
19077
19078             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
19079             return;
19080         }
19081     }
19082
19083     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
19084
19085     if (opts.compNeedSecurityCheck)
19086     {
19087         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
19088         return;
19089     }
19090
19091     /* Check if we tried to inline this method before */
19092
19093     if (methAttr & CORINFO_FLG_DONT_INLINE)
19094     {
19095         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
19096         return;
19097     }
19098
19099     /* Cannot inline synchronized methods */
19100
19101     if (methAttr & CORINFO_FLG_SYNCH)
19102     {
19103         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
19104         return;
19105     }
19106
19107     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
19108
19109     if (methAttr & CORINFO_FLG_SECURITYCHECK)
19110     {
19111         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
19112         return;
19113     }
19114
19115     InlineCandidateInfo* inlineCandidateInfo = nullptr;
19116     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
19117
19118     if (inlineResult.IsFailure())
19119     {
19120         return;
19121     }
19122
19123     // The old value should be NULL
19124     assert(call->gtInlineCandidateInfo == nullptr);
19125
19126     // The new value should not be NULL.
19127     assert(inlineCandidateInfo != nullptr);
19128     inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
19129
19130     call->gtInlineCandidateInfo = inlineCandidateInfo;
19131
19132     // Mark the call node as inline candidate.
19133     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
19134
19135     // Let the strategy know there's another candidate.
19136     impInlineRoot()->m_inlineStrategy->NoteCandidate();
19137
19138     // Since we're not actually inlining yet, and this call site is
19139     // still just an inline candidate, there's nothing to report.
19140     inlineResult.SetReported();
19141 }
19142
19143 /******************************************************************************/
19144 // Returns true if the given intrinsic will be implemented by target-specific
19145 // instructions
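// (Illustrative example, not from the source: on AMD64/x86 with SSE2 support,
// CORINFO_INTRINSIC_Sqrt is reported as a target intrinsic, so Math.Sqrt can
// be emitted as a hardware sqrt instruction rather than as a call.)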
19146
19147 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
19148 {
19149 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
19150     switch (intrinsicId)
19151     {
19152         // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
19153         // instructions to directly compute round/ceiling/floor.
19154         //
19155         // TODO: Because the x86 backend only targets SSE for floating-point code,
19156         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
19157         //       implemented those intrinsics as x87 instructions). If this poses
19158         //       a CQ problem, it may be necessary to change the implementation of
19159         //       the helper calls to decrease call overhead or switch back to the
19160         //       x87 instructions. This is tracked by #7097.
19161         case CORINFO_INTRINSIC_Sqrt:
19162         case CORINFO_INTRINSIC_Abs:
19163             return true;
19164
19165         case CORINFO_INTRINSIC_Round:
19166         case CORINFO_INTRINSIC_Ceiling:
19167         case CORINFO_INTRINSIC_Floor:
19168             return compSupports(InstructionSet_SSE41);
19169
19170         default:
19171             return false;
19172     }
19173 #elif defined(_TARGET_ARM64_)
19174     switch (intrinsicId)
19175     {
19176         case CORINFO_INTRINSIC_Sqrt:
19177         case CORINFO_INTRINSIC_Abs:
19178         case CORINFO_INTRINSIC_Round:
19179         case CORINFO_INTRINSIC_Floor:
19180         case CORINFO_INTRINSIC_Ceiling:
19181             return true;
19182
19183         default:
19184             return false;
19185     }
19186 #elif defined(_TARGET_ARM_)
19187     switch (intrinsicId)
19188     {
19189         case CORINFO_INTRINSIC_Sqrt:
19190         case CORINFO_INTRINSIC_Abs:
19191         case CORINFO_INTRINSIC_Round:
19192             return true;
19193
19194         default:
19195             return false;
19196     }
19197 #elif defined(_TARGET_X86_)
19198     switch (intrinsicId)
19199     {
19200         case CORINFO_INTRINSIC_Sin:
19201         case CORINFO_INTRINSIC_Cos:
19202         case CORINFO_INTRINSIC_Sqrt:
19203         case CORINFO_INTRINSIC_Abs:
19204         case CORINFO_INTRINSIC_Round:
19205             return true;
19206
19207         default:
19208             return false;
19209     }
19210 #else
19211     // TODO: This portion of logic is not implemented for other architectures.
19212     // The reason for returning true is that on all other architectures the only
19213     // intrinsics enabled are target intrinsics.
19214     return true;
19215 #endif //_TARGET_AMD64_
19216 }
19217
19218 /******************************************************************************/
19219 // Returns true if the given intrinsic will be implemented by calling System.Math
19220 // methods.
19221
19222 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
19223 {
19224     // Currently, if a math intrinsic is not implemented by target-specific
19225     // instructions, it will be implemented by a System.Math call. In the
19226     // future, if we turn to implementing some of them with helper calls,
19227     // this predicate will need to be revisited.
19228     return !IsTargetIntrinsic(intrinsicId);
19229 }
19230
19231 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
19232 {
19233     switch (intrinsicId)
19234     {
19235         case CORINFO_INTRINSIC_Sin:
19236         case CORINFO_INTRINSIC_Cbrt:
19237         case CORINFO_INTRINSIC_Sqrt:
19238         case CORINFO_INTRINSIC_Abs:
19239         case CORINFO_INTRINSIC_Cos:
19240         case CORINFO_INTRINSIC_Round:
19241         case CORINFO_INTRINSIC_Cosh:
19242         case CORINFO_INTRINSIC_Sinh:
19243         case CORINFO_INTRINSIC_Tan:
19244         case CORINFO_INTRINSIC_Tanh:
19245         case CORINFO_INTRINSIC_Asin:
19246         case CORINFO_INTRINSIC_Asinh:
19247         case CORINFO_INTRINSIC_Acos:
19248         case CORINFO_INTRINSIC_Acosh:
19249         case CORINFO_INTRINSIC_Atan:
19250         case CORINFO_INTRINSIC_Atan2:
19251         case CORINFO_INTRINSIC_Atanh:
19252         case CORINFO_INTRINSIC_Log10:
19253         case CORINFO_INTRINSIC_Pow:
19254         case CORINFO_INTRINSIC_Exp:
19255         case CORINFO_INTRINSIC_Ceiling:
19256         case CORINFO_INTRINSIC_Floor:
19257             return true;
19258         default:
19259             return false;
19260     }
19261 }
19262
19263 bool Compiler::IsMathIntrinsic(GenTree* tree)
19264 {
19265     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
19266 }
19267
19268 //------------------------------------------------------------------------
19269 // impDevirtualizeCall: Attempt to change a virtual vtable call into a
19270 //   normal call
19271 //
19272 // Arguments:
19273 //     call -- the call node to examine/modify
19274 //     method   -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
19275 //     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
19276 //     contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
19277 //     exactContextHnd -- [OUT] updated context handle iff call devirtualized
19278 //
19279 // Notes:
19280 //     Virtual calls in IL will always "invoke" the base class method.
19281 //
19282 //     This transformation looks for evidence that the type of 'this'
19283 //     in the call is exactly known, is a final class or would invoke
19284 //     a final method, and if that and other safety checks pan out,
19285 //     modifies the call and the call info to create a direct call.
19286 //
19287 //     This transformation is initially done in the importer and not
19288 //     in some subsequent optimization pass because we want it to be
19289 //     upstream of inline candidate identification.
19290 //
19291 //     However, later phases may supply improved type information that
19292 //     can enable further devirtualization. We currently reinvoke this
19293 //     code after inlining, if the return value of the inlined call is
19294 //     the 'this obj' of a subsequent virtual call.
19295 //
19296 //     If devirtualization succeeds and the call's this object is the
19297 //     result of a box, the jit will ask the EE for the unboxed entry
19298 //     point. If this exists, the jit will see if it can rework the box
19299 //     to instead make a local copy. If that is doable, the call is
19300 //     updated to invoke the unboxed entry on the local copy.
19301 //
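//     Illustrative example (not from the source): given
//         sealed class Derived : Base { public override int M() => 1; }
//     a 'callvirt' of Base::M on an object whose type is known to be Derived
//     can be rewritten here as a direct call to Derived::M.
//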
19302 void Compiler::impDevirtualizeCall(GenTreeCall*            call,
19303                                    CORINFO_METHOD_HANDLE*  method,
19304                                    unsigned*               methodFlags,
19305                                    CORINFO_CONTEXT_HANDLE* contextHandle,
19306                                    CORINFO_CONTEXT_HANDLE* exactContextHandle)
19307 {
19308     assert(call != nullptr);
19309     assert(method != nullptr);
19310     assert(methodFlags != nullptr);
19311     assert(contextHandle != nullptr);
19312
19313     // This should be a virtual vtable or virtual stub call.
19314     assert(call->IsVirtual());
19315
19316     // Bail if not optimizing
19317     if (opts.MinOpts())
19318     {
19319         return;
19320     }
19321
19322     // Bail if debuggable codegen
19323     if (opts.compDbgCode)
19324     {
19325         return;
19326     }
19327
19328 #if defined(DEBUG)
19329     // Bail if devirt is disabled.
19330     if (JitConfig.JitEnableDevirtualization() == 0)
19331     {
19332         return;
19333     }
19334
19335     const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
19336 #endif // DEBUG
19337
19338     // Fetch information about the virtual method we're calling.
19339     CORINFO_METHOD_HANDLE baseMethod        = *method;
19340     unsigned              baseMethodAttribs = *methodFlags;
19341
19342     if (baseMethodAttribs == 0)
19343     {
19344         // For late devirt we may not have method attributes, so fetch them.
19345         baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19346     }
19347     else
19348     {
19349 #if defined(DEBUG)
19350         // Validate that callInfo has up to date method flags
19351         const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19352
19353         // All the base method attributes should agree, save that
19354         // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
19355         // because of concurrent jitting activity.
19356         //
19357         // Note we don't look at this particular flag bit below, and
19358         // later on (if we do try and inline) we will rediscover why
19359         // the method can't be inlined, so there's no danger here in
19360         // seeing this particular flag bit in different states between
19361         // the cached and fresh values.
19362         if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
19363         {
19364             assert(!"mismatched method attributes");
19365         }
19366 #endif // DEBUG
19367     }
19368
19369     // In R2R mode, we might see virtual stub calls to
19370     // non-virtuals, for instance cases where the non-virtual method
19371     // is in a different assembly but is called via CALLVIRT. For
19372     // version resilience we must allow for the fact that the method
19373     // might become virtual in some update.
19374     //
19375     // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
19376     // regular call+nullcheck upstream, so we won't reach this
19377     // point.
19378     if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
19379     {
19380         assert(call->IsVirtualStub());
19381         assert(opts.IsReadyToRun());
19382         JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
19383         return;
19384     }
19385
19386     // See what we know about the type of 'this' in the call.
19387     GenTree*             thisObj       = call->gtCallObjp->gtEffectiveVal(false);
19388     GenTree*             actualThisObj = nullptr;
19389     bool                 isExact       = false;
19390     bool                 objIsNonNull  = false;
19391     CORINFO_CLASS_HANDLE objClass      = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
19392
19393     // See if we have special knowledge that can get us a type or a better type.
19394     if ((objClass == nullptr) || !isExact)
19395     {
19396         actualThisObj = thisObj;
19397
19398         // Walk back through any return expression placeholders
19399         while (actualThisObj->OperGet() == GT_RET_EXPR)
19400         {
19401             actualThisObj = actualThisObj->gtRetExpr.gtInlineCandidate;
19402         }
19403
19404         // See if we landed on a call to a special intrinsic method
19405         if (actualThisObj->IsCall())
19406         {
19407             GenTreeCall* thisObjCall = actualThisObj->AsCall();
19408             if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
19409             {
19410                 assert(thisObjCall->gtCallType == CT_USER_FUNC);
19411                 CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
19412                 CORINFO_CLASS_HANDLE  specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
19413                 if (specialObjClass != nullptr)
19414                 {
19415                     objClass     = specialObjClass;
19416                     isExact      = true;
19417                     objIsNonNull = true;
19418                 }
19419             }
19420         }
19421     }
19422
19423     // Bail if we know nothing.
19424     if (objClass == nullptr)
19425     {
19426         JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
19427         return;
19428     }
19429
19430     // Fetch information about the class that introduced the virtual method.
19431     CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
19432     const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
19433
19434 #if !defined(FEATURE_CORECLR)
19435     // If base class is not beforefieldinit then devirtualizing may
19436     // cause us to miss a base class init trigger. Spec says we don't
19437     // need a trigger for ref class callvirts but desktop seems to
19438     // have one anyways. So defer.
19439     if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
19440     {
19441         JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
19442         return;
19443     }
19444 #endif // FEATURE_CORECLR
19445
19446     // Is the call an interface call?
19447     const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
19448
19449     // If the objClass is sealed (final), then we may be able to devirtualize.
19450     const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
19451     const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
19452
19453 #if defined(DEBUG)
19454     const char* callKind       = isInterface ? "interface" : "virtual";
19455     const char* objClassNote   = "[?]";
19456     const char* objClassName   = "?objClass";
19457     const char* baseClassName  = "?baseClass";
19458     const char* baseMethodName = "?baseMethod";
19459
19460     if (verbose || doPrint)
19461     {
19462         objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
19463         objClassName   = info.compCompHnd->getClassName(objClass);
19464         baseClassName  = info.compCompHnd->getClassName(baseClass);
19465         baseMethodName = eeGetMethodName(baseMethod, nullptr);
19466
19467         if (verbose)
19468         {
19469             printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
19470                    "    class for 'this' is %s%s (attrib %08x)\n"
19471                    "    base method is %s::%s\n",
19472                    callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
19473         }
19474     }
19475 #endif // defined(DEBUG)
19476
19477     // Bail if obj class is an interface.
19478     // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
19479     //   IL_021d:  ldloc.0
19480     //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
19481     if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
19482     {
19483         JITDUMP("--- obj class is interface, sorry\n");
19484         return;
19485     }
19486
19487     if (isInterface)
19488     {
19489         assert(call->IsVirtualStub());
19490         JITDUMP("--- base class is interface\n");
19491     }
19492
19493     // Fetch the method that would be called based on the declared type of 'this'
19494     CORINFO_CONTEXT_HANDLE ownerType     = *contextHandle;
19495     CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
19496
19497     // If we failed to get a handle, we can't devirtualize.  This can
19498     // happen when prejitting, if the devirtualization crosses
19499     // servicing bubble boundaries.
19500     if (derivedMethod == nullptr)
19501     {
19502         JITDUMP("--- no derived method, sorry\n");
19503         return;
19504     }
19505
19506     // Fetch method attributes to see if method is marked final.
19507     DWORD      derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
19508     const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
19509
19510 #if defined(DEBUG)
19511     const char* derivedClassName  = "?derivedClass";
19512     const char* derivedMethodName = "?derivedMethod";
19513
19514     const char* note = "speculative";
19515     if (isExact)
19516     {
19517         note = "exact";
19518     }
19519     else if (objClassIsFinal)
19520     {
19521         note = "final class";
19522     }
19523     else if (derivedMethodIsFinal)
19524     {
19525         note = "final method";
19526     }
19527
19528     if (verbose || doPrint)
19529     {
19530         derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
19531         if (verbose)
19532         {
19533             printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
19534             gtDispTree(call);
19535         }
19536     }
19537 #endif // defined(DEBUG)
19538
19539     if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
19540     {
19541         // Type is not exact, and neither the class nor the method is final.
19542         //
19543         // We could speculatively devirtualize, but there's no
19544         // reason to believe the derived method is the one that
19545         // is likely to be invoked.
19546         //
19547         // If there's currently no further overriding (that is, at
19548         // the time of jitting, objClass has no subclasses that
19549         // override this method), then perhaps we'd be willing to
19550         // make a bet...?
19551         JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
19552         return;
19553     }
19554
19555     // For interface calls we must have an exact type or final class.
19556     if (isInterface && !isExact && !objClassIsFinal)
19557     {
19558         JITDUMP("    Class not final or exact for interface, no devirtualization\n");
19559         return;
19560     }
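
    // For example (hypothetical types): if sealed class S overrides Base::M and the
    // declared type of 'this' is Base while its known class is S, then objClassIsFinal
    // is true and the virtual call to Base::M can be bound directly to S::M.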
19561
19562     JITDUMP("    %s; can devirtualize\n", note);
19563
19564     // Make the updates.
19565     call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
19566     call->gtFlags &= ~GTF_CALL_VIRT_STUB;
19567     call->gtCallMethHnd = derivedMethod;
19568     call->gtCallType    = CT_USER_FUNC;
19569
19570     // Virtual calls include an implicit null check, which we may
19571     // now need to make explicit.
19572     if (!objIsNonNull)
19573     {
19574         call->gtFlags |= GTF_CALL_NULLCHECK;
19575     }
19576
19577     // Clear the inline candidate info (may be non-null since
19578     // it's a union field used for other things by virtual
19579     // stubs)
19580     call->gtInlineCandidateInfo = nullptr;
19581
19582 #if defined(DEBUG)
19583     if (verbose)
19584     {
19585         printf("... after devirt...\n");
19586         gtDispTree(call);
19587     }
19588
19589     if (doPrint)
19590     {
19591         printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
19592                baseMethodName, derivedClassName, derivedMethodName, note);
19593     }
19594 #endif // defined(DEBUG)
19595
19596     // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
19597     if (thisObj->IsBoxedValue())
19598     {
19599         JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
19600
19601         // Note that for some shared methods the unboxed entry point requires an extra parameter.
19602         bool                  requiresInstMethodTableArg = false;
19603         CORINFO_METHOD_HANDLE unboxedEntryMethod =
19604             info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
19605
19606         if (unboxedEntryMethod != nullptr)
19607         {
19608             // Since the call is the only consumer of the box, and the callee receives
19609             // only an interior pointer into it, we know the box can't escape.
19610             //
19611             // So, revise the box to simply create a local copy, use the address of that copy
19612             // as the this pointer, and update the entry point to the unboxed entry.
19613             //
19614             // Ideally, we then inline the boxed method and, if it turns out not to modify
19615             // the copy, we can undo the copy too.
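            //
            // For example (hypothetical IL), for a sequence such as
            //   ldloc.0
            //   box        SomeStruct
            //   callvirt   instance string System.Object::ToString()
            // that devirtualizes to SomeStruct::ToString, the rewrite copies the value to
            // a local and invokes the unboxed entry with the local's address as 'this'
            // (plus a method table argument when the unboxed entry is shared).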
19616             if (requiresInstMethodTableArg)
19617             {
19618                 // Perform a trial box removal and ask for the type handle tree.
19619                 JITDUMP("Unboxed entry needs method table arg...\n");
19620                 GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);
19621
19622                 if (methodTableArg != nullptr)
19623                 {
19624                     // If that worked, turn the box into a copy to a local var
19625                     JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
19626                     GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
19627
19628                     if (localCopyThis != nullptr)
19629                     {
19630                         // Pass the local var as this and the type handle as a new arg
19631                         JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table arg\n");
19632                         call->gtCallObjp = localCopyThis;
19633
19634                         // Prepend for R2L arg passing or empty L2R passing
19635                         if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
19636                         {
19637                             call->gtCallArgs = gtNewListNode(methodTableArg, call->gtCallArgs);
19638                         }
19639                         // Append for non-empty L2R
19640                         else
19641                         {
19642                             GenTreeArgList* beforeArg = call->gtCallArgs;
19643                             while (beforeArg->Rest() != nullptr)
19644                             {
19645                                 beforeArg = beforeArg->Rest();
19646                             }
19647
19648                             beforeArg->Rest() = gtNewListNode(methodTableArg, nullptr);
19649                         }
19650
19651                         call->gtCallMethHnd = unboxedEntryMethod;
19652                         derivedMethod       = unboxedEntryMethod;
19653
19654                         // Method attributes will differ because unboxed entry point is shared
19655                         const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
19656                         JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
19657                                 unboxedMethodAttribs);
19658                         derivedMethodAttribs = unboxedMethodAttribs;
19659                     }
19660                     else
19661                     {
19662                         JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
19663                     }
19664                 }
19665                 else
19666                 {
19667                     JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
19668                 }
19669             }
19670             else
19671             {
19672                 JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
19673                 GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
19674
19675                 if (localCopyThis != nullptr)
19676                 {
19677                     JITDUMP("Success! invoking unboxed entry point on local copy\n");
19678                     call->gtCallObjp    = localCopyThis;
19679                     call->gtCallMethHnd = unboxedEntryMethod;
19680                     derivedMethod       = unboxedEntryMethod;
19681                 }
19682                 else
19683                 {
19684                     JITDUMP("Sorry, failed to undo the box\n");
19685                 }
19686             }
19687         }
19688         else
19689         {
19690             // Many of the low-level methods on value classes won't have unboxed entries,
19691             // as they need access to the type of the object.
19692             //
19693             // Note this may be a cue for us to stack allocate the boxed object, since
19694             // we probably know that these objects don't escape.
19695             JITDUMP("Sorry, failed to find unboxed entry point\n");
19696         }
19697     }
19698
19699     // Fetch the class that introduced the derived method.
19700     //
19701     // Note this may not equal objClass, if there is a
19702     // final method that objClass inherits.
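    //
    // For example (hypothetical types): if A declares a sealed override of M and B
    // derives from A without overriding it, then with objClass == B the derived
    // method is A::M and derivedClass is A, not B.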
19703     CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
19704
19705     // Need to update call info too. This is fragile
19706     // but hopefully the derived method conforms to
19707     // the base in most other ways.
19708     *method        = derivedMethod;
19709     *methodFlags   = derivedMethodAttribs;
19710     *contextHandle = MAKE_METHODCONTEXT(derivedMethod);
19711
19712     // Update context handle.
19713     if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
19714     {
19715         *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
19716     }
19717
19718 #ifdef FEATURE_READYTORUN_COMPILER
19719     if (opts.IsReadyToRun())
19720     {
19721         // For R2R, getCallInfo triggers bookkeeping on the zap
19722         // side so we need to call it here.
19723         //
19724         // First, cons up a suitable resolved token.
19725         CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
19726
19727         derivedResolvedToken.tokenScope   = info.compScopeHnd;
19728         derivedResolvedToken.tokenContext = *contextHandle;
19729         derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
19730         derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
19731         derivedResolvedToken.hClass       = derivedClass;
19732         derivedResolvedToken.hMethod      = derivedMethod;
19733
19734         // Look up the new call info.
19735         CORINFO_CALL_INFO derivedCallInfo;
19736         eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
19737
19738         // Update the call.
19739         call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
19740         call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
19741         call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
19742     }
19743 #endif // FEATURE_READYTORUN_COMPILER
19744 }
19745
19746 //------------------------------------------------------------------------
19747 // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
19748 //   to an intrinsic returns an exact type
19749 //
19750 // Arguments:
19751 //     methodHnd -- handle for the special intrinsic method
19752 //
19753 // Returns:
19754 //     Exact class handle returned by the intrinsic call, if known.
19755 //     Nullptr if not known, or not likely to lead to beneficial optimization.
19756
19757 CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
19758 {
19759     JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
19760
19761     CORINFO_CLASS_HANDLE result = nullptr;
19762
19763     // See what intrinsic we have...
19764     const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
19765     switch (ni)
19766     {
19767         case NI_System_Collections_Generic_EqualityComparer_get_Default:
19768         {
19769             // Expect one class generic parameter; figure out which it is.
19770             CORINFO_SIG_INFO sig;
19771             info.compCompHnd->getMethodSig(methodHnd, &sig);
19772             assert(sig.sigInst.classInstCount == 1);
19773             CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
19774             assert(typeHnd != nullptr);
19775
19776             // Lookup can be incorrect when we have __Canon, as it won't appear
19777             // to implement any interface types.
19778             //
19779             // And if we do not have a final type, devirt & inlining is
19780             // unlikely to result in much simplification.
19781             //
19782             // We can use CORINFO_FLG_FINAL to screen out both of these cases.
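            //
            // For example (hypothetical use): for EqualityComparer<string>.Default the
            // type argument is final, so the runtime can report the exact comparer class
            // it will return, and later devirtualization can turn interface calls on that
            // comparer into direct, inlineable calls.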
19783             const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
19784             const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
19785
19786             if (isFinalType)
19787             {
19788                 result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
19789                 JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
19790                         result != nullptr ? eeGetClassName(result) : "unknown");
19791             }
19792             else
19793             {
19794                 JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
19795             }
19796
19797             break;
19798         }
19799
19800         default:
19801         {
19802             JITDUMP("This special intrinsic not handled, sorry...\n");
19803             break;
19804         }
19805     }
19806
19807     return result;
19808 }
19809
19810 //------------------------------------------------------------------------
19811 // impAllocateToken: allocate a CORINFO_RESOLVED_TOKEN in jit-allocated memory and initialize it.
19812 //
19813 // Arguments:
19814 //    token - init value for the allocated token.
19815 //
19816 // Return Value:
19817 //    pointer to the token in jit-allocated memory.
19818 CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
19819 {
19820     CORINFO_RESOLVED_TOKEN* memory = (CORINFO_RESOLVED_TOKEN*)compGetMem(sizeof(token));
19821     *memory                        = token;
19822     return memory;
19823 }
19824
19825 //------------------------------------------------------------------------
19826 // SpillRetExprHelper: iterate through the argument trees and spill ret_exprs to local variables.
19827 //
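// GT_RET_EXPR nodes are placeholders for the results of calls that are still inline
// candidates; they cannot be cloned, so before the call is transformed each ret_expr
// found in its arguments (and in the 'this' argument) is assigned to a fresh temp and
// replaced by a use of that local.
//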
19828 class SpillRetExprHelper
19829 {
19830 public:
19831     SpillRetExprHelper(Compiler* comp) : comp(comp)
19832     {
19833     }
19834
19835     void StoreRetExprResultsInArgs(GenTreeCall* call)
19836     {
19837         GenTree* args = call->gtCallArgs;
19838         if (args != nullptr)
19839         {
19840             comp->fgWalkTreePre(&args, SpillRetExprVisitor, this);
19841         }
19842         GenTree* thisArg = call->gtCallObjp;
19843         if (thisArg != nullptr)
19844         {
19845             comp->fgWalkTreePre(&thisArg, SpillRetExprVisitor, this);
19846         }
19847     }
19848
19849 private:
19850     static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
19851     {
19852         assert((pTree != nullptr) && (*pTree != nullptr));
19853         GenTree* tree = *pTree;
19854         if ((tree->gtFlags & GTF_CALL) == 0)
19855         {
19856             // Trees with ret_expr are marked as GTF_CALL.
19857             return Compiler::WALK_SKIP_SUBTREES;
19858         }
19859         if (tree->OperGet() == GT_RET_EXPR)
19860         {
19861             SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
19862             walker->StoreRetExprAsLocalVar(pTree);
19863         }
19864         return Compiler::WALK_CONTINUE;
19865     }
19866
19867     void StoreRetExprAsLocalVar(GenTree** pRetExpr)
19868     {
19869         GenTree* retExpr = *pRetExpr;
19870         assert(retExpr->OperGet() == GT_RET_EXPR);
19871         JITDUMP("Store return expression %u  as a local var.\n", retExpr->gtTreeID);
19872         unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
19873         comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
19874         *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
19875     }
19876
19877 private:
19878     Compiler* comp;
19879 };
19880
19881 //------------------------------------------------------------------------
19882 // addFatPointerCandidate: mark the call as a fat pointer candidate and note that the method has one.
19883 //                         Spill any ret_exprs in the call's arguments, because they can't be cloned.
19884 //
19885 // Arguments:
19886 //    call - fat calli candidate
19887 //
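// Notes:
//    The candidate is only marked here; the call itself is expanded later, when a
//    runtime check distinguishes fat from regular targets, and the ret_exprs are
//    spilled up front because that expansion cannot clone them.
//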
19888 void Compiler::addFatPointerCandidate(GenTreeCall* call)
19889 {
19890     setMethodHasFatPointer();
19891     call->SetFatPointerCandidate();
19892     SpillRetExprHelper helper(this);
19893     helper.StoreRetExprResultsInArgs(call);
19894 }