Remove relocations for vtable chunks (#17147)
[platform/upstream/coreclr.git] / src / jit / importer.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
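// The macros below wrap the importer's common verification pattern: Verify() reports a
// verification failure when its condition fails, VerifyOrReturn() additionally returns
// from the caller, and VerifyOrReturnSpeculative() suppresses the report (returning
// false) when the check is merely speculative.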
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 inline void Compiler::impPushNullObjRefOnStack()
149 {
150     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
151 }
152
153 // This method gets called when we run into unverifiable code
154 // (and we are verifying the method)
155
156 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
157                                                           DEBUGARG(unsigned line))
158 {
159     // Remember that the code is not verifiable
160     // Note that the method may yet pass canSkipMethodVerification(),
161     // and so the presence of unverifiable code may not be an issue.
162     tiIsVerifiableCode = FALSE;
163
164 #ifdef DEBUG
165     const char* tail = strrchr(file, '\\');
166     if (tail)
167     {
168         file = tail + 1;
169     }
170
171     if (JitConfig.JitBreakOnUnsafeCode())
172     {
173         assert(!"Unsafe code detected");
174     }
175 #endif
176
177     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
178             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
179
180     if (verNeedsVerification() || compIsForImportOnly())
181     {
182         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
183                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
184         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
185     }
186 }
187
188 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
189                                                                     DEBUGARG(unsigned line))
190 {
191     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
192             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
193
194 #ifdef DEBUG
195     //    BreakIfDebuggerPresent();
196     if (getBreakOnBadCode())
197     {
198         assert(!"Typechecking error");
199     }
200 #endif
201
202     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
203     UNREACHABLE();
204 }
205
206 // Helper function that tells us whether the IL instruction at the given code address
207 // consumes an address at the top of the stack. We use it to avoid unnecessarily
208 // marking locals as address-taken (lvAddrTaken).
209 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
210 {
211     assert(!compIsForInlining());
212
213     OPCODE opcode;
214
215     opcode = (OPCODE)getU1LittleEndian(codeAddr);
216
217     switch (opcode)
218     {
219         // case CEE_LDFLDA: We're taking this one out because if you have a sequence
220         // like
221         //
222         //          ldloca.0
223         //          ldflda whatever
224         //
225         // on a primitive-like struct, you end up after morphing with the address of a local
226         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
227         // for structs that contain other structs, which isn't a case we handle very
228         // well now, for other reasons.
229
230         case CEE_LDFLD:
231         {
232             // We won't collapse small fields. This is probably not the right place to have this
233             // check, but we're only using the function for this purpose, and it is easy to factor
234             // out if we need to do so.
235
236             CORINFO_RESOLVED_TOKEN resolvedToken;
237             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
238
239             CORINFO_CLASS_HANDLE clsHnd;
240             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
241
242             // Preserve 'small' int types
243             if (!varTypeIsSmall(lclTyp))
244             {
245                 lclTyp = genActualType(lclTyp);
246             }
247
248             if (varTypeIsSmall(lclTyp))
249             {
250                 return false;
251             }
252
253             return true;
254         }
255         default:
256             break;
257     }
258
259     return false;
260 }
261
262 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
263 {
264     pResolvedToken->tokenContext = impTokenLookupContextHandle;
265     pResolvedToken->tokenScope   = info.compScopeHnd;
266     pResolvedToken->token        = getU4LittleEndian(addr);
267     pResolvedToken->tokenType    = kind;
268
269     if (!tiVerificationNeeded)
270     {
271         info.compCompHnd->resolveToken(pResolvedToken);
272     }
273     else
274     {
275         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
276     }
277 }
278
279 /*****************************************************************************
280  *
281  *  Pop one tree from the stack.
282  */
283
284 StackEntry Compiler::impPopStack()
285 {
286     if (verCurrentState.esStackDepth == 0)
287     {
288         BADCODE("stack underflow");
289     }
290
291 #ifdef DEBUG
292 #if VERBOSE_VERIFY
293     if (VERBOSE && tiVerificationNeeded)
294     {
295         JITDUMP("\n");
296         printf(TI_DUMP_PADDING);
297         printf("About to pop from the stack: ");
298         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
299         ti.Dump();
300     }
301 #endif // VERBOSE_VERIFY
302 #endif // DEBUG
303
304     return verCurrentState.esStack[--verCurrentState.esStackDepth];
305 }
306
307 /*****************************************************************************
308  *
309  *  Peek at the n'th (0-based) tree from the top of the stack.
310  */
311
312 StackEntry& Compiler::impStackTop(unsigned n)
313 {
314     if (verCurrentState.esStackDepth <= n)
315     {
316         BADCODE("stack underflow");
317     }
318
319     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
320 }
321
322 unsigned Compiler::impStackHeight()
323 {
324     return verCurrentState.esStackDepth;
325 }
326
327 /*****************************************************************************
328  *  Some of the trees are spilled specially. While unspilling them, or
329  *  making a copy, they need special handling. The function below
330  *  enumerates the operators possible after spilling.
331  */
332
333 #ifdef DEBUG // only used in asserts
334 static bool impValidSpilledStackEntry(GenTree* tree)
335 {
336     if (tree->gtOper == GT_LCL_VAR)
337     {
338         return true;
339     }
340
341     if (tree->OperIsConst())
342     {
343         return true;
344     }
345
346     return false;
347 }
348 #endif
349
350 /*****************************************************************************
351  *
352  *  The following logic is used to save/restore stack contents.
353  *  If 'copy' is true, then we make a copy of the trees on the stack. These
354  *  have to all be cloneable/spilled values.
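 *
 *  A minimal usage sketch (illustrative, not taken from a particular call site):
 *
 *      SavedStack save;
 *      impSaveStackState(&save, false);   // snapshot the evaluation stack in place
 *      // ... import something that may disturb the stack ...
 *      impRestoreStackState(&save);       // roll the evaluation stack back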
355  */
356
357 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
358 {
359     savePtr->ssDepth = verCurrentState.esStackDepth;
360
361     if (verCurrentState.esStackDepth)
362     {
363         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
364         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
365
366         if (copy)
367         {
368             StackEntry* table = savePtr->ssTrees;
369
370             /* Make a fresh copy of all the stack entries */
371
372             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
373             {
374                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
375                 GenTree* tree     = verCurrentState.esStack[level].val;
376
377                 assert(impValidSpilledStackEntry(tree));
378
379                 switch (tree->gtOper)
380                 {
381                     case GT_CNS_INT:
382                     case GT_CNS_LNG:
383                     case GT_CNS_DBL:
384                     case GT_CNS_STR:
385                     case GT_LCL_VAR:
386                         table->val = gtCloneExpr(tree);
387                         break;
388
389                     default:
390                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
391                         break;
392                 }
393             }
394         }
395         else
396         {
397             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
398         }
399     }
400 }
401
402 void Compiler::impRestoreStackState(SavedStack* savePtr)
403 {
404     verCurrentState.esStackDepth = savePtr->ssDepth;
405
406     if (verCurrentState.esStackDepth)
407     {
408         memcpy(verCurrentState.esStack, savePtr->ssTrees,
409                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
410     }
411 }
412
413 /*****************************************************************************
414  *
415  *  Get the tree list started for a new basic block.
416  */
417 inline void Compiler::impBeginTreeList()
418 {
419     assert(impTreeList == nullptr && impTreeLast == nullptr);
420
421     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
422 }
423
424 /*****************************************************************************
425  *
426  *  Store the given start and end stmt in the given basic block. This is
427  *  mostly called by impEndTreeList(BasicBlock *block). It is called
428  *  directly only for handling CEE_LEAVEs out of finally-protected try's.
429  *  directly only for handling CEE_LEAVEs out of finally-protected try regions.
430
431 inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
432 {
433     assert(firstStmt->gtOper == GT_STMT);
434     assert(lastStmt->gtOper == GT_STMT);
435
436     /* Make the list circular, so that we can easily walk it backwards */
437
438     firstStmt->gtPrev = lastStmt;
439
440     /* Store the tree list in the basic block */
441
442     block->bbTreeList = firstStmt;
443
444     /* The block should not already be marked as imported */
445     assert((block->bbFlags & BBF_IMPORTED) == 0);
446
447     block->bbFlags |= BBF_IMPORTED;
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the current tree list in the given basic block.
453  */
454
455 inline void Compiler::impEndTreeList(BasicBlock* block)
456 {
457     assert(impTreeList->gtOper == GT_BEG_STMTS);
458
459     GenTree* firstTree = impTreeList->gtNext;
460
461     if (!firstTree)
462     {
463         /* The block should not already be marked as imported */
464         assert((block->bbFlags & BBF_IMPORTED) == 0);
465
466         // Empty block. Just mark it as imported
467         block->bbFlags |= BBF_IMPORTED;
468     }
469     else
470     {
471         // Ignore the GT_BEG_STMTS
472         assert(firstTree->gtPrev == impTreeList);
473
474         impEndTreeList(block, firstTree, impTreeLast);
475     }
476
477 #ifdef DEBUG
478     if (impLastILoffsStmt != nullptr)
479     {
480         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
481         impLastILoffsStmt                          = nullptr;
482     }
483
484     impTreeList = impTreeLast = nullptr;
485 #endif
486 }
487
488 /*****************************************************************************
489  *
490  *  Check that storing the given tree doesn't mess up the semantic order. Note
491  *  that this has only limited value as we can only check [0..chkLevel).
492  */
493
494 inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
495 {
496 #ifndef DEBUG
497     return;
498 #else
499     assert(stmt->gtOper == GT_STMT);
500
501     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
502     {
503         chkLevel = verCurrentState.esStackDepth;
504     }
505
506     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
507     {
508         return;
509     }
510
511     GenTree* tree = stmt->gtStmt.gtStmtExpr;
512
513     // Calls can only be appended if there are no GTF_GLOB_EFFECT trees on the stack
514
515     if (tree->gtFlags & GTF_CALL)
516     {
517         for (unsigned level = 0; level < chkLevel; level++)
518         {
519             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
520         }
521     }
522
523     if (tree->gtOper == GT_ASG)
524     {
525         // For an assignment to a local variable, all references to that
526         // variable have to be spilled. If it is aliased, all calls and
527         // indirect accesses have to be spilled
528
529         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
530         {
531             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
532             for (unsigned level = 0; level < chkLevel; level++)
533             {
534                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
535                 assert(!lvaTable[lclNum].lvAddrExposed ||
536                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
537             }
538         }
539
540         // If the access may be to global memory, all side effects have to be spilled.
541
542         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
543         {
544             for (unsigned level = 0; level < chkLevel; level++)
545             {
546                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
547             }
548         }
549     }
550 #endif
551 }
552
553 /*****************************************************************************
554  *
555  *  Append the given GT_STMT node to the current block's tree list.
556  *  [0..chkLevel) is the portion of the stack which we will check for
557  *    interference with stmt and spill if needed.
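 *    (chkLevel == CHECK_SPILL_ALL means the whole stack is checked, while
 *    CHECK_SPILL_NONE skips the interference check entirely.)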
558  */
559
560 inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
561 {
562     assert(stmt->gtOper == GT_STMT);
563     noway_assert(impTreeLast != nullptr);
564
565     /* If the statement being appended has any side-effects, check the stack
566        to see if anything needs to be spilled to preserve correct ordering. */
567
568     GenTree* expr  = stmt->gtStmt.gtStmtExpr;
569     unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
570
571     // Assignments to (unaliased) locals don't count as a side-effect, as
572     // we handle them specially using impSpillLclRefs(). Temp locals should
573     // be fine too.
574
575     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
576         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
577     {
578         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
579         assert(flags == (op2Flags | GTF_ASG));
580         flags = op2Flags;
581     }
582
583     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
584     {
585         chkLevel = verCurrentState.esStackDepth;
586     }
587
588     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
589     {
590         assert(chkLevel <= verCurrentState.esStackDepth);
591
592         if (flags)
593         {
594             // If there is a call, we have to spill global refs
595             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
596
597             if (expr->gtOper == GT_ASG)
598             {
599                 GenTree* lhs = expr->gtGetOp1();
600                 // If we are assigning to a global ref, we have to spill global refs on stack.
601                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
602                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
603                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
604                 if (!expr->OperIsBlkOp())
605                 {
606                     // If we are assigning to a global ref, we have to spill global refs on stack
607                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
608                     {
609                         spillGlobEffects = true;
610                     }
611                 }
612                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
613                          ((lhs->OperGet() == GT_LCL_VAR) &&
614                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
615                 {
616                     spillGlobEffects = true;
617                 }
618             }
619
620             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
621         }
622         else
623         {
624             impSpillSpecialSideEff();
625         }
626     }
627
628     impAppendStmtCheck(stmt, chkLevel);
629
630     /* Point 'prev' at the previous node, so that we can walk backwards */
631
632     stmt->gtPrev = impTreeLast;
633
634     /* Append the expression statement to the list */
635
636     impTreeLast->gtNext = stmt;
637     impTreeLast         = stmt;
638
639 #ifdef FEATURE_SIMD
640     impMarkContiguousSIMDFieldAssignments(stmt);
641 #endif
642
643     /* Once we set impCurStmtOffs in an appended tree, we are ready to
644        report the following offsets. So reset impCurStmtOffs */
645
646     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
647     {
648         impCurStmtOffsSet(BAD_IL_OFFSET);
649     }
650
651 #ifdef DEBUG
652     if (impLastILoffsStmt == nullptr)
653     {
654         impLastILoffsStmt = stmt;
655     }
656
657     if (verbose)
658     {
659         printf("\n\n");
660         gtDispTree(stmt);
661     }
662 #endif
663 }
664
665 /*****************************************************************************
666  *
667  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
668  */
669
670 inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
671 {
672     assert(stmt->gtOper == GT_STMT);
673     assert(stmtBefore->gtOper == GT_STMT);
674
675     GenTree* stmtPrev  = stmtBefore->gtPrev;
676     stmt->gtPrev       = stmtPrev;
677     stmt->gtNext       = stmtBefore;
678     stmtPrev->gtNext   = stmt;
679     stmtBefore->gtPrev = stmt;
680 }
681
682 /*****************************************************************************
683  *
684  *  Append the given expression tree to the current block's tree list.
685  *  Return the newly created statement.
686  */
687
688 GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
689 {
690     assert(tree);
691
692     /* Allocate an 'expression statement' node */
693
694     GenTree* expr = gtNewStmt(tree, offset);
695
696     /* Append the statement to the current block's stmt list */
697
698     impAppendStmt(expr, chkLevel);
699
700     return expr;
701 }
702
703 /*****************************************************************************
704  *
705  *  Insert the given expression tree before GT_STMT "stmtBefore"
706  */
707
708 void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
709 {
710     assert(stmtBefore->gtOper == GT_STMT);
711
712     /* Allocate an 'expression statement' node */
713
714     GenTree* expr = gtNewStmt(tree, offset);
715
716     /* Append the statement to the current block's stmt list */
717
718     impInsertStmtBefore(expr, stmtBefore);
719 }
720
721 /*****************************************************************************
722  *
723  *  Append an assignment of the given value to a temp to the current tree list.
724  *  curLevel is the stack level for which the spill to the temp is being done.
725  */
726
727 void Compiler::impAssignTempGen(unsigned    tmp,
728                                 GenTree*    val,
729                                 unsigned    curLevel,
730                                 GenTree**   pAfterStmt, /* = NULL */
731                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
732                                 BasicBlock* block       /* = NULL */
733                                 )
734 {
735     GenTree* asg = gtNewTempAssign(tmp, val);
736
737     if (!asg->IsNothingNode())
738     {
739         if (pAfterStmt)
740         {
741             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
742             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
743         }
744         else
745         {
746             impAppendTree(asg, curLevel, impCurStmtOffs);
747         }
748     }
749 }
750
751 /*****************************************************************************
752  * same as above, but handle the valueclass case too
753  */
754
755 void Compiler::impAssignTempGen(unsigned             tmpNum,
756                                 GenTree*             val,
757                                 CORINFO_CLASS_HANDLE structType,
758                                 unsigned             curLevel,
759                                 GenTree**            pAfterStmt, /* = NULL */
760                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
761                                 BasicBlock*          block       /* = NULL */
762                                 )
763 {
764     GenTree* asg;
765
766     if (varTypeIsStruct(val))
767     {
768         assert(tmpNum < lvaCount);
769         assert(structType != NO_CLASS_HANDLE);
770
771         // If the method is non-verifiable, the assert may not hold,
772         // so at least ignore it when verification is turned on,
773         // since any block that tries to use the temp would have failed verification.
774         var_types varType = lvaTable[tmpNum].lvType;
775         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
776         lvaSetStruct(tmpNum, structType, false);
777
778         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
779         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
780         // that has been passed in for the value being assigned to the temp, in which case we
781         // need to set 'val' to that same type.
782         // Note also that if we always normalized the types of any node that might be a struct
783         // type, this would not be necessary - but that requires additional JIT/EE interface
784         // calls that may not actually be required - e.g. if we only access a field of a struct.
785
786         val->gtType = lvaTable[tmpNum].lvType;
787
788         GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
789         asg          = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
790     }
791     else
792     {
793         asg = gtNewTempAssign(tmpNum, val);
794     }
795
796     if (!asg->IsNothingNode())
797     {
798         if (pAfterStmt)
799         {
800             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
801             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
802         }
803         else
804         {
805             impAppendTree(asg, curLevel, impCurStmtOffs);
806         }
807     }
808 }
809
810 /*****************************************************************************
811  *
812  *  Pop the given number of values from the stack and return a list node with
813  *  their values.
814  *  The 'prefixTree' argument may optionally contain an argument
815  *  list that is prepended to the list returned from this function.
816  *
817  *  The notion of prepended is a bit misleading in that the list is backwards
818  *  from the way I would expect: The first element popped is at the end of
819  *  the returned list, and prefixTree is 'before' that, meaning closer to
820  *  the end of the list.  To get to prefixTree, you have to walk to the
821  *  end of the list.
822  *
823  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
824  *  such we reverse its meaning such that returnValue has a reversed
825  *  prefixTree at the head of the list.
826  */
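 *
 *  A worked example (illustrative): with arguments a and b pushed in that order
 *  (so b is on top of the stack) and prefixTree = P1 -> P2, the returned list is
 *    ARG_ORDER_L2R:  a -> b -> P1 -> P2   (prefixTree at the tail)
 *    ARG_ORDER_R2L:  P2 -> P1 -> a -> b   (reversed prefixTree at the head)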
827
828 GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
829 {
830     assert(sig == nullptr || count == sig->numArgs);
831
832     CORINFO_CLASS_HANDLE structType;
833     GenTreeArgList*      treeList;
834
835     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
836     {
837         treeList = nullptr;
838     }
839     else
840     { // ARG_ORDER_L2R
841         treeList = prefixTree;
842     }
843
844     while (count--)
845     {
846         StackEntry se   = impPopStack();
847         typeInfo   ti   = se.seTypeInfo;
848         GenTree*   temp = se.val;
849
850         if (varTypeIsStruct(temp))
851         {
852             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
853             assert(ti.IsType(TI_STRUCT));
854             structType = ti.GetClassHandleForValueClass();
855 #ifdef DEBUG
856             if (verbose)
857             {
858                 printf("Calling impNormStructVal on:\n");
859                 gtDispTree(temp);
860             }
861 #endif
862             temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
863 #ifdef DEBUG
864             if (verbose)
865             {
866                 printf("resulting tree:\n");
867                 gtDispTree(temp);
868             }
869 #endif
870         }
871
872         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
873         treeList = gtNewListNode(temp, treeList);
874     }
875
876     if (sig != nullptr)
877     {
878         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
879             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
880         {
881             // Make sure that all valuetypes (including enums) that we push are loaded.
882             // This is to guarantee that if a GC is triggered from the prestub of this method,
883             // all valuetypes in the method signature are already loaded.
884             // We need to be able to find the size of the valuetypes, but we cannot
885             // do a class-load from within GC.
886             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
887         }
888
889         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
890         CORINFO_CLASS_HANDLE    argClass;
891         CORINFO_CLASS_HANDLE    argRealClass;
892         GenTreeArgList*         args;
893
894         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
895         {
896             PREFIX_ASSUME(args != nullptr);
897
898             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
899
900             // insert implied casts (from float to double or double to float)
901
902             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
903             {
904                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), false, TYP_DOUBLE);
905             }
906             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
907             {
908                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), false, TYP_FLOAT);
909             }
910
911             // insert any widening or narrowing casts for backwards compatibility
912
913             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
914
915             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
916                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
917             {
918                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett
919                 // JIT, but it stopped working in Whidbey when we started passing simple valuetypes as underlying
920                 // primitive types.
921                 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
922                 // details).
923                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
924                 {
925                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
926                 }
927
928                 // Make sure that all valuetypes (including enums) that we push are loaded.
929             // This is to guarantee that if a GC is triggered from the prestub of this method,
930                 // all valuetypes in the method signature are already loaded.
931                 // We need to be able to find the size of the valuetypes, but we cannot
932                 // do a class-load from within GC.
933                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
934             }
935
936             argLst = info.compCompHnd->getArgNext(argLst);
937         }
938     }
939
940     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
941     {
942         // Prepend the prefixTree
943
944         // Simple in-place reversal to place treeList
945         // at the end of a reversed prefixTree
946         while (prefixTree != nullptr)
947         {
948             GenTreeArgList* next = prefixTree->Rest();
949             prefixTree->Rest()   = treeList;
950             treeList             = prefixTree;
951             prefixTree           = next;
952         }
953     }
954     return treeList;
955 }
956
957 /*****************************************************************************
958  *
959  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
960  *  The first "skipReverseCount" items are not reversed.
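 *  For example (illustrative): if impPopList returns n1 -> n2 -> n3 -> n4 and
 *  skipReverseCount == 1, the result is n1 -> n4 -> n3 -> n2.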
961  */
962
963 GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
964
965 {
966     assert(skipReverseCount <= count);
967
968     GenTreeArgList* list = impPopList(count, sig);
969
970     // reverse the list
971     if (list == nullptr || skipReverseCount == count)
972     {
973         return list;
974     }
975
976     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
977     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
978
979     if (skipReverseCount == 0)
980     {
981         ptr = list;
982     }
983     else
984     {
985         lastSkipNode = list;
986         // Get to the first node that needs to be reversed
987         for (unsigned i = 0; i < skipReverseCount - 1; i++)
988         {
989             lastSkipNode = lastSkipNode->Rest();
990         }
991
992         PREFIX_ASSUME(lastSkipNode != nullptr);
993         ptr = lastSkipNode->Rest();
994     }
995
996     GenTreeArgList* reversedList = nullptr;
997
998     do
999     {
1000         GenTreeArgList* tmp = ptr->Rest();
1001         ptr->Rest()         = reversedList;
1002         reversedList        = ptr;
1003         ptr                 = tmp;
1004     } while (ptr != nullptr);
1005
1006     if (skipReverseCount)
1007     {
1008         lastSkipNode->Rest() = reversedList;
1009         return list;
1010     }
1011     else
1012     {
1013         return reversedList;
1014     }
1015 }
1016
1017 /*****************************************************************************
1018    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1019    class of type 'clsHnd'.  It returns the tree that should be appended to the
1020    statement list that represents the assignment.
1021    Temp assignments may be appended to impTreeList if spilling is necessary.
1022    curLevel is the stack level for which a spill may be being done.
1023  */
1024
1025 GenTree* Compiler::impAssignStruct(GenTree*             dest,
1026                                    GenTree*             src,
1027                                    CORINFO_CLASS_HANDLE structHnd,
1028                                    unsigned             curLevel,
1029                                    GenTree**            pAfterStmt, /* = NULL */
1030                                    BasicBlock*          block       /* = NULL */
1031                                    )
1032 {
1033     assert(varTypeIsStruct(dest));
1034
1035     while (dest->gtOper == GT_COMMA)
1036     {
1037         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1038
1039         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1040         if (pAfterStmt)
1041         {
1042             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1043         }
1044         else
1045         {
1046             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1047         }
1048
1049         // set dest to the second thing
1050         dest = dest->gtOp.gtOp2;
1051     }
1052
1053     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1054            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1055
1056     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1057         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1058     {
1059         // Make this a NOP
1060         return gtNewNothingNode();
1061     }
1062
1063     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1064     // or re-creating a Blk node if it is.
1065     GenTree* destAddr;
1066
1067     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1068     {
1069         destAddr = dest->gtOp.gtOp1;
1070     }
1071     else
1072     {
1073         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1074     }
1075
1076     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1077 }
1078
1079 /*****************************************************************************/
1080
1081 GenTree* Compiler::impAssignStructPtr(GenTree*             destAddr,
1082                                       GenTree*             src,
1083                                       CORINFO_CLASS_HANDLE structHnd,
1084                                       unsigned             curLevel,
1085                                       GenTree**            pAfterStmt, /* = NULL */
1086                                       BasicBlock*          block       /* = NULL */
1087                                       )
1088 {
1089     var_types destType;
1090     GenTree*  dest      = nullptr;
1091     unsigned  destFlags = 0;
1092
1093 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1094     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1095     // TODO-ARM-BUG: Does ARM need this?
1096     // TODO-ARM64-BUG: Does ARM64 need this?
1097     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1098            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1099            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1100            (src->TypeGet() != TYP_STRUCT &&
1101             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1102 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1103     assert(varTypeIsStruct(src));
1104
1105     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1106            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1107            src->gtOper == GT_COMMA ||
1108            (src->TypeGet() != TYP_STRUCT &&
1109             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1110 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1111     if (destAddr->OperGet() == GT_ADDR)
1112     {
1113         GenTree* destNode = destAddr->gtGetOp1();
1114         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1115         // will be morphed, don't insert an OBJ(ADDR).
1116         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1117 #ifndef LEGACY_BACKEND
1118             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1119 #endif // !LEGACY_BACKEND
1120                 )
1121         {
1122             dest = destNode;
1123         }
1124         destType = destNode->TypeGet();
1125     }
1126     else
1127     {
1128         destType = src->TypeGet();
1129     }
1130
1131     var_types asgType = src->TypeGet();
1132
1133     if (src->gtOper == GT_CALL)
1134     {
1135         if (src->AsCall()->TreatAsHasRetBufArg(this))
1136         {
1137             // Case of call returning a struct via hidden retbuf arg
1138
1139             // insert the return value buffer into the argument list as first byref parameter
1140             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1141
1142             // now returns void, not a struct
1143             src->gtType = TYP_VOID;
1144
1145             // return the morphed call node
1146             return src;
1147         }
1148         else
1149         {
1150             // Case of call returning a struct in one or more registers.
1151
1152             var_types returnType = (var_types)src->gtCall.gtReturnType;
1153
1154             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1155             src->gtType = genActualType(returnType);
1156
1157             // First we try to change this to "LclVar/LclFld = call"
1158             //
1159             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1160             {
1161                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1162                 // That is, the IR will be of the form lclVar = call for multi-reg return
1163                 //
1164                 GenTree* lcl = destAddr->gtOp.gtOp1;
1165                 if (src->AsCall()->HasMultiRegRetVal())
1166                 {
1167                     // Mark the struct LclVar as used in a MultiReg return context
1168                     //  which currently makes it non promotable.
1169                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1170                     // handle multireg returns.
1171                     lcl->gtFlags |= GTF_DONT_CSE;
1172                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1173                 }
1174                 else // The call result is not a multireg return
1175                 {
1176                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1177                     lcl->ChangeOper(GT_LCL_FLD);
1178                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1179                     lcl->gtType = src->gtType;
1180                     asgType     = src->gtType;
1181                 }
1182
1183                 dest = lcl;
1184
1185 #if defined(_TARGET_ARM_)
1186                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1187                 // but that method has not been updated to include ARM.
1188                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1189                 lcl->gtFlags |= GTF_DONT_CSE;
1190 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1191                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1192                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1193
1194                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1195                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1196                 // handle multireg returns.
1197                 // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
1198                 // non-multireg returns.
1199                 lcl->gtFlags |= GTF_DONT_CSE;
1200                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1201 #endif
1202             }
1203             else // we don't have a GT_ADDR of a GT_LCL_VAR
1204             {
1205                 // !!! The destination could be on stack. !!!
1206                 // This flag will let us choose the correct write barrier.
1207                 asgType   = returnType;
1208                 destFlags = GTF_IND_TGTANYWHERE;
1209             }
1210         }
1211     }
1212     else if (src->gtOper == GT_RET_EXPR)
1213     {
1214         GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1215         noway_assert(call->gtOper == GT_CALL);
1216
1217         if (call->HasRetBufArg())
1218         {
1219             // insert the return value buffer into the argument list as first byref parameter
1220             call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1221
1222             // now returns void, not a struct
1223             src->gtType  = TYP_VOID;
1224             call->gtType = TYP_VOID;
1225
1226             // We have already appended the write to 'dest' through the GT_CALL's args
1227             // So now we just return an empty node (pruning the GT_RET_EXPR)
1228             return src;
1229         }
1230         else
1231         {
1232             // Case of inline method returning a struct in one or more registers.
1233             //
1234             var_types returnType = (var_types)call->gtReturnType;
1235
1236             // We won't need a return buffer
1237             asgType      = returnType;
1238             src->gtType  = genActualType(returnType);
1239             call->gtType = src->gtType;
1240
1241             // If we've changed the type, and it no longer matches a local destination,
1242             // we must use an indirection.
1243             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1244             {
1245                 dest = nullptr;
1246             }
1247
1248             // !!! The destination could be on stack. !!!
1249             // This flag will let us choose the correct write barrier.
1250             destFlags = GTF_IND_TGTANYWHERE;
1251         }
1252     }
1253     else if (src->OperIsBlk())
1254     {
1255         asgType = impNormStructType(structHnd);
1256         if (src->gtOper == GT_OBJ)
1257         {
1258             assert(src->gtObj.gtClass == structHnd);
1259         }
1260     }
1261     else if (src->gtOper == GT_INDEX)
1262     {
1263         asgType = impNormStructType(structHnd);
1264         assert(src->gtIndex.gtStructElemClass == structHnd);
1265     }
1266     else if (src->gtOper == GT_MKREFANY)
1267     {
1268         // Since we are assigning the result of a GT_MKREFANY,
1269         // "destAddr" must point to a refany.
1270
1271         GenTree* destAddrClone;
1272         destAddr =
1273             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1274
1275         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1276         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1277         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1278         GenTree*       ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1279         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1280         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1281         GenTree* typeSlot =
1282             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1283
1284         // append the assign of the pointer value
1285         GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1286         if (pAfterStmt)
1287         {
1288             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1289         }
1290         else
1291         {
1292             impAppendTree(asg, curLevel, impCurStmtOffs);
1293         }
1294
1295         // return the assign of the type value, to be appended
1296         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1297     }
1298     else if (src->gtOper == GT_COMMA)
1299     {
1300         // The second thing is the struct or its address.
1301         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1302         if (pAfterStmt)
1303         {
1304             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1305         }
1306         else
1307         {
1308             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1309         }
1310
1311         // Evaluate the second thing using recursion.
1312         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1313     }
1314     else if (src->IsLocal())
1315     {
1316         asgType = src->TypeGet();
1317     }
1318     else if (asgType == TYP_STRUCT)
1319     {
1320         asgType     = impNormStructType(structHnd);
1321         src->gtType = asgType;
1322 #ifdef LEGACY_BACKEND
1323         if (asgType == TYP_STRUCT)
1324         {
1325             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1326             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1327         }
1328 #endif
1329     }
1330     if (dest == nullptr)
1331     {
1332         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1333         // if this is a known struct type.
1334         if (asgType == TYP_STRUCT)
1335         {
1336             dest = gtNewObjNode(structHnd, destAddr);
1337             gtSetObjGcInfo(dest->AsObj());
1338             // Although an obj as a call argument was always assumed to be a globRef
1339             // (which is itself overly conservative), that is not true of the operands
1340             // of a block assignment.
1341             dest->gtFlags &= ~GTF_GLOB_REF;
1342             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1343         }
1344         else if (varTypeIsStruct(asgType))
1345         {
1346             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1347         }
1348         else
1349         {
1350             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1351         }
1352     }
1353     else
1354     {
1355         dest->gtType = asgType;
1356     }
1357
1358     dest->gtFlags |= destFlags;
1359     destFlags = dest->gtFlags;
1360
1361     // return an assignment node, to be appended
1362     GenTree* asgNode = gtNewAssignNode(dest, src);
1363     gtBlockOpInit(asgNode, dest, src, false);
1364
1365     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1366     // of assignments.
1367     if ((destFlags & GTF_DONT_CSE) == 0)
1368     {
1369         dest->gtFlags &= ~(GTF_DONT_CSE);
1370     }
1371     return asgNode;
1372 }
1373
1374 /*****************************************************************************
1375    Given a struct value, and the class handle for that structure, return
1376    the expression for the address of that structure value.
1377
1378    willDeref - whether the caller guarantees to dereference the pointer.
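    (When willDeref is true and the value is already a GT_OBJ, the OBJ's existing address
    is returned directly instead of wrapping the value in a new GT_ADDR node.)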
1379 */
1380
1381 GenTree* Compiler::impGetStructAddr(GenTree*             structVal,
1382                                     CORINFO_CLASS_HANDLE structHnd,
1383                                     unsigned             curLevel,
1384                                     bool                 willDeref)
1385 {
1386     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1387
1388     var_types type = structVal->TypeGet();
1389
1390     genTreeOps oper = structVal->gtOper;
1391
1392     if (oper == GT_OBJ && willDeref)
1393     {
1394         assert(structVal->gtObj.gtClass == structHnd);
1395         return (structVal->gtObj.Addr());
1396     }
1397     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
1398              structVal->OperIsSimdHWIntrinsic())
1399     {
1400         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1401
1402         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1403
1404         // The 'return value' is now the temp itself
1405
1406         type          = genActualType(lvaTable[tmpNum].TypeGet());
1407         GenTree* temp = gtNewLclvNode(tmpNum, type);
1408         temp          = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1409         return temp;
1410     }
1411     else if (oper == GT_COMMA)
1412     {
1413         assert(structVal->gtOp.gtOp2->gtType == type); // The second operand is the struct
1414
1415         GenTree* oldTreeLast  = impTreeLast;
1416         structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1417         structVal->gtType     = TYP_BYREF;
1418
1419         if (oldTreeLast != impTreeLast)
1420         {
1421             // Some temp assignment statement was placed on the statement list
1422             // for Op2, but that would be out of order with op1, so we need to
1423             // spill op1 onto the statement list after whatever was last
1424             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1425             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1426             structVal->gtOp.gtOp1 = gtNewNothingNode();
1427         }
1428
1429         return (structVal);
1430     }
1431
1432     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1433 }
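
// Example usage (a minimal illustrative sketch; 'call' is assumed to be a struct-returning
// GT_CALL and 'clsHnd' its class handle):
//
//     GenTree* addr = impGetStructAddr(call, clsHnd, (unsigned)CHECK_SPILL_ALL, /* willDeref */ true);
//
// For a GT_CALL source this spills the value to a fresh temp and returns
// GT_ADDR(GT_LCL_VAR tmp) typed as TYP_BYREF; for a GT_OBJ with willDeref it simply
// returns the existing address operand.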
1434
1435 //------------------------------------------------------------------------
1436 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1437 //                    and optionally determine the GC layout of the struct.
1438 //
1439 // Arguments:
1440 //    structHnd       - The class handle for the struct type of interest.
1441 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1442 //                      into which the gcLayout will be written.
1443 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1444 //                      which will be set to the number of GC fields in the struct.
1445 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1446 //                      type, set to the SIMD base type
1447 //
1448 // Return Value:
1449 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1450 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1451 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1452 //
1453 // Assumptions:
1454 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1455 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1456 //
1457 // Notes:
1458 //    Normalizing the type involves examining the struct type to determine if it should
1459 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1460 //    for full enregistration, e.g. TYP_SIMD16.
1461
1462 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1463                                       BYTE*                gcLayout,
1464                                       unsigned*            pNumGCVars,
1465                                       var_types*           pSimdBaseType)
1466 {
1467     assert(structHnd != NO_CLASS_HANDLE);
1468
1469     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1470     var_types   structType  = TYP_STRUCT;
1471
1472     // On CoreCLR the check for GC pointers includes a "may" to account for the special
1473     // byref-like structs such as Span<T>. The CORINFO_FLG_CONTAINS_STACK_PTR flag is the
1474     // relevant bit: when it is set, the struct contains a ByRef that could be either a GC
1475     // pointer or a native pointer.
1476     const bool mayContainGCPtrs =
1477         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1478
1479 #ifdef FEATURE_SIMD
1480     // Check to see if this is a SIMD type.
1481     if (featureSIMD && !mayContainGCPtrs)
1482     {
1483         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1484
1485         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1486         {
1487             unsigned int sizeBytes;
1488             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1489             if (simdBaseType != TYP_UNKNOWN)
1490             {
1491                 assert(sizeBytes == originalSize);
1492                 structType = getSIMDTypeForSize(sizeBytes);
1493                 if (pSimdBaseType != nullptr)
1494                 {
1495                     *pSimdBaseType = simdBaseType;
1496                 }
1497                 // Also indicate that we use floating point registers.
1498                 compFloatingPointUsed = true;
1499             }
1500         }
1501     }
1502 #endif // FEATURE_SIMD
1503
1504     // Fetch GC layout info if requested
1505     if (gcLayout != nullptr)
1506     {
1507         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1508
1509         // Verify that the quick test up above via the class attributes gave a
1510         // safe view of the type's GCness.
1511         //
1512         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1513         // does not report any gc fields.
1514
1515         assert(mayContainGCPtrs || (numGCVars == 0));
1516
1517         if (pNumGCVars != nullptr)
1518         {
1519             *pNumGCVars = numGCVars;
1520         }
1521     }
1522     else
1523     {
1524         // Can't safely ask for number of GC pointers without also
1525         // asking for layout.
1526         assert(pNumGCVars == nullptr);
1527     }
1528
1529     return structType;
1530 }
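
// Example (illustrative sketch; 'hnd' is assumed to be the class handle of a 16-byte SIMD
// struct such as System.Numerics.Vector4, with FEATURE_SIMD enabled):
//
//     var_types simdBaseType = TYP_UNKNOWN;
//     var_types normType     = impNormStructType(hnd, nullptr, nullptr, &simdBaseType);
//
// Here 'normType' would be TYP_SIMD16 and 'simdBaseType' TYP_FLOAT; a struct that may
// contain GC pointers is never retyped and stays TYP_STRUCT.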
1531
1532 //****************************************************************************
1533 //  Given a TYP_STRUCT value 'structVal', make sure it is 'canonical': either an OBJ or a
1534 //  MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed into one later.
1535 //
1536 GenTree* Compiler::impNormStructVal(GenTree*             structVal,
1537                                     CORINFO_CLASS_HANDLE structHnd,
1538                                     unsigned             curLevel,
1539                                     bool                 forceNormalization /*=false*/)
1540 {
1541     assert(forceNormalization || varTypeIsStruct(structVal));
1542     assert(structHnd != NO_CLASS_HANDLE);
1543     var_types structType = structVal->TypeGet();
1544     bool      makeTemp   = false;
1545     if (structType == TYP_STRUCT)
1546     {
1547         structType = impNormStructType(structHnd);
1548     }
1549     bool                 alreadyNormalized = false;
1550     GenTreeLclVarCommon* structLcl         = nullptr;
1551
1552     genTreeOps oper = structVal->OperGet();
1553     switch (oper)
1554     {
1555         // GT_RETURN and GT_MKREFANY don't capture the handle.
1556         case GT_RETURN:
1557             break;
1558         case GT_MKREFANY:
1559             alreadyNormalized = true;
1560             break;
1561
1562         case GT_CALL:
1563             structVal->gtCall.gtRetClsHnd = structHnd;
1564             makeTemp                      = true;
1565             break;
1566
1567         case GT_RET_EXPR:
1568             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1569             makeTemp                         = true;
1570             break;
1571
1572         case GT_ARGPLACE:
1573             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1574             break;
1575
1576         case GT_INDEX:
1577             // This will be transformed to an OBJ later.
1578             alreadyNormalized                    = true;
1579             structVal->gtIndex.gtStructElemClass = structHnd;
1580             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1581             break;
1582
1583         case GT_FIELD:
1584             // Wrap it in a GT_OBJ.
1585             structVal->gtType = structType;
1586             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1587             break;
1588
1589         case GT_LCL_VAR:
1590         case GT_LCL_FLD:
1591             structLcl = structVal->AsLclVarCommon();
1592             // Wrap it in a GT_OBJ.
1593             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1594             __fallthrough;
1595
1596         case GT_OBJ:
1597         case GT_BLK:
1598         case GT_DYN_BLK:
1599         case GT_ASG:
1600             // These should already have the appropriate type.
1601             assert(structVal->gtType == structType);
1602             alreadyNormalized = true;
1603             break;
1604
1605         case GT_IND:
1606             assert(structVal->gtType == structType);
1607             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1608             alreadyNormalized = true;
1609             break;
1610
1611 #ifdef FEATURE_SIMD
1612         case GT_SIMD:
1613             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1614             break;
1615 #endif // FEATURE_SIMD
1616 #ifdef FEATURE_HW_INTRINSICS
1617         case GT_HWIntrinsic:
1618             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1619             break;
1620 #endif
1621
1622         case GT_COMMA:
1623         {
1624             // The second operand could be a block node, a GT_FIELD, a GT_SIMD, or a GT_COMMA node.
1625             GenTree* blockNode = structVal->gtOp.gtOp2;
1626             assert(blockNode->gtType == structType);
1627
1628             // Is this GT_COMMA(op1, GT_COMMA())?
1629             GenTree* parent = structVal;
1630             if (blockNode->OperGet() == GT_COMMA)
1631             {
1632                 // Find the last node in the comma chain.
1633                 do
1634                 {
1635                     assert(blockNode->gtType == structType);
1636                     parent    = blockNode;
1637                     blockNode = blockNode->gtOp.gtOp2;
1638                 } while (blockNode->OperGet() == GT_COMMA);
1639             }
1640
1641             if (blockNode->OperGet() == GT_FIELD)
1642             {
1643                 // If we have a GT_FIELD then wrap it in a GT_OBJ.
1644                 blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
1645             }
1646
1647 #ifdef FEATURE_SIMD
1648             if (blockNode->OperIsSIMDorSimdHWintrinsic())
1649             {
1650                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1651                 alreadyNormalized  = true;
1652             }
1653             else
1654 #endif
1655             {
1656                 noway_assert(blockNode->OperIsBlk());
1657
1658                 // Sink the GT_COMMA below the blockNode addr.
1659                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1660                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1661                 //
1662                 // In the case of a chained GT_COMMA, we sink the last
1663                 // GT_COMMA below the blockNode addr.
1664                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1665                 assert(blockNodeAddr->gtType == TYP_BYREF);
1666                 GenTree* commaNode    = parent;
1667                 commaNode->gtType     = TYP_BYREF;
1668                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1669                 blockNode->gtOp.gtOp1 = commaNode;
1670                 if (parent == structVal)
1671                 {
1672                     structVal = blockNode;
1673                 }
1674                 alreadyNormalized = true;
1675             }
1676         }
1677         break;
1678
1679         default:
1680             noway_assert(!"Unexpected node in impNormStructVal()");
1681             break;
1682     }
1683     structVal->gtType  = structType;
1684     GenTree* structObj = structVal;
1685
1686     if (!alreadyNormalized || forceNormalization)
1687     {
1688         if (makeTemp)
1689         {
1690             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1691
1692             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1693
1694             // The structVal is now the temp itself
1695
1696             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1697             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1698             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1699         }
1700         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1701         {
1702             // Wrap it in a GT_OBJ
1703             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1704         }
1705     }
1706
1707     if (structLcl != nullptr)
1708     {
1709         // An OBJ on an ADDR(LCL_VAR) can never raise an exception,
1710         // so we don't set GTF_EXCEPT here.
1711         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1712         {
1713             structObj->gtFlags &= ~GTF_GLOB_REF;
1714         }
1715     }
1716     else
1717     {
1718         // In general an OBJ is an indirection and could raise an exception.
1719         structObj->gtFlags |= GTF_EXCEPT;
1720     }
1721     return (structObj);
1722 }
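
// Example (illustrative sketch; 'lclNode' is assumed to be a TYP_STRUCT GT_LCL_VAR and
// 'hnd' its class handle):
//
//     GenTree* canonical = impNormStructVal(lclNode, hnd, (unsigned)CHECK_SPILL_ALL);
//
// The local is wrapped as OBJ(ADDR(LCL_VAR)); because the source is a local that is not
// an implicit byref parameter, GTF_GLOB_REF is cleared on the resulting OBJ.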
1723
1724 /******************************************************************************/
1725 // Given a type token, generate code that will evaluate to the correct
1726 // handle representation of that token (type handle, field handle, or method handle)
1727 //
1728 // For most cases, the handle is determined at compile-time, and the code
1729 // generated is simply an embedded handle.
1730 //
1731 // Run-time lookup is required if the enclosing method is shared between instantiations
1732 // and the token refers to formal type parameters whose instantiation is not known
1733 // at compile-time.
1734 //
1735 GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1736                                     BOOL*                   pRuntimeLookup /* = NULL */,
1737                                     BOOL                    mustRestoreHandle /* = FALSE */,
1738                                     BOOL                    importParent /* = FALSE */)
1739 {
1740     assert(!fgGlobalMorph);
1741
1742     CORINFO_GENERICHANDLE_RESULT embedInfo;
1743     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1744
1745     if (pRuntimeLookup)
1746     {
1747         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1748     }
1749
1750     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1751     {
1752         switch (embedInfo.handleType)
1753         {
1754             case CORINFO_HANDLETYPE_CLASS:
1755                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1756                 break;
1757
1758             case CORINFO_HANDLETYPE_METHOD:
1759                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1760                 break;
1761
1762             case CORINFO_HANDLETYPE_FIELD:
1763                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1764                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1765                 break;
1766
1767             default:
1768                 break;
1769         }
1770     }
1771
1772     // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
1773     GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1774                                       embedInfo.compileTimeHandle);
1775
1776     // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
1777     if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
1778     {
1779         result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
1780     }
1781
1782     return result;
1783 }
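
// Example (illustrative sketch; 'resolvedToken' is assumed to be the resolved token of an
// ldtoken-style instruction being imported):
//
//     BOOL needsRuntimeLookup = FALSE;
//     GenTree* hndTree = impTokenToHandle(&resolvedToken, &needsRuntimeLookup);
//
// When no shared-generics lookup is needed this is an embedded handle constant (possibly
// read through one indirection); otherwise it is the dictionary lookup tree built by
// impRuntimeLookupToTree and 'needsRuntimeLookup' is TRUE.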
1784
1785 GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1786                                    CORINFO_LOOKUP*         pLookup,
1787                                    unsigned                handleFlags,
1788                                    void*                   compileTimeHandle)
1789 {
1790     if (!pLookup->lookupKind.needsRuntimeLookup)
1791     {
1792         // No runtime lookup is required.
1793         // The access is either direct or an indirection through a fixed address.
1794
1795         CORINFO_GENERIC_HANDLE handle       = nullptr;
1796         void*                  pIndirection = nullptr;
1797         assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
1798
1799         if (pLookup->constLookup.accessType == IAT_VALUE)
1800         {
1801             handle = pLookup->constLookup.handle;
1802         }
1803         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1804         {
1805             pIndirection = pLookup->constLookup.addr;
1806         }
1807         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1808     }
1809     else if (compIsForInlining())
1810     {
1811         // Don't import runtime lookups when inlining
1812         // Inlining has to be aborted in such a case
1813         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1814         return nullptr;
1815     }
1816     else
1817     {
1818         // Need to use dictionary-based access which depends on the typeContext
1819         // which is only available at runtime, not at compile-time.
1820
1821         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1822     }
1823 }
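
// For the non-runtime-lookup path above, the two supported access types produce
// (illustrative tree shapes only):
//
//     IAT_VALUE:  GT_CNS_INT(handle)                 // handle embedded directly
//     IAT_PVALUE: GT_IND(GT_CNS_INT(pIndirection))   // one indirection through a cell
//
// Both shapes come from gtNewIconEmbHndNode; IAT_PPVALUE and IAT_RELPVALUE are asserted
// away above.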
1824
1825 #ifdef FEATURE_READYTORUN_COMPILER
1826 GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1827                                              unsigned              handleFlags,
1828                                              void*                 compileTimeHandle)
1829 {
1830     CORINFO_GENERIC_HANDLE handle       = nullptr;
1831     void*                  pIndirection = nullptr;
1832     assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);
1833
1834     if (pLookup->accessType == IAT_VALUE)
1835     {
1836         handle = pLookup->handle;
1837     }
1838     else if (pLookup->accessType == IAT_PVALUE)
1839     {
1840         pIndirection = pLookup->addr;
1841     }
1842     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1843 }
1844
1845 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1846     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1847     CorInfoHelpFunc         helper,
1848     var_types               type,
1849     GenTreeArgList*         args /* =NULL*/,
1850     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1851 {
1852     CORINFO_CONST_LOOKUP lookup;
1853     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1854     {
1855         return nullptr;
1856     }
1857
1858     GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
1859
1860     op1->setEntryPoint(lookup);
1861
1862     return op1;
1863 }
1864 #endif
1865
1866 GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1867 {
1868     GenTree* op1 = nullptr;
1869
1870     switch (pCallInfo->kind)
1871     {
1872         case CORINFO_CALL:
1873             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1874
1875 #ifdef FEATURE_READYTORUN_COMPILER
1876             if (opts.IsReadyToRun())
1877             {
1878                 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1879             }
1880             else
1881             {
1882                 op1->gtFptrVal.gtEntryPoint.addr       = nullptr;
1883                 op1->gtFptrVal.gtEntryPoint.accessType = IAT_VALUE;
1884             }
1885 #endif
1886             break;
1887
1888         case CORINFO_CALL_CODE_POINTER:
1889             if (compIsForInlining())
1890             {
1891                 // Don't import runtime lookups when inlining
1892                 // Inlining has to be aborted in such a case
1893                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1894                 return nullptr;
1895             }
1896
1897             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1898             break;
1899
1900         default:
1901             noway_assert(!"unknown call kind");
1902             break;
1903     }
1904
1905     return op1;
1906 }
1907
1908 //------------------------------------------------------------------------
1909 // getRuntimeContextTree: find pointer to context for runtime lookup.
1910 //
1911 // Arguments:
1912 //    kind - lookup kind.
1913 //
1914 // Return Value:
1915 //    Return GenTree pointer to generic shared context.
1916 //
1917 // Notes:
1918 //    Reports that the generic context is used (see lvaGenericsContextUseCount).
1919
1920 GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1921 {
1922     GenTree* ctxTree = nullptr;
1923
1924     // Collectible types require that, for shared generic code, any use of the generic context
1925     // parameter is reported. (This is a conservative approach; in some cases, particularly when
1926     // the context parameter is 'this', we could avoid the eager reporting logic.)
1927     lvaGenericsContextUseCount++;
1928
1929     if (kind == CORINFO_LOOKUP_THISOBJ)
1930     {
1931         // this Object
1932         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1933
1934         // Vtable pointer of this object
1935         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1936         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1937         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1938     }
1939     else
1940     {
1941         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1942
1943         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1944     }
1945     return ctxTree;
1946 }
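
// The resulting context trees look roughly like this (illustrative sketch):
//
//     CORINFO_LOOKUP_THISOBJ:     GT_IND(GT_LCL_VAR(this))          // method table of 'this'
//     CORINFO_LOOKUP_METHODPARAM: GT_LCL_VAR(info.compTypeCtxtArg)
//     CORINFO_LOOKUP_CLASSPARAM:  GT_LCL_VAR(info.compTypeCtxtArg)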
1947
1948 /*****************************************************************************/
1949 /* Import a dictionary lookup to access a handle in code shared between
1950    generic instantiations.
1951    The lookup depends on the typeContext which is only available at
1952    runtime, and not at compile-time.
1953    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1954    The cases are:
1955
1956    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1957       instantiation-specific handle, and the tokens to lookup the handle.
1958    2. pLookup->indirections != CORINFO_USEHELPER :
1959       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1960           to get the handle.
1961       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1962           If it is non-NULL, it is the handle required. Else, call a helper
1963           to lookup the handle.
1964  */
1965
1966 GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1967                                           CORINFO_LOOKUP*         pLookup,
1968                                           void*                   compileTimeHandle)
1969 {
1970
1971     // This method can only be called from the importer instance of the Compiler.
1972     // In other words, it cannot be called by the Compiler instance created for the inlinee.
1973     assert(!compIsForInlining());
1974
1975     GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1976
1977     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1978     // It's available only via the run-time helper function
1979     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1980     {
1981 #ifdef FEATURE_READYTORUN_COMPILER
1982         if (opts.IsReadyToRun())
1983         {
1984             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1985                                              gtNewArgList(ctxTree), &pLookup->lookupKind);
1986         }
1987 #endif
1988         GenTree* argNode =
1989             gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
1990         GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
1991
1992         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
1993     }
1994
1995     // Slot pointer
1996     GenTree* slotPtrTree = ctxTree;
1997
1998     if (pRuntimeLookup->testForNull)
1999     {
2000         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2001                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2002     }
2003
2004     GenTree* indOffTree = nullptr;
2005
2006     // Apply the repeated indirections
2007     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2008     {
2009         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2010         {
2011             indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2012                                       nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
2013         }
2014
2015         if (i != 0)
2016         {
2017             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2018             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2019             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2020         }
2021
2022         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2023         {
2024             slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
2025         }
2026
2027         if (pRuntimeLookup->offsets[i] != 0)
2028         {
2029             slotPtrTree =
2030                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2031         }
2032     }
2033
2034     // No null test required
2035     if (!pRuntimeLookup->testForNull)
2036     {
2037         if (pRuntimeLookup->indirections == 0)
2038         {
2039             return slotPtrTree;
2040         }
2041
2042         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2043         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2044
2045         if (!pRuntimeLookup->testForFixup)
2046         {
2047             return slotPtrTree;
2048         }
2049
2050         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2051
2052         unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2053         impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2054
2055         GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2056         // downcast the pointer to a TYP_INT on 64-bit targets
2057         slot = impImplicitIorI4Cast(slot, TYP_INT);
2058         // Use a GT_AND to check for the lowest bit and indirect if it is set
2059         GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2060         GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2061         relop->gtFlags |= GTF_RELOP_QMARK;
2062
2063         // slot = GT_IND(slot - 1)
2064         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2065         GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2066         GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2067         indir->gtFlags |= GTF_IND_NONFAULTING;
2068         indir->gtFlags |= GTF_IND_INVARIANT;
2069
2070         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2071         GenTree* asg   = gtNewAssignNode(slot, indir);
2072         GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2073         GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2074         impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2075
2076         return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2077     }
2078
2079     assert(pRuntimeLookup->indirections != 0);
2080
2081     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2082
2083     // Extract the handle
2084     GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2085     handle->gtFlags |= GTF_IND_NONFAULTING;
2086
2087     GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2088                                        nullptr DEBUGARG("impRuntimeLookup typehandle"));
2089
2090     // Call to helper
2091     GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2092
2093     GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2094     GenTree*        helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2095
2096     // Check for null and possibly call helper
2097     GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2098     relop->gtFlags |= GTF_RELOP_QMARK;
2099
2100     GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2101                                                        gtNewNothingNode(), // do nothing if nonnull
2102                                                        helperCall);
2103
2104     GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2105
2106     unsigned tmp;
2107     if (handleCopy->IsLocal())
2108     {
2109         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2110     }
2111     else
2112     {
2113         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2114     }
2115
2116     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2117     return gtNewLclvNode(tmp, TYP_I_IMPL);
2118 }
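
// As a rough illustration (a sketch of the tree shape, not exact JIT output), the
// testForNull case with a single non-zero offset ends up as:
//
//     handle = GT_IND(GT_ADD(ctx, offset));                      // dereference the slot
//     tmp    = GT_QMARK(handle != 0, nothing : helperCall);      // call the helper only when null
//     result = GT_LCL_VAR(tmp);
//
// where 'ctx' is the tree returned by getRuntimeContextTree and 'helperCall' passes the
// context and the lookup signature to pRuntimeLookup->helper.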
2119
2120 /******************************************************************************
2121  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2122  *  If tnum != BAD_VAR_NUM, the temp var used to replace the tree is tnum;
2123  *     otherwise, grab a new temp.
2124  *  For structs (which can be pushed on the stack using obj, etc.),
2125  *  special handling is needed.
2126  */
2127
2128 struct RecursiveGuard
2129 {
2130 public:
2131     RecursiveGuard()
2132     {
2133         m_pAddress = nullptr;
2134     }
2135
2136     ~RecursiveGuard()
2137     {
2138         if (m_pAddress)
2139         {
2140             *m_pAddress = false;
2141         }
2142     }
2143
2144     void Init(bool* pAddress, bool bInitialize)
2145     {
2146         assert(pAddress && *pAddress == false && "Recursive guard violation");
2147         m_pAddress = pAddress;
2148
2149         if (bInitialize)
2150         {
2151             *m_pAddress = true;
2152         }
2153     }
2154
2155 protected:
2156     bool* m_pAddress;
2157 };
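
// Example usage (mirrors the DEBUG-only use in impSpillStackEntry below):
//
//     RecursiveGuard guard;
//     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
//
// Init asserts that the flag is not already set (catching recursive spills), and when
// 'bAssertOnRecursion' is true it sets the flag for the guard's lifetime; the destructor
// clears it again.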
2158
2159 bool Compiler::impSpillStackEntry(unsigned level,
2160                                   unsigned tnum
2161 #ifdef DEBUG
2162                                   ,
2163                                   bool        bAssertOnRecursion,
2164                                   const char* reason
2165 #endif
2166                                   )
2167 {
2168
2169 #ifdef DEBUG
2170     RecursiveGuard guard;
2171     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2172 #endif
2173
2174     GenTree* tree = verCurrentState.esStack[level].val;
2175
2176     /* Allocate a temp if we haven't been asked to use a particular one */
2177
2178     if (tiVerificationNeeded)
2179     {
2180         // Ignore bad temp requests (they will happen with bad code and will be
2181         // caught when importing the destination block)
2182         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2183         {
2184             return false;
2185         }
2186     }
2187     else
2188     {
2189         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2190         {
2191             return false;
2192         }
2193     }
2194
2195     bool isNewTemp = false;
2196
2197     if (tnum == BAD_VAR_NUM)
2198     {
2199         tnum      = lvaGrabTemp(true DEBUGARG(reason));
2200         isNewTemp = true;
2201     }
2202     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2203     {
2204         // If verification is needed and tnum's type is incompatible with
2205         // the type on the stack, we grab a new temp. This is safe since
2206         // we will throw a verification exception in the destination block.
2207
2208         var_types valTyp = tree->TypeGet();
2209         var_types dstTyp = lvaTable[tnum].TypeGet();
2210
2211         // If the two types are different, we return. This will only happen with bad code and will
2212         // be caught when importing the destination block. We still allow int/byref and float/double differences.
2213         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2214             !(
2215 #ifndef _TARGET_64BIT_
2216                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2217 #endif // !_TARGET_64BIT_
2218                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2219         {
2220             if (verNeedsVerification())
2221             {
2222                 return false;
2223             }
2224         }
2225     }
2226
2227     /* Assign the spilled entry to the temp */
2228     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2229
2230     // If temp is newly introduced and a ref type, grab what type info we can.
2231     if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2232     {
2233         CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2234         lvaSetClass(tnum, tree, stkHnd);
2235     }
2236
2237     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2238     var_types type                     = genActualType(lvaTable[tnum].TypeGet());
2239     GenTree*  temp                     = gtNewLclvNode(tnum, type);
2240     verCurrentState.esStack[level].val = temp;
2241
2242     return true;
2243 }
2244
2245 /*****************************************************************************
2246  *
2247  *  Ensure that the stack has only spilled values
2248  */
2249
2250 void Compiler::impSpillStackEnsure(bool spillLeaves)
2251 {
2252     assert(!spillLeaves || opts.compDbgCode);
2253
2254     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2255     {
2256         GenTree* tree = verCurrentState.esStack[level].val;
2257
2258         if (!spillLeaves && tree->OperIsLeaf())
2259         {
2260             continue;
2261         }
2262
2263         // Temps introduced by the importer itself don't need to be spilled
2264
2265         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2266
2267         if (isTempLcl)
2268         {
2269             continue;
2270         }
2271
2272         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2273     }
2274 }
2275
2276 void Compiler::impSpillEvalStack()
2277 {
2278     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2279     {
2280         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2281     }
2282 }
2283
2284 /*****************************************************************************
2285  *
2286  *  If the stack contains any trees with side effects in them, assign those
2287  *  trees to temps and append the assignments to the statement list.
2288  *  On return the stack is guaranteed to be empty.
2289  */
2290
2291 inline void Compiler::impEvalSideEffects()
2292 {
2293     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2294     verCurrentState.esStackDepth = 0;
2295 }
2296
2297 /*****************************************************************************
2298  *
2299  *  If the stack contains any trees with side effects in them, assign those
2300  *  trees to temps and replace them on the stack with refs to their temps.
2301  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2302  */
2303
2304 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2305 {
2306     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2307
2308     /* Before we make any appends to the tree list we must spill the
2309      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2310
2311     impSpillSpecialSideEff();
2312
2313     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2314     {
2315         chkLevel = verCurrentState.esStackDepth;
2316     }
2317
2318     assert(chkLevel <= verCurrentState.esStackDepth);
2319
2320     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2321
2322     for (unsigned i = 0; i < chkLevel; i++)
2323     {
2324         GenTree* tree = verCurrentState.esStack[i].val;
2325
2326         GenTree* lclVarTree;
2327
2328         if ((tree->gtFlags & spillFlags) != 0 ||
2329             (spillGlobEffects &&                        // Only consider the following when  spillGlobEffects == TRUE
2330              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2331              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2332                                            // lvAddrTaken flag.
2333         {
2334             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2335         }
2336     }
2337 }
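
// Example (illustrative sketch): before appending a tree whose evaluation could be
// reordered with side effects still sitting on the stack, the importer does:
//
//     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("example spill"));
//
// Every checked stack entry with GTF_SIDE_EFFECT (or GTF_GLOB_EFFECT when
// 'spillGlobEffects' is true) is assigned to a temp and replaced by a use of that temp.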
2338
2339 /*****************************************************************************
2340  *
2341  *  If the stack contains any trees with special side effects in them, assign
2342  *  those trees to temps and replace them on the stack with refs to their temps.
2343  */
2344
2345 inline void Compiler::impSpillSpecialSideEff()
2346 {
2347     // Only exception objects need to be carefully handled
2348
2349     if (!compCurBB->bbCatchTyp)
2350     {
2351         return;
2352     }
2353
2354     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2355     {
2356         GenTree* tree = verCurrentState.esStack[level].val;
2357         // Make sure that if there is an exception object anywhere in the subtree, we spill the entry.
2358         if (gtHasCatchArg(tree))
2359         {
2360             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2361         }
2362     }
2363 }
2364
2365 /*****************************************************************************
2366  *
2367  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2368  */
2369
2370 void Compiler::impSpillValueClasses()
2371 {
2372     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2373     {
2374         GenTree* tree = verCurrentState.esStack[level].val;
2375
2376         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2377         {
2378             // Tree walk was aborted, which means that we found a
2379             // value class on the stack.  Need to spill that
2380             // stack entry.
2381
2382             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2383         }
2384     }
2385 }
2386
2387 /*****************************************************************************
2388  *
2389  *  Callback that checks if a tree node is TYP_STRUCT
2390  */
2391
2392 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
2393 {
2394     fgWalkResult walkResult = WALK_CONTINUE;
2395
2396     if ((*pTree)->gtType == TYP_STRUCT)
2397     {
2398         // Abort the walk and indicate that we found a value class
2399
2400         walkResult = WALK_ABORT;
2401     }
2402
2403     return walkResult;
2404 }
2405
2406 /*****************************************************************************
2407  *
2408  *  If the stack contains any trees with references to local #lclNum, assign
2409  *  those trees to temps and replace their place on the stack with refs to
2410  *  their temps.
2411  */
2412
2413 void Compiler::impSpillLclRefs(ssize_t lclNum)
2414 {
2415     /* Before we make any appends to the tree list we must spill the
2416      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2417
2418     impSpillSpecialSideEff();
2419
2420     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2421     {
2422         GenTree* tree = verCurrentState.esStack[level].val;
2423
2424         /* If the tree may throw an exception, and the block has a handler,
2425            then we need to spill assignments to the local if the local is
2426            live on entry to the handler.
2427            Just spill them all without considering liveness */
2428
2429         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2430
2431         /* Skip the tree if it doesn't have an affected reference,
2432            unless xcptnCaught */
2433
2434         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2435         {
2436             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2437         }
2438     }
2439 }
2440
2441 /*****************************************************************************
2442  *
2443  *  Push catch arg onto the stack.
2444  *  If there are jumps to the beginning of the handler, insert basic block
2445  *  and spill catch arg to a temp. Update the handler block if necessary.
2446  *
2447  *  Returns the basic block of the actual handler.
2448  */
2449
2450 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2451 {
2452     // Do not inject the basic block twice on reimport. This should be
2453     // hit only under JIT stress. See if the block is the one we injected.
2454     // Note that EH canonicalization can inject internal blocks here. We might
2455     // be able to re-use such a block (but we don't, right now).
2456     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2457         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2458     {
2459         GenTree* tree = hndBlk->bbTreeList;
2460
2461         if (tree != nullptr && tree->gtOper == GT_STMT)
2462         {
2463             tree = tree->gtStmt.gtStmtExpr;
2464             assert(tree != nullptr);
2465
2466             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2467                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2468             {
2469                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2470
2471                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2472
2473                 return hndBlk->bbNext;
2474             }
2475         }
2476
2477         // If we get here, it must have been some other kind of internal block. It's possible that
2478         // someone prepended something to our injected block, but that's unlikely.
2479     }
2480
2481     /* Push the exception address value on the stack */
2482     GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2483
2484     /* Mark the node as having a side-effect - i.e. cannot be
2485      * moved around since it is tied to a fixed location (EAX) */
2486     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2487
2488 #if defined(JIT32_GCENCODER)
2489     const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2490 #else
2491     const bool forceInsertNewBlock                                     = compStressCompile(STRESS_CATCH_ARG, 5);
2492 #endif // defined(JIT32_GCENCODER)
2493
2494     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2495     if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2496     {
2497         if (hndBlk->bbRefs == 1)
2498         {
2499             hndBlk->bbRefs++;
2500         }
2501
2502         /* Create extra basic block for the spill */
2503         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2504         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2505         newBlk->setBBWeight(hndBlk->bbWeight);
2506         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2507
2508         /* Account for the new link we are about to create */
2509         hndBlk->bbRefs++;
2510
2511         /* Spill into a temp */
2512         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2513         lvaTable[tempNum].lvType = TYP_REF;
2514         arg                      = gtNewTempAssign(tempNum, arg);
2515
2516         hndBlk->bbStkTempsIn = tempNum;
2517
2518         /* Report the debug info. impImportBlockCode won't treat
2519          * the actual handler as an exception block and thus won't do it for us. */
2520         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2521         {
2522             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2523             arg            = gtNewStmt(arg, impCurStmtOffs);
2524         }
2525
2526         fgInsertStmtAtEnd(newBlk, arg);
2527
2528         arg = gtNewLclvNode(tempNum, TYP_REF);
2529     }
2530
2531     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2532
2533     return hndBlk;
2534 }
2535
2536 /*****************************************************************************
2537  *
2538  *  Given a tree, clone it. *pClone is set to the cloned tree.
2539  *  Returns the original tree if the cloning was easy,
2540  *   else returns a reference to the temp to which the tree was spilled.
2541  *  If the tree has side-effects, it will be spilled to a temp.
2542  */
2543
2544 GenTree* Compiler::impCloneExpr(GenTree*             tree,
2545                                 GenTree**            pClone,
2546                                 CORINFO_CLASS_HANDLE structHnd,
2547                                 unsigned             curLevel,
2548                                 GenTree** pAfterStmt DEBUGARG(const char* reason))
2549 {
2550     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2551     {
2552         GenTree* clone = gtClone(tree, true);
2553
2554         if (clone)
2555         {
2556             *pClone = clone;
2557             return tree;
2558         }
2559     }
2560
2561     /* Store the operand in a temp and return the temp */
2562
2563     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2564
2565     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2566     // return a struct type. It also may modify the struct type to a more
2567     // specialized type (e.g. a SIMD type).  So we will get the type from
2568     // the lclVar AFTER calling impAssignTempGen().
2569
2570     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2571     var_types type = genActualType(lvaTable[temp].TypeGet());
2572
2573     *pClone = gtNewLclvNode(temp, type);
2574     return gtNewLclvNode(temp, type);
2575 }
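
// Example (illustrative sketch; 'op' is assumed to be a stack operand that is needed
// twice, e.g. for both a null check and the actual use):
//
//     GenTree* opClone = nullptr;
//     op = impCloneExpr(op, &opClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
//                       nullptr DEBUGARG("example clone"));
//
// If 'op' had no global effects and was trivially clonable, it is returned unchanged and
// 'opClone' is an independent copy; otherwise 'op' is spilled to a temp and both trees
// become uses of that temp.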
2576
2577 /*****************************************************************************
2578  * Remember the IL offset (including stack-empty info) for the trees we will
2579  * generate now.
2580  */
2581
2582 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2583 {
2584     if (compIsForInlining())
2585     {
2586         GenTree* callStmt = impInlineInfo->iciStmt;
2587         assert(callStmt->gtOper == GT_STMT);
2588         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2589     }
2590     else
2591     {
2592         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2593         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2594         impCurStmtOffs    = offs | stkBit;
2595     }
2596 }
2597
2598 /*****************************************************************************
2599  * Returns current IL offset with stack-empty and call-instruction info incorporated
2600  */
2601 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2602 {
2603     if (compIsForInlining())
2604     {
2605         return BAD_IL_OFFSET;
2606     }
2607     else
2608     {
2609         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2610         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2611         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2612         return offs | stkBit | callInstructionBit;
2613     }
2614 }
2615
2616 //------------------------------------------------------------------------
2617 // impCanSpillNow: check whether it is possible to spill all values from the eval stack to local variables.
2618 //
2619 // Arguments:
2620 //    prevOpcode - last importer opcode
2621 //
2622 // Return Value:
2623 //    true if spilling is legal; false if it could break up a sequence that we do not want to divide.
2624 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2625 {
2626     // Don't spill after ldtoken, newarr and newobj, because they could be part of the InitializeArray sequence.
2627     // Avoid breaking up the sequence so that impInitializeArrayIntrinsic can succeed (see the IL sketch below).
2628     return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
2629 }
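
// The InitializeArray pattern referred to above looks roughly like this in IL
// (illustrative sketch):
//
//     newarr  <elemType>
//     dup
//     ldtoken <field holding the initial data blob>
//     call    System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray
//
// Spilling the stack between these opcodes would hide the pattern from
// impInitializeArrayIntrinsic, so impCanSpillNow returns false right after them.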
2630
2631 /*****************************************************************************
2632  *
2633  *  Remember the instr offset for the statements
2634  *
2635  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2636  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2637  *  as some of the trees corresponding to code up to impCurOpcOffs might
2638  *  still be sitting on the stack.
2639  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2640  *  This should be called when an opcode finally/explicitly causes
2641  *  impAppendTree(tree) to be called (as opposed to being called because of
2642  *  a spill caused by the opcode)
2643  */
2644
2645 #ifdef DEBUG
2646
2647 void Compiler::impNoteLastILoffs()
2648 {
2649     if (impLastILoffsStmt == nullptr)
2650     {
2651         // We should have added a statement for the current basic block
2652         // Is this assert correct ?
2653
2654         assert(impTreeLast);
2655         assert(impTreeLast->gtOper == GT_STMT);
2656
2657         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2658     }
2659     else
2660     {
2661         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2662         impLastILoffsStmt                          = nullptr;
2663     }
2664 }
2665
2666 #endif // DEBUG
2667
2668 /*****************************************************************************
2669  * We don't create any GenTree (excluding spills) for a branch.
2670  * For debugging info, we need a placeholder so that we can note
2671  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2672  */
2673
2674 void Compiler::impNoteBranchOffs()
2675 {
2676     if (opts.compDbgCode)
2677     {
2678         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2679     }
2680 }
2681
2682 /*****************************************************************************
2683  * Locate the next stmt boundary for which we need to record info.
2684  * We will have to spill the stack at such boundaries if it is not
2685  * already empty.
2686  * Returns the next stmt boundary (after the start of the block)
2687  */
2688
2689 unsigned Compiler::impInitBlockLineInfo()
2690 {
2691     /* Assume the block does not correspond with any IL offset. This prevents
2692        us from reporting extra offsets. Extra mappings can cause confusing
2693        stepping, especially if the extra mapping is a jump-target, and the
2694        debugger does not ignore extra mappings, but instead rewinds to the
2695        nearest known offset */
2696
2697     impCurStmtOffsSet(BAD_IL_OFFSET);
2698
2699     if (compIsForInlining())
2700     {
2701         return ~0;
2702     }
2703
2704     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2705
2706     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2707     {
2708         impCurStmtOffsSet(blockOffs);
2709     }
2710
2711     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2712     {
2713         impCurStmtOffsSet(blockOffs);
2714     }
2715
2716     /* Always report IL offset 0 or some tests get confused.
2717        Probably a good idea anyway */
2718
2719     if (blockOffs == 0)
2720     {
2721         impCurStmtOffsSet(blockOffs);
2722     }
2723
2724     if (!info.compStmtOffsetsCount)
2725     {
2726         return ~0;
2727     }
2728
2729     /* Find the lowest explicit stmt boundary within the block */
2730
2731     /* Start looking at an entry that is based on our instr offset */
2732
2733     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2734
2735     if (index >= info.compStmtOffsetsCount)
2736     {
2737         index = info.compStmtOffsetsCount - 1;
2738     }
2739
2740     /* If we've guessed too far, back up */
2741
2742     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2743     {
2744         index--;
2745     }
2746
2747     /* If we guessed short, advance ahead */
2748
2749     while (info.compStmtOffsets[index] < blockOffs)
2750     {
2751         index++;
2752
2753         if (index == info.compStmtOffsetsCount)
2754         {
2755             return info.compStmtOffsetsCount;
2756         }
2757     }
2758
2759     assert(index < info.compStmtOffsetsCount);
2760
2761     if (info.compStmtOffsets[index] == blockOffs)
2762     {
2763         /* There is an explicit boundary for the start of this basic block.
2764            So we will start with bbCodeOffs. Else we will wait until we
2765            get to the next explicit boundary */
2766
2767         impCurStmtOffsSet(blockOffs);
2768
2769         index++;
2770     }
2771
2772     return index;
2773 }
2774
2775 /*****************************************************************************/
2776
2777 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2778 {
2779     switch (opcode)
2780     {
2781         case CEE_CALL:
2782         case CEE_CALLI:
2783         case CEE_CALLVIRT:
2784             return true;
2785
2786         default:
2787             return false;
2788     }
2789 }
2790
2791 /*****************************************************************************/
2792
2793 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2794 {
2795     switch (opcode)
2796     {
2797         case CEE_CALL:
2798         case CEE_CALLI:
2799         case CEE_CALLVIRT:
2800         case CEE_JMP:
2801         case CEE_NEWOBJ:
2802         case CEE_NEWARR:
2803             return true;
2804
2805         default:
2806             return false;
2807     }
2808 }
2809
2810 /*****************************************************************************/
2811
2812 // One might think it is worth caching these values, but results indicate
2813 // that it isn't.
2814 // In addition, caching them causes SuperPMI to be unable to completely
2815 // encapsulate an individual method context.
2816 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2817 {
2818     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2819     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2820     return refAnyClass;
2821 }
2822
2823 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2824 {
2825     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2826     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2827     return typeHandleClass;
2828 }
2829
2830 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2831 {
2832     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2833     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2834     return argIteratorClass;
2835 }
2836
2837 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2838 {
2839     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2840     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2841     return stringClass;
2842 }
2843
2844 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2845 {
2846     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2847     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2848     return objectClass;
2849 }
2850
2851 /*****************************************************************************
2852  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2853  *  set its type to TYP_BYREF when we create it. Only at the point
2854  *  where we use it do we know whether it can be changed to TYP_I_IMPL.
2855  */
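/* For example (illustrative): "ldloca.s 0" pushes the address of a local as TYP_BYREF; if that
   address is later used where a native int is expected (say, compared with or assigned to a
   TYP_I_IMPL value), this helper retypes the address node to TYP_I_IMPL at that point of use */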
2856
2857 /* static */
2858 void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
2859 {
2860     if (tree1->IsVarAddr())
2861     {
2862         tree1->gtType = TYP_I_IMPL;
2863     }
2864
2865     if (tree2 && tree2->IsVarAddr())
2866     {
2867         tree2->gtType = TYP_I_IMPL;
2868     }
2869 }
2870
2871 /*****************************************************************************
2872  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2873  *  to make that an explicit cast in our trees, so any implicit casts that
2874  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2875  *  turned into explicit casts here.
2876  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2877  */
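/* For example (illustrative): on a 64-bit target, IL may pass an int32 value where a native int
   is expected without any conversion opcode; this helper inserts the explicit GT_CAST to
   TYP_I_IMPL (or to TYP_INT for the opposite direction) that the JIT's trees require */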
2878
2879 GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
2880 {
2881     var_types currType   = genActualType(tree->gtType);
2882     var_types wantedType = genActualType(dstTyp);
2883
2884     if (wantedType != currType)
2885     {
2886         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2887         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2888         {
2889             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2890             {
2891                 tree->gtType = TYP_I_IMPL;
2892             }
2893         }
2894 #ifdef _TARGET_64BIT_
2895         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2896         {
2897             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2898             tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
2899         }
2900         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2901         {
2902             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2903             tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
2904         }
2905 #endif // _TARGET_64BIT_
2906     }
2907
2908     return tree;
2909 }
2910
2911 /*****************************************************************************
2912  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2913  *  but we want to make that an explicit cast in our trees, so any implicit casts
2914  *  that exist in the IL are turned into explicit casts here.
2915  */
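/* For example (illustrative): IL models floating-point values on the evaluation stack as the
   single type F, so a float32 value can reach a float64 destination without a conv.r8; this
   helper adds the explicit GT_CAST to the destination floating-point type in that case */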
2916
2917 GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
2918 {
2919 #ifndef LEGACY_BACKEND
2920     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2921     {
2922         tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
2923     }
2924 #endif // !LEGACY_BACKEND
2925
2926     return tree;
2927 }
2928
2929 //------------------------------------------------------------------------
2930 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2931 //    with a GT_COPYBLK node.
2932 //
2933 // Arguments:
2934 //    sig - The InitializeArray signature.
2935 //
2936 // Return Value:
2937 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2938 //    nullptr otherwise.
2939 //
2940 // Notes:
2941 //    The function recognizes the following IL pattern:
2942 //      ldc <length> or a list of ldc <lower bound>/<length>
2943 //      newarr or newobj
2944 //      dup
2945 //      ldtoken <field handle>
2946 //      call InitializeArray
2947 //    The lower bounds need not be constant except when the array rank is 1.
2948 //    The function recognizes all kinds of arrays thus enabling a small runtime
2949 //    such as CoreRT to skip providing an implementation for InitializeArray.
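//    For example (illustrative), a C# field initializer such as
//        static readonly int[] Data = { 1, 2, 3, 4 };
//    is typically compiled to exactly this shape: ldc.i4.4; newarr int32; dup;
//    ldtoken <a data field in '<PrivateImplementationDetails>'>; call InitializeArray.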
2950
2951 GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2952 {
2953     assert(sig->numArgs == 2);
2954
2955     GenTree* fieldTokenNode = impStackTop(0).val;
2956     GenTree* arrayLocalNode = impStackTop(1).val;
2957
2958     //
2959     // Verify that the field token is known and valid.  Note that it's also
2960     // possible for the token to come from reflection, in which case we cannot do
2961     // the optimization and must therefore revert to calling the helper.  You can
2962     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2963     //
2964
2965     // Check to see if the ldtoken helper call is what we see here.
2966     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2967         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2968     {
2969         return nullptr;
2970     }
2971
2972     // Strip helper call away
2973     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2974
2975     if (fieldTokenNode->gtOper == GT_IND)
2976     {
2977         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2978     }
2979
2980     // Check for constant
2981     if (fieldTokenNode->gtOper != GT_CNS_INT)
2982     {
2983         return nullptr;
2984     }
2985
2986     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2987     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2988     {
2989         return nullptr;
2990     }
2991
2992     //
2993     // We need to get the number of elements in the array and the size of each element.
2994     // We verify that the newarr statement is exactly what we expect it to be.
2995     // If it's not, then we just return nullptr and we don't optimize this call
2996     //
2997
2998     //
2999     // It is possible that we don't have any statements in the block yet
3000     //
3001     if (impTreeLast->gtOper != GT_STMT)
3002     {
3003         assert(impTreeLast->gtOper == GT_BEG_STMTS);
3004         return nullptr;
3005     }
3006
3007     //
3008     // We start by looking at the last statement, making sure it's an assignment, and
3009     // that the target of the assignment is the array passed to InitializeArray.
3010     //
3011     GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
3012     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
3013         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
3014         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
3015     {
3016         return nullptr;
3017     }
3018
3019     //
3020     // Make sure that the object being assigned is a helper call.
3021     //
3022
3023     GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
3024     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
3025     {
3026         return nullptr;
3027     }
3028
3029     //
3030     // Verify that it is one of the new array helpers.
3031     //
3032
3033     bool isMDArray = false;
3034
3035     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
3036         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
3037         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3038         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3039 #ifdef FEATURE_READYTORUN_COMPILER
3040         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3041         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3042 #endif
3043             )
3044     {
3045         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3046         {
3047             return nullptr;
3048         }
3049
3050         isMDArray = true;
3051     }
3052
3053     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3054
3055     //
3056     // Make sure we found a compile time handle to the array
3057     //
3058
3059     if (!arrayClsHnd)
3060     {
3061         return nullptr;
3062     }
3063
3064     unsigned rank = 0;
3065     S_UINT32 numElements;
3066
3067     if (isMDArray)
3068     {
3069         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3070
3071         if (rank == 0)
3072         {
3073             return nullptr;
3074         }
3075
3076         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3077         assert(tokenArg != nullptr);
3078         GenTreeArgList* numArgsArg = tokenArg->Rest();
3079         assert(numArgsArg != nullptr);
3080         GenTreeArgList* argsArg = numArgsArg->Rest();
3081         assert(argsArg != nullptr);
3082
3083         //
3084         // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3085         // so at least one length must be present and the rank can't exceed 32 so there can
3086         // be at most 64 arguments - 32 lengths and 32 lower bounds.
3087         //
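        // For example (illustrative): for a "new int[2,3]" allocation the helper receives
        // numArgs == rank == 2 (just the two lengths); if lower bounds were supplied as well,
        // numArgs would be rank * 2 == 4.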
3088
3089         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3090             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3091         {
3092             return nullptr;
3093         }
3094
3095         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3096         bool     lowerBoundsSpecified;
3097
3098         if (numArgs == rank * 2)
3099         {
3100             lowerBoundsSpecified = true;
3101         }
3102         else if (numArgs == rank)
3103         {
3104             lowerBoundsSpecified = false;
3105
3106             //
3107             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3108             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3109             // we get a SDArray as well, see the for loop below.
3110             //
3111
3112             if (rank == 1)
3113             {
3114                 isMDArray = false;
3115             }
3116         }
3117         else
3118         {
3119             return nullptr;
3120         }
3121
3122         //
3123         // The rank is known to be at least 1 so we can start with numElements being 1
3124         // to avoid the need to special case the first dimension.
3125         //
3126
3127         numElements = S_UINT32(1);
3128
3129         struct Match
3130         {
3131             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3132             {
3133                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3134                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3135             }
3136
3137             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3138             {
3139                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3140                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3141                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3142             }
3143
3144             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3145             {
3146                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3147                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3148             }
3149
3150             static bool IsComma(GenTree* tree)
3151             {
3152                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3153             }
3154         };
3155
3156         unsigned argIndex = 0;
3157         GenTree* comma;
3158
3159         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3160         {
3161             if (lowerBoundsSpecified)
3162             {
3163                 //
3164                 // In general lower bounds can be ignored because they're not needed to
3165                 // calculate the total number of elements. But for single dimensional arrays
3166                 // we need to know if the lower bound is 0 because in this case the runtime
3167                 // creates a SDArray and this affects the way the array data offset is calculated.
3168                 //
3169
3170                 if (rank == 1)
3171                 {
3172                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3173                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3174                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3175
3176                     if (lowerBoundNode->IsIntegralConst(0))
3177                     {
3178                         isMDArray = false;
3179                     }
3180                 }
3181
3182                 comma = comma->gtGetOp2();
3183                 argIndex++;
3184             }
3185
3186             GenTree* lengthNodeAssign = comma->gtGetOp1();
3187             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3188             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3189
3190             if (!lengthNode->IsCnsIntOrI())
3191             {
3192                 return nullptr;
3193             }
3194
3195             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3196             argIndex++;
3197         }
3198
3199         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3200
3201         if (argIndex != numArgs)
3202         {
3203             return nullptr;
3204         }
3205     }
3206     else
3207     {
3208         //
3209         // Make sure there are exactly two arguments:  the array class and
3210         // the number of elements.
3211         //
3212
3213         GenTree* arrayLengthNode;
3214
3215         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3216 #ifdef FEATURE_READYTORUN_COMPILER
3217         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3218         {
3219             // Array length is 1st argument for readytorun helper
3220             arrayLengthNode = args->Current();
3221         }
3222         else
3223 #endif
3224         {
3225             // Array length is 2nd argument for regular helper
3226             arrayLengthNode = args->Rest()->Current();
3227         }
3228
3229         //
3230         // Make sure that the number of elements looks valid.
3231         //
3232         if (arrayLengthNode->gtOper != GT_CNS_INT)
3233         {
3234             return nullptr;
3235         }
3236
3237         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3238
3239         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3240         {
3241             return nullptr;
3242         }
3243     }
3244
3245     CORINFO_CLASS_HANDLE elemClsHnd;
3246     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3247
3248     //
3249     // Note that genTypeSize will return zero for non-primitive types, which is exactly
3250     // what we want (size will then be 0, and we will catch this in the conditional below).
3251     // Note that we don't expect this to fail for valid binaries, so we assert in the
3252     // non-verification case (the verification case should not assert but rather correctly
3253     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3254     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3255     // why.
3256     //
3257
3258     S_UINT32 elemSize(genTypeSize(elementType));
3259     S_UINT32 size = elemSize * S_UINT32(numElements);
3260
3261     if (size.IsOverflow())
3262     {
3263         return nullptr;
3264     }
3265
3266     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3267     {
3268         assert(verNeedsVerification());
3269         return nullptr;
3270     }
3271
3272     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3273     if (!initData)
3274     {
3275         return nullptr;
3276     }
3277
3278     //
3279     // At this point we are ready to commit to implementing the InitializeArray
3280     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3281     // return the struct assignment node.
3282     //
3283
3284     impPopStack();
3285     impPopStack();
3286
3287     const unsigned blkSize = size.Value();
3288     unsigned       dataOffset;
3289
3290     if (isMDArray)
3291     {
3292         dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3293     }
3294     else
3295     {
3296         dataOffset = eeGetArrayDataOffset(elementType);
3297     }
3298
3299     GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3300     GenTree* blk = gtNewBlockVal(dst, blkSize);
3301     GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
3302
3303     return gtNewBlkOpNode(blk,     // dst
3304                           src,     // src
3305                           blkSize, // size
3306                           false,   // volatil
3307                           true);   // copyBlock
3308 }
3309
3310 //------------------------------------------------------------------------
3311 // impIntrinsic: possibly expand intrinsic call into alternate IR sequence
3312 //
3313 // Arguments:
3314 //    newobjThis - for constructor calls, the tree for the newly allocated object
3315 //    clsHnd - handle for the intrinsic method's class
3316 //    method - handle for the intrinsic method
3317 //    sig    - signature of the intrinsic method
3318 //    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
3319 //    memberRef - the token for the intrinsic method
3320 //    readonlyCall - true if call has a readonly prefix
3321 //    tailCall - true if call is in tail position
3322 //    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
3323 //       if call is not constrained
3324 //    constraintCallThisTransform -- this transform to apply for a constrained call
3325 //    pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
3326 //       for "traditional" jit intrinsics
3327 //    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
3328 //       that is amenable to special downstream optimization opportunities
3329 //
3330 // Returns:
3331 //    IR tree to use in place of the call, or nullptr if the jit should treat
3332 //    the intrinsic call like a normal call.
3333 //
3334 //    pIntrinsicID set to a non-illegal value if the call is recognized as a
3335 //    traditional jit intrinsic, even if the intrinsic is not expanded.
3336 //
3337 //    isSpecial set true if the expansion is subject to special
3338 //    optimizations later in the jit processing
3339 //
3340 // Notes:
3341 //    On success the IR tree may be a call to a different method or an inline
3342 //    sequence. If it is a call, then the intrinsic processing here is responsible
3343 //    for handling all the special cases, as upon return to impImportCall
3344 //    expanded intrinsics bypass most of the normal call processing.
3345 //
3346 //    Intrinsics are generally not recognized in minopts and debug codegen.
3347 //
3348 //    However, certain traditional intrinsics are identified as "must expand"
3349 //    if there is no fallback implementation to invoke; these must be handled
3350 //    in all codegen modes.
3351 //
3352 //    New style intrinsics (where the fallback implementation is in IL) are
3353 //    identified as "must expand" if they are invoked from within their
3354 //    own method bodies.
3355 //
3356
3357 GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
3358                                 CORINFO_CLASS_HANDLE    clsHnd,
3359                                 CORINFO_METHOD_HANDLE   method,
3360                                 CORINFO_SIG_INFO*       sig,
3361                                 unsigned                methodFlags,
3362                                 int                     memberRef,
3363                                 bool                    readonlyCall,
3364                                 bool                    tailCall,
3365                                 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
3366                                 CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
3367                                 CorInfoIntrinsics*      pIntrinsicID,
3368                                 bool*                   isSpecialIntrinsic)
3369 {
3370     assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
3371
3372     bool              mustExpand  = false;
3373     bool              isSpecial   = false;
3374     CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
3375     NamedIntrinsic    ni          = NI_Illegal;
3376
3377     if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
3378     {
3379         intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3380     }
3381
3382     if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
3383     {
3384         // The recursive calls to Jit intrinsics are must-expand by convention.
3385         mustExpand = mustExpand || gtIsRecursiveCall(method);
3386
3387         if (intrinsicID == CORINFO_INTRINSIC_Illegal)
3388         {
3389             ni = lookupNamedIntrinsic(method);
3390
3391 #ifdef FEATURE_HW_INTRINSICS
3392             if (ni > NI_HW_INTRINSIC_START && ni < NI_HW_INTRINSIC_END)
3393             {
3394                 return impHWIntrinsic(ni, method, sig, mustExpand);
3395             }
3396 #endif // FEATURE_HW_INTRINSICS
3397         }
3398     }
3399
3400     *pIntrinsicID = intrinsicID;
3401
3402 #ifndef _TARGET_ARM_
3403     genTreeOps interlockedOperator;
3404 #endif
3405
3406     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3407     {
3408         // must be done regardless of DbgCode and MinOpts
3409         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3410     }
3411 #ifdef _TARGET_64BIT_
3412     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3413     {
3414         // must be done regardless of DbgCode and MinOpts
3415         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3416     }
3417 #else
3418     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3419 #endif
3420
3421     GenTree* retNode = nullptr;
3422
3423     // Under debug and minopts, only expand what is required.
3424     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3425     {
3426         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3427         return retNode;
3428     }
3429
3430     var_types callType = JITtype2varType(sig->retType);
3431
3432     /* First do the intrinsics which are always smaller than a call */
3433
3434     switch (intrinsicID)
3435     {
3436         GenTree* op1;
3437         GenTree* op2;
3438
3439         case CORINFO_INTRINSIC_Sin:
3440         case CORINFO_INTRINSIC_Cbrt:
3441         case CORINFO_INTRINSIC_Sqrt:
3442         case CORINFO_INTRINSIC_Abs:
3443         case CORINFO_INTRINSIC_Cos:
3444         case CORINFO_INTRINSIC_Round:
3445         case CORINFO_INTRINSIC_Cosh:
3446         case CORINFO_INTRINSIC_Sinh:
3447         case CORINFO_INTRINSIC_Tan:
3448         case CORINFO_INTRINSIC_Tanh:
3449         case CORINFO_INTRINSIC_Asin:
3450         case CORINFO_INTRINSIC_Asinh:
3451         case CORINFO_INTRINSIC_Acos:
3452         case CORINFO_INTRINSIC_Acosh:
3453         case CORINFO_INTRINSIC_Atan:
3454         case CORINFO_INTRINSIC_Atan2:
3455         case CORINFO_INTRINSIC_Atanh:
3456         case CORINFO_INTRINSIC_Log10:
3457         case CORINFO_INTRINSIC_Pow:
3458         case CORINFO_INTRINSIC_Exp:
3459         case CORINFO_INTRINSIC_Ceiling:
3460         case CORINFO_INTRINSIC_Floor:
3461             retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
3462             break;
3463
3464 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3465         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3466         case CORINFO_INTRINSIC_InterlockedAdd32:
3467             interlockedOperator = GT_LOCKADD;
3468             goto InterlockedBinOpCommon;
3469         case CORINFO_INTRINSIC_InterlockedXAdd32:
3470             interlockedOperator = GT_XADD;
3471             goto InterlockedBinOpCommon;
3472         case CORINFO_INTRINSIC_InterlockedXchg32:
3473             interlockedOperator = GT_XCHG;
3474             goto InterlockedBinOpCommon;
3475
3476 #ifdef _TARGET_64BIT_
3477         case CORINFO_INTRINSIC_InterlockedAdd64:
3478             interlockedOperator = GT_LOCKADD;
3479             goto InterlockedBinOpCommon;
3480         case CORINFO_INTRINSIC_InterlockedXAdd64:
3481             interlockedOperator = GT_XADD;
3482             goto InterlockedBinOpCommon;
3483         case CORINFO_INTRINSIC_InterlockedXchg64:
3484             interlockedOperator = GT_XCHG;
3485             goto InterlockedBinOpCommon;
3486 #endif // _TARGET_64BIT_
3487
3488         InterlockedBinOpCommon:
3489             assert(callType != TYP_STRUCT);
3490             assert(sig->numArgs == 2);
3491
3492             op2 = impPopStack().val;
3493             op1 = impPopStack().val;
3494
3495             // This creates:
3496             //   val
3497             // XAdd
3498             //   addr
3499             //     field (for example)
3500             //
3501             // In the case where the first argument is the address of a local, we might
3502             // want to make this *not* make the var address-taken -- but atomic instructions
3503             // on a local are probably pretty useless anyway, so we probably don't care.
3504
3505             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3506             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3507             retNode = op1;
3508             break;
3509 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3510
3511         case CORINFO_INTRINSIC_MemoryBarrier:
3512
3513             assert(sig->numArgs == 0);
3514
3515             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3516             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3517             retNode = op1;
3518             break;
3519
3520 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3521         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3522         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3523 #ifdef _TARGET_64BIT_
3524         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3525 #endif
3526         {
3527             assert(callType != TYP_STRUCT);
3528             assert(sig->numArgs == 3);
3529             GenTree* op3;
3530
3531             op3 = impPopStack().val; // comparand
3532             op2 = impPopStack().val; // value
3533             op1 = impPopStack().val; // location
3534
3535             GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3536
3537             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3538             retNode = node;
3539             break;
3540         }
3541 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3542
3543         case CORINFO_INTRINSIC_StringLength:
3544             op1 = impPopStack().val;
3545             if (!opts.MinOpts() && !opts.compDbgCode)
3546             {
3547                 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3548                 op1                   = arrLen;
3549             }
3550             else
3551             {
3552                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3553                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3554                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3555                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3556             }
3557
3558             // Getting the length of a null string should throw
3559             op1->gtFlags |= GTF_EXCEPT;
3560
3561             retNode = op1;
3562             break;
3563
3564         case CORINFO_INTRINSIC_StringGetChar:
3565             op2 = impPopStack().val;
3566             op1 = impPopStack().val;
3567             op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
3568             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3569             retNode = op1;
3570             break;
3571
3572         case CORINFO_INTRINSIC_InitializeArray:
3573             retNode = impInitializeArrayIntrinsic(sig);
3574             break;
3575
3576         case CORINFO_INTRINSIC_Array_Address:
3577         case CORINFO_INTRINSIC_Array_Get:
3578         case CORINFO_INTRINSIC_Array_Set:
3579             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3580             break;
3581
3582         case CORINFO_INTRINSIC_GetTypeFromHandle:
3583             op1 = impStackTop(0).val;
3584             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3585                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3586             {
3587                 op1 = impPopStack().val;
3588                 // Change call to return RuntimeType directly.
3589                 op1->gtType = TYP_REF;
3590                 retNode     = op1;
3591             }
3592             // Call the regular function.
3593             break;
3594
3595         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3596             op1 = impStackTop(0).val;
3597             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3598                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3599             {
3600                 // Old tree
3601                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3602                 //
3603                 // New tree
3604                 // TreeToGetNativeTypeHandle
3605
3606                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3607                 // to that helper.
3608
3609                 op1 = impPopStack().val;
3610
3611                 // Get native TypeHandle argument to old helper
3612                 op1 = op1->gtCall.gtCallArgs;
3613                 assert(op1->OperIsList());
3614                 assert(op1->gtOp.gtOp2 == nullptr);
3615                 op1     = op1->gtOp.gtOp1;
3616                 retNode = op1;
3617             }
3618             // Call the regular function.
3619             break;
3620
3621 #ifndef LEGACY_BACKEND
3622         case CORINFO_INTRINSIC_Object_GetType:
3623         {
3624             JITDUMP("\n impIntrinsic: call to Object.GetType\n");
3625             op1 = impStackTop(0).val;
3626
3627             // If we're calling GetType on a boxed value, just get the type directly.
3628             if (op1->IsBoxedValue())
3629             {
3630                 JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
3631
3632                 // Try and clean up the box. Obtain the handle we
3633                 // were going to pass to the newobj.
3634                 GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
3635
3636                 if (boxTypeHandle != nullptr)
3637                 {
3638                     // Note we don't need to play the TYP_STRUCT games here like
3639                     // we do for LDTOKEN since the return value of this operator is Type,
3640                     // not RuntimeTypeHandle.
3641                     impPopStack();
3642                     GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
3643                     GenTree*        runtimeType =
3644                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3645                     retNode = runtimeType;
3646                 }
3647             }
3648
3649             // If we have a constrained callvirt with a "box this" transform
3650             // we know we have a value class and hence an exact type.
3651             //
3652             // If so, instead of boxing and then extracting the type, just
3653             // construct the type directly.
3654             if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
3655                 (constraintCallThisTransform == CORINFO_BOX_THIS))
3656             {
3657                 // Ensure this is one of the simple box cases (in particular, rule out nullables).
3658                 const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
3659                 const bool            isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
3660
3661                 if (isSafeToOptimize)
3662                 {
3663                     JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
3664                     impPopStack();
3665                     GenTree* typeHandleOp =
3666                         impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
3667                     if (typeHandleOp == nullptr)
3668                     {
3669                         assert(compDonotInline());
3670                         return nullptr;
3671                     }
3672                     GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
3673                     GenTree*        runtimeType =
3674                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3675                     retNode = runtimeType;
3676                 }
3677             }
3678
3679 #ifdef DEBUG
3680             if (retNode != nullptr)
3681             {
3682                 JITDUMP("Optimized result for call to GetType is\n");
3683                 if (verbose)
3684                 {
3685                     gtDispTree(retNode);
3686                 }
3687             }
3688 #endif
3689
3690             // Else expand as an intrinsic, unless the call is constrained,
3691             // in which case we defer expansion to allow impImportCall do the
3692             // special constraint processing.
3693             if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
3694             {
3695                 JITDUMP("Expanding as special intrinsic\n");
3696                 impPopStack();
3697                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3698
3699                 // Set the CALL flag to indicate that the operator is implemented by a call.
3700                 // Set also the EXCEPTION flag because the native implementation of
3701                 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3702                 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3703                 retNode = op1;
3704                 // Might be further optimizable, so arrange to leave a mark behind
3705                 isSpecial = true;
3706             }
3707
3708             if (retNode == nullptr)
3709             {
3710                 JITDUMP("Leaving as normal call\n");
3711                 // Might be further optimizable, so arrange to leave a mark behind
3712                 isSpecial = true;
3713             }
3714
3715             break;
3716         }
3717
3718 #endif
3719         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3720         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3721         // substitution.  The parameter byref will be assigned into the newly allocated object.
3722         case CORINFO_INTRINSIC_ByReference_Ctor:
3723         {
3724             // Remove call to constructor and directly assign the byref passed
3725             // to the call to the first slot of the ByReference struct.
3726             op1                                    = impPopStack().val;
3727             GenTree*             thisptr           = newobjThis;
3728             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3729             GenTree*             field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3730             GenTree*             assign            = gtNewAssignNode(field, op1);
3731             GenTree*             byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3732             assert(byReferenceStruct != nullptr);
3733             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3734             retNode = assign;
3735             break;
3736         }
3737         // Implement ptr value getter for ByReference struct.
3738         case CORINFO_INTRINSIC_ByReference_Value:
3739         {
3740             op1                         = impPopStack().val;
3741             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3742             GenTree*             field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3743             retNode                     = field;
3744             break;
3745         }
3746         case CORINFO_INTRINSIC_Span_GetItem:
3747         case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3748         {
3749             // We have the index and a pointer to the Span<T> s on the stack. Expand to:
3750             //
3751             // For Span<T>
3752             //   Comma
3753             //     BoundsCheck(index, s->_length)
3754             //     s->_pointer + index * sizeof(T)
3755             //
3756             // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
3757             //
3758             // Signature should show one class type parameter, which
3759             // we need to examine.
3760             assert(sig->sigInst.classInstCount == 1);
3761             CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3762             const unsigned       elemSize    = info.compCompHnd->getClassSize(spanElemHnd);
3763             assert(elemSize > 0);
3764
3765             const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3766
3767             JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3768                     info.compCompHnd->getClassName(spanElemHnd), elemSize);
3769
3770             GenTree* index          = impPopStack().val;
3771             GenTree* ptrToSpan      = impPopStack().val;
3772             GenTree* indexClone     = nullptr;
3773             GenTree* ptrToSpanClone = nullptr;
3774
3775 #if defined(DEBUG)
3776             if (verbose)
3777             {
3778                 printf("with ptr-to-span\n");
3779                 gtDispTree(ptrToSpan);
3780                 printf("and index\n");
3781                 gtDispTree(index);
3782             }
3783 #endif // defined(DEBUG)
3784
3785             // We need to use both index and ptr-to-span twice, so clone or spill.
3786             index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3787                                  nullptr DEBUGARG("Span.get_Item index"));
3788             ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3789                                      nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3790
3791             // Bounds check
3792             CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
3793             const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3794             GenTree*             length       = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
3795             GenTree*             boundsCheck  = new (this, GT_ARR_BOUNDS_CHECK)
3796                 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3797
3798             // Element access
3799             GenTree*             indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3800             GenTree*             sizeofNode  = gtNewIconNode(elemSize);
3801             GenTree*             mulNode     = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3802             CORINFO_FIELD_HANDLE ptrHnd      = info.compCompHnd->getFieldInClass(clsHnd, 0);
3803             const unsigned       ptrOffset   = info.compCompHnd->getFieldOffset(ptrHnd);
3804             GenTree*             data        = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
3805             GenTree*             result      = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3806
3807             // Prepare result
3808             var_types resultType = JITtype2varType(sig->retType);
3809             assert(resultType == result->TypeGet());
3810             retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3811
3812             break;
3813         }
3814
3815         case CORINFO_INTRINSIC_GetRawHandle:
3816         {
3817             noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3818             CORINFO_RESOLVED_TOKEN resolvedToken;
3819             resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3820             resolvedToken.tokenScope   = info.compScopeHnd;
3821             resolvedToken.token        = memberRef;
3822             resolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
3823
3824             CORINFO_GENERICHANDLE_RESULT embedInfo;
3825             info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3826
3827             GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3828                                                  embedInfo.compileTimeHandle);
3829             if (rawHandle == nullptr)
3830             {
3831                 return nullptr;
3832             }
3833
3834             noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3835
3836             unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3837             impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3838
3839             GenTree*  lclVar     = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3840             GenTree*  lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3841             var_types resultType = JITtype2varType(sig->retType);
3842             retNode              = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3843
3844             break;
3845         }
3846
3847         case CORINFO_INTRINSIC_TypeEQ:
3848         case CORINFO_INTRINSIC_TypeNEQ:
3849         {
3850             JITDUMP("Importing Type.op_*Equality intrinsic\n");
3851             op1              = impStackTop(1).val;
3852             op2              = impStackTop(0).val;
3853             GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
3854             if (optTree != nullptr)
3855             {
3856                 // Success, clean up the evaluation stack.
3857                 impPopStack();
3858                 impPopStack();
3859
3860                 // See if we can optimize even further, to a handle compare.
3861                 optTree = gtFoldTypeCompare(optTree);
3862
3863                 // See if we can now fold a handle compare to a constant.
3864                 optTree = gtFoldExpr(optTree);
3865
3866                 retNode = optTree;
3867             }
3868             else
3869             {
3870                 // Retry optimizing these later
3871                 isSpecial = true;
3872             }
3873             break;
3874         }
3875
3876         case CORINFO_INTRINSIC_GetCurrentManagedThread:
3877         case CORINFO_INTRINSIC_GetManagedThreadId:
3878         {
3879             // Retry optimizing these during morph
3880             isSpecial = true;
3881             break;
3882         }
3883
3884         default:
3885             /* Unknown intrinsic */
3886             intrinsicID = CORINFO_INTRINSIC_Illegal;
3887             break;
3888     }
3889
3890     // Look for new-style jit intrinsics by name
3891     if (ni != NI_Illegal)
3892     {
3893         assert(retNode == nullptr);
3894         switch (ni)
3895         {
3896             case NI_System_Enum_HasFlag:
3897             {
3898                 GenTree* thisOp  = impStackTop(1).val;
3899                 GenTree* flagOp  = impStackTop(0).val;
3900                 GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
3901
3902                 if (optTree != nullptr)
3903                 {
3904                     // Optimization successful. Pop the stack for real.
3905                     impPopStack();
3906                     impPopStack();
3907                     retNode = optTree;
3908                 }
3909                 else
3910                 {
3911                     // Retry optimizing this during morph.
3912                     isSpecial = true;
3913                 }
3914
3915                 break;
3916             }
3917
3918             case NI_MathF_Round:
3919             case NI_Math_Round:
3920             {
3921                 // Math.Round and MathF.Round used to be traditional JIT intrinsics. In order
3922                 // to simplify the transition, we will just treat it as if it was still the
3923                 // old intrinsic, CORINFO_INTRINSIC_Round. This should end up flowing properly
3924                 // everywhere else.
3925
3926                 retNode = impMathIntrinsic(method, sig, callType, CORINFO_INTRINSIC_Round, tailCall);
3927                 break;
3928             }
3929
3930             case NI_System_Collections_Generic_EqualityComparer_get_Default:
3931             {
3932                 // Flag for later handling during devirtualization.
3933                 isSpecial = true;
3934                 break;
3935             }
3936
3937             default:
3938                 break;
3939         }
3940     }
3941
3942     if (mustExpand)
3943     {
3944         if (retNode == nullptr)
3945         {
3946             NO_WAY("JIT must expand the intrinsic!");
3947         }
3948     }
3949
3950     // Optionally report if this intrinsic is special
3951     // (that is, potentially re-optimizable during morph).
3952     if (isSpecialIntrinsic != nullptr)
3953     {
3954         *isSpecialIntrinsic = isSpecial;
3955     }
3956
3957     return retNode;
3958 }
3959
3960 GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
3961                                     CORINFO_SIG_INFO*     sig,
3962                                     var_types             callType,
3963                                     CorInfoIntrinsics     intrinsicID,
3964                                     bool                  tailCall)
3965 {
3966     GenTree* op1;
3967     GenTree* op2;
3968
3969     assert(callType != TYP_STRUCT);
3970     assert((intrinsicID == CORINFO_INTRINSIC_Sin) || intrinsicID == CORINFO_INTRINSIC_Cbrt ||
3971            (intrinsicID == CORINFO_INTRINSIC_Sqrt) || (intrinsicID == CORINFO_INTRINSIC_Abs) ||
3972            (intrinsicID == CORINFO_INTRINSIC_Cos) || (intrinsicID == CORINFO_INTRINSIC_Round) ||
3973            (intrinsicID == CORINFO_INTRINSIC_Cosh) || (intrinsicID == CORINFO_INTRINSIC_Sinh) ||
3974            (intrinsicID == CORINFO_INTRINSIC_Tan) || (intrinsicID == CORINFO_INTRINSIC_Tanh) ||
3975            (intrinsicID == CORINFO_INTRINSIC_Asin) || (intrinsicID == CORINFO_INTRINSIC_Asinh) ||
3976            (intrinsicID == CORINFO_INTRINSIC_Acos) || (intrinsicID == CORINFO_INTRINSIC_Acosh) ||
3977            (intrinsicID == CORINFO_INTRINSIC_Atan) || (intrinsicID == CORINFO_INTRINSIC_Atan2) ||
3978            (intrinsicID == CORINFO_INTRINSIC_Atanh) || (intrinsicID == CORINFO_INTRINSIC_Log10) ||
3979            (intrinsicID == CORINFO_INTRINSIC_Pow) || (intrinsicID == CORINFO_INTRINSIC_Exp) ||
3980            (intrinsicID == CORINFO_INTRINSIC_Ceiling) || (intrinsicID == CORINFO_INTRINSIC_Floor));
3981
3982     op1 = nullptr;
3983
3984 #if defined(LEGACY_BACKEND)
3985     if (IsTargetIntrinsic(intrinsicID))
3986 #elif !defined(_TARGET_X86_)
3987     // Intrinsics that are not implemented directly by target instructions will
3988     // be re-materialized as user calls in rationalizer. For prefixed tail calls,
3989     // don't do this optimization, because
3990     //  a) For back compatibility reasons on desktop.Net 4.6 / 4.6.1
3991     //  b) It will be a non-trivial task, or too late, to re-materialize a surviving
3992     //     tail prefixed GT_INTRINSIC as tail call in rationalizer.
3993     if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3994 #else
3995     // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3996     // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3997     // code generation for certain EH constructs.
3998     if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3999 #endif
4000     {
4001         switch (sig->numArgs)
4002         {
4003             case 1:
4004                 op1 = impPopStack().val;
4005
4006 #if FEATURE_X87_DOUBLES
4007
4008                 // X87 stack doesn't differentiate between float/double
4009                 // so it doesn't need a cast, but everybody else does
4010                 // Just double check it is at least a FP type
4011                 noway_assert(varTypeIsFloating(op1));
4012
4013 #else // FEATURE_X87_DOUBLES
4014                 assert(varTypeIsFloating(op1));
4015
4016                 if (op1->TypeGet() != callType)
4017                 {
4018                     op1 = gtNewCastNode(callType, op1, false, callType);
4019                 }
4020
4021 #endif // FEATURE_X87_DOUBLES
4022
4023                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
4024                 break;
4025
4026             case 2:
4027                 op2 = impPopStack().val;
4028                 op1 = impPopStack().val;
4029
4030 #if FEATURE_X87_DOUBLES
4031
4032                 // X87 stack doesn't differentiate between float/double
4033                 // so it doesn't need a cast, but everybody else does
4034                 // Just double check it is at least a FP type
4035                 noway_assert(varTypeIsFloating(op2));
4036                 noway_assert(varTypeIsFloating(op1));
4037
4038 #else // FEATURE_X87_DOUBLES
4039                 assert(varTypeIsFloating(op1));
4040                 assert(varTypeIsFloating(op2));
4041
4042                 if (op2->TypeGet() != callType)
4043                 {
4044                     op2 = gtNewCastNode(callType, op2, false, callType);
4045                 }
4046                 if (op1->TypeGet() != callType)
4047                 {
4048                     op1 = gtNewCastNode(callType, op1, false, callType);
4049                 }
4050
4051 #endif // FEATURE_X87_DOUBLES
4052
4053                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
4054                 break;
4055
4056             default:
4057                 NO_WAY("Unsupported number of args for Math Intrinsic");
4058         }
4059
4060 #ifndef LEGACY_BACKEND
4061         if (IsIntrinsicImplementedByUserCall(intrinsicID))
4062         {
4063             op1->gtFlags |= GTF_CALL;
4064         }
4065 #endif
4066     }
4067
4068     return op1;
4069 }
4070
4071 //------------------------------------------------------------------------
4072 // lookupNamedIntrinsic: map method to jit named intrinsic value
4073 //
4074 // Arguments:
4075 //    method -- method handle for method
4076 //
4077 // Return Value:
4078 //    Id for the named intrinsic, or Illegal if none.
4079 //
4080 // Notes:
4081 //    method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
4082 //    otherwise it is not a named jit intrinsic.
4083 //
4084
4085 NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
4086 {
4087     NamedIntrinsic result = NI_Illegal;
4088
4089     const char* className     = nullptr;
4090     const char* namespaceName = nullptr;
4091     const char* methodName    = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
4092
4093     if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
4094     {
4095         return result;
4096     }
4097
4098     if (strcmp(namespaceName, "System") == 0)
4099     {
4100         if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
4101         {
4102             result = NI_System_Enum_HasFlag;
4103         }
4104         else if ((strcmp(className, "MathF") == 0) && (strcmp(methodName, "Round") == 0))
4105         {
4106             result = NI_MathF_Round;
4107         }
4108         else if ((strcmp(className, "Math") == 0) && (strcmp(methodName, "Round") == 0))
4109         {
4110             result = NI_Math_Round;
4111         }
4112     }
4113     else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
4114     {
4115         if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
4116         {
4117             result = NI_System_Collections_Generic_EqualityComparer_get_Default;
4118         }
4119     }
4120
4121 #ifdef FEATURE_HW_INTRINSICS
4122 #if defined(_TARGET_XARCH_)
4123     if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.X86") == 0)
4124     {
4125         InstructionSet isa = lookupHWIntrinsicISA(className);
4126         result             = lookupHWIntrinsic(methodName, isa);
4127     }
4128 #elif defined(_TARGET_ARM64_)
4129     if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.Arm.Arm64") == 0)
4130     {
4131         result = lookupHWIntrinsic(className, methodName);
4132     }
4133 #else // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4134 #error Unsupported platform
4135 #endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4136 #endif // FEATURE_HW_INTRINSICS
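    // Illustration (assumed typical input, not taken from the original comments): on xarch a
    // method such as System.Runtime.Intrinsics.X86.Sse2.Add has its ISA resolved from the class
    // name ("Sse2") by lookupHWIntrinsicISA and its intrinsic id from the method name ("Add")
    // by lookupHWIntrinsic.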
4137     return result;
4138 }
4139
4140 /*****************************************************************************/
4141
4142 GenTree* Compiler::impArrayAccessIntrinsic(
4143     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
4144 {
4145     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
4146        the following, as it generates fatter code.
4147     */
4148
4149     if (compCodeOpt() == SMALL_CODE)
4150     {
4151         return nullptr;
4152     }
4153
4154     /* These intrinsics generate fatter (but faster) code and are only
4155        done if we don't need SMALL_CODE */
4156
4157     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
4158
4159     // The rank 1 case is special because it has to handle two array formats;
4160     // we will simply not do that case.
4161     if (rank > GT_ARR_MAX_RANK || rank <= 1)
4162     {
4163         return nullptr;
4164     }
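    // Illustration (not from the original source): for a C# element load "v = a[i, j]" on a
    // two-dimensional array, CORINFO_INTRINSIC_Array_Get has sig->numArgs == 2, so rank == 2;
    // for the store "a[i, j] = v", CORINFO_INTRINSIC_Array_Set has sig->numArgs == 3 and the
    // trailing value argument is excluded, giving the same rank of 2.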
4165
4166     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
4167     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
4168
4169     // For the ref case, we will only be able to inline if the types match
4170     // (the verifier checks for this; we don't care about the nonverified case)
4171     // and the type is final (so we don't need to do the cast).
4172     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
4173     {
4174         // Get the call site signature
4175         CORINFO_SIG_INFO LocalSig;
4176         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
4177         assert(LocalSig.hasThis());
4178
4179         CORINFO_CLASS_HANDLE actualElemClsHnd;
4180
4181         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4182         {
4183             // Fetch the last argument, the one that indicates the type we are setting.
4184             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
4185             for (unsigned r = 0; r < rank; r++)
4186             {
4187                 argType = info.compCompHnd->getArgNext(argType);
4188             }
4189
4190             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
4191             actualElemClsHnd = argInfo.GetClassHandle();
4192         }
4193         else
4194         {
4195             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
4196
4197             // Fetch the return type
4198             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
4199             assert(retInfo.IsByRef());
4200             actualElemClsHnd = retInfo.GetClassHandle();
4201         }
4202
4203         // if it's not final, we can't do the optimization
4204         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
4205         {
4206             return nullptr;
4207         }
4208     }
4209
4210     unsigned arrayElemSize;
4211     if (elemType == TYP_STRUCT)
4212     {
4213         assert(arrElemClsHnd);
4214
4215         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
4216     }
4217     else
4218     {
4219         arrayElemSize = genTypeSize(elemType);
4220     }
4221
4222     if ((unsigned char)arrayElemSize != arrayElemSize)
4223     {
4224         // arrayElemSize would be truncated as an unsigned char.
4225         // This means the array element is too large. Don't do the optimization.
4226         return nullptr;
4227     }
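    // For illustration: GenTreeArrElem stores the element size as an unsigned char (see the
    // casts below), so a struct element larger than 255 bytes falls back to the ordinary,
    // non-intrinsic array access path.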
4228
4229     GenTree* val = nullptr;
4230
4231     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4232     {
4233         // Assignment of a struct is more work, and there are more gets than sets.
4234         if (elemType == TYP_STRUCT)
4235         {
4236             return nullptr;
4237         }
4238
4239         val = impPopStack().val;
4240         assert(genActualType(elemType) == genActualType(val->gtType) ||
4241                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
4242                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
4243                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
4244     }
4245
4246     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
4247
4248     GenTree* inds[GT_ARR_MAX_RANK];
4249     for (unsigned k = rank; k > 0; k--)
4250     {
4251         inds[k - 1] = impPopStack().val;
4252     }
4253
4254     GenTree* arr = impPopStack().val;
4255     assert(arr->gtType == TYP_REF);
4256
4257     GenTree* arrElem =
4258         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
4259                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
4260
4261     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
4262     {
4263         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
4264     }
4265
4266     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4267     {
4268         assert(val != nullptr);
4269         return gtNewAssignNode(arrElem, val);
4270     }
4271     else
4272     {
4273         return arrElem;
4274     }
4275 }
4276
4277 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
4278 {
4279     unsigned i;
4280
4281     // do some basic checks first
4282     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
4283     {
4284         return FALSE;
4285     }
4286
4287     if (verCurrentState.esStackDepth > 0)
4288     {
4289         // merge stack types
4290         StackEntry* parentStack = block->bbStackOnEntry();
4291         StackEntry* childStack  = verCurrentState.esStack;
4292
4293         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
4294         {
4295             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
4296             {
4297                 return FALSE;
4298             }
4299         }
4300     }
4301
4302     // merge initialization status of this ptr
4303
4304     if (verTrackObjCtorInitState)
4305     {
4306         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
4307         assert(verCurrentState.thisInitialized != TIS_Bottom);
4308
4309         // If the successor block's thisInit state is unknown, copy it from the current state.
4310         if (block->bbThisOnEntry() == TIS_Bottom)
4311         {
4312             *changed = true;
4313             verSetThisInit(block, verCurrentState.thisInitialized);
4314         }
4315         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
4316         {
4317             if (block->bbThisOnEntry() != TIS_Top)
4318             {
4319                 *changed = true;
4320                 verSetThisInit(block, TIS_Top);
4321
4322                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
4323                 {
4324                     // The block is bad. Control can flow through the block to any handler that catches the
4325                     // verification exception, but the importer ignores bad blocks and therefore won't model
4326                     // this flow in the normal way. To complete the merge into the bad block, the new state
4327                     // needs to be manually pushed to the handlers that may be reached after the verification
4328                     // exception occurs.
4329                     //
4330                     // Usually, the new state was already propagated to the relevant handlers while processing
4331                     // the predecessors of the bad block. The exception is when the bad block is at the start
4332                     // of a try region, meaning it is protected by additional handlers that do not protect its
4333                     // predecessors.
4334                     //
4335                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
4336                     {
4337                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
4338                         // recursive calls back into this code path (if successors of the current bad block are
4339                         // also bad blocks).
4340                         //
4341                         ThisInitState origTIS           = verCurrentState.thisInitialized;
4342                         verCurrentState.thisInitialized = TIS_Top;
4343                         impVerifyEHBlock(block, true);
4344                         verCurrentState.thisInitialized = origTIS;
4345                     }
4346                 }
4347             }
4348         }
4349     }
4350     else
4351     {
4352         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4353     }
4354
4355     return TRUE;
4356 }
4357
4358 /*****************************************************************************
4359  * 'logMsg' is true if a log message needs to be logged. false if the caller has
4360  *   already logged it (presumably in a more detailed fashion than done here)
4361  * 'bVerificationException' is true for a verification exception, false for a
4362  *   "call unauthorized by host" exception.
4363  */
4364
4365 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4366 {
4367     block->bbJumpKind = BBJ_THROW;
4368     block->bbFlags |= BBF_FAILED_VERIFICATION;
4369
4370     impCurStmtOffsSet(block->bbCodeOffs);
4371
4372 #ifdef DEBUG
4373     // we need this since BeginTreeList asserts otherwise
4374     impTreeList = impTreeLast = nullptr;
4375     block->bbFlags &= ~BBF_IMPORTED;
4376
4377     if (logMsg)
4378     {
4379         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4380                 block->bbCodeOffs, block->bbCodeOffsEnd));
4381         if (verbose)
4382         {
4383             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4384         }
4385     }
4386
4387     if (JitConfig.DebugBreakOnVerificationFailure())
4388     {
4389         DebugBreak();
4390     }
4391 #endif
4392
4393     impBeginTreeList();
4394
4395     // if the stack is non-empty evaluate all the side-effects
4396     if (verCurrentState.esStackDepth > 0)
4397     {
4398         impEvalSideEffects();
4399     }
4400     assert(verCurrentState.esStackDepth == 0);
4401
4402     GenTree* op1 =
4403         gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4404     // verCurrentState.esStackDepth = 0;
4405     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4406
4407     // The inliner is not able to handle methods that require a throw block, so
4408     // make sure this method never gets inlined.
4409     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4410 }
4411
4412 /*****************************************************************************
4413  *
4414  */
4415 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4416
4417 {
4418     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4419     // slightly different mechanism in which it calls the JIT to perform IL verification:
4420     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4421     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4422     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4423     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
4424     // up the exception, instead it embeds a throw inside the offending basic block and lets this
4425     // up the exception; instead it embeds a throw inside the offending basic block and lets it
4426     // fail at run time of the jitted method.
4427     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4428     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
4429     // just try to find out whether to fail this method before even actually jitting it.  So, in case
4430     // we detect these two conditions, instead of generating a throw statement inside the offending
4431     // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
4432     // to return false and make RyuJIT behave the same way JIT64 does.
4433     //
4434     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4435     // RyuJIT for the time being until we completely replace JIT64.
4436     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4437
4438     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4439     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
4440     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4441     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4442     // be turned off during importation).
4443     CLANG_FORMAT_COMMENT_ANCHOR;
4444
4445 #ifdef _TARGET_64BIT_
4446
4447 #ifdef DEBUG
4448     bool canSkipVerificationResult =
4449         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4450     assert(tiVerificationNeeded || canSkipVerificationResult);
4451 #endif // DEBUG
4452
4453     // Add the non verifiable flag to the compiler
4454     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4455     {
4456         tiIsVerifiableCode = FALSE;
4457     }
4458 #endif //_TARGET_64BIT_
4459     verResetCurrentState(block, &verCurrentState);
4460     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4461
4462 #ifdef DEBUG
4463     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4464 #endif                   // DEBUG
4465 }
4466
4467 /******************************************************************************/
4468 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4469 {
4470     assert(ciType < CORINFO_TYPE_COUNT);
4471
4472     typeInfo tiResult;
4473     switch (ciType)
4474     {
4475         case CORINFO_TYPE_STRING:
4476         case CORINFO_TYPE_CLASS:
4477             tiResult = verMakeTypeInfo(clsHnd);
4478             if (!tiResult.IsType(TI_REF))
4479             { // type must be consistent with element type
4480                 return typeInfo();
4481             }
4482             break;
4483
4484 #ifdef _TARGET_64BIT_
4485         case CORINFO_TYPE_NATIVEINT:
4486         case CORINFO_TYPE_NATIVEUINT:
4487             if (clsHnd)
4488             {
4489                 // If we have more precise information, use it
4490                 return verMakeTypeInfo(clsHnd);
4491             }
4492             else
4493             {
4494                 return typeInfo::nativeInt();
4495             }
4496             break;
4497 #endif // _TARGET_64BIT_
4498
4499         case CORINFO_TYPE_VALUECLASS:
4500         case CORINFO_TYPE_REFANY:
4501             tiResult = verMakeTypeInfo(clsHnd);
4502             // type must be consistent with element type;
4503             if (!tiResult.IsValueClass())
4504             {
4505                 return typeInfo();
4506             }
4507             break;
4508         case CORINFO_TYPE_VAR:
4509             return verMakeTypeInfo(clsHnd);
4510
4511         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4512         case CORINFO_TYPE_VOID:
4513             return typeInfo();
4514             break;
4515
4516         case CORINFO_TYPE_BYREF:
4517         {
4518             CORINFO_CLASS_HANDLE childClassHandle;
4519             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4520             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4521         }
4522         break;
4523
4524         default:
4525             if (clsHnd)
4526             { // If we have more precise information, use it
4527                 return typeInfo(TI_STRUCT, clsHnd);
4528             }
4529             else
4530             {
4531                 return typeInfo(JITtype2tiType(ciType));
4532             }
4533     }
4534     return tiResult;
4535 }
4536
4537 /******************************************************************************/
4538
4539 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4540 {
4541     if (clsHnd == nullptr)
4542     {
4543         return typeInfo();
4544     }
4545
4546     // Byrefs should only occur in method and local signatures, which are accessed
4547     // using ICorClassInfo and ICorClassInfo.getChildType.
4548     // So findClass() and getClassAttribs() should not be called for byrefs
4549
4550     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4551     {
4552         assert(!"Did findClass() return a Byref?");
4553         return typeInfo();
4554     }
4555
4556     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4557
4558     if (attribs & CORINFO_FLG_VALUECLASS)
4559     {
4560         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4561
4562         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4563         // not occur here, so we may want to change this to an assert instead.
4564         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4565         {
4566             return typeInfo();
4567         }
4568
4569 #ifdef _TARGET_64BIT_
4570         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4571         {
4572             return typeInfo::nativeInt();
4573         }
4574 #endif // _TARGET_64BIT_
4575
4576         if (t != CORINFO_TYPE_UNDEF)
4577         {
4578             return (typeInfo(JITtype2tiType(t)));
4579         }
4580         else if (bashStructToRef)
4581         {
4582             return (typeInfo(TI_REF, clsHnd));
4583         }
4584         else
4585         {
4586             return (typeInfo(TI_STRUCT, clsHnd));
4587         }
4588     }
4589     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4590     {
4591         // See comment in _typeInfo.h for why we do it this way.
4592         return (typeInfo(TI_REF, clsHnd, true));
4593     }
4594     else
4595     {
4596         return (typeInfo(TI_REF, clsHnd));
4597     }
4598 }
4599
4600 /******************************************************************************/
4601 BOOL Compiler::verIsSDArray(typeInfo ti)
4602 {
4603     if (ti.IsNullObjRef())
4604     { // nulls are SD arrays
4605         return TRUE;
4606     }
4607
4608     if (!ti.IsType(TI_REF))
4609     {
4610         return FALSE;
4611     }
4612
4613     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4614     {
4615         return FALSE;
4616     }
4617     return TRUE;
4618 }
4619
4620 /******************************************************************************/
4621 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4622 /* Returns an error type if anything goes wrong */
4623
4624 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4625 {
4626     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4627
4628     if (!verIsSDArray(arrayObjectType))
4629     {
4630         return typeInfo();
4631     }
4632
4633     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4634     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4635
4636     return verMakeTypeInfo(ciType, childClassHandle);
4637 }
4638
4639 /*****************************************************************************
4640  */
4641 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4642 {
4643     CORINFO_CLASS_HANDLE classHandle;
4644     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4645
4646     var_types type = JITtype2varType(ciType);
4647     if (varTypeIsGC(type))
4648     {
4649         // For efficiency, getArgType only returns something in classHandle for
4650         // value types.  For other types that have additional type info, you
4651         // have to call back explicitly
4652         classHandle = info.compCompHnd->getArgClass(sig, args);
4653         if (!classHandle)
4654         {
4655             NO_WAY("Could not figure out Class specified in argument or local signature");
4656         }
4657     }
4658
4659     return verMakeTypeInfo(ciType, classHandle);
4660 }
4661
4662 /*****************************************************************************/
4663
4664 // This does the expensive check to figure out whether the method
4665 // needs to be verified. It is called only when we fail verification,
4666 // just before throwing the verification exception.
4667
4668 BOOL Compiler::verNeedsVerification()
4669 {
4670     // If we have previously determined that verification is NOT needed
4671     // (for example in Compiler::compCompile), that means verification is really not needed.
4672     // Return the same decision we made before.
4673     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4674
4675     if (!tiVerificationNeeded)
4676     {
4677         return tiVerificationNeeded;
4678     }
4679
4680     assert(tiVerificationNeeded);
4681
4682     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4683     // obtain the answer.
4684     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4685         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4686
4687     // canSkipVerification will return one of the following three values:
4688     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4689     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4690     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4691     //     but need to insert a callout to the VM to ask during runtime
4692     //     whether to skip verification or not.
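    // For illustration, the mapping applied below (a sketch of the existing logic, not new
    // behavior): CANNOT_SKIP keeps jit-time verification on, CAN_SKIP turns it off, and
    // RUNTIME_CHECK turns it off while setting tiRuntimeCalloutNeeded so the VM is consulted
    // at run time.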
4693
4694     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4695     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4696     {
4697         tiRuntimeCalloutNeeded = true;
4698     }
4699
4700     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4701     {
4702         // Dev10 706080 - Testers don't like the assert, so just silence it
4703         // by not using the macros that invoke debugAssert.
4704         badCode();
4705     }
4706
4707     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4708     // The following line means we will NOT do jit time verification if canSkipVerification
4709     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4710     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4711     return tiVerificationNeeded;
4712 }
4713
4714 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4715 {
4716     if (ti.IsByRef())
4717     {
4718         return TRUE;
4719     }
4720     if (!ti.IsType(TI_STRUCT))
4721     {
4722         return FALSE;
4723     }
4724     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4725 }
4726
4727 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4728 {
4729     if (ti.IsPermanentHomeByRef())
4730     {
4731         return TRUE;
4732     }
4733     else
4734     {
4735         return FALSE;
4736     }
4737 }
4738
4739 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4740 {
4741     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4742             || ti.IsUnboxedGenericTypeVar() ||
4743             (ti.IsType(TI_STRUCT) &&
4744              // exclude byreflike structs
4745              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4746 }
4747
4748 // Is it a boxed value type?
4749 bool Compiler::verIsBoxedValueType(typeInfo ti)
4750 {
4751     if (ti.GetType() == TI_REF)
4752     {
4753         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4754         return !!eeIsValueClass(clsHnd);
4755     }
4756     else
4757     {
4758         return false;
4759     }
4760 }
4761
4762 /*****************************************************************************
4763  *
4764  *  Check if a TailCall is legal.
4765  */
4766
4767 bool Compiler::verCheckTailCallConstraint(
4768     OPCODE                  opcode,
4769     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4770     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4771     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4772                                                        // return false to the caller.
4773                                                        // If false, it will throw.
4774     )
4775 {
4776     DWORD            mflags;
4777     CORINFO_SIG_INFO sig;
4778     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4779                                    // this counter is used to keep track of how many items have been
4780                                    // virtually popped
4781
4782     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4783     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4784     unsigned              methodClassFlgs = 0;
4785
4786     assert(impOpcodeIsCallOpcode(opcode));
4787
4788     if (compIsForInlining())
4789     {
4790         return false;
4791     }
4792
4793     // for calli, VerifyOrReturn that this is not a virtual method
4794     if (opcode == CEE_CALLI)
4795     {
4796         /* Get the call sig */
4797         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4798
4799         // We don't know the target method, so we have to infer the flags, or
4800         // assume the worst-case.
4801         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4802     }
4803     else
4804     {
4805         methodHnd = pResolvedToken->hMethod;
4806
4807         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4808
4809         // When verifying generic code we pair the method handle with its
4810         // owning class to get the exact method signature.
4811         methodClassHnd = pResolvedToken->hClass;
4812         assert(methodClassHnd);
4813
4814         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4815
4816         // opcode specific check
4817         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4818     }
4819
4820     // We must have gotten the methodClassHnd if the opcode is not CEE_CALLI
4821     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4822
4823     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4824     {
4825         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4826     }
4827
4828     // check compatibility of the arguments
4829     unsigned int argCount;
4830     argCount = sig.numArgs;
4831     CORINFO_ARG_LIST_HANDLE args;
4832     args = sig.args;
4833     while (argCount--)
4834     {
4835         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4836
4837         // check that the argument is not a byref for tailcalls
4838         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4839
4840         // For unsafe code, we might have parameters containing pointer to the stack location.
4841         // Disallow the tailcall for this kind.
4842         CORINFO_CLASS_HANDLE classHandle;
4843         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4844         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4845
4846         args = info.compCompHnd->getArgNext(args);
4847     }
4848
4849     // update popCount
4850     popCount += sig.numArgs;
4851
4852     // check for 'this' which is on non-static methods, not called via NEWOBJ
4853     if (!(mflags & CORINFO_FLG_STATIC))
4854     {
4855         // Always update the popCount.
4856         // This is crucial for the stack calculation to be correct.
4857         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4858         popCount++;
4859
4860         if (opcode == CEE_CALLI)
4861         {
4862             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4863             // on the stack.
4864             if (tiThis.IsValueClass())
4865             {
4866                 tiThis.MakeByRef();
4867             }
4868             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4869         }
4870         else
4871         {
4872             // Check type compatibility of the this argument
4873             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4874             if (tiDeclaredThis.IsValueClass())
4875             {
4876                 tiDeclaredThis.MakeByRef();
4877             }
4878
4879             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4880         }
4881     }
4882
4883     // Tail calls on constrained calls should be illegal too:
4884     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4885     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4886
4887     // Get the exact view of the signature for an array method
4888     if (sig.retType != CORINFO_TYPE_VOID)
4889     {
4890         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4891         {
4892             assert(opcode != CEE_CALLI);
4893             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4894         }
4895     }
4896
4897     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4898     typeInfo tiCallerRetType =
4899         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4900
4901     // void return type gets morphed into the error type, so we have to treat them specially here
4902     if (sig.retType == CORINFO_TYPE_VOID)
4903     {
4904         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4905                                   speculative);
4906     }
4907     else
4908     {
4909         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4910                                                    NormaliseForStack(tiCallerRetType), true),
4911                                   "tailcall return mismatch", speculative);
4912     }
4913
4914     // for tailcall, stack must be empty
4915     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4916
4917     return true; // Yes, tailcall is legal
4918 }
4919
4920 /*****************************************************************************
4921  *
4922  *  Checks the IL verification rules for the call
4923  */
4924
4925 void Compiler::verVerifyCall(OPCODE                  opcode,
4926                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4927                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4928                              bool                    tailCall,
4929                              bool                    readonlyCall,
4930                              const BYTE*             delegateCreateStart,
4931                              const BYTE*             codeAddr,
4932                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4933 {
4934     DWORD             mflags;
4935     CORINFO_SIG_INFO* sig      = nullptr;
4936     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4937                                     // this counter is used to keep track of how many items have been
4938                                     // virtually popped
4939
4940     // for calli, VerifyOrReturn that this is not a virtual method
4941     if (opcode == CEE_CALLI)
4942     {
4943         Verify(false, "Calli not verifiable");
4944         return;
4945     }
4946
4947     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4948     mflags = callInfo->verMethodFlags;
4949
4950     sig = &callInfo->verSig;
4951
4952     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4953     {
4954         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4955     }
4956
4957     // opcode specific check
4958     unsigned methodClassFlgs = callInfo->classFlags;
4959     switch (opcode)
4960     {
4961         case CEE_CALLVIRT:
4962             // cannot do callvirt on valuetypes
4963             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4964             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4965             break;
4966
4967         case CEE_NEWOBJ:
4968         {
4969             assert(!tailCall); // Importer should not allow this
4970             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4971                            "newobj must be on instance");
4972
4973             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4974             {
4975                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4976                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4977                 typeInfo tiDeclaredFtn =
4978                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4979                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4980
4981                 assert(popCount == 0);
4982                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4983                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4984
4985                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4986                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4987                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4988                                "delegate object type mismatch");
4989
4990                 CORINFO_CLASS_HANDLE objTypeHandle =
4991                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4992
4993                 // the method signature must be compatible with the delegate's invoke method
4994
4995                 // check that for virtual functions, the type of the object used to get the
4996                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4997                 // since this is a bit of work to determine in general, we pattern match stylized
4998                 // code sequences
4999
5000                 // the delegate creation code check, which used to be done later, is now done here
5001                 // so we can read delegateMethodRef directly
5002                 // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
5003                 // we then use it in our call to isCompatibleDelegate().
5004
5005                 mdMemberRef delegateMethodRef = mdMemberRefNil;
5006                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
5007                                "must create delegates with certain IL");
5008
5009                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
5010                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
5011                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
5012                 delegateResolvedToken.token        = delegateMethodRef;
5013                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
5014                 info.compCompHnd->resolveToken(&delegateResolvedToken);
5015
5016                 CORINFO_CALL_INFO delegateCallInfo;
5017                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
5018                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
5019
5020                 BOOL isOpenDelegate = FALSE;
5021                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
5022                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
5023                                                                       &isOpenDelegate),
5024                                "function incompatible with delegate");
5025
5026                 // check the constraints on the target method
5027                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
5028                                "delegate target has unsatisfied class constraints");
5029                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
5030                                                                             tiActualFtn.GetMethod()),
5031                                "delegate target has unsatisfied method constraints");
5032
5033                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
5034                 // for additional verification rules for delegates
5035                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
5036                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
5037                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5038                 {
5039
5040                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
5041 #ifdef DEBUG
5042                         && StrictCheckForNonVirtualCallToVirtualMethod()
5043 #endif
5044                             )
5045                     {
5046                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5047                         {
5048                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
5049                                                verIsBoxedValueType(tiActualObj),
5050                                            "The 'this' parameter to the call must be either the calling method's "
5051                                            "'this' parameter or "
5052                                            "a boxed value type.");
5053                         }
5054                     }
5055                 }
5056
5057                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
5058                 {
5059                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
5060
5061                     Verify(targetIsStatic || !isOpenDelegate,
5062                            "Unverifiable creation of an open instance delegate for a protected member.");
5063
5064                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
5065                                                                 ? info.compClassHnd
5066                                                                 : tiActualObj.GetClassHandleForObjRef();
5067
5068                     // In the case of protected methods, it is a requirement that the 'this'
5069                     // pointer be a subclass of the current context.  Perform this check.
5070                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5071                            "Accessing protected method through wrong type.");
5072                 }
5073                 goto DONE_ARGS;
5074             }
5075         }
5076         // fall thru to default checks
5077         default:
5078             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
5079     }
5080     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
5081                    "can only newobj a delegate constructor");
5082
5083     // check compatibility of the arguments
5084     unsigned int argCount;
5085     argCount = sig->numArgs;
5086     CORINFO_ARG_LIST_HANDLE args;
5087     args = sig->args;
5088     while (argCount--)
5089     {
5090         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
5091
5092         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
5093         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
5094
5095         args = info.compCompHnd->getArgNext(args);
5096     }
5097
5098 DONE_ARGS:
5099
5100     // update popCount
5101     popCount += sig->numArgs;
5102
5103     // check for 'this', which is on non-static methods not called via NEWOBJ
5104     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
5105     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
5106     {
5107         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5108         popCount++;
5109
5110         // If it is null, we assume we can access it (since it will AV shortly)
5111         // If it is anything but a reference class, there is no hierarchy, so
5112         // again, we don't need the precise instance class to compute 'protected' access
5113         if (tiThis.IsType(TI_REF))
5114         {
5115             instanceClassHnd = tiThis.GetClassHandleForObjRef();
5116         }
5117
5118         // Check type compatibility of the this argument
5119         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
5120         if (tiDeclaredThis.IsValueClass())
5121         {
5122             tiDeclaredThis.MakeByRef();
5123         }
5124
5125         // If this is a call to the base class .ctor, set thisPtr Init for
5126         // this block.
5127         if (mflags & CORINFO_FLG_CONSTRUCTOR)
5128         {
5129             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
5130                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
5131             {
5132                 assert(verCurrentState.thisInitialized !=
5133                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
5134                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
5135                                "Call to base class constructor when 'this' is possibly initialized");
5136                 // Otherwise, 'this' is now initialized.
5137                 verCurrentState.thisInitialized = TIS_Init;
5138                 tiThis.SetInitialisedObjRef();
5139             }
5140             else
5141             {
5142                 // We allow direct calls to value type constructors
5143                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
5144                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
5145                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
5146                                "Bad call to a constructor");
5147             }
5148         }
5149
5150         if (pConstrainedResolvedToken != nullptr)
5151         {
5152             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
5153
5154             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
5155
5156             // We just dereference this and test for equality
5157             tiThis.DereferenceByRef();
5158             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
5159                            "this type mismatch with constrained type operand");
5160
5161             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
5162             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
5163         }
5164
5165         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
5166         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
5167         {
5168             tiDeclaredThis.SetIsReadonlyByRef();
5169         }
5170
5171         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
5172
5173         if (tiThis.IsByRef())
5174         {
5175             // Find the actual type where the method exists (as opposed to what is declared
5176             // in the metadata). This is to prevent passing a byref as the "this" argument
5177             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
5178
5179             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
5180             VerifyOrReturn(eeIsValueClass(actualClassHnd),
5181                            "Call to base type of valuetype (which is never a valuetype)");
5182         }
5183
5184         // Rules for non-virtual call to a non-final virtual method:
5185
5186         // Define:
5187         // The "this" pointer is considered to be "possibly written" if
5188         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
5189         //   (or)
5190         //   2. It has been stored to (STARG.0) anywhere in the method.
5191
5192         // A non-virtual call to a non-final virtual method is only allowed if
5193         //   1. The this pointer passed to the callee is an instance of a boxed value type.
5194         //   (or)
5195         //   2. The this pointer passed to the callee is the current method's this pointer.
5196         //      (and) The current method's this pointer is not "possibly written".
5197
5198         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
5199         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
5200         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
5201         // harder and more error prone.
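        // Illustrative example (an assumption about typical C# codegen, not from the original
        // comments): "base.ToString()" compiles to a non-virtual CEE_CALL to the virtual,
        // non-final Object::ToString; the check below accepts it only when the receiver is the
        // caller's own, never-written 'this' parameter or a boxed value type.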
5202
5203         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
5204 #ifdef DEBUG
5205             && StrictCheckForNonVirtualCallToVirtualMethod()
5206 #endif
5207                 )
5208         {
5209             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5210             {
5211                 VerifyOrReturn(
5212                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
5213                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
5214                     "a boxed value type.");
5215             }
5216         }
5217     }
5218
5219     // check any constraints on the callee's class and type parameters
5220     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
5221                    "method has unsatisfied class constraints");
5222     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
5223                    "method has unsatisfied method constraints");
5224
5225     if (mflags & CORINFO_FLG_PROTECTED)
5226     {
5227         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5228                        "Can't access protected method");
5229     }
5230
5231     // Get the exact view of the signature for an array method
5232     if (sig->retType != CORINFO_TYPE_VOID)
5233     {
5234         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
5235     }
5236
5237     // "readonly." prefixed calls only allowed for the Address operation on arrays.
5238     // The methods supported by array types are under the control of the EE
5239     // so we can trust that only the Address operation returns a byref.
5240     if (readonlyCall)
5241     {
5242         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
5243         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
5244                        "unexpected use of readonly prefix");
5245     }
5246
5247     // Verify the tailcall
5248     if (tailCall)
5249     {
5250         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
5251     }
5252 }
5253
5254 /*****************************************************************************
5255  *  Checks that a delegate creation is done using the following pattern:
5256  *     dup
5257  *     ldvirtftn targetMemberRef
5258  *  OR
5259  *     ldftn targetMemberRef
5260  *
5261  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
5262  *  not in this basic block)
5263  *
5264  *  targetMemberRef is read from the code sequence.
5265  *  targetMemberRef is validated iff verificationNeeded.
5266  */
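// Illustration (assumed typical C# compiler output, not part of the original comment): for
// "new Action(obj.M)" the compiler emits either
//     ldftn M                  (non-virtual target)
// or
//     dup
//     ldvirtftn M              (virtual target)
// immediately before the delegate constructor call, matching the two sequences checked here.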
5267
5268 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
5269                                         const BYTE*  codeAddr,
5270                                         mdMemberRef& targetMemberRef)
5271 {
5272     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5273     {
5274         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
5275         return TRUE;
5276     }
5277     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
5278     {
5279         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
5280         return TRUE;
5281     }
5282
5283     return FALSE;
5284 }
5285
5286 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
5287 {
5288     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
5289     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
5290     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
5291     if (!tiCompatibleWith(value, normPtrVal, true))
5292     {
5293         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
5294         compUnsafeCastUsed = true;
5295     }
5296     return ptrVal;
5297 }
5298
5299 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
5300 {
5301     assert(!instrType.IsStruct());
5302
5303     typeInfo ptrVal;
5304     if (ptr.IsByRef())
5305     {
5306         ptrVal = DereferenceByRef(ptr);
5307         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
5308         {
5309             Verify(false, "bad pointer");
5310             compUnsafeCastUsed = true;
5311         }
5312         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
5313         {
5314             Verify(false, "pointer not consistent with instr");
5315             compUnsafeCastUsed = true;
5316         }
5317     }
5318     else
5319     {
5320         Verify(false, "pointer not byref");
5321         compUnsafeCastUsed = true;
5322     }
5323
5324     return ptrVal;
5325 }
5326
5327 // Verify that the field is used properly.  'tiThis' is NULL for statics,
5328 // 'fieldFlags' is the fields attributes, and mutator is TRUE if it is a
5329 // ld*flda or a st*fld.
5330 // 'enclosingClass' is given if we are accessing a field in some specific type.
5331
5332 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
5333                               const CORINFO_FIELD_INFO& fieldInfo,
5334                               const typeInfo*           tiThis,
5335                               BOOL                      mutator,
5336                               BOOL                      allowPlainStructAsThis)
5337 {
5338     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5339     unsigned             fieldFlags     = fieldInfo.fieldFlags;
5340     CORINFO_CLASS_HANDLE instanceClass =
5341         info.compClassHnd; // for statics, we imagine the instance is the current class.
5342
5343     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5344     if (mutator)
5345     {
5346         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
5347         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5348         {
5349             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5350                        info.compIsStatic == isStaticField,
5351                    "bad use of initonly field (set or address taken)");
5352         }
5353     }
5354
5355     if (tiThis == nullptr)
5356     {
5357         Verify(isStaticField, "used static opcode with non-static field");
5358     }
5359     else
5360     {
5361         typeInfo tThis = *tiThis;
5362
5363         if (allowPlainStructAsThis && tThis.IsValueClass())
5364         {
5365             tThis.MakeByRef();
5366         }
5367
5368         // If it is null, we assume we can access it (since it will AV shortly)
5369         // If it is anything but a reference class, there is no hierarchy, so
5370         // again, we don't need the precise instance class to compute 'protected' access
5371         if (tiThis->IsType(TI_REF))
5372         {
5373             instanceClass = tiThis->GetClassHandleForObjRef();
5374         }
5375
5376         // Note that even if the field is static, we require that the this pointer
5377         // satisfy the same constraints as a non-static field  This happens to
5378         // satisfy the same constraints as a non-static field.  This happens to
5379         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5380         if (tiDeclaredThis.IsValueClass())
5381         {
5382             tiDeclaredThis.MakeByRef();
5383
5384             // we allow read-only tThis, on any field access (even stores!), because if the
5385             // class implementor wants to prohibit stores he should make the field private.
5386             // we do this by setting the read-only bit on the type we compare tThis to.
5387             tiDeclaredThis.SetIsReadonlyByRef();
5388         }
5389         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5390         {
5391             // Any field access is legal on "uninitialized" this pointers.
5392             // The easiest way to implement this is to simply set the
5393             // initialized bit for the duration of the type check on the
5394             // field access only.  It does not change the state of the "this"
5395             // for the function as a whole. Note that the "tThis" is a copy
5396             // of the original "this" type (*tiThis) passed in.
5397             tThis.SetInitialisedObjRef();
5398         }
5399
5400         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5401     }
5402
5403     // Presently the JIT does not check that we don't store or take the address of init-only fields
5404     // since we cannot guarantee their immutability and it is not a security issue.
5405
5406     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5407     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5408                    "field has unsatisfied class constraints");
5409     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5410     {
5411         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5412                "Accessing protected field through wrong type.");
5413     }
5414 }
5415
5416 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5417 {
5418     if (tiOp1.IsNumberType())
5419     {
5420 #ifdef _TARGET_64BIT_
5421         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5422 #else  // !_TARGET_64BIT_
5423         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5424         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5425         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5426         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5427 #endif // !_TARGET_64BIT_
5428     }
5429     else if (tiOp1.IsObjRef())
5430     {
5431         switch (opcode)
5432         {
5433             case CEE_BEQ_S:
5434             case CEE_BEQ:
5435             case CEE_BNE_UN_S:
5436             case CEE_BNE_UN:
5437             case CEE_CEQ:
5438             case CEE_CGT_UN:
5439                 break;
5440             default:
5441                 Verify(FALSE, "Cond not allowed on object types");
5442         }
5443         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5444     }
5445     else if (tiOp1.IsByRef())
5446     {
5447         Verify(tiOp2.IsByRef(), "Cond type mismatch");
5448     }
5449     else
5450     {
5451         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5452     }
5453 }
5454
5455 void Compiler::verVerifyThisPtrInitialised()
5456 {
5457     if (verTrackObjCtorInitState)
5458     {
5459         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5460     }
5461 }
5462
5463 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5464 {
5465     // Either target == context, in which case we are calling an alternate .ctor,
5466     // or target is the immediate parent of context.
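    // For illustration (a sketch, not part of the original comment): inside D::.ctor
    // for "class D : B", a "call instance void D::.ctor(...)" has target == context,
    // while "call instance void B::.ctor()" has target == getParentType(context);
    // both count as calls that initialize the this pointer.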
5467
5468     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
5469 }
5470
5471 GenTree* Compiler::impImportLdvirtftn(GenTree*                thisPtr,
5472                                       CORINFO_RESOLVED_TOKEN* pResolvedToken,
5473                                       CORINFO_CALL_INFO*      pCallInfo)
5474 {
5475     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5476     {
5477         NO_WAY("Virtual call to a function added via EnC is not supported");
5478     }
5479
5480     // CoreRT generic virtual method
5481     if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5482     {
5483         GenTree* runtimeMethodHandle = nullptr;
5484         if (pCallInfo->exactContextNeedsRuntimeLookup)
5485         {
5486             runtimeMethodHandle =
5487                 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5488         }
5489         else
5490         {
5491             runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5492         }
5493         return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5494                                    gtNewArgList(thisPtr, runtimeMethodHandle));
5495     }
5496
5497 #ifdef FEATURE_READYTORUN_COMPILER
5498     if (opts.IsReadyToRun())
5499     {
5500         if (!pCallInfo->exactContextNeedsRuntimeLookup)
5501         {
5502             GenTreeCall* call =
5503                 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5504
5505             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5506
5507             return call;
5508         }
5509
5510         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5511         if (IsTargetAbi(CORINFO_CORERT_ABI))
5512         {
5513             GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5514
5515             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5516                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5517         }
5518     }
5519 #endif
5520
5521     // Get the exact descriptor for the static callsite
5522     GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5523     if (exactTypeDesc == nullptr)
5524     { // compDonotInline()
5525         return nullptr;
5526     }
5527
5528     GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
5529     if (exactMethodDesc == nullptr)
5530     { // compDonotInline()
5531         return nullptr;
5532     }
5533
5534     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5535
5536     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5537
5538     helpArgs = gtNewListNode(thisPtr, helpArgs);
5539
5540     // Call helper function.  This gets the target address of the final destination callsite.
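    // For illustration (a sketch; the class and delegate names below are made up),
    // the common delegate-creation sequence
    //     dup
    //     ldvirtftn instance string C::ToString()
    //     newobj    instance void SomeDelegate::.ctor(object, native int)
    // imports the ldvirtftn as roughly
    //     CORINFO_HELP_VIRTUAL_FUNC_PTR(thisPtr, typeHandle(C), methodHandle(C::ToString))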
5541
5542     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5543 }
5544
5545 //------------------------------------------------------------------------
5546 // impImportAndPushBox: build and import a value-type box
5547 //
5548 // Arguments:
5549 //   pResolvedToken - resolved token from the box operation
5550 //
5551 // Return Value:
5552 //   None.
5553 //
5554 // Side Effects:
5555 //   The value to be boxed is popped from the stack, and a tree for
5556 //   the boxed value is pushed. This method may create upstream
5557 //   statements, spill side effecting trees, and create new temps.
5558 //
5559 //   If importing an inlinee, we may also discover the inline must
5560 //   fail. If so, there is no new value pushed on the stack. Callers
5561 //   should use compDonotInline() after calling this method to see if
5562 //   ongoing importation should be aborted.
5563 //
5564 // Notes:
5565 //   Boxing of ref classes results in the same value as the value on
5566 //   the top of the stack, so is handled inline in impImportBlockCode
5567 //   for the CEE_BOX case. Only value or primitive type boxes make it
5568 //   here.
5569 //
5570 //   Boxing for nullable types is done via a helper call; boxing
5571 //   of other value types is expanded inline or handled via helper
5572 //   call, depending on the jit's codegen mode.
5573 //
5574 //   When the jit is operating in size and time constrained modes,
5575 //   using a helper call here can save jit time and code size. But it
5576 //   may also inhibit cleanup optimizations that could have had an
5577 //   even greater benefit on code size and jit time. An optimal
5578 //   strategy may need to peek ahead and see if it is easy to tell how
5579 //   the box is being used. For now, we defer.
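//
//   For illustration (a sketch, not part of the original comment), C# code such as
//       object o = 42;      // CEE_BOX on an int32
//   expands inline to roughly
//       boxTmp = ALLOCOBJ(int32 class handle)
//       *(boxTmp + TARGET_POINTER_SIZE) = 42
//       push boxTmp
//   whereas boxing a Nullable<T> always takes the helper-call path, since
//   getBoxHelper returns something other than CORINFO_HELP_BOX for it.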
5580
5581 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5582 {
5583     // Spill any special side effects
5584     impSpillSpecialSideEff();
5585
5586     // Get the expression to box from the stack.
5587     GenTree*             op1       = nullptr;
5588     GenTree*             op2       = nullptr;
5589     StackEntry           se        = impPopStack();
5590     CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
5591     GenTree*             exprToBox = se.val;
5592
5593     // Look at what helper we should use.
5594     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5595
5596     // Determine what expansion to prefer.
5597     //
5598     // In size/time/debuggable constrained modes, the helper call
5599     // expansion for box is generally smaller and is preferred, unless
5600     // the value to box is a struct that comes from a call. In that
5601     // case the call can construct its return value directly into the
5602     // box payload, saving possibly some up-front zeroing.
5603     //
5604     // Currently primitive type boxes always get inline expanded. We may
5605     // want to do the same for small structs if they don't come from
5606     // calls and don't have GC pointers, since explicitly copying such
5607     // structs is cheap.
5608     JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
5609     bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
5610     bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
5611     bool expandInline    = canExpandInline && !optForSize;
5612
5613     if (expandInline)
5614     {
5615         JITDUMP(" inline allocate/copy sequence\n");
5616
5617         // we are doing 'normal' boxing.  This means that we can inline the box operation
5618         // Box(expr) gets morphed into
5619         // temp = new(clsHnd)
5620         // cpobj(temp+4, expr, clsHnd)
5621         // push temp
5622         // The code paths differ slightly below for structs and primitives because
5623         // "cpobj" differs in these cases.  In one case you get
5624         //    impAssignStructPtr(temp+4, expr, clsHnd)
5625         // and the other you get
5626         //    *(temp+4) = expr
5627
5628         if (opts.MinOpts() || opts.compDbgCode)
5629         {
5630             // For minopts/debug code, try and minimize the total number
5631             // of box temps by reusing an existing temp when possible.
5632             if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5633             {
5634                 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
5635             }
5636         }
5637         else
5638         {
5639             // When optimizing, use a new temp for each box operation
5640             // since we then know the exact class of the box temp.
5641             impBoxTemp                  = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
5642             lvaTable[impBoxTemp].lvType = TYP_REF;
5643             const bool isExact          = true;
5644             lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
5645         }
5646
5647         // The box temp needs to stay in use until this box expression is appended to
5648         // some other node.  We approximate this by keeping it alive until
5649         // the opcode stack becomes empty.
5650         impBoxTempInUse = true;
5651
5652 #ifdef FEATURE_READYTORUN_COMPILER
5653         bool usingReadyToRunHelper = false;
5654
5655         if (opts.IsReadyToRun())
5656         {
5657             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5658             usingReadyToRunHelper = (op1 != nullptr);
5659         }
5660
5661         if (!usingReadyToRunHelper)
5662 #endif
5663         {
5664             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5665             // and the newfast call with a single call to a dynamic R2R cell that will:
5666             //      1) Load the context
5667             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5668             //      3) Allocate and return the new object for boxing
5669             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5670
5671             // Ensure that the value class is restored
5672             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5673             if (op2 == nullptr)
5674             {
5675                 // We must be backing out of an inline.
5676                 assert(compDonotInline());
5677                 return;
5678             }
5679
5680             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
5681                                     pResolvedToken->hClass, TYP_REF, op2);
5682         }
5683
5684         /* Remember that this basic block contains 'new' of an object, and so does this method */
5685         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5686         optMethodFlags |= OMF_HAS_NEWOBJ;
5687
5688         GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
5689
5690         GenTree* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5691
5692         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5693         op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
5694         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5695
5696         if (varTypeIsStruct(exprToBox))
5697         {
5698             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5699             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5700         }
5701         else
5702         {
5703             var_types lclTyp = exprToBox->TypeGet();
5704             if (lclTyp == TYP_BYREF)
5705             {
5706                 lclTyp = TYP_I_IMPL;
5707             }
5708             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5709             if (impIsPrimitive(jitType))
5710             {
5711                 lclTyp = JITtype2varType(jitType);
5712             }
5713             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5714                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5715             var_types srcTyp = exprToBox->TypeGet();
5716             var_types dstTyp = lclTyp;
5717
5718             if (srcTyp != dstTyp)
5719             {
5720                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5721                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5722                 exprToBox = gtNewCastNode(dstTyp, exprToBox, false, dstTyp);
5723             }
5724             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5725         }
5726
5727         // Spill eval stack to flush out any pending side effects.
5728         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
5729
5730         // Set up this copy as a second assignment.
5731         GenTree* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5732
5733         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5734
5735         // Record that this is a "box" node and keep track of the matching parts.
5736         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
5737
5738         // If it is a value class, mark the "box" node.  We can use this information
5739         // to optimise several cases:
5740         //    "box(x) == null" --> false
5741         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5742         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5743
5744         op1->gtFlags |= GTF_BOX_VALUE;
5745         assert(op1->IsBoxedValue());
5746         assert(asg->gtOper == GT_ASG);
5747     }
5748     else
5749     {
5750         // Don't optimize, just call the helper and be done with it.
5751         JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
5752         assert(operCls != nullptr);
5753
5754         // Ensure that the value class is restored
5755         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5756         if (op2 == nullptr)
5757         {
5758             // We must be backing out of an inline.
5759             assert(compDonotInline());
5760             return;
5761         }
5762
5763         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5764         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, args);
5765     }
5766
5767     /* Push the result back on the stack, */
5768     /* even if clsHnd is a value class we want the TI_REF */
5769     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5770     impPushOnStack(op1, tiRetVal);
5771 }
5772
5773 //------------------------------------------------------------------------
5774 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5775 //
5776 // Arguments:
5777 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5778 //                     by a call to CEEInfo::resolveToken().
5779 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5780 //                by a call to CEEInfo::getCallInfo().
5781 //
5782 // Assumptions:
5783 //    The multi-dimensional array constructor arguments (array dimensions) are
5784 //    pushed on the IL stack on entry to this method.
5785 //
5786 // Notes:
5787 //    Multi-dimensional array constructors are imported as calls to a JIT
5788 //    helper, not as regular calls.
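//
//    For illustration (a sketch, not part of the original comment), the C# expression
//    "new int[2,3]" reaches here as a newobj on the int[,] constructor and, when the
//    non-varargs helper is in use, is imported as roughly
//        CORINFO_HELP_NEW_MDARR_NONVARARG(classHandle, 2, &lvaNewObjArrayArgs)
//    where the dimension arguments {2, 3} have first been stored into the
//    lvaNewObjArrayArgs block as int32s.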
5789
5790 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5791 {
5792     GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
5793     if (classHandle == nullptr)
5794     { // compDonotInline()
5795         return;
5796     }
5797
5798     assert(pCallInfo->sig.numArgs);
5799
5800     GenTree*        node;
5801     GenTreeArgList* args;
5802
5803     //
5804     // There are two different JIT helpers that can be used to allocate
5805     // multi-dimensional arrays:
5806     //
5807     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5808     //      This variant is deprecated. It should be eventually removed.
5809     //
5810     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5811     //      pointer to block of int32s. This variant is more portable.
5812     //
5813     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5814     // unconditionally would require ReadyToRun version bump.
5815     //
5816     CLANG_FORMAT_COMMENT_ANCHOR;
5817
5818     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5819     {
5820
5821         // Reuse the temp used to pass the array dimensions to avoid bloating
5822         // the stack frame in case there are multiple calls to multi-dim array
5823         // constructors within a single method.
5824         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5825         {
5826             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5827             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5828             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5829         }
5830
5831         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5832         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5833         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5834             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5835
5836         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5837         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5838         // to one allocation at a time.
5839         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5840
5841         //
5842         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5843         //  - Array class handle
5844         //  - Number of dimension arguments
5845         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5846         //
5847
5848         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5849         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5850
5851         // Pop the dimension arguments from the stack one at a time and store them
5852         // into the lvaNewObjArrayArgs temp.
5853         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5854         {
5855             GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5856
5857             GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5858             dest          = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5859             dest          = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5860                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5861             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5862
5863             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5864         }
5865
5866         args = gtNewArgList(node);
5867
5868         // pass number of arguments to the helper
5869         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5870
5871         args = gtNewListNode(classHandle, args);
5872
5873         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
5874     }
5875     else
5876     {
5877         //
5878         // The varargs helper needs the type and method handles as last
5879         // and  last-1 param (this is a cdecl call, so args will be
5880         // pushed in reverse order on the CPU stack)
5881         //
5882
5883         args = gtNewArgList(classHandle);
5884
5885         // pass number of arguments to the helper
5886         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5887
5888         unsigned argFlags = 0;
5889         args              = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
5890
5891         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
5892
5893         // varargs, so we pop the arguments
5894         node->gtFlags |= GTF_CALL_POP_ARGS;
5895
5896 #ifdef DEBUG
5897         // At the present time we don't track Caller pop arguments
5898         // that have GC references in them
5899         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5900         {
5901             assert(temp->Current()->gtType != TYP_REF);
5902         }
5903 #endif
5904     }
5905
5906     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5907     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5908
5909     // Remember that this basic block contains 'new' of a md array
5910     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5911
5912     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5913 }
5914
5915 GenTree* Compiler::impTransformThis(GenTree*                thisPtr,
5916                                     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5917                                     CORINFO_THIS_TRANSFORM  transform)
5918 {
5919     switch (transform)
5920     {
5921         case CORINFO_DEREF_THIS:
5922         {
5923             GenTree* obj = thisPtr;
5924
5925             // This does a LDIND on the obj, which should be a byref pointing to a ref
5926             impBashVarAddrsToI(obj);
5927             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5928             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5929
5930             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5931             // The ldind could point anywhere, for example a boxed class static int
5932             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5933
5934             return obj;
5935         }
5936
5937         case CORINFO_BOX_THIS:
5938         {
5939             // Constraint calls where there might be no
5940             // unboxed entry point require us to implement the call via helper.
5941             // These only occur when a possible target of the call
5942             // may have inherited an implementation of an interface
5943             // method from System.Object or System.ValueType.  The EE does not provide us with
5944             // "unboxed" versions of these methods.
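            //
            // For illustration (a sketch, not part of the original comment): a generic
            // method "void M<T>(T t) { t.ToString(); }" compiles to
            //     constrained. !!T
            //     callvirt   instance string [mscorlib]System.Object::ToString()
            // and when T is a struct that does not override ToString(), this
            // CORINFO_BOX_THIS path dereferences the byref 'this', boxes it, and uses
            // the boxed object for the virtual call.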
5945
5946             GenTree* obj = thisPtr;
5947
5948             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5949             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5950             obj->gtFlags |= GTF_EXCEPT;
5951
5952             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5953             var_types   objType = JITtype2varType(jitTyp);
5954             if (impIsPrimitive(jitTyp))
5955             {
5956                 if (obj->OperIsBlk())
5957                 {
5958                     obj->ChangeOperUnchecked(GT_IND);
5959
5960                     // Obj could point anywhere, for example a boxed class static int
5961                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5962                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5963                 }
5964
5965                 obj->gtType = JITtype2varType(jitTyp);
5966                 assert(varTypeIsArithmetic(obj->gtType));
5967             }
5968
5969             // This pushes on the dereferenced byref
5970             // This is then used immediately to box.
5971             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5972
5973             // This pops off the byref-to-a-value-type remaining on the stack and
5974             // replaces it with a boxed object.
5975             // This is then used as the object to the virtual call immediately below.
5976             impImportAndPushBox(pConstrainedResolvedToken);
5977             if (compDonotInline())
5978             {
5979                 return nullptr;
5980             }
5981
5982             obj = impPopStack().val;
5983             return obj;
5984         }
5985         case CORINFO_NO_THIS_TRANSFORM:
5986         default:
5987             return thisPtr;
5988     }
5989 }
5990
5991 //------------------------------------------------------------------------
5992 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5993 //
5994 // Return Value:
5995 //    true if PInvoke inlining should be enabled in current method, false otherwise
5996 //
5997 // Notes:
5998 //    Checks a number of ambient conditions where we could inline a pinvoke but choose not to.
5999
6000 bool Compiler::impCanPInvokeInline()
6001 {
6002     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
6003            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
6004         ;
6005 }
6006
6007 //------------------------------------------------------------------------
6008 // impCanPInvokeInlineCallSite: basic legality checks using information
6009 // from a call to see if the call qualifies as an inline pinvoke.
6010 //
6011 // Arguments:
6012 //    block      - block containing the call, or for inlinees, block
6013 //                 containing the call being inlined
6014 //
6015 // Return Value:
6016 //    true if this call can legally qualify as an inline pinvoke, false otherwise
6017 //
6018 // Notes:
6019 //    For runtimes that support exception handling interop there are
6020 //    restrictions on using inline pinvoke in handler regions.
6021 //
6022 //    * We have to disable pinvoke inlining inside of filters because
6023 //    in case the main execution (i.e. in the try block) is inside
6024 //    unmanaged code, we cannot reuse the inlined stub (we still need
6025 //    the original state until we are in the catch handler)
6026 //
6027 //    * We disable pinvoke inlining inside handlers since the GSCookie
6028 //    is in the inlined Frame (see
6029 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
6030 //    this would not protect framelets/return-address of handlers.
6031 //
6032 //    These restrictions are currently also in place for CoreCLR but
6033 //    can be relaxed when coreclr/#8459 is addressed.
6034
6035 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
6036 {
6037     if (block->hasHndIndex())
6038     {
6039         return false;
6040     }
6041
6042     // The remaining limitations do not apply to CoreRT
6043     if (IsTargetAbi(CORINFO_CORERT_ABI))
6044     {
6045         return true;
6046     }
6047
6048 #ifdef _TARGET_AMD64_
6049     // On x64, we disable pinvoke inlining inside of try regions.
6050     // Here is the comment from JIT64 explaining why:
6051     //
6052     //   [VSWhidbey: 611015] - because the jitted code links in the
6053     //   Frame (instead of the stub) we rely on the Frame not being
6054     //   'active' until inside the stub.  This normally happens by the
6055     //   stub setting the return address pointer in the Frame object
6056     //   inside the stub.  On a normal return, the return address
6057     //   pointer is zeroed out so the Frame can be safely re-used, but
6058     //   if an exception occurs, nobody zeros out the return address
6059     //   pointer.  Thus if we re-used the Frame object, it would go
6060     //   'active' as soon as we link it into the Frame chain.
6061     //
6062     //   Technically we only need to disable PInvoke inlining if we're
6063     //   in a handler or if we're in a try body with a catch or
6064     //   filter/except where other non-handler code in this method
6065     //   might run and try to re-use the dirty Frame object.
6066     //
6067     //   A desktop test case where this seems to matter is
6068     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
6069     if (block->hasTryIndex())
6070     {
6071         return false;
6072     }
6073 #endif // _TARGET_AMD64_
6074
6075     return true;
6076 }
6077
6078 //------------------------------------------------------------------------
6079 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
6080 // whether it can be expressed as an inline pinvoke.
6081 //
6082 // Arguments:
6083 //    call       - tree for the call
6084 //    methHnd    - handle for the method being called (may be null)
6085 //    sig        - signature of the method being called
6086 //    mflags     - method flags for the method being called
6087 //    block      - block containing the call, or for inlinees, block
6088 //                 containing the call being inlined
6089 //
6090 // Notes:
6091 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
6092 //
6093 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
6094 //   call passes a combination of legality and profitability checks.
6095 //
6096 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
6097
6098 void Compiler::impCheckForPInvokeCall(
6099     GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
6100 {
6101     CorInfoUnmanagedCallConv unmanagedCallConv;
6102
6103     // If VM flagged it as Pinvoke, flag the call node accordingly
6104     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
6105     {
6106         call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
6107     }
6108
6109     if (methHnd)
6110     {
6111         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
6112         {
6113             return;
6114         }
6115
6116         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
6117     }
6118     else
6119     {
6120         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
6121         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
6122         {
6123             // Used by the IL Stubs.
6124             callConv = CORINFO_CALLCONV_C;
6125         }
6126         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
6127         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
6128         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
6129         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
6130
6131         assert(!call->gtCallCookie);
6132     }
6133
6134     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
6135         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
6136     {
6137         return;
6138     }
6139     optNativeCallCount++;
6140
6141     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
6142     {
6143         // PInvoke CALLI in IL stubs must be inlined
6144     }
6145     else
6146     {
6147         // Check legality
6148         if (!impCanPInvokeInlineCallSite(block))
6149         {
6150             return;
6151         }
6152
6153         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
6154         // profitability checks
6155         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
6156         {
6157             if (!impCanPInvokeInline())
6158             {
6159                 return;
6160             }
6161
6162             // Size-speed tradeoff: don't use inline pinvoke at rarely
6163             // executed call sites.  The non-inline version is more
6164             // compact.
6165             if (block->isRunRarely())
6166             {
6167                 return;
6168             }
6169         }
6170
6171         // The expensive check should be last
6172         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
6173         {
6174             return;
6175         }
6176     }
6177
6178     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
6179
6180     call->gtFlags |= GTF_CALL_UNMANAGED;
6181     info.compCallUnmanaged++;
6182
6183     // The AMD64 convention is the same for native and managed
6184     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
6185     {
6186         call->gtFlags |= GTF_CALL_POP_ARGS;
6187     }
6188
6189     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
6190     {
6191         call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
6192     }
6193 }
6194
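//------------------------------------------------------------------------
// impImportIndirectCall: create the call node for an indirect (CALLI) call.
//
// Arguments:
//    sig      - signature of the call site
//    ilOffset - IL offset to associate with the call
//
// Return Value:
//    The GT_CALL node. The function pointer is popped from the stack, after
//    being spilled to a temp unless it is already a simple local.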
6195 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
6196 {
6197     var_types callRetTyp = JITtype2varType(sig->retType);
6198
6199     /* The function pointer is on top of the stack - It may be a
6200      * complex expression. As it is evaluated after the args,
6201      * it may cause registered args to be spilled. Simply spill it.
6202      */
6203
6204     // We can skip the spill in the trivial case where it is already a local.
6205     if (impStackTop().val->gtOper != GT_LCL_VAR)
6206     {
6207         impSpillStackEntry(verCurrentState.esStackDepth - 1,
6208                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
6209     }
6210
6211     /* Get the function pointer */
6212
6213     GenTree* fptr = impPopStack().val;
6214
6215     // The function pointer is typically sized to match the target pointer size
6216     // However, stubgen IL optimization can change LDC.I8 to LDC.I4
6217     // See ILCodeStream::LowerOpcode
6218     assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
6219
6220 #ifdef DEBUG
6221     // This temporary must never be converted to a double in stress mode,
6222     // because that can introduce a call to the cast helper after the
6223     // arguments have already been evaluated.
6224
6225     if (fptr->OperGet() == GT_LCL_VAR)
6226     {
6227         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
6228     }
6229 #endif
6230
6231     /* Create the call node */
6232
6233     GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6234
6235     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6236
6237     return call;
6238 }
6239
6240 /*****************************************************************************/
6241
6242 void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
6243 {
6244     assert(call->gtFlags & GTF_CALL_UNMANAGED);
6245
6246     /* Since we push the arguments in reverse order (i.e. right -> left)
6247      * spill any side effects from the stack
6248      *
6249      * OBS: If there is only one side effect we do not need to spill it,
6250      *      thus we have to spill all side effects except the last one
6251      */
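
    /* For illustration (a sketch, not part of the original comment): for an x86 cdecl
     * call p(f(), g()) where both f() and g() have side effects, the arguments are
     * popped right-to-left below, so f() is spilled to a temp here to keep its side
     * effect ordered before g()'s, while the last side effect (g()) can stay in place.
     */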
6252
6253     unsigned lastLevelWithSideEffects = UINT_MAX;
6254
6255     unsigned argsToReverse = sig->numArgs;
6256
6257     // For "thiscall", the first argument goes in a register. Since its
6258     // order does not need to be changed, we do not need to spill it
6259
6260     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6261     {
6262         assert(argsToReverse);
6263         argsToReverse--;
6264     }
6265
6266 #ifndef _TARGET_X86_
6267     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
6268     argsToReverse = 0;
6269 #endif
6270
6271     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
6272     {
6273         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
6274         {
6275             assert(lastLevelWithSideEffects == UINT_MAX);
6276
6277             impSpillStackEntry(level,
6278                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
6279         }
6280         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
6281         {
6282             if (lastLevelWithSideEffects != UINT_MAX)
6283             {
6284                 /* We had a previous side effect - must spill it */
6285                 impSpillStackEntry(lastLevelWithSideEffects,
6286                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
6287
6288                 /* Record the level for the current side effect in case we will spill it */
6289                 lastLevelWithSideEffects = level;
6290             }
6291             else
6292             {
6293                 /* This is the first side effect encountered - record its level */
6294
6295                 lastLevelWithSideEffects = level;
6296             }
6297         }
6298     }
6299
6300     /* The argument list is now "clean" - no out-of-order side effects
6301      * Pop the argument list in reverse order */
6302
6303     GenTree* args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
6304
6305     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6306     {
6307         GenTree* thisPtr = args->Current();
6308         impBashVarAddrsToI(thisPtr);
6309         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
6310     }
6311
6312     if (args)
6313     {
6314         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6315     }
6316 }
6317
6318 //------------------------------------------------------------------------
6319 // impInitClass: Build a node to initialize the class before accessing the
6320 //               field if necessary
6321 //
6322 // Arguments:
6323 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6324 //                     by a call to CEEInfo::resolveToken().
6325 //
6326 // Return Value: If needed, a pointer to the node that will perform the class
6327 //               initialization.  Otherwise, nullptr.
6328 //
6329
6330 GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6331 {
6332     CorInfoInitClassResult initClassResult =
6333         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
6334
6335     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6336     {
6337         return nullptr;
6338     }
6339     BOOL runtimeLookup;
6340
6341     GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6342
6343     if (node == nullptr)
6344     {
6345         assert(compDonotInline());
6346         return nullptr;
6347     }
6348
6349     if (runtimeLookup)
6350     {
6351         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6352     }
6353     else
6354     {
6355         // Call the shared non-GC static helper, as it's the fastest
6356         node = fgGetSharedCCtor(pResolvedToken->hClass);
6357     }
6358
6359     return node;
6360 }
6361
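//------------------------------------------------------------------------
// impImportStaticReadOnlyField: materialize the value of an initialized static
//    read-only field as a constant node.
//
// Arguments:
//    fldAddr - address of the static field
//    lclTyp  - type of the field
//
// Return Value:
//    A GT_CNS_INT, GT_CNS_LNG, or GT_CNS_DBL node holding the field's current value.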
6362 GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6363 {
6364     GenTree* op1 = nullptr;
6365
6366     switch (lclTyp)
6367     {
6368         int     ival;
6369         __int64 lval;
6370         double  dval;
6371
6372         case TYP_BOOL:
6373             ival = *((bool*)fldAddr);
6374             goto IVAL_COMMON;
6375
6376         case TYP_BYTE:
6377             ival = *((signed char*)fldAddr);
6378             goto IVAL_COMMON;
6379
6380         case TYP_UBYTE:
6381             ival = *((unsigned char*)fldAddr);
6382             goto IVAL_COMMON;
6383
6384         case TYP_SHORT:
6385             ival = *((short*)fldAddr);
6386             goto IVAL_COMMON;
6387
6388         case TYP_USHORT:
6389             ival = *((unsigned short*)fldAddr);
6390             goto IVAL_COMMON;
6391
6392         case TYP_UINT:
6393         case TYP_INT:
6394             ival = *((int*)fldAddr);
6395         IVAL_COMMON:
6396             op1 = gtNewIconNode(ival);
6397             break;
6398
6399         case TYP_LONG:
6400         case TYP_ULONG:
6401             lval = *((__int64*)fldAddr);
6402             op1  = gtNewLconNode(lval);
6403             break;
6404
6405         case TYP_FLOAT:
6406             dval = *((float*)fldAddr);
6407             op1  = gtNewDconNode(dval);
6408 #if !FEATURE_X87_DOUBLES
6409             // X87 stack doesn't differentiate between float/double
6410             // so R4 is treated as R8, but everybody else does
6411             op1->gtType = TYP_FLOAT;
6412 #endif // FEATURE_X87_DOUBLES
6413             break;
6414
6415         case TYP_DOUBLE:
6416             dval = *((double*)fldAddr);
6417             op1  = gtNewDconNode(dval);
6418             break;
6419
6420         default:
6421             assert(!"Unexpected lclTyp");
6422             break;
6423     }
6424
6425     return op1;
6426 }
6427
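//------------------------------------------------------------------------
// impImportStaticFieldAccess: import an access to a static field.
//
// Arguments:
//    pResolvedToken - resolved token for the field
//    access         - flags describing the access (value vs. address)
//    pFieldInfo     - field info returned by the EE for this access
//    lclTyp         - type of the field
//
// Return Value:
//    Tree yielding the field's value, or its address when CORINFO_ACCESS_ADDRESS
//    is requested, built according to pFieldInfo->fieldAccessor.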
6428 GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6429                                               CORINFO_ACCESS_FLAGS    access,
6430                                               CORINFO_FIELD_INFO*     pFieldInfo,
6431                                               var_types               lclTyp)
6432 {
6433     GenTree* op1;
6434
6435     switch (pFieldInfo->fieldAccessor)
6436     {
6437         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6438         {
6439             assert(!compIsForInlining());
6440
6441             // We first call a special helper to get the statics base pointer
6442             op1 = impParentClassTokenToHandle(pResolvedToken);
6443
6444             // compIsForInlining() is false, so we should never get NULL here
6445             assert(op1 != nullptr);
6446
6447             var_types type = TYP_BYREF;
6448
6449             switch (pFieldInfo->helper)
6450             {
6451                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6452                     type = TYP_I_IMPL;
6453                     break;
6454                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6455                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6456                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6457                     break;
6458                 default:
6459                     assert(!"unknown generic statics helper");
6460                     break;
6461             }
6462
6463             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6464
6465             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6466             op1              = gtNewOperNode(GT_ADD, type, op1,
6467                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6468         }
6469         break;
6470
6471         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6472         {
6473 #ifdef FEATURE_READYTORUN_COMPILER
6474             if (opts.IsReadyToRun())
6475             {
6476                 unsigned callFlags = 0;
6477
6478                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6479                 {
6480                     callFlags |= GTF_CALL_HOISTABLE;
6481                 }
6482
6483                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6484                 op1->gtFlags |= callFlags;
6485
6486                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6487             }
6488             else
6489 #endif
6490             {
6491                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6492             }
6493
6494             {
6495                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6496                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6497                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6498             }
6499             break;
6500         }
6501
6502         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6503         {
6504 #ifdef FEATURE_READYTORUN_COMPILER
6505             noway_assert(opts.IsReadyToRun());
6506             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6507             assert(kind.needsRuntimeLookup);
6508
6509             GenTree*        ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6510             GenTreeArgList* args    = gtNewArgList(ctxTree);
6511
6512             unsigned callFlags = 0;
6513
6514             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6515             {
6516                 callFlags |= GTF_CALL_HOISTABLE;
6517             }
6518             var_types type = TYP_BYREF;
6519             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6520             op1->gtFlags |= callFlags;
6521
6522             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6523             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6524             op1              = gtNewOperNode(GT_ADD, type, op1,
6525                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6526 #else
6527             unreached();
6528 #endif // FEATURE_READYTORUN_COMPILER
6529         }
6530         break;
6531
6532         default:
6533         {
6534             if (!(access & CORINFO_ACCESS_ADDRESS))
6535             {
6536                 // In future, it may be better to just create the right tree here instead of folding it later.
6537                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6538
6539                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6540                 {
6541                     op1->gtFlags |= GTF_FLD_INITCLASS;
6542                 }
6543
6544                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6545                 {
6546                     op1->gtType = TYP_REF; // points at boxed object
6547                     FieldSeqNode* firstElemFldSeq =
6548                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6549                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6550                                         new (this, GT_CNS_INT)
6551                                             GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
6552
6553                     if (varTypeIsStruct(lclTyp))
6554                     {
6555                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
6556                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
6557                     }
6558                     else
6559                     {
6560                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6561                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6562                     }
6563                 }
6564
6565                 return op1;
6566             }
6567             else
6568             {
6569                 void** pFldAddr = nullptr;
6570                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6571
6572                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6573
6574                 /* Create the data member node */
6575                 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
6576                                           fldSeq);
6577
6578                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6579                 {
6580                     op1->gtFlags |= GTF_ICON_INITCLASS;
6581                 }
6582
6583                 if (pFldAddr != nullptr)
6584                 {
6585                     // There are two cases here, either the static is RVA based,
6586                     // in which case the type of the FIELD node is not a GC type
6587                     // and the handle to the RVA is a TYP_I_IMPL.  Or the FIELD node is
6588                     // a GC type and the handle to it is a TYP_BYREF in the GC heap
6589                     // because handles to statics now go into the large object heap
6590
6591                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6592                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6593                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6594                 }
6595             }
6596             break;
6597         }
6598     }
6599
6600     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6601     {
6602         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6603
6604         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6605
6606         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6607                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
6608     }
6609
6610     if (!(access & CORINFO_ACCESS_ADDRESS))
6611     {
6612         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6613         op1->gtFlags |= GTF_GLOB_REF;
6614     }
6615
6616     return op1;
6617 }
6618
6619 // In general, try to call this before most of the verification work.  Most people expect the access
6620 // exceptions before the verification exceptions.  If you do this afterwards, that usually doesn't happen.  It turns
6621 // out that if you can't access something, we also think that you're unverifiable for other reasons.
6622 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6623 {
6624     if (result != CORINFO_ACCESS_ALLOWED)
6625     {
6626         impHandleAccessAllowedInternal(result, helperCall);
6627     }
6628 }
6629
6630 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6631 {
6632     switch (result)
6633     {
6634         case CORINFO_ACCESS_ALLOWED:
6635             break;
6636         case CORINFO_ACCESS_ILLEGAL:
6637             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6638             // method is verifiable.  Otherwise, delay the exception to runtime.
6639             if (compIsForImportOnly())
6640             {
6641                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6642             }
6643             else
6644             {
6645                 impInsertHelperCall(helperCall);
6646             }
6647             break;
6648         case CORINFO_ACCESS_RUNTIME_CHECK:
6649             impInsertHelperCall(helperCall);
6650             break;
6651     }
6652 }
6653
6654 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6655 {
6656     // Construct the argument list
6657     GenTreeArgList* args = nullptr;
6658     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6659     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6660     {
6661         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6662         GenTree*                  currentArg = nullptr;
6663         switch (helperArg.argType)
6664         {
6665             case CORINFO_HELPER_ARG_TYPE_Field:
6666                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6667                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6668                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6669                 break;
6670             case CORINFO_HELPER_ARG_TYPE_Method:
6671                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6672                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6673                 break;
6674             case CORINFO_HELPER_ARG_TYPE_Class:
6675                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6676                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6677                 break;
6678             case CORINFO_HELPER_ARG_TYPE_Module:
6679                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6680                 break;
6681             case CORINFO_HELPER_ARG_TYPE_Const:
6682                 currentArg = gtNewIconNode(helperArg.constant);
6683                 break;
6684             default:
6685                 NO_WAY("Illegal helper arg type");
6686         }
6687         args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6688     }
6689
6690     /* TODO-Review:
6691      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6692      * Also, consider sticking this in the first basic block.
6693      */
6694     GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
6695     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6696 }
6697
6698 // Checks whether the return types of caller and callee are compatible
6699 // so that the callee can be tail called. Note that here we don't check
6700 // compatibility in the IL Verifier sense, but rather that the return type
6701 // sizes are equal and the values get returned in the same return register.
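// For illustration (a sketch, not part of the original comment): a caller returning
// TYP_INT cannot tail call a callee returning TYP_SHORT, because the caller of this
// method would not normalize the small-typed value returned by the callee; the size
// check below therefore rejects that pairing.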
6702 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6703                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6704                                             var_types            calleeRetType,
6705                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6706 {
6707     // Note that we can not relax this condition with genActualType() as the
6708     // calling convention dictates that the caller of a function with a small
6709     // typed return value is responsible for normalizing the return val.
6710     if (callerRetType == calleeRetType)
6711     {
6712         return true;
6713     }
6714
6715     // If the class handles are the same and not null, the return types are compatible.
6716     if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
6717     {
6718         return true;
6719     }
6720
6721 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6722     // Jit64 compat:
6723     if (callerRetType == TYP_VOID)
6724     {
6725         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6726         //     tail.call
6727         //     pop
6728         //     ret
6729         //
6730         // Note that the above IL pattern is not valid as per IL verification rules.
6731         // Therefore, only full trust code can take advantage of this pattern.
6732         return true;
6733     }
6734
6735     // These checks return true if the return value type sizes are the same and
6736     // get returned in the same return register i.e. caller doesn't need to normalize
6737     // return value. Some of the tail calls permitted by below checks would have
6738     // been rejected by IL Verifier before we reached here.  Therefore, only full
6739     // trust code can make those tail calls.
6740     unsigned callerRetTypeSize = 0;
6741     unsigned calleeRetTypeSize = 0;
6742     bool     isCallerRetTypMBEnreg =
6743         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6744     bool isCalleeRetTypMBEnreg =
6745         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6746
6747     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6748     {
6749         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6750     }
6751 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6752
6753     return false;
6754 }
6755
6756 // For prefixFlags
6757 enum
6758 {
6759     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6760     PREFIX_TAILCALL_IMPLICIT =
6761         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6762     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6763     PREFIX_VOLATILE    = 0x00000100,
6764     PREFIX_UNALIGNED   = 0x00001000,
6765     PREFIX_CONSTRAINED = 0x00010000,
6766     PREFIX_READONLY    = 0x00100000
6767 };
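
// Example (illustrative): a "tail."-prefixed callvirt that also carries a "constrained." prefix
// reaches the importer with prefixFlags == (PREFIX_TAILCALL_EXPLICIT | PREFIX_CONSTRAINED), while a
// call that is only opportunistically treated as a tail call carries PREFIX_TAILCALL_IMPLICIT instead.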
6768
6769 /********************************************************************************
6770  *
6771  * Returns true if the current opcode and the opcodes following it correspond
6772  * to a supported tail call IL pattern.
6773  *
6774  */
6775 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6776                                       OPCODE      curOpcode,
6777                                       const BYTE* codeAddrOfNextOpcode,
6778                                       const BYTE* codeEnd,
6779                                       bool        isRecursive,
6780                                       bool*       isCallPopAndRet /* = nullptr */)
6781 {
6782     // Bail out if the current opcode is not a call.
6783     if (!impOpcodeIsCallOpcode(curOpcode))
6784     {
6785         return false;
6786     }
6787
6788 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6789     // If shared ret tail opt is not enabled, we will enable
6790     // it for recursive methods.
6791     if (isRecursive)
6792 #endif
6793     {
6794         // We can actually handle the case where the ret is in a fall-through block, as long as that is the only
6795         // part of the sequence. Make sure we don't go past the end of the IL, however.
6796         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6797     }
6798
6799     // Bail out if there is no next opcode after call
6800     if (codeAddrOfNextOpcode >= codeEnd)
6801     {
6802         return false;
6803     }
6804
6805     // Scan the opcodes to look for the following IL patterns if either
6806     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6807     //  ii) if tail prefixed, IL verification is not needed for the method.
6808     //
6809     // Only in the above two cases can we allow the below tail call patterns
6810     // violating ECMA spec.
6811     //
6812     // Pattern1:
6813     //       call
6814     //       nop*
6815     //       ret
6816     //
6817     // Pattern2:
6818     //       call
6819     //       nop*
6820     //       pop
6821     //       nop*
6822     //       ret
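    //
    // Note (summary of the code below): the nop*/pop scanning loop is only compiled in for the
    // desktop AMD64 (Jit64 compat) build; on CoreCLR we only look at the single opcode that
    // immediately follows the call.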
6823     int    cntPop = 0;
6824     OPCODE nextOpcode;
6825
6826 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6827     do
6828     {
6829         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6830         codeAddrOfNextOpcode += sizeof(__int8);
6831     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6832              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6833              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6834                                                                                          // one pop seen so far.
6835 #else
6836     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6837 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6838
6839     if (isCallPopAndRet)
6840     {
6841         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6842         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6843     }
6844
6845 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6846     // Jit64 Compat:
6847     // Tail call IL pattern could be either of the following
6848     // 1) call/callvirt/calli + ret
6849     // 2) call/callvirt/calli + pop + ret in a method returning void.
6850     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6851 #else
6852     return (nextOpcode == CEE_RET) && (cntPop == 0);
6853 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6854 }
6855
6856 /*****************************************************************************
6857  *
6858  * Determine whether the call could be converted to an implicit tail call
6859  *
6860  */
6861 bool Compiler::impIsImplicitTailCallCandidate(
6862     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6863 {
6864
6865 #if FEATURE_TAILCALL_OPT
6866     if (!opts.compTailCallOpt)
6867     {
6868         return false;
6869     }
6870
6871     if (opts.compDbgCode || opts.MinOpts())
6872     {
6873         return false;
6874     }
6875
6876     // must not be tail prefixed
6877     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6878     {
6879         return false;
6880     }
6881
6882 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6883     // The block containing the call must be marked as BBJ_RETURN.
6884     // We allow shared ret tail call optimization on recursive calls even under
6885     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6886     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6887         return false;
6888 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6889
6890     // must be call+ret or call+pop+ret
6891     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6892     {
6893         return false;
6894     }
6895
6896     return true;
6897 #else
6898     return false;
6899 #endif // FEATURE_TAILCALL_OPT
6900 }
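
// Example (illustrative): when opts.compTailCallOpt is set and we are generating optimized code, a
// non-"tail."-prefixed sequence such as
//     call   int32 C::M(int32)
//     ret
// at the end of a BBJ_RETURN block is an implicit tail call candidate, while a call whose result is
// consumed by further IL in the block is not.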
6901
6902 //------------------------------------------------------------------------
6903 // impImportCall: import a call-inspiring opcode
6904 //
6905 // Arguments:
6906 //    opcode                    - opcode that inspires the call
6907 //    pResolvedToken            - resolved token for the call target
6908 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6909 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6910 //    prefixFlags               - IL prefix flags for the call
6911 //    callInfo                  - EE supplied info for the call
6912 //    rawILOffset               - IL offset of the opcode
6913 //
6914 // Returns:
6915 //    Type of the call's return value.
6916 //    If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
6917 //    However we can't assert for this here yet because there are cases we miss. See issue #13272.
6918 //
6919 //
6920 // Notes:
6921 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6922 //
6923 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6924 //    uninitialized object.
6925
6926 #ifdef _PREFAST_
6927 #pragma warning(push)
6928 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6929 #endif
6930
6931 var_types Compiler::impImportCall(OPCODE                  opcode,
6932                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6933                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6934                                   GenTree*                newobjThis,
6935                                   int                     prefixFlags,
6936                                   CORINFO_CALL_INFO*      callInfo,
6937                                   IL_OFFSET               rawILOffset)
6938 {
6939     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6940
6941     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6942     var_types              callRetTyp                     = TYP_COUNT;
6943     CORINFO_SIG_INFO*      sig                            = nullptr;
6944     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6945     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6946     unsigned               clsFlags                       = 0;
6947     unsigned               mflags                         = 0;
6948     unsigned               argFlags                       = 0;
6949     GenTree*               call                           = nullptr;
6950     GenTreeArgList*        args                           = nullptr;
6951     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6952     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6953     bool                   exactContextNeedsRuntimeLookup = false;
6954     bool                   canTailCall                    = true;
6955     const char*            szCanTailCallFailReason        = nullptr;
6956     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6957     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6958
6959     CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
6960
6961     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6962     // do that before tailcalls, but that is probably not the intended
6963     // semantic. So just disallow tailcalls from synchronized methods.
6964     // Also, popping arguments in a varargs function is more work and NYI.
6965     // If we have a security object, we have to keep our frame around for callers
6966     // to see any imperative security.
6967     if (info.compFlags & CORINFO_FLG_SYNCH)
6968     {
6969         canTailCall             = false;
6970         szCanTailCallFailReason = "Caller is synchronized";
6971     }
6972 #if !FEATURE_FIXED_OUT_ARGS
6973     else if (info.compIsVarArgs)
6974     {
6975         canTailCall             = false;
6976         szCanTailCallFailReason = "Caller is varargs";
6977     }
6978 #endif // FEATURE_FIXED_OUT_ARGS
6979     else if (opts.compNeedSecurityCheck)
6980     {
6981         canTailCall             = false;
6982         szCanTailCallFailReason = "Caller requires a security check.";
6983     }
6984
6985     // We only need to cast the return value of pinvoke inlined calls that return small types
6986
6987     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6988     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6989     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6990     // the time being that the callee might be compiled by the other JIT and thus the return
6991     // value will need to be widened by us (or not widened at all...)
6992
6993     // ReadyToRun code sticks with default calling convention that does not widen small return types.
6994
6995     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6996     bool bIntrinsicImported = false;
6997
6998     CORINFO_SIG_INFO calliSig;
6999     GenTreeArgList*  extraArg = nullptr;
7000
7001     /*-------------------------------------------------------------------------
7002      * First create the call node
7003      */
7004
7005     if (opcode == CEE_CALLI)
7006     {
7007         /* Get the call site sig */
7008         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
7009
7010         callRetTyp = JITtype2varType(calliSig.retType);
7011
7012         call = impImportIndirectCall(&calliSig, ilOffset);
7013
7014         // We don't know the target method, so we have to infer the flags, or
7015         // assume the worst-case.
7016         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
7017
7018 #ifdef DEBUG
7019         if (verbose)
7020         {
7021             unsigned structSize =
7022                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
7023             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7024                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7025         }
7026 #endif
7027         // This should be checked in impImportBlockCode.
7028         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
7029
7030         sig = &calliSig;
7031
7032 #ifdef DEBUG
7033         // We cannot lazily obtain the signature of a CALLI call because it has no method
7034         // handle that we can use, so we need to save its full call signature here.
7035         assert(call->gtCall.callSig == nullptr);
7036         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7037         *call->gtCall.callSig = calliSig;
7038 #endif // DEBUG
7039
7040         if (IsTargetAbi(CORINFO_CORERT_ABI))
7041         {
7042             bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
7043                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
7044                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
7045                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
7046             if (managedCall)
7047             {
7048                 addFatPointerCandidate(call->AsCall());
7049             }
7050         }
7051     }
7052     else // (opcode != CEE_CALLI)
7053     {
7054         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
7055
7056         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
7057         // supply the instantiation parameters necessary to make direct calls to underlying
7058         // shared generic code, rather than calling through instantiating stubs.  If the
7059         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
7060         // must indeed pass an instantiation parameter.
7061
7062         methHnd = callInfo->hMethod;
7063
7064         sig        = &(callInfo->sig);
7065         callRetTyp = JITtype2varType(sig->retType);
7066
7067         mflags = callInfo->methodFlags;
7068
7069 #ifdef DEBUG
7070         if (verbose)
7071         {
7072             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
7073             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7074                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7075         }
7076 #endif
7077         if (compIsForInlining())
7078         {
7079             /* Does this call site have security boundary restrictions? */
7080
7081             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
7082             {
7083                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
7084                 return TYP_UNDEF;
7085             }
7086
7087             /* Does the inlinee need a security check token on the frame */
7088
7089             if (mflags & CORINFO_FLG_SECURITYCHECK)
7090             {
7091                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7092                 return TYP_UNDEF;
7093             }
7094
7095             /* Does the inlinee use StackCrawlMark */
7096
7097             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
7098             {
7099                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
7100                 return TYP_UNDEF;
7101             }
7102
7103             /* For now ignore delegate invoke */
7104
7105             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7106             {
7107                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
7108                 return TYP_UNDEF;
7109             }
7110
7111             /* For now ignore varargs */
7112             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7113             {
7114                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
7115                 return TYP_UNDEF;
7116             }
7117
7118             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7119             {
7120                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
7121                 return TYP_UNDEF;
7122             }
7123
7124             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
7125             {
7126                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
7127                 return TYP_UNDEF;
7128             }
7129         }
7130
7131         clsHnd = pResolvedToken->hClass;
7132
7133         clsFlags = callInfo->classFlags;
7134
7135 #ifdef DEBUG
7136         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
7137
7138         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
7139         // These should be in mscorlib.h, and available through a JIT/EE interface call.
7140         const char* modName;
7141         const char* className;
7142         const char* methodName;
7143         if ((className = eeGetClassName(clsHnd)) != nullptr &&
7144             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
7145             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
7146         {
7147             return impImportJitTestLabelMark(sig->numArgs);
7148         }
7149 #endif // DEBUG
7150
7151         // <NICE> Factor this into getCallInfo </NICE>
7152         bool isSpecialIntrinsic = false;
7153         if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
7154         {
7155             const bool isTail = canTailCall && (tailCall != 0);
7156
7157             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, readonlyCall, isTail,
7158                                 pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID, &isSpecialIntrinsic);
7159
7160             if (compDonotInline())
7161             {
7162                 return TYP_UNDEF;
7163             }
7164
7165             if (call != nullptr)
7166             {
7167                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
7168                        (clsFlags & CORINFO_FLG_FINAL));
7169
7170 #ifdef FEATURE_READYTORUN_COMPILER
7171                 if (call->OperGet() == GT_INTRINSIC)
7172                 {
7173                     if (opts.IsReadyToRun())
7174                     {
7175                         noway_assert(callInfo->kind == CORINFO_CALL);
7176                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
7177                     }
7178                     else
7179                     {
7180                         call->gtIntrinsic.gtEntryPoint.addr       = nullptr;
7181                         call->gtIntrinsic.gtEntryPoint.accessType = IAT_VALUE;
7182                     }
7183                 }
7184 #endif
7185
7186                 bIntrinsicImported = true;
7187                 goto DONE_CALL;
7188             }
7189         }
7190
7191 #ifdef FEATURE_SIMD
7192         if (featureSIMD)
7193         {
7194             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
7195             if (call != nullptr)
7196             {
7197                 bIntrinsicImported = true;
7198                 goto DONE_CALL;
7199             }
7200         }
7201 #endif // FEATURE_SIMD
7202
7203         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
7204         {
7205             NO_WAY("Virtual call to a function added via EnC is not supported");
7206         }
7207
7208         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
7209             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7210             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
7211         {
7212             BADCODE("Bad calling convention");
7213         }
7214
7215         //-------------------------------------------------------------------------
7216         //  Construct the call node
7217         //
7218         // Work out what sort of call we're making.
7219         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
7220
7221         constraintCallThisTransform    = callInfo->thisTransform;
7222         exactContextHnd                = callInfo->contextHandle;
7223         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
7224
7225         // A recursive call is treated as a loop back to the beginning of the method.
7226         if (gtIsRecursiveCall(methHnd))
7227         {
7228 #ifdef DEBUG
7229             if (verbose)
7230             {
7231                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
7232                         fgFirstBB->bbNum, compCurBB->bbNum);
7233             }
7234 #endif
7235             fgMarkBackwardJump(fgFirstBB, compCurBB);
7236         }
7237
7238         switch (callInfo->kind)
7239         {
7240
7241             case CORINFO_VIRTUALCALL_STUB:
7242             {
7243                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7244                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7245                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
7246                 {
7247
7248                     if (compIsForInlining())
7249                     {
7250                         // Don't import runtime lookups when inlining
7251                         // Inlining has to be aborted in such a case
7252                         /* XXX Fri 3/20/2009
7253                          * By the way, this would never succeed.  If the handle lookup is into the generic
7254                          * dictionary for a candidate, you'll generate different dictionary offsets and the
7255                          * inlined code will crash.
7256                          *
7257                          * To anyone reviewing this code: when could this ever succeed in the future?  It'll
7258                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
7259                          * failing here.
7260                          */
7261                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
7262                         return TYP_UNDEF;
7263                     }
7264
7265                     GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
7266                     assert(!compDonotInline());
7267
7268                     // This is the rough code to set up an indirect stub call
7269                     assert(stubAddr != nullptr);
7270
7271                     // The stubAddr may be a
7272                     // complex expression. As it is evaluated after the args,
7273                     // it may cause registered args to be spilled. Simply spill it.
7274
7275                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
7276                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
7277                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7278
7279                     // Create the actual call node
7280
7281                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7282                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7283
7284                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
7285
7286                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
7287                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7288
7289 #ifdef _TARGET_X86_
7290                     // No tailcalls allowed for these yet...
7291                     canTailCall             = false;
7292                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
7293 #endif
7294                 }
7295                 else
7296                 {
7297                     // OK, the stub is available at compile time.
7298
7299                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7300                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
7301                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7302                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
7303                            callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
7304                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
7305                     {
7306                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
7307                     }
7308                 }
7309
7310 #ifdef FEATURE_READYTORUN_COMPILER
7311                 if (opts.IsReadyToRun())
7312                 {
7313                     // Null check is sometimes needed for ready to run to handle
7314                     // non-virtual <-> virtual changes between versions
7315                     if (callInfo->nullInstanceCheck)
7316                     {
7317                         call->gtFlags |= GTF_CALL_NULLCHECK;
7318                     }
7319                 }
7320 #endif
7321
7322                 break;
7323             }
7324
7325             case CORINFO_VIRTUALCALL_VTABLE:
7326             {
7327                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7328                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7329                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7330                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
7331                 break;
7332             }
7333
7334             case CORINFO_VIRTUALCALL_LDVIRTFTN:
7335             {
7336                 if (compIsForInlining())
7337                 {
7338                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
7339                     return TYP_UNDEF;
7340                 }
7341
7342                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7343                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7344                 // OK, we've been told to call via LDVIRTFTN, so just
7345                 // take the call now....
7346
7347                 args = impPopList(sig->numArgs, sig);
7348
7349                 GenTree* thisPtr = impPopStack().val;
7350                 thisPtr          = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7351                 assert(thisPtr != nullptr);
7352
7353                 // Clone the (possibly transformed) "this" pointer
7354                 GenTree* thisPtrCopy;
7355                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7356                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
7357
7358                 GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7359                 assert(fptr != nullptr);
7360
7361                 thisPtr = nullptr; // can't reuse it
7362
7363                 // Now make an indirect call through the function pointer
7364
7365                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7366                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7367                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7368
7369                 // Create the actual call node
7370
7371                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7372                 call->gtCall.gtCallObjp = thisPtrCopy;
7373                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7374
7375                 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7376                 {
7377                     // CoreRT generic virtual method: need to handle potential fat function pointers
7378                     addFatPointerCandidate(call->AsCall());
7379                 }
7380 #ifdef FEATURE_READYTORUN_COMPILER
7381                 if (opts.IsReadyToRun())
7382                 {
7383                     // Null check is needed for ready to run to handle
7384                     // non-virtual <-> virtual changes between versions
7385                     call->gtFlags |= GTF_CALL_NULLCHECK;
7386                 }
7387 #endif
7388
7389                 // Since we are jumping over some code, check that it's OK to skip that code
7390                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7391                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7392                 goto DONE;
7393             }
7394
7395             case CORINFO_CALL:
7396             {
7397                 // This is for a non-virtual, non-interface etc. call
7398                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7399
7400                 // We remove the nullcheck for the GetType call intrinsic.
7401                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7402                 // and intrinsics.
7403                 if (callInfo->nullInstanceCheck &&
7404                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7405                 {
7406                     call->gtFlags |= GTF_CALL_NULLCHECK;
7407                 }
7408
7409 #ifdef FEATURE_READYTORUN_COMPILER
7410                 if (opts.IsReadyToRun())
7411                 {
7412                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7413                 }
7414 #endif
7415                 break;
7416             }
7417
7418             case CORINFO_CALL_CODE_POINTER:
7419             {
7420                 // The EE has asked us to call by computing a code pointer and then doing an
7421                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
7422
7423                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7424                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7425
7426                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7427                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7428
7429                 GenTree* fptr =
7430                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7431
7432                 if (compDonotInline())
7433                 {
7434                     return TYP_UNDEF;
7435                 }
7436
7437                 // Now make an indirect call through the function pointer
7438
7439                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7440                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7441                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7442
7443                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7444                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7445                 if (callInfo->nullInstanceCheck)
7446                 {
7447                     call->gtFlags |= GTF_CALL_NULLCHECK;
7448                 }
7449
7450                 break;
7451             }
7452
7453             default:
7454                 assert(!"unknown call kind");
7455                 break;
7456         }
7457
7458         //-------------------------------------------------------------------------
7459         // Set more flags
7460
7461         PREFIX_ASSUME(call != nullptr);
7462
7463         if (mflags & CORINFO_FLG_NOGCCHECK)
7464         {
7465             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7466         }
7467
7468         // Mark call if it's one of the ones we will maybe treat as an intrinsic
7469         if (isSpecialIntrinsic)
7470         {
7471             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7472         }
7473     }
7474     assert(sig);
7475     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7476
7477     /* Some sanity checks */
7478
7479     // CALL_VIRT and NEWOBJ must have a THIS pointer
7480     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7481     // static bit and hasThis are negations of one another
7482     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7483     assert(call != nullptr);
7484
7485     /*-------------------------------------------------------------------------
7486      * Check special-cases etc
7487      */
7488
7489     /* Special case - Check if it is a call to Delegate.Invoke(). */
7490
7491     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7492     {
7493         assert(!compIsForInlining());
7494         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7495         assert(mflags & CORINFO_FLG_FINAL);
7496
7497         /* Set the delegate flag */
7498         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7499
7500         if (callInfo->secureDelegateInvoke)
7501         {
7502             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7503         }
7504
7505         if (opcode == CEE_CALLVIRT)
7506         {
7507             assert(mflags & CORINFO_FLG_FINAL);
7508
7509             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7510             assert(call->gtFlags & GTF_CALL_NULLCHECK);
7511             call->gtFlags &= ~GTF_CALL_NULLCHECK;
7512         }
7513     }
7514
7515     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7516     actualMethodRetTypeSigClass = sig->retTypeSigClass;
7517     if (varTypeIsStruct(callRetTyp))
7518     {
7519         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
7520         call->gtType = callRetTyp;
7521     }
7522
7523 #if !FEATURE_VARARG
7524     /* Check for varargs */
7525     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7526         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7527     {
7528         BADCODE("Varargs not supported.");
7529     }
7530 #endif // !FEATURE_VARARG
7531
7532 #ifdef UNIX_X86_ABI
7533     if (call->gtCall.callSig == nullptr)
7534     {
7535         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7536         *call->gtCall.callSig = *sig;
7537     }
7538 #endif // UNIX_X86_ABI
7539
7540     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7541         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7542     {
7543         assert(!compIsForInlining());
7544
7545         /* Set the right flags */
7546
7547         call->gtFlags |= GTF_CALL_POP_ARGS;
7548         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7549
7550         /* Can't allow tailcall for varargs as it is caller-pop. The caller
7551            will be expecting to pop a certain number of arguments, but if we
7552            tailcall to a function with a different number of arguments, we
7553            are hosed. There are ways around this (caller remembers esp value,
7554            varargs is not caller-pop, etc), but not worth it. */
7555         CLANG_FORMAT_COMMENT_ANCHOR;
7556
7557 #ifdef _TARGET_X86_
7558         if (canTailCall)
7559         {
7560             canTailCall             = false;
7561             szCanTailCallFailReason = "Callee is varargs";
7562         }
7563 #endif
7564
7565         /* Get the total number of arguments - this is already correct
7566          * for CALLI - for methods we have to get it from the call site */
7567
7568         if (opcode != CEE_CALLI)
7569         {
7570 #ifdef DEBUG
7571             unsigned numArgsDef = sig->numArgs;
7572 #endif
7573             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7574
7575 #ifdef DEBUG
7576             // We cannot lazily obtain the signature of a vararg call because using its method
7577             // handle will give us only the declared argument list, not the full argument list.
7578             assert(call->gtCall.callSig == nullptr);
7579             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7580             *call->gtCall.callSig = *sig;
7581 #endif
7582
7583             // For vararg calls we must be sure to load the return type of the
7584             // method actually being called, as well as the return types
7585             // specified in the vararg signature. With type equivalency, these types
7586             // may not be the same.
7587             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7588             {
7589                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7590                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7591                     sig->retType != CORINFO_TYPE_VAR)
7592                 {
7593                     // Make sure that all valuetypes (including enums) that we push are loaded.
7594                     // This is to guarantee that if a GC is triggered from the prestub of this method,
7595                     // all valuetypes in the method signature are already loaded.
7596                     // We need to be able to find the size of the valuetypes, but we cannot
7597                     // do a class-load from within GC.
7598                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7599                 }
7600             }
7601
7602             assert(numArgsDef <= sig->numArgs);
7603         }
7604
7605         /* We will have "cookie" as the last argument but we cannot push
7606          * it on the operand stack because we may overflow, so we append it
7607          * to the arg list after we pop the other arguments */
7608     }
7609
7610     if (mflags & CORINFO_FLG_SECURITYCHECK)
7611     {
7612         assert(!compIsForInlining());
7613
7614         // Need security prolog/epilog callouts when there is
7615         // imperative security in the method. This is to give security a
7616         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7617
7618         if (compIsForInlining())
7619         {
7620             // Cannot handle this if the method being imported is an inlinee,
7621             // because an inlinee does not have its own frame.
7622
7623             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7624             return TYP_UNDEF;
7625         }
7626         else
7627         {
7628             tiSecurityCalloutNeeded = true;
7629
7630             // If the current method calls a method which needs a security check,
7631             // (i.e. the method being compiled has imperative security)
7632             // we need to reserve a slot for the security object in
7633             // the current method's stack frame
7634             opts.compNeedSecurityCheck = true;
7635         }
7636     }
7637
7638     //--------------------------- Inline NDirect ------------------------------
7639
7640     // For inline cases we technically should look at both the current
7641     // block and the call site block (or just the latter if we've
7642     // fused the EH trees). However the block-related checks pertain to
7643     // EH and we currently won't inline a method with EH. So for
7644     // inlinees, just checking the call site block is sufficient.
7645     {
7646         // New lexical block here to avoid compilation errors because of GOTOs.
7647         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7648         impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
7649     }
7650
7651     if (call->gtFlags & GTF_CALL_UNMANAGED)
7652     {
7653         // We set up the unmanaged call by linking the frame, disabling GC, etc
7654         // This needs to be cleaned up on return
7655         if (canTailCall)
7656         {
7657             canTailCall             = false;
7658             szCanTailCallFailReason = "Callee is native";
7659         }
7660
7661         checkForSmallType = true;
7662
7663         impPopArgsForUnmanagedCall(call, sig);
7664
7665         goto DONE;
7666     }
7667     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7668                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7669                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7670                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7671     {
7672         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7673         {
7674             // Normally this only happens with inlining.
7675             // However, a generic method (or type) being NGENd into another module
7676             // can run into this issue as well.  There's not an easy fall-back for NGEN
7677             // so instead we fall back to JIT.
7678             if (compIsForInlining())
7679             {
7680                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7681             }
7682             else
7683             {
7684                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7685             }
7686
7687             return TYP_UNDEF;
7688         }
7689
7690         GenTree* cookie = eeGetPInvokeCookie(sig);
7691
7692         // This cookie is required to be either a simple GT_CNS_INT or
7693         // an indirection of a GT_CNS_INT
7694         //
7695         GenTree* cookieConst = cookie;
7696         if (cookie->gtOper == GT_IND)
7697         {
7698             cookieConst = cookie->gtOp.gtOp1;
7699         }
7700         assert(cookieConst->gtOper == GT_CNS_INT);
7701
7702         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7703         // we won't allow this tree to participate in any CSE logic
7704         //
7705         cookie->gtFlags |= GTF_DONT_CSE;
7706         cookieConst->gtFlags |= GTF_DONT_CSE;
7707
7708         call->gtCall.gtCallCookie = cookie;
7709
7710         if (canTailCall)
7711         {
7712             canTailCall             = false;
7713             szCanTailCallFailReason = "PInvoke calli";
7714         }
7715     }
7716
7717     /*-------------------------------------------------------------------------
7718      * Create the argument list
7719      */
7720
7721     //-------------------------------------------------------------------------
7722     // Special case - for varargs we have an implicit last argument
7723
7724     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7725     {
7726         assert(!compIsForInlining());
7727
7728         void *varCookie, *pVarCookie;
7729         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7730         {
7731             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7732             return TYP_UNDEF;
7733         }
7734
7735         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7736         assert((!varCookie) != (!pVarCookie));
7737         GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
7738
7739         assert(extraArg == nullptr);
7740         extraArg = gtNewArgList(cookie);
7741     }
7742
7743     //-------------------------------------------------------------------------
7744     // Extra arg for shared generic code and array methods
7745     //
7746     // Extra argument containing instantiation information is passed in the
7747     // following circumstances:
7748     // (a) To the "Address" method on array classes; the extra parameter is
7749     //     the array's type handle (a TypeDesc)
7750     // (b) To shared-code instance methods in generic structs; the extra parameter
7751     //     is the struct's type handle (a vtable ptr)
7752     // (c) To shared-code per-instantiation non-generic static methods in generic
7753     //     classes and structs; the extra parameter is the type handle
7754     // (d) To shared-code generic methods; the extra parameter is an
7755     //     exact-instantiation MethodDesc
7756     //
7757     // We also set the exact type context associated with the call so we can
7758     // inline the call correctly later on.
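    //
    // Example (illustrative): a call to a shared-code generic method M<__Canon> made with T == string
    // is case (d): the hidden argument built below is the exact MethodDesc for M<string>, embedded
    // directly when it is known at JIT time, or obtained via a runtime lookup otherwise.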
7759
7760     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7761     {
7762         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7763         if (clsHnd == nullptr)
7764         {
7765             NO_WAY("CALLI on parameterized type");
7766         }
7767
7768         assert(opcode != CEE_CALLI);
7769
7770         GenTree* instParam;
7771         BOOL     runtimeLookup;
7772
7773         // Instantiated generic method
7774         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7775         {
7776             CORINFO_METHOD_HANDLE exactMethodHandle =
7777                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7778
7779             if (!exactContextNeedsRuntimeLookup)
7780             {
7781 #ifdef FEATURE_READYTORUN_COMPILER
7782                 if (opts.IsReadyToRun())
7783                 {
7784                     instParam =
7785                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7786                     if (instParam == nullptr)
7787                     {
7788                         assert(compDonotInline());
7789                         return TYP_UNDEF;
7790                     }
7791                 }
7792                 else
7793 #endif
7794                 {
7795                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7796                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7797                 }
7798             }
7799             else
7800             {
7801                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7802                 if (instParam == nullptr)
7803                 {
7804                     assert(compDonotInline());
7805                     return TYP_UNDEF;
7806                 }
7807             }
7808         }
7809
7810         // otherwise must be an instance method in a generic struct,
7811         // a static method in a generic type, or a runtime-generated array method
7812         else
7813         {
7814             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7815             CORINFO_CLASS_HANDLE exactClassHandle =
7816                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7817
7818             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7819             {
7820                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7821                 return TYP_UNDEF;
7822             }
7823
7824             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7825             {
7826                 // We indicate "readonly" to the Address operation by using a null
7827                 // instParam.
7828                 instParam = gtNewIconNode(0, TYP_REF);
7829             }
7830             else if (!exactContextNeedsRuntimeLookup)
7831             {
7832 #ifdef FEATURE_READYTORUN_COMPILER
7833                 if (opts.IsReadyToRun())
7834                 {
7835                     instParam =
7836                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7837                     if (instParam == nullptr)
7838                     {
7839                         assert(compDonotInline());
7840                         return TYP_UNDEF;
7841                     }
7842                 }
7843                 else
7844 #endif
7845                 {
7846                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7847                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7848                 }
7849             }
7850             else
7851             {
7852                 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
7853                 // the call was constrained with. We embed pConstrainedResolvedToken as the extra argument
7854                 // because pResolvedToken is an interface method and interface types make a poor generic context.
7855                 if (pConstrainedResolvedToken)
7856                 {
7857                     instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
7858                                                  FALSE /* importParent */);
7859                 }
7860                 else
7861                 {
7862                     instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7863                 }
7864
7865                 if (instParam == nullptr)
7866                 {
7867                     assert(compDonotInline());
7868                     return TYP_UNDEF;
7869                 }
7870             }
7871         }
7872
7873         assert(extraArg == nullptr);
7874         extraArg = gtNewArgList(instParam);
7875     }
7876
7877     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7878     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7879     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7880     // exactContextHnd is not currently required when inlining shared generic code into shared
7881     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7882     // (e.g. anything marked needsRuntimeLookup)
7883     if (exactContextNeedsRuntimeLookup)
7884     {
7885         exactContextHnd = nullptr;
7886     }
7887
7888     if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
7889     {
7890         // Only verifiable cases are supported.
7891         // dup; ldvirtftn; newobj; or ldftn; newobj.
7892         // The IL could contain an unverifiable sequence, in which case this optimization should not be done.
7893         if (impStackHeight() > 0)
7894         {
7895             typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
7896             if (delegateTypeInfo.IsToken())
7897             {
7898                 ldftnToken = delegateTypeInfo.GetToken();
7899             }
7900         }
7901     }
7902
7903     //-------------------------------------------------------------------------
7904     // The main group of arguments
7905
7906     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
7907
7908     if (args)
7909     {
7910         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7911     }
7912
7913     //-------------------------------------------------------------------------
7914     // The "this" pointer
7915
7916     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7917     {
7918         GenTree* obj;
7919
7920         if (opcode == CEE_NEWOBJ)
7921         {
7922             obj = newobjThis;
7923         }
7924         else
7925         {
7926             obj = impPopStack().val;
7927             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7928             if (compDonotInline())
7929             {
7930                 return TYP_UNDEF;
7931             }
7932         }
7933
7934         // Store the "this" value in the call
7935         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7936         call->gtCall.gtCallObjp = obj;
7937
7938         // Is this a virtual or interface call?
7939         if (call->gtCall.IsVirtual())
7940         {
7941             // only true object pointers can be virtual
7942             assert(obj->gtType == TYP_REF);
7943
7944             // See if we can devirtualize.
7945             impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
7946                                 &exactContextHnd);
7947         }
7948
7949         if (impIsThis(obj))
7950         {
7951             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7952         }
7953     }
7954
7955     //-------------------------------------------------------------------------
7956     // The "this" pointer for "newobj"
7957
7958     if (opcode == CEE_NEWOBJ)
7959     {
7960         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7961         {
7962             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7963             // This is a 'new' of a variable sized object, where
7964             // the constructor is to return the object.  In this case
7965             // the constructor claims to return VOID but we know it
7966             // actually returns the new object
7967             assert(callRetTyp == TYP_VOID);
7968             callRetTyp   = TYP_REF;
7969             call->gtType = TYP_REF;
7970             impSpillSpecialSideEff();
7971
7972             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7973         }
7974         else
7975         {
7976             if (clsFlags & CORINFO_FLG_DELEGATE)
7977             {
7978                 // With the new inliner, we morph the delegate constructor call here in impImportCall.
7979                 // This will allow us to inline the call to the delegate constructor.
7980                 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
7981             }
7982
7983             if (!bIntrinsicImported)
7984             {
7985
7986 #if defined(DEBUG) || defined(INLINE_DATA)
7987
7988                 // Keep track of the raw IL offset of the call
7989                 call->gtCall.gtRawILOffset = rawILOffset;
7990
7991 #endif // defined(DEBUG) || defined(INLINE_DATA)
7992
7993                 // Is it an inline candidate?
7994                 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7995             }
7996
7997             // append the call node.
7998             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7999
8000             // Now push the value of the 'new' onto the stack
8001
8002             // This is a 'new' of a non-variable sized object.
8003             // Append the new node (op1) to the statement list,
8004             // and then push the local holding the value of this
8005             // new instruction on the stack.
8006
8007             if (clsFlags & CORINFO_FLG_VALUECLASS)
8008             {
8009                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
8010
8011                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
8012                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
8013             }
8014             else
8015             {
8016                 if (newobjThis->gtOper == GT_COMMA)
8017                 {
8018                     // In coreclr the callout can be inserted even if verification is disabled
8019                     // so we cannot rely on tiVerificationNeeded alone
8020
8021                     // We must have inserted the callout. Get the real newobj.
8022                     newobjThis = newobjThis->gtOp.gtOp2;
8023                 }
8024
8025                 assert(newobjThis->gtOper == GT_LCL_VAR);
8026                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
8027             }
8028         }
8029         return callRetTyp;
8030     }
8031
8032 DONE:
8033
8034     if (tailCall)
8035     {
8036         // This check cannot be performed for implicit tail calls for the reason
8037         // that impIsImplicitTailCallCandidate() does not check whether return
8038         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
8039         // As a result, it is possible that in the following case we find that
8040         // the type stack is non-empty if Callee() is considered for implicit
8041         // tail calling.
8042         //      int Caller(..) { .... void Callee(); ret val; ... }
8043         //
8044         // Note that we cannot check return type compatibility before impImportCall()
8045         // as we don't have the required info, or we would need to duplicate some of the
8046         // logic of impImportCall().
8047         //
8048         // For implicit tail calls, we perform this check after return types are
8049         // known to be compatible.
8050         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
8051         {
8052             BADCODE("Stack should be empty after tailcall");
8053         }
8054
8055         // Note that we cannot relax this condition with genActualType() as
8056         // the calling convention dictates that the caller of a function with
8057         // a small-typed return value is responsible for normalizing the return value.
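        //
        // Illustrative example (not in the original comment): if the callee returns a 'short'
        // while the caller itself returns an 'int', a tail call would bypass the caller's
        // normalizing widening of the result, so the pair is rejected here even though both
        // types share the same genActualType() of TYP_INT.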
8058
8059         if (canTailCall &&
8060             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
8061                                           callInfo->sig.retTypeClass))
8062         {
8063             canTailCall             = false;
8064             szCanTailCallFailReason = "Return types are not tail call compatible";
8065         }
8066
8067         // Stack empty check for implicit tail calls.
8068         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
8069         {
8070 #ifdef _TARGET_AMD64_
8071             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
8072             // in JIT64, not an InvalidProgramException.
8073             Verify(false, "Stack should be empty after tailcall");
8074 #else  // !_TARGET_AMD64_
8075             BADCODE("Stack should be empty after tailcall");
8076 #endif // _TARGET_AMD64_
8077         }
8078
8079         // assert(compCurBB is not a catch, finally or filter block);
8080         // assert(compCurBB is not a try block protected by a finally block);
8081
8082         // Check for permission to tailcall
8083         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
8084
8085         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
8086
8087         if (canTailCall)
8088         {
8089             // True virtual or indirect calls, shouldn't pass in a callee handle.
8090             CORINFO_METHOD_HANDLE exactCalleeHnd =
8091                 ((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
8092             GenTree* thisArg = call->gtCall.gtCallObjp;
8093
8094             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
8095             {
8096                 canTailCall = true;
8097                 if (explicitTailCall)
8098                 {
8099                     // In case of explicit tail calls, mark it so that it is not considered
8100                     // for in-lining.
8101                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
8102 #ifdef DEBUG
8103                     if (verbose)
8104                     {
8105                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
8106                         printTreeID(call);
8107                         printf("\n");
8108                     }
8109 #endif
8110                 }
8111                 else
8112                 {
8113 #if FEATURE_TAILCALL_OPT
8114                     // Must be an implicit tail call.
8115                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
8116
8117                     // It is possible that a call node is both an inline candidate and marked
8118                     // for opportunistic tail calling.  Inlining happens before morphing of
8119                     // trees.  If inlining of an inline candidate gets aborted for whatever
8120                     // reason, it will survive to the morphing stage, at which point it will be
8121                     // transformed into a tail call after performing additional checks.
8122
8123                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
8124 #ifdef DEBUG
8125                     if (verbose)
8126                     {
8127                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
8128                         printTreeID(call);
8129                         printf("\n");
8130                     }
8131 #endif
8132
8133 #else //! FEATURE_TAILCALL_OPT
8134                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
8135
8136 #endif // FEATURE_TAILCALL_OPT
8137                 }
8138
8139                 // we can't report success just yet...
8140             }
8141             else
8142             {
8143                 canTailCall = false;
8144 // canTailCall reported its reasons already
8145 #ifdef DEBUG
8146                 if (verbose)
8147                 {
8148                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
8149                     printTreeID(call);
8150                     printf("\n");
8151                 }
8152 #endif
8153             }
8154         }
8155         else
8156         {
8157             // If this assert fires it means that canTailCall was set to false without setting a reason!
8158             assert(szCanTailCallFailReason != nullptr);
8159
8160 #ifdef DEBUG
8161             if (verbose)
8162             {
8163                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
8164                 printTreeID(call);
8165                 printf(": %s\n", szCanTailCallFailReason);
8166             }
8167 #endif
8168             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
8169                                                      szCanTailCallFailReason);
8170         }
8171     }
8172
8173     // Note: we assume that small return types are already normalized by the managed callee
8174     // or by the pinvoke stub for calls to unmanaged code.
8175
8176     if (!bIntrinsicImported)
8177     {
8178         //
8179         // Things needed to be checked when bIntrinsicImported is false.
8180         //
8181
8182         assert(call->gtOper == GT_CALL);
8183         assert(sig != nullptr);
8184
8185         // Tail calls require us to save the call site's sig info so we can obtain an argument
8186         // copying thunk from the EE later on.
8187         if (call->gtCall.callSig == nullptr)
8188         {
8189             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8190             *call->gtCall.callSig = *sig;
8191         }
8192
8193         if (compIsForInlining() && opcode == CEE_CALLVIRT)
8194         {
8195             GenTree* callObj = call->gtCall.gtCallObjp;
8196             assert(callObj != nullptr);
8197
8198             if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
8199                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
8200                                                                    impInlineInfo->inlArgInfo))
8201             {
8202                 impInlineInfo->thisDereferencedFirst = true;
8203             }
8204         }
8205
8206 #if defined(DEBUG) || defined(INLINE_DATA)
8207
8208         // Keep track of the raw IL offset of the call
8209         call->gtCall.gtRawILOffset = rawILOffset;
8210
8211 #endif // defined(DEBUG) || defined(INLINE_DATA)
8212
8213         // Is it an inline candidate?
8214         impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8215     }
8216
8217 DONE_CALL:
8218     // Push or append the result of the call
8219     if (callRetTyp == TYP_VOID)
8220     {
8221         if (opcode == CEE_NEWOBJ)
8222         {
8223             // we actually did push something, so don't spill the thing we just pushed.
8224             assert(verCurrentState.esStackDepth > 0);
8225             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
8226         }
8227         else
8228         {
8229             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8230         }
8231     }
8232     else
8233     {
8234         impSpillSpecialSideEff();
8235
8236         if (clsFlags & CORINFO_FLG_ARRAY)
8237         {
8238             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8239         }
8240
8241         // Find the return type used for verification by interpreting the method signature.
8242         // NB: we are clobbering the already established sig.
8243         if (tiVerificationNeeded)
8244         {
8245             // Actually, we never get the sig for the original method.
8246             sig = &(callInfo->verSig);
8247         }
8248
8249         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
8250         tiRetVal.NormaliseForStack();
8251
8252         // The CEE_READONLY prefix modifies the verification semantics of an Address
8253         // operation on an array type.
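        // (Informally: a 'readonly.'-prefixed address operation yields a byref that may only be
        //  read through, so such a byref is tracked here as a readonly byref.)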
8254         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
8255         {
8256             tiRetVal.SetIsReadonlyByRef();
8257         }
8258
8259         if (tiVerificationNeeded)
8260         {
8261             // We assume all calls return permanent home byrefs. If they
8262             // didn't they wouldn't be verifiable. This is also covering
8263             // the Address() helper for multidimensional arrays.
8264             if (tiRetVal.IsByRef())
8265             {
8266                 tiRetVal.SetIsPermanentHomeByRef();
8267             }
8268         }
8269
8270         if (call->IsCall())
8271         {
8272             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
8273
8274             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
8275             if (varTypeIsStruct(callRetTyp))
8276             {
8277                 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
8278             }
8279
8280             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
8281             {
8282                 assert(opts.OptEnabled(CLFLG_INLINING));
8283                 assert(!fatPointerCandidate); // We should not try to inline calli.
8284
8285                 // Make the call its own tree (spill the stack if needed).
8286                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8287
8288                 // TODO: Still using the widened type.
8289                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
8290             }
8291             else
8292             {
8293                 if (fatPointerCandidate)
8294                 {
8295                     // fatPointer candidates should be in statements of the form call() or var = call().
8296                     // Such a form allows us to find statements with fat calls without walking through whole trees
8297                     // and avoids problems with cutting trees.
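                    // Illustrative shape (hypothetical temp name) after the assignment below:
                    //     tmpN = calli(...)     // the fat call isolated in its own statement
                    //     ... later uses read tmpN ...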
8298                     assert(!bIntrinsicImported);
8299                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
8300                     if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
8301                     {
8302                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
8303                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
8304                         varDsc->lvVerTypeInfo = tiRetVal;
8305                         impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
8306                         // impAssignTempGen can change src arg list and return type for call that returns struct.
8307                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
8308                         call           = gtNewLclvNode(calliSlot, type);
8309                     }
8310                 }
8311
8312                 // For non-candidates we must also spill, since we
8313                 // might have locals live on the eval stack that this
8314                 // call can modify.
8315                 //
8316                 // Suppress this for certain well-known call targets
8317                 // that we know won't modify locals, e.g. calls that are
8318                 // recognized in gtCanOptimizeTypeEquality. Otherwise
8319                 // we may break key fragile pattern matches later on.
8320                 bool spillStack = true;
8321                 if (call->IsCall())
8322                 {
8323                     GenTreeCall* callNode = call->AsCall();
8324                     if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
8325                     {
8326                         spillStack = false;
8327                     }
8328                     else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8329                     {
8330                         spillStack = false;
8331                     }
8332                 }
8333
8334                 if (spillStack)
8335                 {
8336                     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8337                 }
8338             }
8339         }
8340
8341         if (!bIntrinsicImported)
8342         {
8343             //-------------------------------------------------------------------------
8344             //
8345             /* If the call is of a small type and the callee is managed, the callee will normalize the result
8346                 before returning.
8347                 However, we need to normalize small type values returned by unmanaged
8348                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8349                 if we use the shorter inlined pinvoke stub. */
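            // Illustrative example: for an inlined pinvoke returning 'short', the cast built below
            // wraps the call as CAST<short>(call) with an actual type of TYP_INT, so the upper bits
            // of the result are well-defined for downstream consumers.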
8350
8351             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8352             {
8353                 call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
8354             }
8355         }
8356
8357         impPushOnStack(call, tiRetVal);
8358     }
8359
8360     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
8361     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8362     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8363     //  callInfoCache.uncacheCallInfo();
8364
8365     return callRetTyp;
8366 }
8367 #ifdef _PREFAST_
8368 #pragma warning(pop)
8369 #endif
8370
8371 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8372 {
8373     CorInfoType corType = methInfo->args.retType;
8374
8375     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8376     {
8377         // We have some kind of STRUCT being returned
8378
8379         structPassingKind howToReturnStruct = SPK_Unknown;
8380
8381         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8382
8383         if (howToReturnStruct == SPK_ByReference)
8384         {
8385             return true;
8386         }
8387     }
8388
8389     return false;
8390 }
8391
8392 #ifdef DEBUG
8393 //
8394 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8395 {
8396     TestLabelAndNum tlAndN;
8397     if (numArgs == 2)
8398     {
8399         tlAndN.m_num  = 0;
8400         StackEntry se = impPopStack();
8401         assert(se.seTypeInfo.GetType() == TI_INT);
8402         GenTree* val = se.val;
8403         assert(val->IsCnsIntOrI());
8404         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8405     }
8406     else if (numArgs == 3)
8407     {
8408         StackEntry se = impPopStack();
8409         assert(se.seTypeInfo.GetType() == TI_INT);
8410         GenTree* val = se.val;
8411         assert(val->IsCnsIntOrI());
8412         tlAndN.m_num = val->AsIntConCommon()->IconValue();
8413         se           = impPopStack();
8414         assert(se.seTypeInfo.GetType() == TI_INT);
8415         val = se.val;
8416         assert(val->IsCnsIntOrI());
8417         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8418     }
8419     else
8420     {
8421         assert(false);
8422     }
8423
8424     StackEntry expSe = impPopStack();
8425     GenTree*   node  = expSe.val;
8426
8427     // There are a small number of special cases, where we actually put the annotation on a subnode.
8428     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8429     {
8430         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8431         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8432         // offset within the static field block whose address is returned by the helper call.
8433         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
8434         GenTree* helperCall = nullptr;
8435         assert(node->OperGet() == GT_IND);
8436         tlAndN.m_num -= 100;
8437         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8438         GetNodeTestData()->Remove(node);
8439     }
8440     else
8441     {
8442         GetNodeTestData()->Set(node, tlAndN);
8443     }
8444
8445     impPushOnStack(node, expSe.seTypeInfo);
8446     return node->TypeGet();
8447 }
8448 #endif // DEBUG
8449
8450 //-----------------------------------------------------------------------------------
8451 //  impFixupCallStructReturn: For a call node that returns a struct type, either
8452 //  adjust the return type to an enregisterable type or set the flag to indicate
8453 //  struct return via a retbuf arg.
8454 //
8455 //  Arguments:
8456 //    call       -  GT_CALL GenTree node
8457 //    retClsHnd  -  Class handle of return type of the call
8458 //
8459 //  Return Value:
8460 //    Returns new GenTree node after fixing struct return of call node
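//
//  Notes (illustrative):
//    A struct that wraps a single primitive may simply have call->gtReturnType retyped
//    (e.g. a struct wrapping an 'int' becomes a TYP_INT return), a struct returned through
//    a hidden buffer gets GTF_CALL_M_RETBUFFARG set, and a multi-reg struct return may be
//    forced into the form 'tmp = call'.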
8461 //
8462 GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8463 {
8464     if (!varTypeIsStruct(call))
8465     {
8466         return call;
8467     }
8468
8469     call->gtRetClsHnd = retClsHnd;
8470
8471 #if FEATURE_MULTIREG_RET
8472     // Initialize Return type descriptor of call node
8473     ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8474     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8475 #endif // FEATURE_MULTIREG_RET
8476
8477 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8478
8479     // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
8480     assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8481
8482     // The return type will remain as the incoming struct type unless normalized to a
8483     // single eightbyte return type below.
8484     call->gtReturnType = call->gtType;
8485
8486     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8487     if (retRegCount != 0)
8488     {
8489         if (retRegCount == 1)
8490         {
8491             // struct returned in a single register
8492             call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8493         }
8494         else
8495         {
8496             // must be a struct returned in two registers
8497             assert(retRegCount == 2);
8498
8499             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8500             {
8501                 // Force a call returning multi-reg struct to be always of the IR form
8502                 //   tmp = call
8503                 //
8504                 // No need to assign a multi-reg struct to a local var if:
8505                 //  - It is a tail call or
8506                 //  - The call is marked for in-lining later
8507                 return impAssignMultiRegTypeToVar(call, retClsHnd);
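                // Illustrative resulting IR (sketch, assuming a 16-byte struct on SysV AMD64):
                //     ASG(LCL_VAR tmpN, CALL)    // the call's value lands in a multi-reg temp
                //     ... later uses read LCL_VAR tmpN ...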
8508             }
8509         }
8510     }
8511     else
8512     {
8513         // struct not returned in registers, i.e., returned via a hidden retbuf arg.
8514         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8515     }
8516
8517 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8518
8519     // Check for TYP_STRUCT type that wraps a primitive type
8520     // Such structs are returned using a single register
8521     // and we change the return type on those calls here.
8522     //
8523     structPassingKind howToReturnStruct;
8524     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8525
8526     if (howToReturnStruct == SPK_ByReference)
8527     {
8528         assert(returnType == TYP_UNKNOWN);
8529         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8530     }
8531     else
8532     {
8533         assert(returnType != TYP_UNKNOWN);
8534         call->gtReturnType = returnType;
8535
8536         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
8537         if ((returnType == TYP_LONG) && (compLongUsed == false))
8538         {
8539             compLongUsed = true;
8540         }
8541         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
8542         {
8543             compFloatingPointUsed = true;
8544         }
8545
8546 #if FEATURE_MULTIREG_RET
8547         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8548         assert(retRegCount != 0);
8549
8550         if (retRegCount >= 2)
8551         {
8552             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8553             {
8554                 // Force a call returning multi-reg struct to be always of the IR form
8555                 //   tmp = call
8556                 //
8557                 // No need to assign a multi-reg struct to a local var if:
8558                 //  - It is a tail call or
8559                 //  - The call is marked for in-lining later
8560                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8561             }
8562         }
8563 #endif // FEATURE_MULTIREG_RET
8564     }
8565
8566 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8567
8568     return call;
8569 }
8570
8571 /*****************************************************************************
8572    For struct return values, re-type the operand in the case where the ABI
8573    does not use a struct return buffer.
8574    Note that this method is only called for !_TARGET_X86_.
8575  */
8576
8577 GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
8578 {
8579     assert(varTypeIsStruct(info.compRetType));
8580     assert(info.compRetBuffArg == BAD_VAR_NUM);
8581
8582 #if defined(_TARGET_XARCH_)
8583
8584 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8585     // No VarArgs for CoreCLR on x64 Unix
8586     assert(!info.compIsVarArgs);
8587
8588     // Is method returning a multi-reg struct?
8589     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8590     {
8591         // In case of multi-reg struct return, we force IR to be one of the following:
8592         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
8593         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
8594
8595         if (op->gtOper == GT_LCL_VAR)
8596         {
8597             // Make sure that this struct stays in memory and doesn't get promoted.
8598             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
8599             lvaTable[lclNum].lvIsMultiRegRet = true;
8600
8601             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8602             op->gtFlags |= GTF_DONT_CSE;
8603
8604             return op;
8605         }
8606
8607         if (op->gtOper == GT_CALL)
8608         {
8609             return op;
8610         }
8611
8612         return impAssignMultiRegTypeToVar(op, retClsHnd);
8613     }
8614 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8615     assert(info.compRetNativeType != TYP_STRUCT);
8616 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8617
8618 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8619
8620     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8621     {
8622         if (op->gtOper == GT_LCL_VAR)
8623         {
8624             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8625             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8626             // Make sure this struct type stays as struct so that we can return it as an HFA
8627             lvaTable[lclNum].lvIsMultiRegRet = true;
8628
8629             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8630             op->gtFlags |= GTF_DONT_CSE;
8631
8632             return op;
8633         }
8634
8635         if (op->gtOper == GT_CALL)
8636         {
8637             if (op->gtCall.IsVarargs())
8638             {
8639                 // We cannot tail call because control needs to return to fixup the calling
8640                 // convention for result return.
8641                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8642                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8643             }
8644             else
8645             {
8646                 return op;
8647             }
8648         }
8649         return impAssignMultiRegTypeToVar(op, retClsHnd);
8650     }
8651
8652 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8653
8654     // Is method returning a multi-reg struct?
8655     if (IsMultiRegReturnedType(retClsHnd))
8656     {
8657         if (op->gtOper == GT_LCL_VAR)
8658         {
8659             // This LCL_VAR stays as a TYP_STRUCT
8660             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8661
8662             // Make sure this struct type is not struct promoted
8663             lvaTable[lclNum].lvIsMultiRegRet = true;
8664
8665             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8666             op->gtFlags |= GTF_DONT_CSE;
8667
8668             return op;
8669         }
8670
8671         if (op->gtOper == GT_CALL)
8672         {
8673             if (op->gtCall.IsVarargs())
8674             {
8675                 // We cannot tail call because control needs to return to fixup the calling
8676                 // convention for result return.
8677                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8678                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8679             }
8680             else
8681             {
8682                 return op;
8683             }
8684         }
8685         return impAssignMultiRegTypeToVar(op, retClsHnd);
8686     }
8687
8688 #endif // _TARGET_XARCH_ / FEATURE_MULTIREG_RET && (_TARGET_ARM_ or _TARGET_ARM64_)
8689
8690 REDO_RETURN_NODE:
8691     // Adjust the type away from struct to an integral type,
8692     // without normalizing.
8693     if (op->gtOper == GT_LCL_VAR)
8694     {
8695         op->ChangeOper(GT_LCL_FLD);
8696     }
8697     else if (op->gtOper == GT_OBJ)
8698     {
8699         GenTree* op1 = op->AsObj()->Addr();
8700
8701         // We will fold away OBJ/ADDR
8702         // except for OBJ/ADDR/INDEX
8703         //     as the array type influences the array element's offset
8704         //     Later in this method we change op->gtType to info.compRetNativeType
8705         //     This is not correct when op is a GT_INDEX as the starting offset
8706         //     for the array elements 'elemOffs' is different for an array of
8707         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8708         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8709         //
8710         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
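        // Illustrative shapes (sketch):
        //     OBJ(ADDR(LCL_VAR)) --> LCL_VAR, then retyped to LCL_FLD via REDO_RETURN_NODE
        //     OBJ(ADDR(INDEX))   --> kept as an indirection (converted to GT_IND below)
        //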
8711         {
8712             // Change '*(&X)' to 'X' and see if we can do better
8713             op = op1->gtOp.gtOp1;
8714             goto REDO_RETURN_NODE;
8715         }
8716         op->gtObj.gtClass = NO_CLASS_HANDLE;
8717         op->ChangeOperUnchecked(GT_IND);
8718         op->gtFlags |= GTF_IND_TGTANYWHERE;
8719     }
8720     else if (op->gtOper == GT_CALL)
8721     {
8722         if (op->AsCall()->TreatAsHasRetBufArg(this))
8723         {
8724             // This must be one of those 'special' helpers that don't
8725             // really have a return buffer, but instead use it as a way
8726             // to keep the trees cleaner with fewer address-taken temps.
8727             //
8728             // Well, now we have to materialize the return buffer as
8729             // an address-taken temp. Then we can return the temp.
8730             //
8731             // NOTE: this code assumes that since the call directly
8732             // feeds the return, then the call must be returning the
8733             // same structure/class/type.
8734             //
8735             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8736
8737             // No need to spill anything as we're about to return.
8738             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8739
8740             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8741             // jump directly to a GT_LCL_FLD.
8742             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8743             op->ChangeOper(GT_LCL_FLD);
8744         }
8745         else
8746         {
8747             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8748
8749             // Don't change the gtType of the node just yet, it will get changed later.
8750             return op;
8751         }
8752     }
8753 #if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_ARM64_)
8754     else if ((op->gtOper == GT_HWIntrinsic) && varTypeIsSIMD(op->gtType))
8755     {
8756         // TODO-ARM64-FIXME Implement ARM64 ABI for Short Vectors properly
8757         // assert(op->gtType == info.compRetNativeType)
8758         if (op->gtType != info.compRetNativeType)
8759         {
8760             // Insert a register move to keep target type of SIMD intrinsic intact
8761             op = gtNewScalarHWIntrinsicNode(info.compRetNativeType, op, NI_ARM64_NONE_MOV);
8762         }
8763     }
8764 #endif
8765     else if (op->gtOper == GT_COMMA)
8766     {
8767         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8768     }
8769
8770     op->gtType = info.compRetNativeType;
8771
8772     return op;
8773 }
8774
8775 /*****************************************************************************
8776    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8777    finally-protected try. We find the finally blocks protecting the current
8778    offset (in order) by walking over the complete exception table and
8779    finding enclosing clauses. This assumes that the table is sorted.
8780    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8781
8782    If we are leaving a catch handler, we need to attach the
8783    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8784
8785    After this function, the BBJ_LEAVE block has been converted to a different type.
8786  */
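// Illustrative example (sketch): leaving two nested finally-protected 'try' regions turns the
// BBJ_LEAVE into a chain of the form
//     BBJ_CALLFINALLY (inner finally) -> BBJ_CALLFINALLY (outer finally) -> BBJ_ALWAYS -> leave target
// with any pending CORINFO_HELP_ENDCATCH calls appended to the appropriate call blocks.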
8787
8788 #if !FEATURE_EH_FUNCLETS
8789
8790 void Compiler::impImportLeave(BasicBlock* block)
8791 {
8792 #ifdef DEBUG
8793     if (verbose)
8794     {
8795         printf("\nBefore import CEE_LEAVE:\n");
8796         fgDispBasicBlocks();
8797         fgDispHandlerTab();
8798     }
8799 #endif // DEBUG
8800
8801     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8802     unsigned    blkAddr         = block->bbCodeOffs;
8803     BasicBlock* leaveTarget     = block->bbJumpDest;
8804     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8805
8806     // LEAVE clears the stack: spill side effects and set the stack depth to 0
8807
8808     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8809     verCurrentState.esStackDepth = 0;
8810
8811     assert(block->bbJumpKind == BBJ_LEAVE);
8812     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8813
8814     BasicBlock* step         = DUMMY_INIT(NULL);
8815     unsigned    encFinallies = 0; // Number of enclosing finallies.
8816     GenTree*    endCatches   = NULL;
8817     GenTree*    endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8818
8819     unsigned  XTnum;
8820     EHblkDsc* HBtab;
8821
8822     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8823     {
8824         // Grab the handler offsets
8825
8826         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8827         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8828         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8829         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8830
8831         /* Is this a catch-handler we are CEE_LEAVEing out of?
8832          * If so, we need to call CORINFO_HELP_ENDCATCH.
8833          */
8834
8835         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8836         {
8837             // Can't CEE_LEAVE out of a finally/fault handler
8838             if (HBtab->HasFinallyOrFaultHandler())
8839                 BADCODE("leave out of fault/finally block");
8840
8841             // Create the call to CORINFO_HELP_ENDCATCH
8842             GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8843
8844             // Make a list of all the currently pending endCatches
8845             if (endCatches)
8846                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8847             else
8848                 endCatches = endCatch;
8849
8850 #ifdef DEBUG
8851             if (verbose)
8852             {
8853                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8854                        "CORINFO_HELP_ENDCATCH\n",
8855                        block->bbNum, XTnum);
8856             }
8857 #endif
8858         }
8859         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8860                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8861         {
8862             /* This is a finally-protected try we are jumping out of */
8863
8864             /* If there are any pending endCatches, and we have already
8865                jumped out of a finally-protected try, then the endCatches
8866                have to be put in a block in an outer try for async
8867                exceptions to work correctly.
8868                Else, just append them to the original block */
8869
8870             BasicBlock* callBlock;
8871
8872             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8873
8874             if (encFinallies == 0)
8875             {
8876                 assert(step == DUMMY_INIT(NULL));
8877                 callBlock             = block;
8878                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8879
8880                 if (endCatches)
8881                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8882
8883 #ifdef DEBUG
8884                 if (verbose)
8885                 {
8886                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8887                            "block %s\n",
8888                            callBlock->dspToString());
8889                 }
8890 #endif
8891             }
8892             else
8893             {
8894                 assert(step != DUMMY_INIT(NULL));
8895
8896                 /* Calling the finally block */
8897                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8898                 assert(step->bbJumpKind == BBJ_ALWAYS);
8899                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8900                                               // finally in the chain)
8901                 step->bbJumpDest->bbRefs++;
8902
8903                 /* The new block will inherit this block's weight */
8904                 callBlock->setBBWeight(block->bbWeight);
8905                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8906
8907 #ifdef DEBUG
8908                 if (verbose)
8909                 {
8910                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
8911                            callBlock->dspToString());
8912                 }
8913 #endif
8914
8915                 GenTree* lastStmt;
8916
8917                 if (endCatches)
8918                 {
8919                     lastStmt         = gtNewStmt(endCatches);
8920                     endLFin->gtNext  = lastStmt;
8921                     lastStmt->gtPrev = endLFin;
8922                 }
8923                 else
8924                 {
8925                     lastStmt = endLFin;
8926                 }
8927
8928                 // note that this sets BBF_IMPORTED on the block
8929                 impEndTreeList(callBlock, endLFin, lastStmt);
8930             }
8931
8932             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8933             /* The new block will inherit this block's weight */
8934             step->setBBWeight(block->bbWeight);
8935             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8936
8937 #ifdef DEBUG
8938             if (verbose)
8939             {
8940                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
8941                        step->dspToString());
8942             }
8943 #endif
8944
8945             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8946             assert(finallyNesting <= compHndBBtabCount);
8947
8948             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8949             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8950             endLFin               = gtNewStmt(endLFin);
8951             endCatches            = NULL;
8952
8953             encFinallies++;
8954
8955             invalidatePreds = true;
8956         }
8957     }
8958
8959     /* Append any remaining endCatches, if any */
8960
8961     assert(!encFinallies == !endLFin);
8962
8963     if (encFinallies == 0)
8964     {
8965         assert(step == DUMMY_INIT(NULL));
8966         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8967
8968         if (endCatches)
8969             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8970
8971 #ifdef DEBUG
8972         if (verbose)
8973         {
8974             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8975                    "block %s\n",
8976                    block->dspToString());
8977         }
8978 #endif
8979     }
8980     else
8981     {
8982         // If leaveTarget is the start of another try block, we want to make sure that
8983         // we do not insert finalStep into that try block. Hence, we find the enclosing
8984         // try block.
8985         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8986
8987         // Insert a new BB either in the try region indicated by tryIndex or
8988         // the handler region indicated by leaveTarget->bbHndIndex,
8989         // depending on which is the inner region.
8990         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8991         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8992         step->bbJumpDest = finalStep;
8993
8994         /* The new block will inherit this block's weight */
8995         finalStep->setBBWeight(block->bbWeight);
8996         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8997
8998 #ifdef DEBUG
8999         if (verbose)
9000         {
9001             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
9002                    finalStep->dspToString());
9003         }
9004 #endif
9005
9006         GenTree* lastStmt;
9007
9008         if (endCatches)
9009         {
9010             lastStmt         = gtNewStmt(endCatches);
9011             endLFin->gtNext  = lastStmt;
9012             lastStmt->gtPrev = endLFin;
9013         }
9014         else
9015         {
9016             lastStmt = endLFin;
9017         }
9018
9019         impEndTreeList(finalStep, endLFin, lastStmt);
9020
9021         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9022
9023         // Queue up the jump target for importing
9024
9025         impImportBlockPending(leaveTarget);
9026
9027         invalidatePreds = true;
9028     }
9029
9030     if (invalidatePreds && fgComputePredsDone)
9031     {
9032         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9033         fgRemovePreds();
9034     }
9035
9036 #ifdef DEBUG
9037     fgVerifyHandlerTab();
9038
9039     if (verbose)
9040     {
9041         printf("\nAfter import CEE_LEAVE:\n");
9042         fgDispBasicBlocks();
9043         fgDispHandlerTab();
9044     }
9045 #endif // DEBUG
9046 }
9047
9048 #else // FEATURE_EH_FUNCLETS
9049
9050 void Compiler::impImportLeave(BasicBlock* block)
9051 {
9052 #ifdef DEBUG
9053     if (verbose)
9054     {
9055         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
9056         fgDispBasicBlocks();
9057         fgDispHandlerTab();
9058     }
9059 #endif // DEBUG
9060
9061     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9062     unsigned    blkAddr         = block->bbCodeOffs;
9063     BasicBlock* leaveTarget     = block->bbJumpDest;
9064     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
9065
9066     // LEAVE clears the stack: spill side effects and set the stack depth to 0
9067
9068     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9069     verCurrentState.esStackDepth = 0;
9070
9071     assert(block->bbJumpKind == BBJ_LEAVE);
9072     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
9073
9074     BasicBlock* step = nullptr;
9075
9076     enum StepType
9077     {
9078         // No step type; step == NULL.
9079         ST_None,
9080
9081         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
9082         // That is, is step->bbJumpDest where a finally will return to?
9083         ST_FinallyReturn,
9084
9085         // The step block is a catch return.
9086         ST_Catch,
9087
9088         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
9089         ST_Try
9090     };
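    // Illustrative (sketch): leaving a catch that is itself nested inside a finally-protected
    // 'try' produces a chain like
    //     BBJ_EHCATCHRET (step) -> ... -> BBJ_CALLFINALLY -> BBJ_ALWAYS (step) -> ... -> leave target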
9091     StepType stepType = ST_None;
9092
9093     unsigned  XTnum;
9094     EHblkDsc* HBtab;
9095
9096     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9097     {
9098         // Grab the handler offsets
9099
9100         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9101         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9102         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9103         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9104
9105         /* Is this a catch-handler we are CEE_LEAVEing out of?
9106          */
9107
9108         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9109         {
9110             // Can't CEE_LEAVE out of a finally/fault handler
9111             if (HBtab->HasFinallyOrFaultHandler())
9112             {
9113                 BADCODE("leave out of fault/finally block");
9114             }
9115
9116             /* We are jumping out of a catch */
9117
9118             if (step == nullptr)
9119             {
9120                 step             = block;
9121                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
9122                 stepType         = ST_Catch;
9123
9124 #ifdef DEBUG
9125                 if (verbose)
9126                 {
9127                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
9128                            "block\n",
9129                            XTnum, step->bbNum);
9130                 }
9131 #endif
9132             }
9133             else
9134             {
9135                 BasicBlock* exitBlock;
9136
9137                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
9138                  * scope */
9139                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
9140
9141                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9142                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
9143                                               // exit) returns to this block
9144                 step->bbJumpDest->bbRefs++;
9145
9146 #if defined(_TARGET_ARM_)
9147                 if (stepType == ST_FinallyReturn)
9148                 {
9149                     assert(step->bbJumpKind == BBJ_ALWAYS);
9150                     // Mark the target of a finally return
9151                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9152                 }
9153 #endif // defined(_TARGET_ARM_)
9154
9155                 /* The new block will inherit this block's weight */
9156                 exitBlock->setBBWeight(block->bbWeight);
9157                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9158
9159                 /* This exit block is the new step */
9160                 step     = exitBlock;
9161                 stepType = ST_Catch;
9162
9163                 invalidatePreds = true;
9164
9165 #ifdef DEBUG
9166                 if (verbose)
9167                 {
9168                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
9169                            exitBlock->bbNum);
9170                 }
9171 #endif
9172             }
9173         }
9174         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9175                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9176         {
9177             /* We are jumping out of a finally-protected try */
9178
9179             BasicBlock* callBlock;
9180
9181             if (step == nullptr)
9182             {
9183 #if FEATURE_EH_CALLFINALLY_THUNKS
9184
9185                 // Put the call to the finally in the enclosing region.
9186                 unsigned callFinallyTryIndex =
9187                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9188                 unsigned callFinallyHndIndex =
9189                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9190                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
9191
9192                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
9193                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
9194                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
9195                 // next block, and flow optimizations will remove it.
9196                 block->bbJumpKind = BBJ_ALWAYS;
9197                 block->bbJumpDest = callBlock;
9198                 block->bbJumpDest->bbRefs++;
9199
9200                 /* The new block will inherit this block's weight */
9201                 callBlock->setBBWeight(block->bbWeight);
9202                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9203
9204 #ifdef DEBUG
9205                 if (verbose)
9206                 {
9207                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9208                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
9209                            XTnum, block->bbNum, callBlock->bbNum);
9210                 }
9211 #endif
9212
9213 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9214
9215                 callBlock             = block;
9216                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9217
9218 #ifdef DEBUG
9219                 if (verbose)
9220                 {
9221                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9222                            "BBJ_CALLFINALLY block\n",
9223                            XTnum, callBlock->bbNum);
9224                 }
9225 #endif
9226
9227 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9228             }
9229             else
9230             {
9231                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
9232                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
9233                 // a 'finally'), or the step block is the return from a catch.
9234                 //
9235                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
9236                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
9237                 // automatically re-raise the exception, using the return address of the catch (that is, the target
9238                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
9239                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
9240                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
9241                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
9242                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
9243                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
9244                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
9245                 // stack walks.)
9246
9247                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9248
9249 #if FEATURE_EH_CALLFINALLY_THUNKS
9250                 if (step->bbJumpKind == BBJ_EHCATCHRET)
9251                 {
9252                     // Need to create another step block in the 'try' region that will actually branch to the
9253                     // call-to-finally thunk.
9254                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9255                     step->bbJumpDest  = step2;
9256                     step->bbJumpDest->bbRefs++;
9257                     step2->setBBWeight(block->bbWeight);
9258                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9259
9260 #ifdef DEBUG
9261                     if (verbose)
9262                     {
9263                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
9264                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
9265                                XTnum, step->bbNum, step2->bbNum);
9266                     }
9267 #endif
9268
9269                     step = step2;
9270                     assert(stepType == ST_Catch); // Leave it as catch type for now.
9271                 }
9272 #endif // FEATURE_EH_CALLFINALLY_THUNKS
9273
9274 #if FEATURE_EH_CALLFINALLY_THUNKS
9275                 unsigned callFinallyTryIndex =
9276                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9277                 unsigned callFinallyHndIndex =
9278                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9279 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
9280                 unsigned callFinallyTryIndex = XTnum + 1;
9281                 unsigned callFinallyHndIndex = 0; // don't care
9282 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9283
9284                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
9285                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9286                                               // finally in the chain)
9287                 step->bbJumpDest->bbRefs++;
9288
9289 #if defined(_TARGET_ARM_)
9290                 if (stepType == ST_FinallyReturn)
9291                 {
9292                     assert(step->bbJumpKind == BBJ_ALWAYS);
9293                     // Mark the target of a finally return
9294                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9295                 }
9296 #endif // defined(_TARGET_ARM_)
9297
9298                 /* The new block will inherit this block's weight */
9299                 callBlock->setBBWeight(block->bbWeight);
9300                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9301
9302 #ifdef DEBUG
9303                 if (verbose)
9304                 {
9305                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
9306                            "BB%02u\n",
9307                            XTnum, callBlock->bbNum);
9308                 }
9309 #endif
9310             }
9311
9312             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9313             stepType = ST_FinallyReturn;
9314
9315             /* The new block will inherit this block's weight */
9316             step->setBBWeight(block->bbWeight);
9317             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9318
9319 #ifdef DEBUG
9320             if (verbose)
9321             {
9322                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9323                        "block BB%02u\n",
9324                        XTnum, step->bbNum);
9325             }
9326 #endif
9327
9328             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9329
9330             invalidatePreds = true;
9331         }
9332         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9333                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9334         {
9335             // We are jumping out of a catch-protected try.
9336             //
9337             // If we are returning from a call to a finally, then we must have a step block within a try
9338             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
9339             // that is protected by a catch. This is so that, when unwinding from that finally (e.g., if code within the
9340             // and invoke the appropriate catch.
9341             //
9342             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9343             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9344             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9345             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9346             // address of the catch return as the new exception address. That is, the re-raised exception appears to
9347             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9348             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
9349             // For example:
9350             //
9351             // try {
9352             //    try {
9353             //       // something here raises ThreadAbortException
9354             //       LEAVE LABEL_1; // no need to stop at LABEL_2
9355             //    } catch (Exception) {
9356             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9357             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9358             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9359             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9360             //       // need to do this transformation if the current EH block is a try/catch that catches
9361             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
9362             //       // information, so currently we do it for all catch types.
9363             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
9364             //    }
9365             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9366             // } catch (ThreadAbortException) {
9367             // }
9368             // LABEL_1:
9369             //
9370             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
9371             // compiler.
9372
9373             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9374             {
9375                 BasicBlock* catchStep;
9376
9377                 assert(step);
9378
9379                 if (stepType == ST_FinallyReturn)
9380                 {
9381                     assert(step->bbJumpKind == BBJ_ALWAYS);
9382                 }
9383                 else
9384                 {
9385                     assert(stepType == ST_Catch);
9386                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
9387                 }
9388
9389                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9390                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9391                 step->bbJumpDest = catchStep;
9392                 step->bbJumpDest->bbRefs++;
9393
9394 #if defined(_TARGET_ARM_)
9395                 if (stepType == ST_FinallyReturn)
9396                 {
9397                     // Mark the target of a finally return
9398                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9399                 }
9400 #endif // defined(_TARGET_ARM_)
9401
9402                 /* The new block will inherit this block's weight */
9403                 catchStep->setBBWeight(block->bbWeight);
9404                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9405
9406 #ifdef DEBUG
9407                 if (verbose)
9408                 {
9409                     if (stepType == ST_FinallyReturn)
9410                     {
9411                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9412                                "BBJ_ALWAYS block BB%02u\n",
9413                                XTnum, catchStep->bbNum);
9414                     }
9415                     else
9416                     {
9417                         assert(stepType == ST_Catch);
9418                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9419                                "BBJ_ALWAYS block BB%02u\n",
9420                                XTnum, catchStep->bbNum);
9421                     }
9422                 }
9423 #endif // DEBUG
9424
9425                 /* This block is the new step */
9426                 step     = catchStep;
9427                 stepType = ST_Try;
9428
9429                 invalidatePreds = true;
9430             }
9431         }
9432     }
9433
9434     if (step == nullptr)
9435     {
9436         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9437
9438 #ifdef DEBUG
9439         if (verbose)
9440         {
9441             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9442                    "block BB%02u to BBJ_ALWAYS\n",
9443                    block->bbNum);
9444         }
9445 #endif
9446     }
9447     else
9448     {
9449         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9450
9451 #if defined(_TARGET_ARM_)
9452         if (stepType == ST_FinallyReturn)
9453         {
9454             assert(step->bbJumpKind == BBJ_ALWAYS);
9455             // Mark the target of a finally return
9456             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9457         }
9458 #endif // defined(_TARGET_ARM_)
9459
9460 #ifdef DEBUG
9461         if (verbose)
9462         {
9463             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
9464         }
9465 #endif
9466
9467         // Queue up the jump target for importing
9468
9469         impImportBlockPending(leaveTarget);
9470     }
9471
9472     if (invalidatePreds && fgComputePredsDone)
9473     {
9474         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9475         fgRemovePreds();
9476     }
9477
9478 #ifdef DEBUG
9479     fgVerifyHandlerTab();
9480
9481     if (verbose)
9482     {
9483         printf("\nAfter import CEE_LEAVE:\n");
9484         fgDispBasicBlocks();
9485         fgDispHandlerTab();
9486     }
9487 #endif // DEBUG
9488 }
9489
9490 #endif // FEATURE_EH_FUNCLETS
9491
9492 /*****************************************************************************/
9493 // This is called when reimporting a leave block. It resets the JumpKind,
9494 // JumpDest, and bbNext to the original values
9495
9496 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9497 {
9498 #if FEATURE_EH_FUNCLETS
9499     // With EH funclets, while importing a leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
9500     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY. If for some reason we reimport B0,
9501     // it is reset (in this routine) by marking it as ending with BBJ_LEAVE; further down, when B0 is reimported, we
9502     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
9503     // only predecessor are also considered orphans and become candidates for deletion.
9504     //
9505     //  try  {
9506     //     ....
9507     //     try
9508     //     {
9509     //         ....
9510     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
9511     //     } finally { }
9512     //  } finally { }
9513     //  OUTSIDE:
9514     //
9515     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
9516     // that a finally would branch to (and such a block is marked as a finally target). Block B1 branches to the
9517     // step block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot
9518     // be removed. To work around this we duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as
9519     // BBJ_CALLFINALLY and only serves to pair up with the B1 (BBJ_ALWAYS) that got orphaned. During orphan block
9520     // deletion, B0Dup and B1 will then be treated as a pair and handled correctly.
9521     if (block->bbJumpKind == BBJ_CALLFINALLY)
9522     {
9523         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
9524         dupBlock->bbFlags    = block->bbFlags;
9525         dupBlock->bbJumpDest = block->bbJumpDest;
9526         dupBlock->copyEHRegion(block);
9527         dupBlock->bbCatchTyp = block->bbCatchTyp;
9528
9529         // Mark this block as:
9530         //  a) not referenced by any other block, to make sure that it gets deleted
9531         //  b) having zero weight
9532         //  c) already imported, to prevent it from being imported again
9533         //  d) internal
9534         //  e) rarely run
9535         dupBlock->bbRefs   = 0;
9536         dupBlock->bbWeight = 0;
9537         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
9538
9539         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
9540         // will be next to each other.
9541         fgInsertBBafter(block, dupBlock);
9542
9543 #ifdef DEBUG
9544         if (verbose)
9545         {
9546             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
9547         }
9548 #endif
9549     }
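
    // Informal sketch (added for clarity, not part of the original comment): after the duplication above,
    // the block list around the block being reset looks roughly like
    //
    //     B0    (about to be reset to BBJ_LEAVE and reimported)
    //     B0Dup (BBJ_CALLFINALLY, bbRefs == 0, internal, rarely run)
    //     B1    (the orphaned BBJ_ALWAYS created when the leave was first imported)
    //
    // so that orphan-block removal sees B0Dup/B1 as a proper callfinally/always pair and can delete them together.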
9550 #endif // FEATURE_EH_FUNCLETS
9551
9552     block->bbJumpKind = BBJ_LEAVE;
9553     fgInitBBLookup();
9554     block->bbJumpDest = fgLookupBB(jmpAddr);
9555
9556     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
9557     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
9558     // reason we don't want to remove the block at this point is that if we call
9559     // fgInitBBLookup() again we will do it wrong, as the BBJ_ALWAYS block won't be
9560     // added and the linked list length will be different from fgBBcount.
9561 }
9562
9563 /*****************************************************************************/
9564 // Get the first non-prefix opcode. Used for verification of valid combinations
9565 // of prefixes and actual opcodes.
9566
9567 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9568 {
9569     while (codeAddr < codeEndp)
9570     {
9571         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9572         codeAddr += sizeof(__int8);
9573
9574         if (opcode == CEE_PREFIX1)
9575         {
9576             if (codeAddr >= codeEndp)
9577             {
9578                 break;
9579             }
9580             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9581             codeAddr += sizeof(__int8);
9582         }
9583
9584         switch (opcode)
9585         {
9586             case CEE_UNALIGNED:
9587             case CEE_VOLATILE:
9588             case CEE_TAILCALL:
9589             case CEE_CONSTRAINED:
9590             case CEE_READONLY:
9591                 break;
9592             default:
9593                 return opcode;
9594         }
9595
9596         codeAddr += opcodeSizes[opcode];
9597     }
9598
9599     return CEE_ILLEGAL;
9600 }
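
// Informal example (illustration only, not from the original sources): given the IL byte sequence
//
//     volatile. unaligned. 0x1 ldind.i4
//
// the loop above skips the volatile. and unaligned. prefixes (including the one-byte alignment operand
// of unaligned., via opcodeSizes[]) and returns CEE_LDIND_I4. If the stream ends while only prefixes
// have been seen, CEE_ILLEGAL is returned.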
9601
9602 /*****************************************************************************/
9603 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
9604
9605 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9606 {
9607     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9608
9609     if (!(
9610             // The opcodes of all the ldind and stind variants happen to be contiguous, except stind.i.
9611             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9612             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9613             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9614             // the volatile. prefix is allowed with ldsfld and stsfld
9615             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9616     {
9617         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9618     }
9619 }
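
// For illustration (informal, not from the original sources): "volatile. ldsfld" and "unaligned. 1 ldobj"
// pass this check, whereas "volatile. add" hits the BADCODE path above. "unaligned. 1 ldsfld" also fails
// when validated with volatilePrefix == false, since ldsfld/stsfld are only accepted for the volatile. prefix.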
9620
9621 /*****************************************************************************/
9622
9623 #ifdef DEBUG
9624
9625 #undef RETURN // undef the contracts' RETURN macro
9626
9627 enum controlFlow_t
9628 {
9629     NEXT,
9630     CALL,
9631     RETURN,
9632     THROW,
9633     BRANCH,
9634     COND_BRANCH,
9635     BREAK,
9636     PHI,
9637     META,
9638 };
9639
9640 const static controlFlow_t controlFlow[] = {
9641 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9642 #include "opcode.def"
9643 #undef OPDEF
9644 };
9645
9646 #endif // DEBUG
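
// (Informal note: the table above is generated from the 'flow' column of opcode.def, recording the
// control-flow kind (NEXT, CALL, BRANCH, ...) of each IL opcode; it is compiled only in DEBUG builds.)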
9647
9648 /*****************************************************************************
9649  *  Determine the result type of an arithmetic operation
9650  *  On 64-bit targets, insert upcasts when native int is mixed with int32
9651  */
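//
// Informal summary of the rules implemented below (a sketch added for clarity; 64-bit upcasts omitted):
//
//     byref  - byref                  => native int
//     int    - byref                  => native int   (managed C++ pattern, VSW 318822)
//     byref  - int                    => byref
//     byref  + int  (either order)    => byref
//     native int mixed with int32     => native int   (64-bit targets)
//     long mixed with int32           => long         (32-bit targets)
//     float mixed with double         => double       (float is always widened to double on x87)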
9652 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
9653 {
9654     var_types type = TYP_UNDEF;
9655     GenTree*  op1  = *pOp1;
9656     GenTree*  op2  = *pOp2;
9657
9658     // Arithmetic operations are generally only allowed with
9659     // primitive types, but certain operations are allowed
9660     // with byrefs
9661
9662     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9663     {
9664         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9665         {
9666             // byref1-byref2 => gives a native int
9667             type = TYP_I_IMPL;
9668         }
9669         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9670         {
9671             // [native] int - byref => gives a native int
9672
9673             //
9674             // The reason is that it is possible, in managed C++,
9675             // to have a tree like this:
9676             //
9677             //              -
9678             //             / \
9679             //            /   \
9680             //           /     \
9681             //          /       \
9682             // const(h) int     addr byref
9683             //
9684             // <BUGNUM> VSW 318822 </BUGNUM>
9685             //
9686             // So here we decide to make the resulting type to be a native int.
9687             CLANG_FORMAT_COMMENT_ANCHOR;
9688
9689 #ifdef _TARGET_64BIT_
9690             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9691             {
9692                 // insert an explicit upcast
9693                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9694             }
9695 #endif // _TARGET_64BIT_
9696
9697             type = TYP_I_IMPL;
9698         }
9699         else
9700         {
9701             // byref - [native] int => gives a byref
9702             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9703
9704 #ifdef _TARGET_64BIT_
9705             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9706             {
9707                 // insert an explicit upcast
9708                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9709             }
9710 #endif // _TARGET_64BIT_
9711
9712             type = TYP_BYREF;
9713         }
9714     }
9715     else if ((oper == GT_ADD) &&
9716              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9717     {
9718         // byref + [native] int => gives a byref
9719         // (or)
9720         // [native] int + byref => gives a byref
9721
9722         // only one can be a byref : byref op byref not allowed
9723         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9724         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9725
9726 #ifdef _TARGET_64BIT_
9727         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9728         {
9729             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9730             {
9731                 // insert an explicit upcast
9732                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9733             }
9734         }
9735         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9736         {
9737             // insert an explicit upcast
9738             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9739         }
9740 #endif // _TARGET_64BIT_
9741
9742         type = TYP_BYREF;
9743     }
9744 #ifdef _TARGET_64BIT_
9745     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9746     {
9747         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9748
9749         // int + long => gives long
9750         // long + int => gives long
9751         // we get this because, in the IL, the "long" here isn't an Int64; it's just an IntPtr (native int)
9752
9753         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9754         {
9755             // insert an explicit upcast
9756             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9757         }
9758         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9759         {
9760             // insert an explicit upcast
9761             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9762         }
9763
9764         type = TYP_I_IMPL;
9765     }
9766 #else  // 32-bit TARGET
9767     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9768     {
9769         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9770
9771         // int + long => gives long
9772         // long + int => gives long
9773
9774         type = TYP_LONG;
9775     }
9776 #endif // _TARGET_64BIT_
9777     else
9778     {
9779         // int + int => gives an int
9780         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9781
9782         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9783                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9784
9785         type = genActualType(op1->gtType);
9786
9787 #if FEATURE_X87_DOUBLES
9788
9789         // For x87, since we only have 1 size of registers, prefer double
9790         // For everybody else, be more precise
9791         if (type == TYP_FLOAT)
9792             type = TYP_DOUBLE;
9793
9794 #else // !FEATURE_X87_DOUBLES
9795
9796         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9797         // Otherwise, turn floats into doubles
9798         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9799         {
9800             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9801             type = TYP_DOUBLE;
9802         }
9803
9804 #endif // FEATURE_X87_DOUBLES
9805     }
9806
9807 #if FEATURE_X87_DOUBLES
9808     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9809 #else  // FEATURE_X87_DOUBLES
9810     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9811 #endif // FEATURE_X87_DOUBLES
9812
9813     return type;
9814 }
9815
9816 //------------------------------------------------------------------------
9817 // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
9818 //
9819 // Arguments:
9820 //   op1 - value to cast
9821 //   pResolvedToken - resolved token for type to cast to
9822 //   isCastClass - true if this is a castclass, false if isinst
9823 //
9824 // Return Value:
9825 //   tree representing optimized cast, or null if no optimization possible
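//
// Notes (informal, added for clarity):
//   For example, with hypothetical types: if op1's class is known to be the sealed type SealedDerived
//   and the token resolves to its base class Base, compareTypesForCast returns Must and the cast folds
//   to op1 itself. Conversely, for an isinst to an unrelated type where op1's class is exact (or final),
//   the result folds to a null constant, and a box feeding op1 can be removed as well.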
9826
9827 GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
9828 {
9829     assert(op1->TypeGet() == TYP_REF);
9830
9831     // Don't optimize for minopts or debug codegen.
9832     if (opts.compDbgCode || opts.MinOpts())
9833     {
9834         return nullptr;
9835     }
9836
9837     // See what we know about the type of the object being cast.
9838     bool                 isExact   = false;
9839     bool                 isNonNull = false;
9840     CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
9841     GenTree*             optResult = nullptr;
9842
9843     if (fromClass != nullptr)
9844     {
9845         CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
9846         JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
9847                 isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
9848                 info.compCompHnd->getClassName(toClass));
9849
9850         // Perhaps we know if the cast will succeed or fail.
9851         TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
9852
9853         if (castResult == TypeCompareState::Must)
9854         {
9855             // Cast will succeed, result is simply op1.
9856             JITDUMP("Cast will succeed, optimizing to simply return input\n");
9857             return op1;
9858         }
9859         else if (castResult == TypeCompareState::MustNot)
9860         {
9861             // See if we can sharpen exactness by looking for final classes
9862             if (!isExact)
9863             {
9864                 DWORD flags     = info.compCompHnd->getClassAttribs(fromClass);
9865                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL |
9866                                   CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
9867                 isExact = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9868             }
9869
9870             // Cast to exact type will fail. Handle case where we have
9871             // an exact type (that is, fromClass is not a subtype)
9872             // and we're not going to throw on failure.
9873             if (isExact && !isCastClass)
9874             {
9875                 JITDUMP("Cast will fail, optimizing to return null\n");
9876                 GenTree* result = gtNewIconNode(0, TYP_REF);
9877
9878                 // If the cast was fed by a box, we can remove that too.
9879                 if (op1->IsBoxedValue())
9880                 {
9881                     JITDUMP("Also removing upstream box\n");
9882                     gtTryRemoveBoxUpstreamEffects(op1);
9883                 }
9884
9885                 return result;
9886             }
9887             else if (isExact)
9888             {
9889                 JITDUMP("Not optimizing failing castclass (yet)\n");
9890             }
9891             else
9892             {
9893                 JITDUMP("Can't optimize since fromClass is inexact\n");
9894             }
9895         }
9896         else
9897         {
9898             JITDUMP("Result of cast unknown, must generate runtime test\n");
9899         }
9900     }
9901     else
9902     {
9903         JITDUMP("\nCan't optimize since fromClass is unknown\n");
9904     }
9905
9906     return nullptr;
9907 }
9908
9909 //------------------------------------------------------------------------
9910 // impCastClassOrIsInstToTree: build and import castclass/isinst
9911 //
9912 // Arguments:
9913 //   op1 - value to cast
9914 //   op2 - type handle for type to cast to
9915 //   pResolvedToken - resolved token from the cast operation
9916 //   isCastClass - true if this is castclass, false means isinst
9917 //
9918 // Return Value:
9919 //   Tree representing the cast
9920 //
9921 // Notes:
9922 //   May expand into a series of runtime checks or a helper call.
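//
//   Informal sketch of the inline expansion built below (illustration only):
//
//       result = (op1 == null)
//                    ? op1
//                    : ((*op1 != op2)    // method table vs. class handle
//                           ? (isCastClass ? CORINFO_HELP_CHKCASTCLASS_SPECIAL(op2, op1) : null)
//                           : op1);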
9923
9924 GenTree* Compiler::impCastClassOrIsInstToTree(GenTree*                op1,
9925                                               GenTree*                op2,
9926                                               CORINFO_RESOLVED_TOKEN* pResolvedToken,
9927                                               bool                    isCastClass)
9928 {
9929     assert(op1->TypeGet() == TYP_REF);
9930
9931     // Optimistically assume the jit should expand this as an inline test
9932     bool shouldExpandInline = true;
9933
9934     // Profitability check.
9935     //
9936     // Don't bother with inline expansion when jit is trying to
9937     // generate code quickly, or the cast is in code that won't run very
9938     // often, or the method already is pretty big.
9939     if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
9940     {
9941         // not worth the code expansion if jitting fast or in a rarely run block
9942         shouldExpandInline = false;
9943     }
9944     else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9945     {
9946         // not worth creating an untracked local variable
9947         shouldExpandInline = false;
9948     }
9949
9950     // Pessimistically assume the jit cannot expand this as an inline test
9951     bool                  canExpandInline = false;
9952     const CorInfoHelpFunc helper          = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9953
9954     // Legality check.
9955     //
9956     // Not all castclass/isinst operations can be inline expanded.
9957     // Check legality only if an inline expansion is desirable.
9958     if (shouldExpandInline)
9959     {
9960         if (isCastClass)
9961         {
9962             // Jit can only inline expand the normal CHKCASTCLASS helper.
9963             canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9964         }
9965         else
9966         {
9967             if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9968             {
9969                 // Check the class attributes.
9970                 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9971
9972                 // If the class is final and is not marshal byref or
9973                 // contextful, the jit can expand the IsInst check inline.
9974                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
9975                 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9976             }
9977         }
9978     }
9979
9980     const bool expandInline = canExpandInline && shouldExpandInline;
9981
9982     if (!expandInline)
9983     {
9984         JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
9985                 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
9986
9987         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9988         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9989         //
9990         op2->gtFlags |= GTF_DONT_CSE;
9991
9992         return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
9993     }
9994
9995     JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
9996
9997     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9998
9999     GenTree* temp;
10000     GenTree* condMT;
10001     //
10002     // expand the methodtable match:
10003     //
10004     //  condMT ==>   GT_NE
10005     //               /    \
10006     //           GT_IND   op2 (typically CNS_INT)
10007     //              |
10008     //           op1Copy
10009     //
10010
10011     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
10012     //
10013     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
10014     //
10015     // op1 is now known to be a non-complex tree
10016     // thus we can use gtClone(op1) from now on
10017     //
10018
10019     GenTree* op2Var = op2;
10020     if (isCastClass)
10021     {
10022         op2Var                                                  = fgInsertCommaFormTemp(&op2);
10023         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
10024     }
10025     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
10026     temp->gtFlags |= GTF_EXCEPT;
10027     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
10028
10029     GenTree* condNull;
10030     //
10031     // expand the null check:
10032     //
10033     //  condNull ==>   GT_EQ
10034     //                 /    \
10035     //             op1Copy CNS_INT
10036     //                      null
10037     //
10038     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
10039
10040     //
10041     // expand the true and false trees for the condMT
10042     //
10043     GenTree* condFalse = gtClone(op1);
10044     GenTree* condTrue;
10045     if (isCastClass)
10046     {
10047         //
10048         // use the special helper that skips the cases checked by our inlined cast
10049         //
10050         const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
10051
10052         condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
10053     }
10054     else
10055     {
10056         condTrue = gtNewIconNode(0, TYP_REF);
10057     }
10058
10059 #define USE_QMARK_TREES
10060
10061 #ifdef USE_QMARK_TREES
10062     GenTree* qmarkMT;
10063     //
10064     // Generate first QMARK - COLON tree
10065     //
10066     //  qmarkMT ==>   GT_QMARK
10067     //                 /     \
10068     //            condMT   GT_COLON
10069     //                      /     \
10070     //                condFalse  condTrue
10071     //
10072     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
10073     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
10074     condMT->gtFlags |= GTF_RELOP_QMARK;
10075
10076     GenTree* qmarkNull;
10077     //
10078     // Generate second QMARK - COLON tree
10079     //
10080     //  qmarkNull ==>  GT_QMARK
10081     //                 /     \
10082     //           condNull  GT_COLON
10083     //                      /     \
10084     //                qmarkMT   op1Copy
10085     //
10086     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
10087     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
10088     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
10089     condNull->gtFlags |= GTF_RELOP_QMARK;
10090
10091     // Make QMark node a top level node by spilling it.
10092     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
10093     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
10094
10095     // TODO: Is it possible op1 has a better type?
10096     lvaSetClass(tmp, pResolvedToken->hClass);
10097     return gtNewLclvNode(tmp, TYP_REF);
10098 #endif
10099 }
10100
10101 #ifndef DEBUG
10102 #define assertImp(cond) ((void)0)
10103 #else
10104 #define assertImp(cond)                                                                                                \
10105     do                                                                                                                 \
10106     {                                                                                                                  \
10107         if (!(cond))                                                                                                   \
10108         {                                                                                                              \
10109             const int cchAssertImpBuf = 600;                                                                           \
10110             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
10111             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
10112                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
10113                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
10114                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
10115             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
10116         }                                                                                                              \
10117     } while (0)
10118 #endif // DEBUG
10119
10120 #ifdef _PREFAST_
10121 #pragma warning(push)
10122 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
10123 #endif
10124 /*****************************************************************************
10125  *  Import the instr for the given basic block
10126  */
10127 void Compiler::impImportBlockCode(BasicBlock* block)
10128 {
10129 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
10130
10131 #ifdef DEBUG
10132
10133     if (verbose)
10134     {
10135         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
10136     }
10137 #endif
10138
10139     unsigned  nxtStmtIndex = impInitBlockLineInfo();
10140     IL_OFFSET nxtStmtOffs;
10141
10142     GenTree*                     arrayNodeFrom;
10143     GenTree*                     arrayNodeTo;
10144     GenTree*                     arrayNodeToIndex;
10145     CorInfoHelpFunc              helper;
10146     CorInfoIsAccessAllowedResult accessAllowedResult;
10147     CORINFO_HELPER_DESC          calloutHelper;
10148     const BYTE*                  lastLoadToken = nullptr;
10149
10150     // reject cyclic constraints
10151     if (tiVerificationNeeded)
10152     {
10153         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
10154         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
10155     }
10156
10157     /* Get the tree list started */
10158
10159     impBeginTreeList();
10160
10161     /* Walk the opcodes that comprise the basic block */
10162
10163     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
10164     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
10165
10166     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
10167     IL_OFFSET lastSpillOffs = opcodeOffs;
10168
10169     signed jmpDist;
10170
10171     /* remember the start of the delegate creation sequence (used for verification) */
10172     const BYTE* delegateCreateStart = nullptr;
10173
10174     int  prefixFlags = 0;
10175     bool explicitTailCall, constraintCall, readonlyCall;
10176
10177     typeInfo tiRetVal;
10178
10179     unsigned numArgs = info.compArgsCount;
10180
10181     /* Now process all the opcodes in the block */
10182
10183     var_types callTyp    = TYP_COUNT;
10184     OPCODE    prevOpcode = CEE_ILLEGAL;
10185
10186     if (block->bbCatchTyp)
10187     {
10188         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
10189         {
10190             impCurStmtOffsSet(block->bbCodeOffs);
10191         }
10192
10193         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
10194         // to a temp. This is a trade-off for code simplicity
10195         impSpillSpecialSideEff();
10196     }
10197
10198     while (codeAddr < codeEndp)
10199     {
10200         bool                   usingReadyToRunHelper = false;
10201         CORINFO_RESOLVED_TOKEN resolvedToken;
10202         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
10203         CORINFO_CALL_INFO      callInfo;
10204         CORINFO_FIELD_INFO     fieldInfo;
10205
10206         tiRetVal = typeInfo(); // Default type info
10207
10208         //---------------------------------------------------------------------
10209
10210         /* We need to restrict the max tree depth as many of the Compiler
10211            functions are recursive. We do this by spilling the stack */
10212
10213         if (verCurrentState.esStackDepth)
10214         {
10215             /* Has it been a while since we last saw a non-empty stack (which
10216                guarantees that the tree depth isn't accumulating). */
10217
10218             if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
10219             {
10220                 impSpillStackEnsure();
10221                 lastSpillOffs = opcodeOffs;
10222             }
10223         }
10224         else
10225         {
10226             lastSpillOffs   = opcodeOffs;
10227             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
10228         }
10229
10230         /* Compute the current instr offset */
10231
10232         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10233
10234 #ifndef DEBUG
10235         if (opts.compDbgInfo)
10236 #endif
10237         {
10238             if (!compIsForInlining())
10239             {
10240                 nxtStmtOffs =
10241                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
10242
10243                 /* Have we reached the next stmt boundary ? */
10244
10245                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
10246                 {
10247                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
10248
10249                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
10250                     {
10251                         /* We need to provide accurate IP-mapping at this point.
10252                            So spill anything on the stack so that it will form
10253                            gtStmts with the correct stmt offset noted */
10254
10255                         impSpillStackEnsure(true);
10256                     }
10257
10258                     // Has impCurStmtOffs been reported in any tree?
10259
10260                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
10261                     {
10262                         GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
10263                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10264
10265                         assert(impCurStmtOffs == BAD_IL_OFFSET);
10266                     }
10267
10268                     if (impCurStmtOffs == BAD_IL_OFFSET)
10269                     {
10270                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
10271                            If opcodeOffs has gone past nxtStmtIndex, catch up */
10272
10273                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
10274                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
10275                         {
10276                             nxtStmtIndex++;
10277                         }
10278
10279                         /* Go to the new stmt */
10280
10281                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
10282
10283                         /* Update the stmt boundary index */
10284
10285                         nxtStmtIndex++;
10286                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
10287
10288                         /* Are there any more line# entries after this one? */
10289
10290                         if (nxtStmtIndex < info.compStmtOffsetsCount)
10291                         {
10292                             /* Remember where the next line# starts */
10293
10294                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
10295                         }
10296                         else
10297                         {
10298                             /* No more line# entries */
10299
10300                             nxtStmtOffs = BAD_IL_OFFSET;
10301                         }
10302                     }
10303                 }
10304                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
10305                          (verCurrentState.esStackDepth == 0))
10306                 {
10307                     /* At stack-empty locations, we have already added the tree to
10308                        the stmt list with the last offset. We just need to update
10309                        impCurStmtOffs
10310                      */
10311
10312                     impCurStmtOffsSet(opcodeOffs);
10313                 }
10314                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
10315                          impOpcodeIsCallSiteBoundary(prevOpcode))
10316                 {
10317                     /* Make sure we have a type cached */
10318                     assert(callTyp != TYP_COUNT);
10319
10320                     if (callTyp == TYP_VOID)
10321                     {
10322                         impCurStmtOffsSet(opcodeOffs);
10323                     }
10324                     else if (opts.compDbgCode)
10325                     {
10326                         impSpillStackEnsure(true);
10327                         impCurStmtOffsSet(opcodeOffs);
10328                     }
10329                 }
10330                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
10331                 {
10332                     if (opts.compDbgCode)
10333                     {
10334                         impSpillStackEnsure(true);
10335                     }
10336
10337                     impCurStmtOffsSet(opcodeOffs);
10338                 }
10339
10340                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
10341                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
10342             }
10343         }
10344
10345         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
10346         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
10347         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
10348
10349         var_types       lclTyp, ovflType = TYP_UNKNOWN;
10350         GenTree*        op1           = DUMMY_INIT(NULL);
10351         GenTree*        op2           = DUMMY_INIT(NULL);
10352         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
10353         GenTree*        newObjThisPtr = DUMMY_INIT(NULL);
10354         bool            uns           = DUMMY_INIT(false);
10355         bool            isLocal       = false;
10356
10357         /* Get the next opcode and the size of its parameters */
10358
10359         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10360         codeAddr += sizeof(__int8);
10361
10362 #ifdef DEBUG
10363         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10364         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
10365 #endif
10366
10367     DECODE_OPCODE:
10368
10369         // Return if any previous code has caused inline to fail.
10370         if (compDonotInline())
10371         {
10372             return;
10373         }
10374
10375         /* Get the size of additional parameters */
10376
10377         signed int sz = opcodeSizes[opcode];
10378
10379 #ifdef DEBUG
10380         clsHnd  = NO_CLASS_HANDLE;
10381         lclTyp  = TYP_COUNT;
10382         callTyp = TYP_COUNT;
10383
10384         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10385         impCurOpcName = opcodeNames[opcode];
10386
10387         if (verbose && (opcode != CEE_PREFIX1))
10388         {
10389             printf("%s", impCurOpcName);
10390         }
10391
10392         /* Use assertImp() to display the opcode */
10393
10394         op1 = op2 = nullptr;
10395 #endif
10396
10397         /* See what kind of an opcode we have, then */
10398
10399         unsigned mflags   = 0;
10400         unsigned clsFlags = 0;
10401
10402         switch (opcode)
10403         {
10404             unsigned  lclNum;
10405             var_types type;
10406
10407             GenTree*   op3;
10408             genTreeOps oper;
10409             unsigned   size;
10410
10411             int val;
10412
10413             CORINFO_SIG_INFO     sig;
10414             IL_OFFSET            jmpAddr;
10415             bool                 ovfl, unordered, callNode;
10416             bool                 ldstruct;
10417             CORINFO_CLASS_HANDLE tokenType;
10418
10419             union {
10420                 int     intVal;
10421                 float   fltVal;
10422                 __int64 lngVal;
10423                 double  dblVal;
10424             } cval;
10425
10426             case CEE_PREFIX1:
10427                 opcode     = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10428                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10429                 codeAddr += sizeof(__int8);
10430                 goto DECODE_OPCODE;
10431
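            // (Informal note on the shared labels below: SPILL_APPEND appends with CHECK_SPILL_ALL,
            // spilling anything on the stack that the appended tree might interfere with, while APPEND
            // uses CHECK_SPILL_NONE and relies on the importer having already established that no such
            // spilling is needed.)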
10432             SPILL_APPEND:
10433
10434                 // We need to call impSpillLclRefs() for a struct type lclVar.
10435                 // This is done for non-block assignments in the handling of stloc.
10436                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10437                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10438                 {
10439                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10440                 }
10441
10442                 /* Append 'op1' to the list of statements */
10443                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10444                 goto DONE_APPEND;
10445
10446             APPEND:
10447
10448                 /* Append 'op1' to the list of statements */
10449
10450                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10451                 goto DONE_APPEND;
10452
10453             DONE_APPEND:
10454
10455 #ifdef DEBUG
10456                 // Remember at which BC offset the tree was finished
10457                 impNoteLastILoffs();
10458 #endif
10459                 break;
10460
10461             case CEE_LDNULL:
10462                 impPushNullObjRefOnStack();
10463                 break;
10464
10465             case CEE_LDC_I4_M1:
10466             case CEE_LDC_I4_0:
10467             case CEE_LDC_I4_1:
10468             case CEE_LDC_I4_2:
10469             case CEE_LDC_I4_3:
10470             case CEE_LDC_I4_4:
10471             case CEE_LDC_I4_5:
10472             case CEE_LDC_I4_6:
10473             case CEE_LDC_I4_7:
10474             case CEE_LDC_I4_8:
10475                 cval.intVal = (opcode - CEE_LDC_I4_0);
10476                 assert(-1 <= cval.intVal && cval.intVal <= 8);
10477                 goto PUSH_I4CON;
10478
10479             case CEE_LDC_I4_S:
10480                 cval.intVal = getI1LittleEndian(codeAddr);
10481                 goto PUSH_I4CON;
10482             case CEE_LDC_I4:
10483                 cval.intVal = getI4LittleEndian(codeAddr);
10484                 goto PUSH_I4CON;
10485             PUSH_I4CON:
10486                 JITDUMP(" %d", cval.intVal);
10487                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10488                 break;
10489
10490             case CEE_LDC_I8:
10491                 cval.lngVal = getI8LittleEndian(codeAddr);
10492                 JITDUMP(" 0x%016llx", cval.lngVal);
10493                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10494                 break;
10495
10496             case CEE_LDC_R8:
10497                 cval.dblVal = getR8LittleEndian(codeAddr);
10498                 JITDUMP(" %#.17g", cval.dblVal);
10499                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10500                 break;
10501
10502             case CEE_LDC_R4:
10503                 cval.dblVal = getR4LittleEndian(codeAddr);
10504                 JITDUMP(" %#.17g", cval.dblVal);
10505                 {
10506                     GenTree* cnsOp = gtNewDconNode(cval.dblVal);
10507 #if !FEATURE_X87_DOUBLES
10508                     // The x87 FP stack doesn't differentiate between float and double,
10509                     // so on x87 R4 is treated as R8; everybody else keeps the more precise float type
10510                     cnsOp->gtType = TYP_FLOAT;
10511 #endif // FEATURE_X87_DOUBLES
10512                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10513                 }
10514                 break;
10515
10516             case CEE_LDSTR:
10517
10518                 if (compIsForInlining())
10519                 {
10520                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10521                     {
10522                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
10523                         return;
10524                     }
10525                 }
10526
10527                 val = getU4LittleEndian(codeAddr);
10528                 JITDUMP(" %08X", val);
10529                 if (tiVerificationNeeded)
10530                 {
10531                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
10532                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
10533                 }
10534                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
10535
10536                 break;
10537
10538             case CEE_LDARG:
10539                 lclNum = getU2LittleEndian(codeAddr);
10540                 JITDUMP(" %u", lclNum);
10541                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10542                 break;
10543
10544             case CEE_LDARG_S:
10545                 lclNum = getU1LittleEndian(codeAddr);
10546                 JITDUMP(" %u", lclNum);
10547                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10548                 break;
10549
10550             case CEE_LDARG_0:
10551             case CEE_LDARG_1:
10552             case CEE_LDARG_2:
10553             case CEE_LDARG_3:
10554                 lclNum = (opcode - CEE_LDARG_0);
10555                 assert(lclNum >= 0 && lclNum < 4);
10556                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10557                 break;
10558
10559             case CEE_LDLOC:
10560                 lclNum = getU2LittleEndian(codeAddr);
10561                 JITDUMP(" %u", lclNum);
10562                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10563                 break;
10564
10565             case CEE_LDLOC_S:
10566                 lclNum = getU1LittleEndian(codeAddr);
10567                 JITDUMP(" %u", lclNum);
10568                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10569                 break;
10570
10571             case CEE_LDLOC_0:
10572             case CEE_LDLOC_1:
10573             case CEE_LDLOC_2:
10574             case CEE_LDLOC_3:
10575                 lclNum = (opcode - CEE_LDLOC_0);
10576                 assert(lclNum >= 0 && lclNum < 4);
10577                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10578                 break;
10579
10580             case CEE_STARG:
10581                 lclNum = getU2LittleEndian(codeAddr);
10582                 goto STARG;
10583
10584             case CEE_STARG_S:
10585                 lclNum = getU1LittleEndian(codeAddr);
10586             STARG:
10587                 JITDUMP(" %u", lclNum);
10588
10589                 if (tiVerificationNeeded)
10590                 {
10591                     Verify(lclNum < info.compILargsCount, "bad arg num");
10592                 }
10593
10594                 if (compIsForInlining())
10595                 {
10596                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10597                     noway_assert(op1->gtOper == GT_LCL_VAR);
10598                     lclNum = op1->AsLclVar()->gtLclNum;
10599
10600                     goto VAR_ST_VALID;
10601                 }
10602
10603                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10604                 assertImp(lclNum < numArgs);
10605
10606                 if (lclNum == info.compThisArg)
10607                 {
10608                     lclNum = lvaArg0Var;
10609                 }
10610
10611                 // We should have seen this arg write in the prescan
10612                 assert(lvaTable[lclNum].lvHasILStoreOp);
10613
10614                 if (tiVerificationNeeded)
10615                 {
10616                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
10617                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
10618                            "type mismatch");
10619
10620                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10621                     {
10622                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
10623                     }
10624                 }
10625
10626                 goto VAR_ST;
10627
10628             case CEE_STLOC:
10629                 lclNum  = getU2LittleEndian(codeAddr);
10630                 isLocal = true;
10631                 JITDUMP(" %u", lclNum);
10632                 goto LOC_ST;
10633
10634             case CEE_STLOC_S:
10635                 lclNum  = getU1LittleEndian(codeAddr);
10636                 isLocal = true;
10637                 JITDUMP(" %u", lclNum);
10638                 goto LOC_ST;
10639
10640             case CEE_STLOC_0:
10641             case CEE_STLOC_1:
10642             case CEE_STLOC_2:
10643             case CEE_STLOC_3:
10644                 isLocal = true;
10645                 lclNum  = (opcode - CEE_STLOC_0);
10646                 assert(lclNum >= 0 && lclNum < 4);
10647
10648             LOC_ST:
10649                 if (tiVerificationNeeded)
10650                 {
10651                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10652                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
10653                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
10654                            "type mismatch");
10655                 }
10656
10657                 if (compIsForInlining())
10658                 {
10659                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10660
10661                     /* Have we allocated a temp for this local? */
10662
10663                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
10664
10665                     goto _PopValue;
10666                 }
10667
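                      // In lvaTable the IL locals follow the arguments, so bias the local index by numArgs.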
10668                 lclNum += numArgs;
10669
10670             VAR_ST:
10671
10672                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10673                 {
10674                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10675                     BADCODE("Bad IL");
10676                 }
10677
10678             VAR_ST_VALID:
10679
10680                 /* if it is a struct assignment, make certain we don't overflow the buffer */
10681                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10682
10683                 if (lvaTable[lclNum].lvNormalizeOnLoad())
10684                 {
10685                     lclTyp = lvaGetRealType(lclNum);
10686                 }
10687                 else
10688                 {
10689                     lclTyp = lvaGetActualType(lclNum);
10690                 }
10691
10692             _PopValue:
10693                 /* Pop the value being assigned */
10694
10695                 {
10696                     StackEntry se = impPopStack();
10697                     clsHnd        = se.seTypeInfo.GetClassHandle();
10698                     op1           = se.val;
10699                     tiRetVal      = se.seTypeInfo;
10700                 }
10701
10702 #ifdef FEATURE_SIMD
10703                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10704                 {
10705                     assert(op1->TypeGet() == TYP_STRUCT);
10706                     op1->gtType = lclTyp;
10707                 }
10708 #endif // FEATURE_SIMD
10709
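                      // Allow the implicit int32 <-> native int conversions that the IL store permits.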
10710                 op1 = impImplicitIorI4Cast(op1, lclTyp);
10711
10712 #ifdef _TARGET_64BIT_
10713                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10714                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10715                 {
10716                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10717                     op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
10718                 }
10719 #endif // _TARGET_64BIT_
10720
10721                 // We had better assign it a value of the correct type
10722                 assertImp(
10723                     genActualType(lclTyp) == genActualType(op1->gtType) ||
10724                     (genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr()) ||
10725                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10726                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10727                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10728                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10729
10730                 /* If op1 is "&var" then its type is the transient "*" and it can
10731                    be used either as TYP_BYREF or TYP_I_IMPL */
10732
10733                 if (op1->IsVarAddr())
10734                 {
10735                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10736
10737                     /* When "&var" is created, we assume it is a byref. If it is
10738                        being assigned to a TYP_I_IMPL var, change the type to
10739                        prevent unnecessary GC info */
10740
10741                     if (genActualType(lclTyp) == TYP_I_IMPL)
10742                     {
10743                         op1->gtType = TYP_I_IMPL;
10744                     }
10745                 }
10746
10747                 // If this is a local and the local is a ref type, see
10748                 // if we can improve type information based on the
10749                 // value being assigned.
10750                 if (isLocal && (lclTyp == TYP_REF))
10751                 {
10752                     // We should have seen a stloc in our IL prescan.
10753                     assert(lvaTable[lclNum].lvHasILStoreOp);
10754
10755                     const bool isSingleILStoreLocal =
10756                         !lvaTable[lclNum].lvHasMultipleILStoreOp && !lvaTable[lclNum].lvHasLdAddrOp;
10757
10758                     // Conservative check that there is just one
10759                     // definition that reaches this store.
10760                     const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
10761
10762                     if (isSingleILStoreLocal && hasSingleReachingDef)
10763                     {
10764                         lvaUpdateClass(lclNum, op1, clsHnd);
10765                     }
10766                 }
10767
10768                 /* Filter out simple assignments to itself */
10769
10770                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10771                 {
10772                     if (opts.compDbgCode)
10773                     {
10774                         op1 = gtNewNothingNode();
10775                         goto SPILL_APPEND;
10776                     }
10777                     else
10778                     {
10779                         break;
10780                     }
10781                 }
10782
10783                 /* Create the assignment node */
10784
10785                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10786
10787                 /* If the local is aliased or pinned, we need to spill calls and
10788                    indirections from the stack. */
10789
10790                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
10791                     (verCurrentState.esStackDepth > 0))
10792                 {
10793                     impSpillSideEffects(false,
10794                                         (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
10795                 }
10796
10797                 /* Spill any refs to the local from the stack */
10798
10799                 impSpillLclRefs(lclNum);
10800
10801 #if !FEATURE_X87_DOUBLES
10802                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10803                 // We insert a cast to the dest 'op2' type
10804                 //
10805                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10806                     varTypeIsFloating(op2->gtType))
10807                 {
10808                     op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
10809                 }
10810 #endif // !FEATURE_X87_DOUBLES
10811
10812                 if (varTypeIsStruct(lclTyp))
10813                 {
10814                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10815                 }
10816                 else
10817                 {
10818                     // The code generator generates GC tracking information
10819                     // based on the RHS of the assignment.  Later the LHS (which is
10820                     // a BYREF) gets used and the emitter checks that that variable
10821                     // is being tracked.  It is not (since the RHS was an int and did
10822                     // not need tracking).  To keep this assert happy, we change the RHS
10823                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10824                     {
10825                         op1->gtType = TYP_BYREF;
10826                     }
10827                     op1 = gtNewAssignNode(op2, op1);
10828                 }
10829
10830                 goto SPILL_APPEND;
10831
10832             case CEE_LDLOCA:
10833                 lclNum = getU2LittleEndian(codeAddr);
10834                 goto LDLOCA;
10835
10836             case CEE_LDLOCA_S:
10837                 lclNum = getU1LittleEndian(codeAddr);
10838             LDLOCA:
10839                 JITDUMP(" %u", lclNum);
10840                 if (tiVerificationNeeded)
10841                 {
10842                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10843                     Verify(info.compInitMem, "initLocals not set");
10844                 }
10845
10846                 if (compIsForInlining())
10847                 {
10848                     // Get the local type
10849                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10850
10851                     /* Have we allocated a temp for this local? */
10852
10853                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10854
10855                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10856
10857                     goto _PUSH_ADRVAR;
10858                 }
10859
10860                 lclNum += numArgs;
10861                 assertImp(lclNum < info.compLocalsCount);
10862                 goto ADRVAR;
10863
10864             case CEE_LDARGA:
10865                 lclNum = getU2LittleEndian(codeAddr);
10866                 goto LDARGA;
10867
10868             case CEE_LDARGA_S:
10869                 lclNum = getU1LittleEndian(codeAddr);
10870             LDARGA:
10871                 JITDUMP(" %u", lclNum);
10872                 Verify(lclNum < info.compILargsCount, "bad arg num");
10873
10874                 if (compIsForInlining())
10875                 {
10876                     // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument,
10877                     // followed by a ldfld to load the field.
10878
10879                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10880                     if (op1->gtOper != GT_LCL_VAR)
10881                     {
10882                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10883                         return;
10884                     }
10885
10886                     assert(op1->gtOper == GT_LCL_VAR);
10887
10888                     goto _PUSH_ADRVAR;
10889                 }
10890
10891                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10892                 assertImp(lclNum < numArgs);
10893
10894                 if (lclNum == info.compThisArg)
10895                 {
10896                     lclNum = lvaArg0Var;
10897                 }
10898
10899                 goto ADRVAR;
10900
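                      // Shared ldloca/ldarga path: 'lclNum' now indexes lvaTable, so build the local node
                      // and take its address below.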
10901             ADRVAR:
10902
10903                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10904
10905             _PUSH_ADRVAR:
10906                 assert(op1->gtOper == GT_LCL_VAR);
10907
10908                 /* Note that this is supposed to create the transient type "*"
10909                    which may be used as a TYP_I_IMPL. However we catch places
10910                    where it is used as a TYP_I_IMPL and change the node if needed.
10911                    Thus we are pessimistic and may report byrefs in the GC info
10912                    where it was not absolutely needed, but it is safer this way.
10913                  */
10914                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10915
10916                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10917                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10918
10919                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10920                 if (tiVerificationNeeded)
10921                 {
10922                     // Don't allow taking address of uninit this ptr.
10923                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10924                     {
10925                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10926                     }
10927
10928                     if (!tiRetVal.IsByRef())
10929                     {
10930                         tiRetVal.MakeByRef();
10931                     }
10932                     else
10933                     {
10934                         Verify(false, "byref to byref");
10935                     }
10936                 }
10937
10938                 impPushOnStack(op1, tiRetVal);
10939                 break;
10940
10941             case CEE_ARGLIST:
10942
10943                 if (!info.compIsVarArgs)
10944                 {
10945                     BADCODE("arglist in non-vararg method");
10946                 }
10947
10948                 if (tiVerificationNeeded)
10949                 {
10950                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10951                 }
10952                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10953
10954                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10955                    adjusted the arg count because this is like fetching the last param */
10956                 assertImp(0 < numArgs);
10957                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
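                      // The cookie is the address of the hidden varargs handle argument.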
10958                 lclNum = lvaVarargsHandleArg;
10959                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10960                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10961                 impPushOnStack(op1, tiRetVal);
10962                 break;
10963
10964             case CEE_ENDFINALLY:
10965
10966                 if (compIsForInlining())
10967                 {
10968                     assert(!"Shouldn't have exception handlers in the inliner!");
10969                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10970                     return;
10971                 }
10972
10973                 if (verCurrentState.esStackDepth > 0)
10974                 {
10975                     impEvalSideEffects();
10976                 }
10977
10978                 if (info.compXcptnsCount == 0)
10979                 {
10980                     BADCODE("endfinally outside finally");
10981                 }
10982
10983                 assert(verCurrentState.esStackDepth == 0);
10984
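                      // endfinally is modeled as a GT_RETFILT with no operand; endfilter (below) carries
                      // the filter result instead.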
10985                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10986                 goto APPEND;
10987
10988             case CEE_ENDFILTER:
10989
10990                 if (compIsForInlining())
10991                 {
10992                     assert(!"Shouldn't have exception handlers in the inliner!");
10993                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10994                     return;
10995                 }
10996
10997                 block->bbSetRunRarely(); // filters are rare
10998
10999                 if (info.compXcptnsCount == 0)
11000                 {
11001                     BADCODE("endfilter outside filter");
11002                 }
11003
11004                 if (tiVerificationNeeded)
11005                 {
11006                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
11007                 }
11008
11009                 op1 = impPopStack().val;
11010                 assertImp(op1->gtType == TYP_INT);
11011                 if (!bbInFilterILRange(block))
11012                 {
11013                     BADCODE("EndFilter outside a filter handler");
11014                 }
11015
11016                 /* Mark current bb as end of filter */
11017
11018                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
11019                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
11020
11021                 /* Mark catch handler as successor */
11022
11023                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
11024                 if (verCurrentState.esStackDepth != 0)
11025                 {
11026                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
11027                                                 DEBUGARG(__LINE__));
11028                 }
11029                 goto APPEND;
11030
11031             case CEE_RET:
11032                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
11033             RET:
11034                 if (!impReturnInstruction(block, prefixFlags, opcode))
11035                 {
11036                     return; // abort
11037                 }
11038                 else
11039                 {
11040                     break;
11041                 }
11042
11043             case CEE_JMP:
11044
11045                 assert(!compIsForInlining());
11046
11047                 if (tiVerificationNeeded)
11048                 {
11049                     Verify(false, "Invalid opcode: CEE_JMP");
11050                 }
11051
11052                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
11053                 {
11054                     /* CEE_JMP does not make sense in some "protected" regions. */
11055
11056                     BADCODE("Jmp not allowed in protected region");
11057                 }
11058
11059                 if (verCurrentState.esStackDepth != 0)
11060                 {
11061                     BADCODE("Stack must be empty after CEE_JMPs");
11062                 }
11063
11064                 _impResolveToken(CORINFO_TOKENKIND_Method);
11065
11066                 JITDUMP(" %08X", resolvedToken.token);
11067
11068                 /* The signature of the target has to be identical to ours.
11069                    At least check that the arg count, return type and calling convention match */
11070
11071                 eeGetMethodSig(resolvedToken.hMethod, &sig);
11072                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
11073                     sig.retType != info.compMethodInfo->args.retType ||
11074                     sig.callConv != info.compMethodInfo->args.callConv)
11075                 {
11076                     BADCODE("Incompatible target for CEE_JMPs");
11077                 }
11078
11079                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
11080
11081                 /* Mark the basic block as being a JUMP instead of RETURN */
11082
11083                 block->bbFlags |= BBF_HAS_JMP;
11084
11085                 /* Set this flag to make sure register arguments have a location assigned
11086                  * even if we don't use them inside the method */
11087
11088                 compJmpOpUsed = true;
11089
11090                 fgNoStructPromotion = true;
11091
11092                 goto APPEND;
11093
11094             case CEE_LDELEMA:
11095                 assertImp(sz == sizeof(unsigned));
11096
11097                 _impResolveToken(CORINFO_TOKENKIND_Class);
11098
11099                 JITDUMP(" %08X", resolvedToken.token);
11100
11101                 ldelemClsHnd = resolvedToken.hClass;
11102
11103                 if (tiVerificationNeeded)
11104                 {
11105                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11106                     typeInfo tiIndex = impStackTop().seTypeInfo;
11107
11108                     // As per ECMA, the 'index' specified can be either an int32 or a native int.
11109                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11110
11111                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
11112                     Verify(tiArray.IsNullObjRef() ||
11113                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
11114                            "bad array");
11115
11116                     tiRetVal = arrayElemType;
11117                     tiRetVal.MakeByRef();
11118                     if (prefixFlags & PREFIX_READONLY)
11119                     {
11120                         tiRetVal.SetIsReadonlyByRef();
11121                     }
11122
11123                     // an array interior pointer is always in the heap
11124                     tiRetVal.SetIsPermanentHomeByRef();
11125                 }
11126
11127                 // If it's a value class array we just do a simple address-of
11128                 if (eeIsValueClass(ldelemClsHnd))
11129                 {
11130                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
11131                     if (cit == CORINFO_TYPE_UNDEF)
11132                     {
11133                         lclTyp = TYP_STRUCT;
11134                     }
11135                     else
11136                     {
11137                         lclTyp = JITtype2varType(cit);
11138                     }
11139                     goto ARR_LD_POST_VERIFY;
11140                 }
11141
11142                 // Similarly, if it's a readonly access, we can do a simple address-of
11143                 // without doing a runtime type-check
11144                 if (prefixFlags & PREFIX_READONLY)
11145                 {
11146                     lclTyp = TYP_REF;
11147                     goto ARR_LD_POST_VERIFY;
11148                 }
11149
11150                 // Otherwise we need the full helper function with run-time type check
11151                 op1 = impTokenToHandle(&resolvedToken);
11152                 if (op1 == nullptr)
11153                 { // compDonotInline()
11154                     return;
11155                 }
11156
11157                 args = gtNewArgList(op1);                      // Type
11158                 args = gtNewListNode(impPopStack().val, args); // index
11159                 args = gtNewListNode(impPopStack().val, args); // array
11160                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
11161
11162                 impPushOnStack(op1, tiRetVal);
11163                 break;
11164
11165             // ldelem for reference and value types
11166             case CEE_LDELEM:
11167                 assertImp(sz == sizeof(unsigned));
11168
11169                 _impResolveToken(CORINFO_TOKENKIND_Class);
11170
11171                 JITDUMP(" %08X", resolvedToken.token);
11172
11173                 ldelemClsHnd = resolvedToken.hClass;
11174
11175                 if (tiVerificationNeeded)
11176                 {
11177                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11178                     typeInfo tiIndex = impStackTop().seTypeInfo;
11179
11180                     // As per ECMA, the 'index' specified can be either an int32 or a native int.
11181                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11182                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
11183
11184                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
11185                            "type of array incompatible with type operand");
11186                     tiRetVal.NormaliseForStack();
11187                 }
11188
11189                 // If it's a reference type or generic variable type
11190                 // then just generate code as though it's a ldelem.ref instruction
11191                 if (!eeIsValueClass(ldelemClsHnd))
11192                 {
11193                     lclTyp = TYP_REF;
11194                     opcode = CEE_LDELEM_REF;
11195                 }
11196                 else
11197                 {
11198                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
11199                     lclTyp             = JITtype2varType(jitTyp);
11200                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
11201                     tiRetVal.NormaliseForStack();
11202                 }
11203                 goto ARR_LD_POST_VERIFY;
11204
11205             case CEE_LDELEM_I1:
11206                 lclTyp = TYP_BYTE;
11207                 goto ARR_LD;
11208             case CEE_LDELEM_I2:
11209                 lclTyp = TYP_SHORT;
11210                 goto ARR_LD;
11211             case CEE_LDELEM_I:
11212                 lclTyp = TYP_I_IMPL;
11213                 goto ARR_LD;
11214
11215             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
11216             // and treating it as TYP_INT avoids other asserts.
11217             case CEE_LDELEM_U4:
11218                 lclTyp = TYP_INT;
11219                 goto ARR_LD;
11220
11221             case CEE_LDELEM_I4:
11222                 lclTyp = TYP_INT;
11223                 goto ARR_LD;
11224             case CEE_LDELEM_I8:
11225                 lclTyp = TYP_LONG;
11226                 goto ARR_LD;
11227             case CEE_LDELEM_REF:
11228                 lclTyp = TYP_REF;
11229                 goto ARR_LD;
11230             case CEE_LDELEM_R4:
11231                 lclTyp = TYP_FLOAT;
11232                 goto ARR_LD;
11233             case CEE_LDELEM_R8:
11234                 lclTyp = TYP_DOUBLE;
11235                 goto ARR_LD;
11236             case CEE_LDELEM_U1:
11237                 lclTyp = TYP_UBYTE;
11238                 goto ARR_LD;
11239             case CEE_LDELEM_U2:
11240                 lclTyp = TYP_USHORT;
11241                 goto ARR_LD;
11242
11243             ARR_LD:
11244
11245                 if (tiVerificationNeeded)
11246                 {
11247                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11248                     typeInfo tiIndex = impStackTop().seTypeInfo;
11249
11250                     // As per ECMA, the 'index' specified can be either an int32 or a native int.
11251                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11252                     if (tiArray.IsNullObjRef())
11253                     {
11254                         if (lclTyp == TYP_REF)
11255                         { // we will say a deref of a null array yields a null ref
11256                             tiRetVal = typeInfo(TI_NULL);
11257                         }
11258                         else
11259                         {
11260                             tiRetVal = typeInfo(lclTyp);
11261                         }
11262                     }
11263                     else
11264                     {
11265                         tiRetVal             = verGetArrayElemType(tiArray);
11266                         typeInfo arrayElemTi = typeInfo(lclTyp);
11267 #ifdef _TARGET_64BIT_
11268                         if (opcode == CEE_LDELEM_I)
11269                         {
11270                             arrayElemTi = typeInfo::nativeInt();
11271                         }
11272
11273                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
11274                         {
11275                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
11276                         }
11277                         else
11278 #endif // _TARGET_64BIT_
11279                         {
11280                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
11281                         }
11282                     }
11283                     tiRetVal.NormaliseForStack();
11284                 }
11285             ARR_LD_POST_VERIFY:
11286
11287                 /* Pull the index value and array address */
11288                 op2 = impPopStack().val;
11289                 op1 = impPopStack().val;
11290                 assertImp(op1->gtType == TYP_REF);
11291
11292                 /* Check for null pointer - in the inliner case we simply abort */
11293
11294                 if (compIsForInlining())
11295                 {
11296                     if (op1->gtOper == GT_CNS_INT)
11297                     {
11298                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
11299                         return;
11300                     }
11301                 }
11302
11303                 op1 = impCheckForNullPointer(op1);
11304
11305                 /* Mark the block as containing an index expression */
11306
11307                 if (op1->gtOper == GT_LCL_VAR)
11308                 {
11309                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
11310                     {
11311                         block->bbFlags |= BBF_HAS_IDX_LEN;
11312                         optMethodFlags |= OMF_HAS_ARRAYREF;
11313                     }
11314                 }
11315
11316                 /* Create the index node and push it on the stack */
11317
11318                 op1 = gtNewIndexRef(lclTyp, op1, op2);
11319
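                      // A ldelem of a struct-typed element needs the element address wrapped in an OBJ node
                      // (see below).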
11320                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
11321
11322                 if ((opcode == CEE_LDELEMA) || ldstruct ||
11323                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
11324                 {
11325                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
11326
11327                     // remember the element size
11328                     if (lclTyp == TYP_REF)
11329                     {
11330                         op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
11331                     }
11332                     else
11333                     {
11334                         // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
11335                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
11336                         {
11337                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
11338                         }
11339                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
11340                         if (lclTyp == TYP_STRUCT)
11341                         {
11342                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
11343                             op1->gtIndex.gtIndElemSize = size;
11344                             op1->gtType                = lclTyp;
11345                         }
11346                     }
11347
11348                     if ((opcode == CEE_LDELEMA) || ldstruct)
11349                     {
11350                         // wrap it in a &
11351                         lclTyp = TYP_BYREF;
11352
11353                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
11354                     }
11355                     else
11356                     {
11357                         assert(lclTyp != TYP_STRUCT);
11358                     }
11359                 }
11360
11361                 if (ldstruct)
11362                 {
11363                     // Create an OBJ for the result
11364                     op1 = gtNewObjNode(ldelemClsHnd, op1);
11365                     op1->gtFlags |= GTF_EXCEPT;
11366                 }
11367                 impPushOnStack(op1, tiRetVal);
11368                 break;
11369
11370             // stelem for reference and value types
11371             case CEE_STELEM:
11372
11373                 assertImp(sz == sizeof(unsigned));
11374
11375                 _impResolveToken(CORINFO_TOKENKIND_Class);
11376
11377                 JITDUMP(" %08X", resolvedToken.token);
11378
11379                 stelemClsHnd = resolvedToken.hClass;
11380
11381                 if (tiVerificationNeeded)
11382                 {
11383                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11384                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11385                     typeInfo tiValue = impStackTop().seTypeInfo;
11386
11387                     // As per ECMA, the 'index' specified can be either an int32 or a native int.
11388                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11389                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11390
11391                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11392                            "type operand incompatible with array element type");
11393                     arrayElem.NormaliseForStack();
11394                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11395                 }
11396
11397                 // If it's a reference type just behave as though it's a stelem.ref instruction
11398                 if (!eeIsValueClass(stelemClsHnd))
11399                 {
11400                     goto STELEM_REF_POST_VERIFY;
11401                 }
11402
11403                 // Otherwise extract the type
11404                 {
11405                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11406                     lclTyp             = JITtype2varType(jitTyp);
11407                     goto ARR_ST_POST_VERIFY;
11408                 }
11409
11410             case CEE_STELEM_REF:
11411
11412                 if (tiVerificationNeeded)
11413                 {
11414                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11415                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11416                     typeInfo tiValue = impStackTop().seTypeInfo;
11417
11418                     // As per ECMA, the 'index' specified can be either an int32 or a native int.
11419                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11420                     Verify(tiValue.IsObjRef(), "bad value");
11421
11422                     // We only check that it is an object reference; the helper does additional checks
11423                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11424                 }
11425
11426             STELEM_REF_POST_VERIFY:
11427
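                      // Peek at the three stelem.ref operands (value on top, then index, then array) to see
                      // whether the covariant store check can be skipped.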
11428                 arrayNodeTo      = impStackTop(2).val;
11429                 arrayNodeToIndex = impStackTop(1).val;
11430                 arrayNodeFrom    = impStackTop().val;
11431
11432                 //
11433                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11434                 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
11435                 //
11436
11437                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
11438                 // This does not need CORINFO_HELP_ARRADDR_ST
11439                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11440                     arrayNodeTo->gtOper == GT_LCL_VAR &&
11441                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11442                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11443                 {
11444                     JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
11445                     lclTyp = TYP_REF;
11446                     goto ARR_ST_POST_VERIFY;
11447                 }
11448
11449                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11450                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11451                 {
11452                     JITDUMP("\nstelem of null: skipping covariant store check\n");
11453                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11454                     lclTyp = TYP_REF;
11455                     goto ARR_ST_POST_VERIFY;
11456                 }
11457
11458                 /* Call a helper function to do the assignment */
11459                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11460
11461                 goto SPILL_APPEND;
11462
11463             case CEE_STELEM_I1:
11464                 lclTyp = TYP_BYTE;
11465                 goto ARR_ST;
11466             case CEE_STELEM_I2:
11467                 lclTyp = TYP_SHORT;
11468                 goto ARR_ST;
11469             case CEE_STELEM_I:
11470                 lclTyp = TYP_I_IMPL;
11471                 goto ARR_ST;
11472             case CEE_STELEM_I4:
11473                 lclTyp = TYP_INT;
11474                 goto ARR_ST;
11475             case CEE_STELEM_I8:
11476                 lclTyp = TYP_LONG;
11477                 goto ARR_ST;
11478             case CEE_STELEM_R4:
11479                 lclTyp = TYP_FLOAT;
11480                 goto ARR_ST;
11481             case CEE_STELEM_R8:
11482                 lclTyp = TYP_DOUBLE;
11483                 goto ARR_ST;
11484
11485             ARR_ST:
11486
11487                 if (tiVerificationNeeded)
11488                 {
11489                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11490                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11491                     typeInfo tiValue = impStackTop().seTypeInfo;
11492
11493                     // As per ECMA, the 'index' specified can be either an int32 or a native int.
11494                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11495                     typeInfo arrayElem = typeInfo(lclTyp);
11496 #ifdef _TARGET_64BIT_
11497                     if (opcode == CEE_STELEM_I)
11498                     {
11499                         arrayElem = typeInfo::nativeInt();
11500                     }
11501 #endif // _TARGET_64BIT_
11502                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11503                            "bad array");
11504
11505                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11506                            "bad value");
11507                 }
11508
11509             ARR_ST_POST_VERIFY:
11510                 /* The strict order of evaluation is LHS-operands, RHS-operands,
11511                    range-check, and then assignment. However, codegen currently
11512                    does the range-check before evaluating the RHS-operands. So to
11513                    maintain strict ordering, we spill the stack. */
11514
11515                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11516                 {
11517                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11518                                                    "Strict ordering of exceptions for Array store"));
11519                 }
11520
11521                 /* Pull the new value from the stack */
11522                 op2 = impPopStack().val;
11523
11524                 /* Pull the index value */
11525                 op1 = impPopStack().val;
11526
11527                 /* Pull the array address */
11528                 op3 = impPopStack().val;
11529
11530                 assertImp(op3->gtType == TYP_REF);
11531                 if (op2->IsVarAddr())
11532                 {
11533                     op2->gtType = TYP_I_IMPL;
11534                 }
11535
11536                 op3 = impCheckForNullPointer(op3);
11537
11538                 // Mark the block as containing an index expression
11539
11540                 if (op3->gtOper == GT_LCL_VAR)
11541                 {
11542                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
11543                     {
11544                         block->bbFlags |= BBF_HAS_IDX_LEN;
11545                         optMethodFlags |= OMF_HAS_ARRAYREF;
11546                     }
11547                 }
11548
11549                 /* Create the index node */
11550
11551                 op1 = gtNewIndexRef(lclTyp, op3, op1);
11552
11553                 /* Create the assignment node and append it */
11554
11555                 if (lclTyp == TYP_STRUCT)
11556                 {
11557                     assert(stelemClsHnd != DUMMY_INIT(NULL));
11558
11559                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
11560                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
11561                 }
11562                 if (varTypeIsStruct(op1))
11563                 {
11564                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
11565                 }
11566                 else
11567                 {
11568                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
11569                     op1 = gtNewAssignNode(op1, op2);
11570                 }
11571
11572                 /* Mark the expression as containing an assignment */
11573
11574                 op1->gtFlags |= GTF_ASG;
11575
11576                 goto SPILL_APPEND;
11577
11578             case CEE_ADD:
11579                 oper = GT_ADD;
11580                 goto MATH_OP2;
11581
11582             case CEE_ADD_OVF:
11583                 uns = false;
11584                 goto ADD_OVF;
11585             case CEE_ADD_OVF_UN:
11586                 uns = true;
11587                 goto ADD_OVF;
11588
11589             ADD_OVF:
11590                 ovfl     = true;
11591                 callNode = false;
11592                 oper     = GT_ADD;
11593                 goto MATH_OP2_FLAGS;
11594
11595             case CEE_SUB:
11596                 oper = GT_SUB;
11597                 goto MATH_OP2;
11598
11599             case CEE_SUB_OVF:
11600                 uns = false;
11601                 goto SUB_OVF;
11602             case CEE_SUB_OVF_UN:
11603                 uns = true;
11604                 goto SUB_OVF;
11605
11606             SUB_OVF:
11607                 ovfl     = true;
11608                 callNode = false;
11609                 oper     = GT_SUB;
11610                 goto MATH_OP2_FLAGS;
11611
11612             case CEE_MUL:
11613                 oper = GT_MUL;
11614                 goto MATH_MAYBE_CALL_NO_OVF;
11615
11616             case CEE_MUL_OVF:
11617                 uns = false;
11618                 goto MUL_OVF;
11619             case CEE_MUL_OVF_UN:
11620                 uns = true;
11621                 goto MUL_OVF;
11622
11623             MUL_OVF:
11624                 ovfl = true;
11625                 oper = GT_MUL;
11626                 goto MATH_MAYBE_CALL_OVF;
11627
11628             // Other binary math operations
11629
11630             case CEE_DIV:
11631                 oper = GT_DIV;
11632                 goto MATH_MAYBE_CALL_NO_OVF;
11633
11634             case CEE_DIV_UN:
11635                 oper = GT_UDIV;
11636                 goto MATH_MAYBE_CALL_NO_OVF;
11637
11638             case CEE_REM:
11639                 oper = GT_MOD;
11640                 goto MATH_MAYBE_CALL_NO_OVF;
11641
11642             case CEE_REM_UN:
11643                 oper = GT_UMOD;
11644                 goto MATH_MAYBE_CALL_NO_OVF;
11645
11646             MATH_MAYBE_CALL_NO_OVF:
11647                 ovfl = false;
11648             MATH_MAYBE_CALL_OVF:
11649                 // Morpher has some complex logic about when to turn different
11650                 // typed nodes on different platforms into helper calls. We
11651                 // need to either duplicate that logic here, or just
11652                 // pessimistically make all the nodes large enough to become
11653                 // call nodes.  Since call nodes aren't that much larger and
11654                 // these opcodes are infrequent enough I chose the latter.
11655                 callNode = true;
11656                 goto MATH_OP2_FLAGS;
11657
11658             case CEE_AND:
11659                 oper = GT_AND;
11660                 goto MATH_OP2;
11661             case CEE_OR:
11662                 oper = GT_OR;
11663                 goto MATH_OP2;
11664             case CEE_XOR:
11665                 oper = GT_XOR;
11666                 goto MATH_OP2;
11667
11668             MATH_OP2: // For default values of 'ovfl' and 'callNode'
11669
11670                 ovfl     = false;
11671                 callNode = false;
11672
11673             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11674
11675                 /* Pull two values and push back the result */
11676
11677                 if (tiVerificationNeeded)
11678                 {
11679                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11680                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11681
11682                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11683                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11684                     {
11685                         Verify(tiOp1.IsNumberType(), "not number");
11686                     }
11687                     else
11688                     {
11689                         Verify(tiOp1.IsIntegerType(), "not integer");
11690                     }
11691
11692                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11693
11694                     tiRetVal = tiOp1;
11695
11696 #ifdef _TARGET_64BIT_
11697                     if (tiOp2.IsNativeIntType())
11698                     {
11699                         tiRetVal = tiOp2;
11700                     }
11701 #endif // _TARGET_64BIT_
11702                 }
11703
11704                 op2 = impPopStack().val;
11705                 op1 = impPopStack().val;
11706
11707 #if !CPU_HAS_FP_SUPPORT
11708                 if (varTypeIsFloating(op1->gtType))
11709                 {
11710                     callNode = true;
11711                 }
11712 #endif
11713                 /* Can't do arithmetic with references */
11714                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11715
11716                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11717                 // if it is in the stack)
11718                 impBashVarAddrsToI(op1, op2);
11719
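                      // Determine the result type, taking byref arithmetic (e.g. byref + int, byref - byref)
                      // into account.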
11720                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11721
11722                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11723
11724                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11725
11726                 if (op2->gtOper == GT_CNS_INT)
11727                 {
11728                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11729                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11730
11731                     {
11732                         impPushOnStack(op1, tiRetVal);
11733                         break;
11734                     }
11735                 }
11736
11737 #if !FEATURE_X87_DOUBLES
11738                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11739                 //
11740                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11741                 {
11742                     if (op1->TypeGet() != type)
11743                     {
11744                         // We insert a cast of op1 to 'type'
11745                         op1 = gtNewCastNode(type, op1, false, type);
11746                     }
11747                     if (op2->TypeGet() != type)
11748                     {
11749                         // We insert a cast of op2 to 'type'
11750                         op2 = gtNewCastNode(type, op2, false, type);
11751                     }
11752                 }
11753 #endif // !FEATURE_X87_DOUBLES
11754
11755 #if SMALL_TREE_NODES
11756                 if (callNode)
11757                 {
11758                     /* These operators can later be transformed into 'GT_CALL' */
11759
11760                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11761 #ifndef _TARGET_ARM_
11762                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11763                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11764                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11765                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11766 #endif
11767                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11768                     // that we'll need to transform into a general large node, but rather specifically
11769                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11770                     // and a CALL is no longer the largest.
11771                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11772                     // than an "if".
11773                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11774                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11775                 }
11776                 else
11777 #endif // SMALL_TREE_NODES
11778                 {
11779                     op1 = gtNewOperNode(oper, type, op1, op2);
11780                 }
11781
11782                 /* Special case: integer/long division may throw an exception */
11783
11784                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
11785                 {
11786                     op1->gtFlags |= GTF_EXCEPT;
11787                 }
11788
11789                 if (ovfl)
11790                 {
11791                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11792                     if (ovflType != TYP_UNKNOWN)
11793                     {
11794                         op1->gtType = ovflType;
11795                     }
11796                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11797                     if (uns)
11798                     {
11799                         op1->gtFlags |= GTF_UNSIGNED;
11800                     }
11801                 }
11802
11803                 impPushOnStack(op1, tiRetVal);
11804                 break;
11805
11806             case CEE_SHL:
11807                 oper = GT_LSH;
11808                 goto CEE_SH_OP2;
11809
11810             case CEE_SHR:
11811                 oper = GT_RSH;
11812                 goto CEE_SH_OP2;
11813             case CEE_SHR_UN:
11814                 oper = GT_RSZ;
11815                 goto CEE_SH_OP2;
11816
11817             CEE_SH_OP2:
11818                 if (tiVerificationNeeded)
11819                 {
11820                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11821                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11822                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11823                     tiRetVal = tiVal;
11824                 }
11825                 op2 = impPopStack().val;
11826                 op1 = impPopStack().val; // operand to be shifted
11827                 impBashVarAddrsToI(op1, op2);
11828
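                      // The result of a shift has the (actual) type of the value being shifted, not of the
                      // shift count.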
11829                 type = genActualType(op1->TypeGet());
11830                 op1  = gtNewOperNode(oper, type, op1, op2);
11831
11832                 impPushOnStack(op1, tiRetVal);
11833                 break;
11834
11835             case CEE_NOT:
11836                 if (tiVerificationNeeded)
11837                 {
11838                     tiRetVal = impStackTop().seTypeInfo;
11839                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11840                 }
11841
11842                 op1 = impPopStack().val;
11843                 impBashVarAddrsToI(op1, nullptr);
11844                 type = genActualType(op1->TypeGet());
11845                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11846                 break;
11847
11848             case CEE_CKFINITE:
11849                 if (tiVerificationNeeded)
11850                 {
11851                     tiRetVal = impStackTop().seTypeInfo;
11852                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11853                 }
11854                 op1  = impPopStack().val;
11855                 type = op1->TypeGet();
11856                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11857                 op1->gtFlags |= GTF_EXCEPT;
11858
11859                 impPushOnStack(op1, tiRetVal);
11860                 break;
11861
11862             case CEE_LEAVE:
11863
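                      // The leave displacement is relative to the first byte after the instruction, so add
                      // the operand size when computing the absolute IL target offset.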
11864                 val     = getI4LittleEndian(codeAddr); // jump distance
11865                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11866                 goto LEAVE;
11867
11868             case CEE_LEAVE_S:
11869                 val     = getI1LittleEndian(codeAddr); // jump distance
11870                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11871
11872             LEAVE:
11873
11874                 if (compIsForInlining())
11875                 {
11876                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11877                     return;
11878                 }
11879
11880                 JITDUMP(" %04X", jmpAddr);
11881                 if (block->bbJumpKind != BBJ_LEAVE)
11882                 {
11883                     impResetLeaveBlock(block, jmpAddr);
11884                 }
11885
11886                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11887                 impImportLeave(block);
11888                 impNoteBranchOffs();
11889
11890                 break;
11891
11892             case CEE_BR:
11893             case CEE_BR_S:
11894                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11895
11896                 if (compIsForInlining() && jmpDist == 0)
11897                 {
11898                     break; /* NOP */
11899                 }
11900
11901                 impNoteBranchOffs();
11902                 break;
11903
11904             case CEE_BRTRUE:
11905             case CEE_BRTRUE_S:
11906             case CEE_BRFALSE:
11907             case CEE_BRFALSE_S:
11908
11909                 /* Pop the comparand (now there's a neat term) from the stack */
11910                 if (tiVerificationNeeded)
11911                 {
11912                     typeInfo& tiVal = impStackTop().seTypeInfo;
11913                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11914                            "bad value");
11915                 }
11916
11917                 op1  = impPopStack().val;
11918                 type = op1->TypeGet();
11919
11920                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11921                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11922                 {
11923                     block->bbJumpKind = BBJ_NONE;
11924
11925                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11926                     {
11927                         op1 = gtUnusedValNode(op1);
11928                         goto SPILL_APPEND;
11929                     }
11930                     else
11931                     {
11932                         break;
11933                     }
11934                 }
11935
11936                 if (op1->OperIsCompare())
11937                 {
11938                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11939                     {
11940                         // Flip the sense of the compare
11941
11942                         op1 = gtReverseCond(op1);
11943                     }
11944                 }
11945                 else
11946                 {
11947                     /* We'll compare against an equally-sized integer 0 */
11948                     /* For small types, we always compare against int   */
11949                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11950
11951                     /* Create the comparison operator and try to fold it */
11952
11953                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11954                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11955                 }
11956
11957             // fall through
11958
11959             COND_JUMP:
11960
11961                 /* Fold comparison if we can */
11962
11963                 op1 = gtFoldExpr(op1);
11964
11965                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq' */
11966                 /* Don't make any blocks unreachable in import only mode */
11967
11968                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11969                 {
11970                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11971                        unreachable under compDbgCode */
11972                     assert(!opts.compDbgCode);
11973
11974                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11975                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11976                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11977                                                                          // block for the second time
11978
11979                     block->bbJumpKind = foldedJumpKind;
11980 #ifdef DEBUG
11981                     if (verbose)
11982                     {
11983                         if (op1->gtIntCon.gtIconVal)
11984                         {
11985                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11986                                    block->bbJumpDest->bbNum);
11987                         }
11988                         else
11989                         {
11990                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11991                         }
11992                     }
11993 #endif
11994                     break;
11995                 }
11996
11997                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11998
11999                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
12000                    in impImportBlock(block). For correct line numbers, spill stack. */
12001
12002                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
12003                 {
12004                     impSpillStackEnsure(true);
12005                 }
12006
12007                 goto SPILL_APPEND;
12008
12009             case CEE_CEQ:
12010                 oper = GT_EQ;
12011                 uns  = false;
12012                 goto CMP_2_OPs;
12013             case CEE_CGT_UN:
12014                 oper = GT_GT;
12015                 uns  = true;
12016                 goto CMP_2_OPs;
12017             case CEE_CGT:
12018                 oper = GT_GT;
12019                 uns  = false;
12020                 goto CMP_2_OPs;
12021             case CEE_CLT_UN:
12022                 oper = GT_LT;
12023                 uns  = true;
12024                 goto CMP_2_OPs;
12025             case CEE_CLT:
12026                 oper = GT_LT;
12027                 uns  = false;
12028                 goto CMP_2_OPs;
12029
12030             CMP_2_OPs:
12031                 if (tiVerificationNeeded)
12032                 {
12033                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12034                     tiRetVal = typeInfo(TI_INT);
12035                 }
12036
12037                 op2 = impPopStack().val;
12038                 op1 = impPopStack().val;
12039
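                      // On 64-bit targets a 32-bit int operand may be compared against a native int; widen the int
                      // side to TYP_I_IMPL (TYP_U_IMPL for unsigned compares) so both operands have the same actual type.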
12040 #ifdef _TARGET_64BIT_
12041                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
12042                 {
12043                     op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12044                 }
12045                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
12046                 {
12047                     op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12048                 }
12049 #endif // _TARGET_64BIT_
12050
12051                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12052                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12053                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12054
12055                 /* Create the comparison node */
12056
12057                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12058
12059                 /* TODO: setting both flags when only one is appropriate */
12060                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
12061                 {
12062                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
12063                 }
12064
12065                 // Fold result, if possible.
12066                 op1 = gtFoldExpr(op1);
12067
12068                 impPushOnStack(op1, tiRetVal);
12069                 break;
12070
12071             case CEE_BEQ_S:
12072             case CEE_BEQ:
12073                 oper = GT_EQ;
12074                 goto CMP_2_OPs_AND_BR;
12075
12076             case CEE_BGE_S:
12077             case CEE_BGE:
12078                 oper = GT_GE;
12079                 goto CMP_2_OPs_AND_BR;
12080
12081             case CEE_BGE_UN_S:
12082             case CEE_BGE_UN:
12083                 oper = GT_GE;
12084                 goto CMP_2_OPs_AND_BR_UN;
12085
12086             case CEE_BGT_S:
12087             case CEE_BGT:
12088                 oper = GT_GT;
12089                 goto CMP_2_OPs_AND_BR;
12090
12091             case CEE_BGT_UN_S:
12092             case CEE_BGT_UN:
12093                 oper = GT_GT;
12094                 goto CMP_2_OPs_AND_BR_UN;
12095
12096             case CEE_BLE_S:
12097             case CEE_BLE:
12098                 oper = GT_LE;
12099                 goto CMP_2_OPs_AND_BR;
12100
12101             case CEE_BLE_UN_S:
12102             case CEE_BLE_UN:
12103                 oper = GT_LE;
12104                 goto CMP_2_OPs_AND_BR_UN;
12105
12106             case CEE_BLT_S:
12107             case CEE_BLT:
12108                 oper = GT_LT;
12109                 goto CMP_2_OPs_AND_BR;
12110
12111             case CEE_BLT_UN_S:
12112             case CEE_BLT_UN:
12113                 oper = GT_LT;
12114                 goto CMP_2_OPs_AND_BR_UN;
12115
12116             case CEE_BNE_UN_S:
12117             case CEE_BNE_UN:
12118                 oper = GT_NE;
12119                 goto CMP_2_OPs_AND_BR_UN;
12120
12121             CMP_2_OPs_AND_BR_UN:
12122                 uns       = true;
12123                 unordered = true;
12124                 goto CMP_2_OPs_AND_BR_ALL;
12125             CMP_2_OPs_AND_BR:
12126                 uns       = false;
12127                 unordered = false;
12128                 goto CMP_2_OPs_AND_BR_ALL;
12129             CMP_2_OPs_AND_BR_ALL:
12130
12131                 if (tiVerificationNeeded)
12132                 {
12133                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12134                 }
12135
12136                 /* Pull two values */
12137                 op2 = impPopStack().val;
12138                 op1 = impPopStack().val;
12139
12140 #ifdef _TARGET_64BIT_
12141                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
12142                 {
12143                     op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12144                 }
12145                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
12146                 {
12147                     op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12148                 }
12149 #endif // _TARGET_64BIT_
12150
12151                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12152                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12153                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12154
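                      // As with brtrue/brfalse above, a conditional branch whose target is the fall-through block
                      // degenerates into BBJ_NONE; the operands are appended below only for their side effects.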
12155                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12156                 {
12157                     block->bbJumpKind = BBJ_NONE;
12158
12159                     if (op1->gtFlags & GTF_GLOB_EFFECT)
12160                     {
12161                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12162                                                        "Branch to next Optimization, op1 side effect"));
12163                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12164                     }
12165                     if (op2->gtFlags & GTF_GLOB_EFFECT)
12166                     {
12167                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12168                                                        "Branch to next Optimization, op2 side effect"));
12169                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12170                     }
12171
12172 #ifdef DEBUG
12173                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
12174                     {
12175                         impNoteLastILoffs();
12176                     }
12177 #endif
12178                     break;
12179                 }
12180 #if !FEATURE_X87_DOUBLES
12181                 // We can generate a compare of different-sized floating point op1 and op2
12182                 // We insert a cast
12183                 //
12184                 if (varTypeIsFloating(op1->TypeGet()))
12185                 {
12186                     if (op1->TypeGet() != op2->TypeGet())
12187                     {
12188                         assert(varTypeIsFloating(op2->TypeGet()));
12189
12190                         // say op1=double, op2=float. To avoid loss of precision
12191                         // while comparing, op2 is converted to double and a double
12192                         // comparison is done.
12193                         if (op1->TypeGet() == TYP_DOUBLE)
12194                         {
12195                             // We insert a cast of op2 to TYP_DOUBLE
12196                             op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
12197                         }
12198                         else if (op2->TypeGet() == TYP_DOUBLE)
12199                         {
12200                             // We insert a cast of op1 to TYP_DOUBLE
12201                             op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
12202                         }
12203                     }
12204                 }
12205 #endif // !FEATURE_X87_DOUBLES
12206
12207                 /* Create and append the operator */
12208
12209                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12210
12211                 if (uns)
12212                 {
12213                     op1->gtFlags |= GTF_UNSIGNED;
12214                 }
12215
12216                 if (unordered)
12217                 {
12218                     op1->gtFlags |= GTF_RELOP_NAN_UN;
12219                 }
12220
12221                 goto COND_JUMP;
12222
12223             case CEE_SWITCH:
12224                 assert(!compIsForInlining());
12225
12226                 if (tiVerificationNeeded)
12227                 {
12228                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
12229                 }
12230                 /* Pop the switch value off the stack */
12231                 op1 = impPopStack().val;
12232                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
12233
12234                 /* We can create a switch node */
12235
12236                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
12237
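                      // The case targets were already recorded as this block's successors when the flow graph was
                      // built, so here we only need to skip over the jump table embedded in the IL stream.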
12238                 val = (int)getU4LittleEndian(codeAddr);
12239                 codeAddr += 4 + val * 4; // skip over the switch-table
12240
12241                 goto SPILL_APPEND;
12242
12243             /************************** Casting OPCODES ***************************/
12244
12245             case CEE_CONV_OVF_I1:
12246                 lclTyp = TYP_BYTE;
12247                 goto CONV_OVF;
12248             case CEE_CONV_OVF_I2:
12249                 lclTyp = TYP_SHORT;
12250                 goto CONV_OVF;
12251             case CEE_CONV_OVF_I:
12252                 lclTyp = TYP_I_IMPL;
12253                 goto CONV_OVF;
12254             case CEE_CONV_OVF_I4:
12255                 lclTyp = TYP_INT;
12256                 goto CONV_OVF;
12257             case CEE_CONV_OVF_I8:
12258                 lclTyp = TYP_LONG;
12259                 goto CONV_OVF;
12260
12261             case CEE_CONV_OVF_U1:
12262                 lclTyp = TYP_UBYTE;
12263                 goto CONV_OVF;
12264             case CEE_CONV_OVF_U2:
12265                 lclTyp = TYP_USHORT;
12266                 goto CONV_OVF;
12267             case CEE_CONV_OVF_U:
12268                 lclTyp = TYP_U_IMPL;
12269                 goto CONV_OVF;
12270             case CEE_CONV_OVF_U4:
12271                 lclTyp = TYP_UINT;
12272                 goto CONV_OVF;
12273             case CEE_CONV_OVF_U8:
12274                 lclTyp = TYP_ULONG;
12275                 goto CONV_OVF;
12276
12277             case CEE_CONV_OVF_I1_UN:
12278                 lclTyp = TYP_BYTE;
12279                 goto CONV_OVF_UN;
12280             case CEE_CONV_OVF_I2_UN:
12281                 lclTyp = TYP_SHORT;
12282                 goto CONV_OVF_UN;
12283             case CEE_CONV_OVF_I_UN:
12284                 lclTyp = TYP_I_IMPL;
12285                 goto CONV_OVF_UN;
12286             case CEE_CONV_OVF_I4_UN:
12287                 lclTyp = TYP_INT;
12288                 goto CONV_OVF_UN;
12289             case CEE_CONV_OVF_I8_UN:
12290                 lclTyp = TYP_LONG;
12291                 goto CONV_OVF_UN;
12292
12293             case CEE_CONV_OVF_U1_UN:
12294                 lclTyp = TYP_UBYTE;
12295                 goto CONV_OVF_UN;
12296             case CEE_CONV_OVF_U2_UN:
12297                 lclTyp = TYP_USHORT;
12298                 goto CONV_OVF_UN;
12299             case CEE_CONV_OVF_U_UN:
12300                 lclTyp = TYP_U_IMPL;
12301                 goto CONV_OVF_UN;
12302             case CEE_CONV_OVF_U4_UN:
12303                 lclTyp = TYP_UINT;
12304                 goto CONV_OVF_UN;
12305             case CEE_CONV_OVF_U8_UN:
12306                 lclTyp = TYP_ULONG;
12307                 goto CONV_OVF_UN;
12308
12309             CONV_OVF_UN:
12310                 uns = true;
12311                 goto CONV_OVF_COMMON;
12312             CONV_OVF:
12313                 uns = false;
12314                 goto CONV_OVF_COMMON;
12315
12316             CONV_OVF_COMMON:
12317                 ovfl = true;
12318                 goto _CONV;
12319
12320             case CEE_CONV_I1:
12321                 lclTyp = TYP_BYTE;
12322                 goto CONV;
12323             case CEE_CONV_I2:
12324                 lclTyp = TYP_SHORT;
12325                 goto CONV;
12326             case CEE_CONV_I:
12327                 lclTyp = TYP_I_IMPL;
12328                 goto CONV;
12329             case CEE_CONV_I4:
12330                 lclTyp = TYP_INT;
12331                 goto CONV;
12332             case CEE_CONV_I8:
12333                 lclTyp = TYP_LONG;
12334                 goto CONV;
12335
12336             case CEE_CONV_U1:
12337                 lclTyp = TYP_UBYTE;
12338                 goto CONV;
12339             case CEE_CONV_U2:
12340                 lclTyp = TYP_USHORT;
12341                 goto CONV;
12342 #if (REGSIZE_BYTES == 8)
12343             case CEE_CONV_U:
12344                 lclTyp = TYP_U_IMPL;
12345                 goto CONV_UN;
12346 #else
12347             case CEE_CONV_U:
12348                 lclTyp = TYP_U_IMPL;
12349                 goto CONV;
12350 #endif
12351             case CEE_CONV_U4:
12352                 lclTyp = TYP_UINT;
12353                 goto CONV;
12354             case CEE_CONV_U8:
12355                 lclTyp = TYP_ULONG;
12356                 goto CONV_UN;
12357
12358             case CEE_CONV_R4:
12359                 lclTyp = TYP_FLOAT;
12360                 goto CONV;
12361             case CEE_CONV_R8:
12362                 lclTyp = TYP_DOUBLE;
12363                 goto CONV;
12364
12365             case CEE_CONV_R_UN:
12366                 lclTyp = TYP_DOUBLE;
12367                 goto CONV_UN;
12368
12369             CONV_UN:
12370                 uns  = true;
12371                 ovfl = false;
12372                 goto _CONV;
12373
12374             CONV:
12375                 uns  = false;
12376                 ovfl = false;
12377                 goto _CONV;
12378
12379             _CONV:
12380                 // just check that we have a number on the stack
12381                 if (tiVerificationNeeded)
12382                 {
12383                     const typeInfo& tiVal = impStackTop().seTypeInfo;
12384                     Verify(tiVal.IsNumberType(), "bad arg");
12385
12386 #ifdef _TARGET_64BIT_
12387                     bool isNative = false;
12388
12389                     switch (opcode)
12390                     {
12391                         case CEE_CONV_OVF_I:
12392                         case CEE_CONV_OVF_I_UN:
12393                         case CEE_CONV_I:
12394                         case CEE_CONV_OVF_U:
12395                         case CEE_CONV_OVF_U_UN:
12396                         case CEE_CONV_U:
12397                             isNative = true;
12398                         default:
12399                             // leave 'isNative' = false;
12400                             break;
12401                     }
12402                     if (isNative)
12403                     {
12404                         tiRetVal = typeInfo::nativeInt();
12405                     }
12406                     else
12407 #endif // _TARGET_64BIT_
12408                     {
12409                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12410                     }
12411                 }
12412
12413                 // Only conversions from FLOAT or DOUBLE to an integer type,
12414                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
12415
12416                 if (varTypeIsFloating(lclTyp))
12417                 {
12418                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12419 #ifdef _TARGET_64BIT_
12420                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12421                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
12422                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12423                                // and generate SSE2 code instead of going through helper calls.
12424                                || (impStackTop().val->TypeGet() == TYP_BYREF)
12425 #endif
12426                         ;
12427                 }
12428                 else
12429                 {
12430                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12431                 }
12432
12433                 // At this point uns, ovfl and callNode are all set
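                      // When callNode is set, the cast may later be morphed into a helper call, so a large
                      // (call-sized) node is allocated for it below (gtNewCastNodeL under SMALL_TREE_NODES).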
12434
12435                 op1 = impPopStack().val;
12436                 impBashVarAddrsToI(op1);
12437
12438                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12439                 {
12440                     op2 = op1->gtOp.gtOp2;
12441
12442                     if (op2->gtOper == GT_CNS_INT)
12443                     {
12444                         ssize_t ival = op2->gtIntCon.gtIconVal;
12445                         ssize_t mask, umask;
12446
12447                         switch (lclTyp)
12448                         {
12449                             case TYP_BYTE:
12450                             case TYP_UBYTE:
12451                                 mask  = 0x00FF;
12452                                 umask = 0x007F;
12453                                 break;
12454                             case TYP_USHORT:
12455                             case TYP_SHORT:
12456                                 mask  = 0xFFFF;
12457                                 umask = 0x7FFF;
12458                                 break;
12459
12460                             default:
12461                                 assert(!"unexpected type");
12462                                 return;
12463                         }
12464
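                              // E.g. (x & 0x7F) followed by conv.i1 cannot change the value, so the cast is dropped;
                              // (x & 0xFF) followed by conv.i1 instead makes the AND redundant, since the cast
                              // sign-extends from the low byte anyway.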
12465                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12466                         {
12467                             /* Toss the cast, it's a waste of time */
12468
12469                             impPushOnStack(op1, tiRetVal);
12470                             break;
12471                         }
12472                         else if (ival == mask)
12473                         {
12474                             /* Toss the masking, it's a waste of time, since
12475                                we sign-extend from the small value anyway */
12476
12477                             op1 = op1->gtOp.gtOp1;
12478                         }
12479                     }
12480                 }
12481
12482                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
12483                     since the result of a cast to one of the 'small' integer
12484                     types is an integer.
12485                  */
12486
12487                 type = genActualType(lclTyp);
12488
12489 #if SMALL_TREE_NODES
12490                 if (callNode)
12491                 {
12492                     op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
12493                 }
12494                 else
12495 #endif // SMALL_TREE_NODES
12496                 {
12497                     op1 = gtNewCastNode(type, op1, uns, lclTyp);
12498                 }
12499
12500                 if (ovfl)
12501                 {
12502                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12503                 }
12504                 impPushOnStack(op1, tiRetVal);
12505                 break;
12506
12507             case CEE_NEG:
12508                 if (tiVerificationNeeded)
12509                 {
12510                     tiRetVal = impStackTop().seTypeInfo;
12511                     Verify(tiRetVal.IsNumberType(), "Bad arg");
12512                 }
12513
12514                 op1 = impPopStack().val;
12515                 impBashVarAddrsToI(op1, nullptr);
12516                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12517                 break;
12518
12519             case CEE_POP:
12520             {
12521                 /* Pull the top value from the stack */
12522
12523                 StackEntry se = impPopStack();
12524                 clsHnd        = se.seTypeInfo.GetClassHandle();
12525                 op1           = se.val;
12526
12527                 /* Get hold of the type of the value being popped */
12528
12529                 lclTyp = genActualType(op1->gtType);
12530
12531                 /* Does the value have any side effects? */
12532
12533                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
12534                 {
12535                     // Since we are throwing away the value, just normalize
12536                     // it to its address.  This is more efficient.
12537
12538                     if (varTypeIsStruct(op1))
12539                     {
12540 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
12541                         // Non-calls, such as obj or ret_expr, have to go through this.
12542                         // Calls with large struct return value have to go through this.
12543                         // Helper calls with small struct return value also have to go
12544                         // through this since they do not follow Unix calling convention.
12545                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
12546                             op1->AsCall()->gtCallType == CT_HELPER)
12547 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
12548                         {
12549                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
12550                         }
12551                     }
12552
12553                     // If op1 is non-overflow cast, throw it away since it is useless.
12554                     // Another reason for throwing away the useless cast is in the context of
12555                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
12556                     // The cast gets added as part of importing GT_CALL, which gets in the way
12557                     // of fgMorphCall() on the forms of tail call nodes that we assert.
12558                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
12559                     {
12560                         op1 = op1->gtOp.gtOp1;
12561                     }
12562
12563                     // If 'op1' is an expression, create an assignment node.
12564                     // This helps analyses (like CSE) work correctly.
12565
12566                     if (op1->gtOper != GT_CALL)
12567                     {
12568                         op1 = gtUnusedValNode(op1);
12569                     }
12570
12571                     /* Append the value to the tree list */
12572                     goto SPILL_APPEND;
12573                 }
12574
12575                 /* No side effects - just throw the <BEEP> thing away */
12576             }
12577             break;
12578
12579             case CEE_DUP:
12580             {
12581                 if (tiVerificationNeeded)
12582                 {
12583                     // Dup could start the beginning of a delegate creation sequence, remember that
12584                     delegateCreateStart = codeAddr - 1;
12585                     impStackTop(0);
12586                 }
12587
12588                 // If the expression to dup is simple, just clone it.
12589                 // Otherwise spill it to a temp, and reload the temp
12590                 // twice.
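                      // Only zero constants and locals are simple enough to clone directly; anything else
                      // (outside debug codegen) is spilled to a fresh temp so the value is evaluated exactly once.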
12591                 StackEntry se   = impPopStack();
12592                 GenTree*   tree = se.val;
12593                 tiRetVal        = se.seTypeInfo;
12594                 op1             = tree;
12595
12596                 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
12597                 {
12598                     const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
12599                     impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
12600                     var_types type = genActualType(lvaTable[tmpNum].TypeGet());
12601                     op1            = gtNewLclvNode(tmpNum, type);
12602
12603                     // Propagate type info to the temp from the stack and the original tree
12604                     if (type == TYP_REF)
12605                     {
12606                         lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
12607                     }
12608                 }
12609
12610                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12611                                    nullptr DEBUGARG("DUP instruction"));
12612
12613                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12614                 impPushOnStack(op1, tiRetVal);
12615                 impPushOnStack(op2, tiRetVal);
12616             }
12617             break;
12618
12619             case CEE_STIND_I1:
12620                 lclTyp = TYP_BYTE;
12621                 goto STIND;
12622             case CEE_STIND_I2:
12623                 lclTyp = TYP_SHORT;
12624                 goto STIND;
12625             case CEE_STIND_I4:
12626                 lclTyp = TYP_INT;
12627                 goto STIND;
12628             case CEE_STIND_I8:
12629                 lclTyp = TYP_LONG;
12630                 goto STIND;
12631             case CEE_STIND_I:
12632                 lclTyp = TYP_I_IMPL;
12633                 goto STIND;
12634             case CEE_STIND_REF:
12635                 lclTyp = TYP_REF;
12636                 goto STIND;
12637             case CEE_STIND_R4:
12638                 lclTyp = TYP_FLOAT;
12639                 goto STIND;
12640             case CEE_STIND_R8:
12641                 lclTyp = TYP_DOUBLE;
12642                 goto STIND;
12643             STIND:
12644
12645                 if (tiVerificationNeeded)
12646                 {
12647                     typeInfo instrType(lclTyp);
12648 #ifdef _TARGET_64BIT_
12649                     if (opcode == CEE_STIND_I)
12650                     {
12651                         instrType = typeInfo::nativeInt();
12652                     }
12653 #endif // _TARGET_64BIT_
12654                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12655                 }
12656                 else
12657                 {
12658                     compUnsafeCastUsed = true; // Have to go conservative
12659                 }
12660
12661             STIND_POST_VERIFY:
12662
12663                 op2 = impPopStack().val; // value to store
12664                 op1 = impPopStack().val; // address to store to
12665
12666                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12667                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12668
12669                 impBashVarAddrsToI(op1, op2);
12670
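                      // impImplicitR4orR8Cast inserts the implicit float<->double conversion when the value on the
                      // stack and the store type differ in floating-point size.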
12671                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12672
12673 #ifdef _TARGET_64BIT_
12674                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12675                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12676                 {
12677                     op2->gtType = TYP_I_IMPL;
12678                 }
12679                 else
12680                 {
12681                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12682                     //
12683                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12684                     {
12685                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12686                         op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
12687                     }
12688                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12689                     //
12690                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12691                     {
12692                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12693                         op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
12694                     }
12695                 }
12696 #endif // _TARGET_64BIT_
12697
12698                 if (opcode == CEE_STIND_REF)
12699                 {
12700                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12701                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12702                     lclTyp = genActualType(op2->TypeGet());
12703                 }
12704
12705 // Check target type.
12706 #ifdef DEBUG
12707                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12708                 {
12709                     if (op2->gtType == TYP_BYREF)
12710                     {
12711                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12712                     }
12713                     else if (lclTyp == TYP_BYREF)
12714                     {
12715                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12716                     }
12717                 }
12718                 else
12719                 {
12720                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12721                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12722                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12723                 }
12724 #endif
12725
12726                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12727
12728                 // stind could point anywhere, e.g. a boxed class static int
12729                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12730
12731                 if (prefixFlags & PREFIX_VOLATILE)
12732                 {
12733                     assert(op1->OperGet() == GT_IND);
12734                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12735                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12736                     op1->gtFlags |= GTF_IND_VOLATILE;
12737                 }
12738
12739                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12740                 {
12741                     assert(op1->OperGet() == GT_IND);
12742                     op1->gtFlags |= GTF_IND_UNALIGNED;
12743                 }
12744
12745                 op1 = gtNewAssignNode(op1, op2);
12746                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12747
12748                 // Spill side-effects AND global-data-accesses
12749                 if (verCurrentState.esStackDepth > 0)
12750                 {
12751                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12752                 }
12753
12754                 goto APPEND;
12755
12756             case CEE_LDIND_I1:
12757                 lclTyp = TYP_BYTE;
12758                 goto LDIND;
12759             case CEE_LDIND_I2:
12760                 lclTyp = TYP_SHORT;
12761                 goto LDIND;
12762             case CEE_LDIND_U4:
12763             case CEE_LDIND_I4:
12764                 lclTyp = TYP_INT;
12765                 goto LDIND;
12766             case CEE_LDIND_I8:
12767                 lclTyp = TYP_LONG;
12768                 goto LDIND;
12769             case CEE_LDIND_REF:
12770                 lclTyp = TYP_REF;
12771                 goto LDIND;
12772             case CEE_LDIND_I:
12773                 lclTyp = TYP_I_IMPL;
12774                 goto LDIND;
12775             case CEE_LDIND_R4:
12776                 lclTyp = TYP_FLOAT;
12777                 goto LDIND;
12778             case CEE_LDIND_R8:
12779                 lclTyp = TYP_DOUBLE;
12780                 goto LDIND;
12781             case CEE_LDIND_U1:
12782                 lclTyp = TYP_UBYTE;
12783                 goto LDIND;
12784             case CEE_LDIND_U2:
12785                 lclTyp = TYP_USHORT;
12786                 goto LDIND;
12787             LDIND:
12788
12789                 if (tiVerificationNeeded)
12790                 {
12791                     typeInfo lclTiType(lclTyp);
12792 #ifdef _TARGET_64BIT_
12793                     if (opcode == CEE_LDIND_I)
12794                     {
12795                         lclTiType = typeInfo::nativeInt();
12796                     }
12797 #endif // _TARGET_64BIT_
12798                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12799                     tiRetVal.NormaliseForStack();
12800                 }
12801                 else
12802                 {
12803                     compUnsafeCastUsed = true; // Have to go conservative
12804                 }
12805
12806             LDIND_POST_VERIFY:
12807
12808                 op1 = impPopStack().val; // address to load from
12809                 impBashVarAddrsToI(op1);
12810
12811 #ifdef _TARGET_64BIT_
12812                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12813                 //
12814                 if (genActualType(op1->gtType) == TYP_INT)
12815                 {
12816                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12817                     op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
12818                 }
12819 #endif
12820
12821                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12822
12823                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12824
12825                 // ldind could point anywhere, e.g. a boxed class static int
12826                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12827
12828                 if (prefixFlags & PREFIX_VOLATILE)
12829                 {
12830                     assert(op1->OperGet() == GT_IND);
12831                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12832                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12833                     op1->gtFlags |= GTF_IND_VOLATILE;
12834                 }
12835
12836                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12837                 {
12838                     assert(op1->OperGet() == GT_IND);
12839                     op1->gtFlags |= GTF_IND_UNALIGNED;
12840                 }
12841
12842                 impPushOnStack(op1, tiRetVal);
12843
12844                 break;
12845
12846             case CEE_UNALIGNED:
12847
12848                 assert(sz == 1);
12849                 val = getU1LittleEndian(codeAddr);
12850                 ++codeAddr;
12851                 JITDUMP(" %u", val);
12852                 if ((val != 1) && (val != 2) && (val != 4))
12853                 {
12854                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12855                 }
12856
12857                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12858                 prefixFlags |= PREFIX_UNALIGNED;
12859
12860                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12861
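                  // A prefix opcode only records its effect in prefixFlags; control then re-enters the decoder at
                  // PREFIX to read the opcode that the prefix modifies.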
12862             PREFIX:
12863                 opcode     = (OPCODE)getU1LittleEndian(codeAddr);
12864                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12865                 codeAddr += sizeof(__int8);
12866                 goto DECODE_OPCODE;
12867
12868             case CEE_VOLATILE:
12869
12870                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12871                 prefixFlags |= PREFIX_VOLATILE;
12872
12873                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12874
12875                 assert(sz == 0);
12876                 goto PREFIX;
12877
12878             case CEE_LDFTN:
12879             {
12880                 // Need to do a lookup here so that we perform an access check
12881                 // and do a NOWAY if protections are violated
12882                 _impResolveToken(CORINFO_TOKENKIND_Method);
12883
12884                 JITDUMP(" %08X", resolvedToken.token);
12885
12886                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12887                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12888                               &callInfo);
12889
12890                 // This check really only applies to intrinsic Array.Address methods
12891                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12892                 {
12893                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12894                 }
12895
12896                 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
12897                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12898
12899                 if (tiVerificationNeeded)
12900                 {
12901                     // LDFTN could start the beginning of a delegate creation sequence, remember that
12902                     delegateCreateStart = codeAddr - 2;
12903
12904                     // check any constraints on the callee's class and type parameters
12905                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12906                                    "method has unsatisfied class constraints");
12907                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12908                                                                                 resolvedToken.hMethod),
12909                                    "method has unsatisfied method constraints");
12910
12911                     mflags = callInfo.verMethodFlags;
12912                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12913                 }
12914
12915             DO_LDFTN:
12916                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12917                 if (compDonotInline())
12918                 {
12919                     return;
12920                 }
12921
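                      // Stash the resolved token so the typeInfo pushed with this function pointer can identify
                      // the exact target method, e.g. when the ldftn feeds a delegate constructor.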
12922                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12923                 impPushOnStack(op1, typeInfo(heapToken));
12924
12925                 break;
12926             }
12927
12928             case CEE_LDVIRTFTN:
12929             {
12930                 /* Get the method token */
12931
12932                 _impResolveToken(CORINFO_TOKENKIND_Method);
12933
12934                 JITDUMP(" %08X", resolvedToken.token);
12935
12936                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12937                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12938                                                     CORINFO_CALLINFO_CALLVIRT)),
12939                               &callInfo);
12940
12941                 // This check really only applies to intrinsic Array.Address methods
12942                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12943                 {
12944                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12945                 }
12946
12947                 mflags = callInfo.methodFlags;
12948
12949                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12950
12951                 if (compIsForInlining())
12952                 {
12953                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12954                     {
12955                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12956                         return;
12957                     }
12958                 }
12959
12960                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12961
12962                 if (tiVerificationNeeded)
12963                 {
12964
12965                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12966                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12967
12968                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12969                     typeInfo declType =
12970                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12971
12972                     typeInfo arg = impStackTop().seTypeInfo;
12973                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12974                            "bad ldvirtftn");
12975
12976                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12977                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12978                     {
12979                         instanceClassHnd = arg.GetClassHandleForObjRef();
12980                     }
12981
12982                     // check any constraints on the method's class and type parameters
12983                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12984                                    "method has unsatisfied class constraints");
12985                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12986                                                                                 resolvedToken.hMethod),
12987                                    "method has unsatisfied method constraints");
12988
12989                     if (mflags & CORINFO_FLG_PROTECTED)
12990                     {
12991                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12992                                "Accessing protected method through wrong type.");
12993                     }
12994                 }
12995
12996                 /* Get the object-ref */
12997                 op1 = impPopStack().val;
12998                 assertImp(op1->gtType == TYP_REF);
12999
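                      // When the target can be resolved statically (a ReadyToRun call kind that does not need the
                      // LDVIRTFTN helper, or a final/static/non-virtual method), evaluate the object only for its
                      // side effects and handle this like a plain ldftn.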
13000                 if (opts.IsReadyToRun())
13001                 {
13002                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
13003                     {
13004                         if (op1->gtFlags & GTF_SIDE_EFFECT)
13005                         {
13006                             op1 = gtUnusedValNode(op1);
13007                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13008                         }
13009                         goto DO_LDFTN;
13010                     }
13011                 }
13012                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13013                 {
13014                     if (op1->gtFlags & GTF_SIDE_EFFECT)
13015                     {
13016                         op1 = gtUnusedValNode(op1);
13017                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13018                     }
13019                     goto DO_LDFTN;
13020                 }
13021
13022                 GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
13023                 if (compDonotInline())
13024                 {
13025                     return;
13026                 }
13027
13028                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13029                 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
13030                 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
13031                 impPushOnStack(fptr, typeInfo(heapToken));
13032
13033                 break;
13034             }
13035
13036             case CEE_CONSTRAINED:
13037
13038                 assertImp(sz == sizeof(unsigned));
13039                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
13040                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
13041                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
13042
13043                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
13044                 prefixFlags |= PREFIX_CONSTRAINED;
13045
13046                 {
13047                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13048                     if (actualOpcode != CEE_CALLVIRT)
13049                     {
13050                         BADCODE("constrained. has to be followed by callvirt");
13051                     }
13052                 }
13053
13054                 goto PREFIX;
13055
13056             case CEE_READONLY:
13057                 JITDUMP(" readonly.");
13058
13059                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
13060                 prefixFlags |= PREFIX_READONLY;
13061
13062                 {
13063                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13064                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
13065                     {
13066                         BADCODE("readonly. has to be followed by ldelema or call");
13067                     }
13068                 }
13069
13070                 assert(sz == 0);
13071                 goto PREFIX;
13072
13073             case CEE_TAILCALL:
13074                 JITDUMP(" tail.");
13075
13076                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
13077                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13078
13079                 {
13080                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13081                     if (!impOpcodeIsCallOpcode(actualOpcode))
13082                     {
13083                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
13084                     }
13085                 }
13086                 assert(sz == 0);
13087                 goto PREFIX;
13088
13089             case CEE_NEWOBJ:
13090
13091                 /* Since we will implicitly insert newObjThisPtr at the start of the
13092                    argument list, spill any GTF_ORDER_SIDEEFF */
13093                 impSpillSpecialSideEff();
13094
13095                 /* NEWOBJ does not respond to TAIL */
13096                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
13097
13098                 /* NEWOBJ does not respond to CONSTRAINED */
13099                 prefixFlags &= ~PREFIX_CONSTRAINED;
13100
13101                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
13102
13103                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13104                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
13105                               &callInfo);
13106
13107                 if (compIsForInlining())
13108                 {
13109                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13110                     {
13111                         // Check to see if this call violates the boundary.
13112                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
13113                         return;
13114                     }
13115                 }
13116
13117                 mflags = callInfo.methodFlags;
13118
13119                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
13120                 {
13121                     BADCODE("newobj on static or abstract method");
13122                 }
13123
13124                 // Insert the security callout before any actual code is generated
13125                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13126
13127                 // There are three different cases for new:
13128                 //      1) Object is an array (arrays are treated specially by the EE)
13129                 //      2) Object is some other variable-sized object (e.g. String)
13130                 //      3) Class size can be determined beforehand (normal case)
13131                 // The object size is variable (it depends on arguments) in the first two cases.
13132                 // In the first case we need to call a NEWOBJ helper (multinewarray),
13133                 // in the second case we call the constructor with a '0' this pointer,
13134                 // and in the third case we alloc the memory, then call the constructor.
13135
13136                 clsFlags = callInfo.classFlags;
13137                 if (clsFlags & CORINFO_FLG_ARRAY)
13138                 {
13139                     if (tiVerificationNeeded)
13140                     {
13141                         CORINFO_CLASS_HANDLE elemTypeHnd;
13142                         INDEBUG(CorInfoType corType =)
13143                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13144                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
13145                         Verify(elemTypeHnd == nullptr ||
13146                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13147                                "newarr of byref-like objects");
13148                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
13149                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
13150                                       &callInfo DEBUGARG(info.compFullName));
13151                     }
13152                     // Arrays need to call the NEWOBJ helper.
13153                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
13154
13155                     impImportNewObjArray(&resolvedToken, &callInfo);
13156                     if (compDonotInline())
13157                     {
13158                         return;
13159                     }
13160
13161                     callTyp = TYP_REF;
13162                     break;
13163                 }
13164                 // At present this can only be String
13165                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
13166                 {
13167                     if (IsTargetAbi(CORINFO_CORERT_ABI))
13168                     {
13169                         // The dummy argument does not exist in CoreRT
13170                         newObjThisPtr = nullptr;
13171                     }
13172                     else
13173                     {
13174                         // This is the case for variable-sized objects that are not
13175                         // arrays.  In this case, call the constructor with a null 'this'
13176                         // pointer
13177                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
13178                     }
13179
13180                     /* Remember that this basic block contains 'new' of an object */
13181                     block->bbFlags |= BBF_HAS_NEWOBJ;
13182                     optMethodFlags |= OMF_HAS_NEWOBJ;
13183                 }
13184                 else
13185                 {
13186                     // This is the normal case where the size of the object is
13187                     // fixed.  Allocate the memory and call the constructor.
13188
13189                     // Note: We cannot add a peep to avoid use of a temp here
13190                     // because we don't have enough interference info to detect when
13191                     // sources and destination interfere, e.g.: s = new S(ref);
13192
13193                     // TODO: We need to find the correct place to introduce a general
13194                     // reverse copy prop for struct return values from newobj or
13195                     // any function returning structs.
13196
13197                     /* get a temporary for the new object */
13198                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
13199                     if (compDonotInline())
13200                     {
13201                         // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
13202                         assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
13203                         return;
13204                     }
13205
13206                     // In the value class case we only need clsHnd for size calcs.
13207                     //
13208                     // The lookup of the code pointer will be handled by CALL in this case
13209                     if (clsFlags & CORINFO_FLG_VALUECLASS)
13210                     {
13211                         if (compIsForInlining())
13212                         {
13213                             // If value class has GC fields, inform the inliner. It may choose to
13214                             // bail out on the inline.
13215                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13216                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
13217                             {
13218                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
13219                                 if (compInlineResult->IsFailure())
13220                                 {
13221                                     return;
13222                                 }
13223
13224                                 // Do further notification in the case where the call site is rare;
13225                                 // some policies do not track the relative hotness of call sites for
13226                                 // "always" inline cases.
13227                                 if (impInlineInfo->iciBlock->isRunRarely())
13228                                 {
13229                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
13230                                     if (compInlineResult->IsFailure())
13231                                     {
13232                                         return;
13233                                     }
13234                                 }
13235                             }
13236                         }
13237
13238                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
13239                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
13240
13241                         if (impIsPrimitive(jitTyp))
13242                         {
13243                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
13244                         }
13245                         else
13246                         {
13247                             // The local variable itself is the allocated space.
13248                             // Here we need the unsafe value cls check, since the address of the struct is taken
13249                             // for further use and is potentially exploitable.
13250                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
13251                         }
13252                         if (compIsForInlining() || fgStructTempNeedsExplicitZeroInit(lvaTable + lclNum, block))
13253                         {
13254                             // Append a tree to zero-out the temp
13255                             newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
13256
13257                             newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
13258                                                            gtNewIconNode(0), // Value
13259                                                            size,             // Size
13260                                                            false,            // isVolatile
13261                                                            false);           // not copyBlock
13262                             impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
13263                         }
13264
13265                         // Obtain the address of the temp
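                              // i.e. the 'this' passed to the value class constructor is the byref
                              // address of the struct temp allocated above.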
13266                         newObjThisPtr =
13267                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
13268                     }
13269                     else
13270                     {
13271 #ifdef FEATURE_READYTORUN_COMPILER
13272                         if (opts.IsReadyToRun())
13273                         {
13274                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
13275                             usingReadyToRunHelper = (op1 != nullptr);
13276                         }
13277
13278                         if (!usingReadyToRunHelper)
13279 #endif
13280                         {
13281                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
13282                             if (op1 == nullptr)
13283                             { // compDonotInline()
13284                                 return;
13285                             }
13286
13287                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13288                             // and the newfast call with a single call to a dynamic R2R cell that will:
13289                             //      1) Load the context
13290                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
13291                             //      stub
13292                             //      3) Allocate and return the new object
13293                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13294
13295                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
13296                                                     resolvedToken.hClass, TYP_REF, op1);
13297                         }
13298
13299                         // Remember that this basic block contains 'new' of an object
13300                         block->bbFlags |= BBF_HAS_NEWOBJ;
13301                         optMethodFlags |= OMF_HAS_NEWOBJ;
13302
13303                         // Append the assignment to the temp/local. We don't need to spill
13304                         // at all as we are just calling an EE-Jit helper which can only
13305                         // cause an (async) OutOfMemoryException.
13306
13307                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
13308                         // to a temp. Note that the pattern "temp = allocObj" is required
13309                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
13310                         // without exhaustive walk over all expressions.
13311
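                              // For illustration, the appended statement looks roughly like this
                              // (V02 is an example local number):
                              //     *  STMT      void
                              //         |  /--*  ALLOCOBJ  ref
                              //         \--*  ASG       ref
                              //            \--*  LCL_VAR   ref    V02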
13312                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
13313                         lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
13314
13315                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
13316                     }
13317                 }
13318                 goto CALL;
13319
13320             case CEE_CALLI:
13321
13322                 /* CALLI does not respond to CONSTRAINED */
13323                 prefixFlags &= ~PREFIX_CONSTRAINED;
13324
13325                 if (compIsForInlining())
13326                 {
13327                     // CALLI doesn't have a method handle, so assume the worst.
13328                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13329                     {
13330                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
13331                         return;
13332                     }
13333                 }
13334
13335             // fall through
13336
13337             case CEE_CALLVIRT:
13338             case CEE_CALL:
13339
13340                 // We can't call getCallInfo on the token from a CALLI, but we need it in
13341                 // many other places.  We unfortunately embed that knowledge here.
13342                 if (opcode != CEE_CALLI)
13343                 {
13344                     _impResolveToken(CORINFO_TOKENKIND_Method);
13345
13346                     eeGetCallInfo(&resolvedToken,
13347                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
13348                                   // this is how impImportCall invokes getCallInfo
13349                                   addVerifyFlag(
13350                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
13351                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
13352                                                                        : CORINFO_CALLINFO_NONE)),
13353                                   &callInfo);
13354                 }
13355                 else
13356                 {
13357                     // Suppress uninitialized use warning.
13358                     memset(&resolvedToken, 0, sizeof(resolvedToken));
13359                     memset(&callInfo, 0, sizeof(callInfo));
13360
13361                     resolvedToken.token = getU4LittleEndian(codeAddr);
13362                 }
13363
13364             CALL: // memberRef should be set.
13365                 // newObjThisPtr should be set for CEE_NEWOBJ
13366
13367                 JITDUMP(" %08X", resolvedToken.token);
13368                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13369
13370                 bool newBBcreatedForTailcallStress;
13371
13372                 newBBcreatedForTailcallStress = false;
13373
13374                 if (compIsForInlining())
13375                 {
13376                     if (compDonotInline())
13377                     {
13378                         return;
13379                     }
13380                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13381                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13382                 }
13383                 else
13384                 {
13385                     if (compTailCallStress())
13386                     {
13387                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13388                         // Tail call stress only recognizes call+ret patterns and forces them to be
13389                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
13390                         // doesn't import the 'ret' opcode following the call into the basic block containing
13391                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
13392                         // is already checking that there is an opcode following call and hence it is
13393                         // safe here to read next opcode without bounds check.
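                              // For example, under stress an IL sequence such as
                              //     call   int32 C::M()
                              //     ret
                              // may be treated as if the user had written "tail. call".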
13394                         newBBcreatedForTailcallStress =
13395                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
13396                                                              // make it jump to RET.
13397                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
13398
13399                         if (newBBcreatedForTailcallStress &&
13400                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
13401                             verCheckTailCallConstraint(opcode, &resolvedToken,
13402                                                        constraintCall ? &constrainedResolvedToken : nullptr,
13403                                                        true) // Is it legal to do tailcall?
13404                             )
13405                         {
13406                             // Stress the tailcall.
13407                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13408                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13409                         }
13410                     }
13411                 }
13412
13413                 // This is split up to avoid goto flow warnings.
13414                 bool isRecursive;
13415                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13416
13417                 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
13418                 // hence will not be considered for implicit tail calling.
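                      // An implicit tail call candidate is, roughly, a call whose result (if any) is
                      // immediately returned and which carries no "tail." prefix in the IL.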
13419                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13420                 {
13421                     if (compIsForInlining())
13422                     {
13423 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13424                         // Are we inlining at an implicit tail call site? If so then we can flag
13425                         // implicit tail call sites in the inline body. These call sites
13426                         // often end up in non BBJ_RETURN blocks, so only flag them when
13427                         // we're able to handle shared returns.
13428                         if (impInlineInfo->iciCall->IsImplicitTailCall())
13429                         {
13430                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13431                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13432                         }
13433 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13434                     }
13435                     else
13436                     {
13437                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13438                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13439                     }
13440                 }
13441
13442                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13443                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13444                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
13445
13446                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13447                 {
13448                     // All calls and delegates need a security callout.
13449                     // For delegates, this is the call to the delegate constructor, not the access check on the
13450                     // LD(virt)FTN.
13451                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13452
13453 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
13454
13455                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
13456                 // and the field it is reading, thus it is now unverifiable for the call not to be immediately
13457                 // preceded by ldtoken <field token>, and we now check accessibility
13458                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
13459                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
13460                 {
13461                     if (prevOpcode != CEE_LDTOKEN)
13462                     {
13463                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
13464                     }
13465                     else
13466                     {
13467                         assert(lastLoadToken != NULL);
13468                         // Now that we know we have a token, verify that it is accessible for loading
13469                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
13470                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
13471                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
13472                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13473                     }
13474                 }
13475
13476 #endif // DevDiv 410397
13477                 }
13478
13479                 if (tiVerificationNeeded)
13480                 {
13481                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13482                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13483                                   &callInfo DEBUGARG(info.compFullName));
13484                 }
13485
13486                 // Insert delegate callout here.
13487                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
13488                 {
13489 #ifdef DEBUG
13490                     // We should do this only if verification is enabled
13491                     // If verification is disabled, delegateCreateStart will not be initialized correctly
13492                     if (tiVerificationNeeded)
13493                     {
13494                         mdMemberRef delegateMethodRef = mdMemberRefNil;
13495                         // We should get here only for well formed delegate creation.
13496                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
13497                     }
13498 #endif
13499                 }
13500
13501                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13502                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13503                 if (compDonotInline())
13504                 {
13505                     // We do not check for failures after lvaGrabTemp; that is covered by the CoreCLR_13272 issue.
13506                     assert((callTyp == TYP_UNDEF) ||
13507                            (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
13508                     return;
13509                 }
13510
13511                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
13512                                                                        // have created a new BB after the "call"
13513                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
13514                 {
13515                     assert(!compIsForInlining());
13516                     goto RET;
13517                 }
13518
13519                 break;
13520
13521             case CEE_LDFLD:
13522             case CEE_LDSFLD:
13523             case CEE_LDFLDA:
13524             case CEE_LDSFLDA:
13525             {
13526
13527                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
13528                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
13529
13530                 /* Get the CP_Fieldref index */
13531                 assertImp(sz == sizeof(unsigned));
13532
13533                 _impResolveToken(CORINFO_TOKENKIND_Field);
13534
13535                 JITDUMP(" %08X", resolvedToken.token);
13536
13537                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
13538
13539                 GenTree*             obj     = nullptr;
13540                 typeInfo*            tiObj   = nullptr;
13541                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
13542
13543                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
13544                 {
13545                     tiObj         = &impStackTop().seTypeInfo;
13546                     StackEntry se = impPopStack();
13547                     objType       = se.seTypeInfo.GetClassHandle();
13548                     obj           = se.val;
13549
13550                     if (impIsThis(obj))
13551                     {
13552                         aflags |= CORINFO_ACCESS_THIS;
13553
13554                         // An optimization for Contextful classes:
13555                         // we unwrap the proxy when we have a 'this reference'
13556
13557                         if (info.compUnwrapContextful)
13558                         {
13559                             aflags |= CORINFO_ACCESS_UNWRAP;
13560                         }
13561                     }
13562                 }
13563
13564                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13565
13566                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13567                 // handle
13568                 CorInfoType ciType = fieldInfo.fieldType;
13569                 clsHnd             = fieldInfo.structType;
13570
13571                 lclTyp = JITtype2varType(ciType);
13572
13573 #ifdef _TARGET_AMD64_
13574                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
13575 #endif // _TARGET_AMD64_
13576
13577                 if (compIsForInlining())
13578                 {
13579                     switch (fieldInfo.fieldAccessor)
13580                     {
13581                         case CORINFO_FIELD_INSTANCE_HELPER:
13582                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13583                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13584                         case CORINFO_FIELD_STATIC_TLS:
13585
13586                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
13587                             return;
13588
13589                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13590                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13591                             /* We may be able to inline the field accessors in specific instantiations of generic
13592                              * methods */
13593                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
13594                             return;
13595
13596                         default:
13597                             break;
13598                     }
13599
13600                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
13601                         clsHnd)
13602                     {
13603                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
13604                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
13605                         {
13606                             // Loading a static valuetype field usually will cause a JitHelper to be called
13607                             // for the static base. This will bloat the code.
13608                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
13609
13610                             if (compInlineResult->IsFailure())
13611                             {
13612                                 return;
13613                             }
13614                         }
13615                     }
13616                 }
13617
13618                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
13619                 if (isLoadAddress)
13620                 {
13621                     tiRetVal.MakeByRef();
13622                 }
13623                 else
13624                 {
13625                     tiRetVal.NormaliseForStack();
13626                 }
13627
13628                 // Perform this check always to ensure that we get field access exceptions even with
13629                 // SkipVerification.
13630                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13631
13632                 if (tiVerificationNeeded)
13633                 {
13634                     // You can also pass the unboxed struct to LDFLD
13635                     BOOL bAllowPlainValueTypeAsThis = FALSE;
13636                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13637                     {
13638                         bAllowPlainValueTypeAsThis = TRUE;
13639                     }
13640
13641                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13642
13643                     // If we're doing this on a heap object or from a 'safe' byref
13644                     // then the result is a safe byref too
13645                     if (isLoadAddress) // load address
13646                     {
13647                         if (fieldInfo.fieldFlags &
13648                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
13649                         {
13650                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13651                             {
13652                                 tiRetVal.SetIsPermanentHomeByRef();
13653                             }
13654                         }
13655                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13656                         {
13657                             // ldflda of byref is safe if done on a gc object or on a
13658                             // safe byref
13659                             tiRetVal.SetIsPermanentHomeByRef();
13660                         }
13661                     }
13662                 }
13663                 else
13664                 {
13665                     // tiVerificationNeeded is false.
13666                     // Raise InvalidProgramException if static load accesses non-static field
13667                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13668                     {
13669                         BADCODE("static access on an instance field");
13670                     }
13671                 }
13672
13673                 // We are using ldfld/a on a static field. We allow it, but need to evaluate any side effects of obj.
13674                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13675                 {
13676                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13677                     {
13678                         obj = gtUnusedValNode(obj);
13679                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13680                     }
13681                     obj = nullptr;
13682                 }
13683
13684                 /* Preserve 'small' int types */
13685                 if (!varTypeIsSmall(lclTyp))
13686                 {
13687                     lclTyp = genActualType(lclTyp);
13688                 }
13689
13690                 bool usesHelper = false;
13691
13692                 switch (fieldInfo.fieldAccessor)
13693                 {
13694                     case CORINFO_FIELD_INSTANCE:
13695 #ifdef FEATURE_READYTORUN_COMPILER
13696                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13697 #endif
13698                     {
13699                         bool nullcheckNeeded = false;
13700
13701                         obj = impCheckForNullPointer(obj);
13702
13703                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13704                         {
13705                             nullcheckNeeded = true;
13706                         }
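                              // (Taking a field's address performs no load, so when the byref could be null
                              // we ask gtNewFieldRef for an explicit null check to preserve exception behavior.)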
13707
13708                         // If the object is a struct, what we really want is
13709                         // for the field to operate on the address of the struct.
13710                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13711                         {
13712                             assert(opcode == CEE_LDFLD && objType != nullptr);
13713
13714                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13715                         }
13716
13717                         /* Create the data member node */
13718                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13719
13720 #ifdef FEATURE_READYTORUN_COMPILER
13721                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13722                         {
13723                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13724                         }
13725 #endif
13726
13727                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13728
13729                         if (fgAddrCouldBeNull(obj))
13730                         {
13731                             op1->gtFlags |= GTF_EXCEPT;
13732                         }
13733
13734                         // If gtFldObj is a BYREF then our target is a value class and
13735                         // it could point anywhere, for example a boxed class static int
13736                         if (obj->gtType == TYP_BYREF)
13737                         {
13738                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13739                         }
13740
13741                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13742                         if (StructHasOverlappingFields(typeFlags))
13743                         {
13744                             op1->gtField.gtFldMayOverlap = true;
13745                         }
13746
13747                         // wrap it in an address-of operator if necessary
13748                         if (isLoadAddress)
13749                         {
13750                             op1 = gtNewOperNode(GT_ADDR,
13751                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13752                         }
13753                         else
13754                         {
13755                             if (compIsForInlining() &&
13756                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13757                                                                                    impInlineInfo->inlArgInfo))
13758                             {
13759                                 impInlineInfo->thisDereferencedFirst = true;
13760                             }
13761                         }
13762                     }
13763                     break;
13764
13765                     case CORINFO_FIELD_STATIC_TLS:
13766 #ifdef _TARGET_X86_
13767                         // Legacy TLS access is implemented as intrinsic on x86 only
13768
13769                         /* Create the data member node */
13770                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13771                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13772
13773                         if (isLoadAddress)
13774                         {
13775                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13776                         }
13777                         break;
13778 #else
13779                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13780
13781                         __fallthrough;
13782 #endif
13783
13784                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13785                     case CORINFO_FIELD_INSTANCE_HELPER:
13786                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13787                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13788                                                clsHnd, nullptr);
13789                         usesHelper = true;
13790                         break;
13791
13792                     case CORINFO_FIELD_STATIC_ADDRESS:
13793                         // Replace static read-only fields with constant if possible
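                          // For example, once the containing class has been initialized, a load of a
                          // 'static readonly' integral or floating-point field can be imported as the
                          // constant currently stored at the field's address.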
13794                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13795                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13796                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13797                         {
13798                             CorInfoInitClassResult initClassResult =
13799                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13800                                                             impTokenLookupContextHandle);
13801
13802                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13803                             {
13804                                 void** pFldAddr = nullptr;
13805                                 void*  fldAddr =
13806                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13807
13808                                 // We should always be able to access this static's address directly
13809                                 assert(pFldAddr == nullptr);
13810
13811                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13812                                 goto FIELD_DONE;
13813                             }
13814                         }
13815
13816                         __fallthrough;
13817
13818                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13819                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13820                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13821                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13822                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13823                                                          lclTyp);
13824                         break;
13825
13826                     case CORINFO_FIELD_INTRINSIC_ZERO:
13827                     {
13828                         assert(aflags & CORINFO_ACCESS_GET);
13829                         op1 = gtNewIconNode(0, lclTyp);
13830                         goto FIELD_DONE;
13831                     }
13832                     break;
13833
13834                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13835                     {
13836                         assert(aflags & CORINFO_ACCESS_GET);
13837
13838                         LPVOID         pValue;
13839                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13840                         op1                = gtNewStringLiteralNode(iat, pValue);
13841                         goto FIELD_DONE;
13842                     }
13843                     break;
13844
13845                     case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
13846                     {
13847                         assert(aflags & CORINFO_ACCESS_GET);
13848 #if BIGENDIAN
13849                         op1 = gtNewIconNode(0, lclTyp);
13850 #else
13851                         op1                     = gtNewIconNode(1, lclTyp);
13852 #endif
13853                         goto FIELD_DONE;
13854                     }
13855                     break;
13856
13857                     default:
13858                         assert(!"Unexpected fieldAccessor");
13859                 }
13860
13861                 if (!isLoadAddress)
13862                 {
13863
13864                     if (prefixFlags & PREFIX_VOLATILE)
13865                     {
13866                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13867                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13868
13869                         if (!usesHelper)
13870                         {
13871                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13872                                    (op1->OperGet() == GT_OBJ));
13873                             op1->gtFlags |= GTF_IND_VOLATILE;
13874                         }
13875                     }
13876
13877                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13878                     {
13879                         if (!usesHelper)
13880                         {
13881                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13882                                    (op1->OperGet() == GT_OBJ));
13883                             op1->gtFlags |= GTF_IND_UNALIGNED;
13884                         }
13885                     }
13886                 }
13887
13888                 /* Check if the class needs explicit initialization */
13889
13890                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13891                 {
13892                     GenTree* helperNode = impInitClass(&resolvedToken);
13893                     if (compDonotInline())
13894                     {
13895                         return;
13896                     }
13897                     if (helperNode != nullptr)
13898                     {
13899                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13900                     }
13901                 }
13902
13903             FIELD_DONE:
13904                 impPushOnStack(op1, tiRetVal);
13905             }
13906             break;
13907
13908             case CEE_STFLD:
13909             case CEE_STSFLD:
13910             {
13911
13912                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13913
13914                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13915
13916                 /* Get the CP_Fieldref index */
13917
13918                 assertImp(sz == sizeof(unsigned));
13919
13920                 _impResolveToken(CORINFO_TOKENKIND_Field);
13921
13922                 JITDUMP(" %08X", resolvedToken.token);
13923
13924                 int       aflags = CORINFO_ACCESS_SET;
13925                 GenTree*  obj    = nullptr;
13926                 typeInfo* tiObj  = nullptr;
13927                 typeInfo  tiVal;
13928
13929                 /* Pull the value from the stack */
13930                 StackEntry se = impPopStack();
13931                 op2           = se.val;
13932                 tiVal         = se.seTypeInfo;
13933                 clsHnd        = tiVal.GetClassHandle();
13934
13935                 if (opcode == CEE_STFLD)
13936                 {
13937                     tiObj = &impStackTop().seTypeInfo;
13938                     obj   = impPopStack().val;
13939
13940                     if (impIsThis(obj))
13941                     {
13942                         aflags |= CORINFO_ACCESS_THIS;
13943
13944                         // An optimization for Contextful classes:
13945                         // we unwrap the proxy when we have a 'this reference'
13946
13947                         if (info.compUnwrapContextful)
13948                         {
13949                             aflags |= CORINFO_ACCESS_UNWRAP;
13950                         }
13951                     }
13952                 }
13953
13954                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13955
13956                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13957                 // handle
13958                 CorInfoType ciType = fieldInfo.fieldType;
13959                 fieldClsHnd        = fieldInfo.structType;
13960
13961                 lclTyp = JITtype2varType(ciType);
13962
13963                 if (compIsForInlining())
13964                 {
13965                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap,
13966                      * or a per-inst static? */
13967
13968                     switch (fieldInfo.fieldAccessor)
13969                     {
13970                         case CORINFO_FIELD_INSTANCE_HELPER:
13971                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13972                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13973                         case CORINFO_FIELD_STATIC_TLS:
13974
13975                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13976                             return;
13977
13978                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13979                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13980                             /* We may be able to inline the field accessors in specific instantiations of generic
13981                              * methods */
13982                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13983                             return;
13984
13985                         default:
13986                             break;
13987                     }
13988                 }
13989
13990                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13991
13992                 if (tiVerificationNeeded)
13993                 {
13994                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13995                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13996                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13997                 }
13998                 else
13999                 {
14000                     // tiVerificationNeeded is false.
14001                     // Raise InvalidProgramException if static store accesses non-static field
14002                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14003                     {
14004                         BADCODE("static access on an instance field");
14005                     }
14006                 }
14007
14008                 // We are using stfld on a static field.
14009                 // We allow it, but need to eval any side-effects for obj
14010                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14011                 {
14012                     if (obj->gtFlags & GTF_SIDE_EFFECT)
14013                     {
14014                         obj = gtUnusedValNode(obj);
14015                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14016                     }
14017                     obj = nullptr;
14018                 }
14019
14020                 /* Preserve 'small' int types */
14021                 if (!varTypeIsSmall(lclTyp))
14022                 {
14023                     lclTyp = genActualType(lclTyp);
14024                 }
14025
14026                 switch (fieldInfo.fieldAccessor)
14027                 {
14028                     case CORINFO_FIELD_INSTANCE:
14029 #ifdef FEATURE_READYTORUN_COMPILER
14030                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
14031 #endif
14032                     {
14033                         obj = impCheckForNullPointer(obj);
14034
14035                         /* Create the data member node */
14036                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14037                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14038                         if (StructHasOverlappingFields(typeFlags))
14039                         {
14040                             op1->gtField.gtFldMayOverlap = true;
14041                         }
14042
14043 #ifdef FEATURE_READYTORUN_COMPILER
14044                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14045                         {
14046                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14047                         }
14048 #endif
14049
14050                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14051
14052                         if (fgAddrCouldBeNull(obj))
14053                         {
14054                             op1->gtFlags |= GTF_EXCEPT;
14055                         }
14056
14057                         // If gtFldObj is a BYREF then our target is a value class and
14058                         // it could point anywhere, for example a boxed class static int
14059                         if (obj->gtType == TYP_BYREF)
14060                         {
14061                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
14062                         }
14063
14064                         if (compIsForInlining() &&
14065                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
14066                         {
14067                             impInlineInfo->thisDereferencedFirst = true;
14068                         }
14069                     }
14070                     break;
14071
14072                     case CORINFO_FIELD_STATIC_TLS:
14073 #ifdef _TARGET_X86_
14074                         // Legacy TLS access is implemented as intrinsic on x86 only
14075
14076                         /* Create the data member node */
14077                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14078                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14079
14080                         break;
14081 #else
14082                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14083
14084                         __fallthrough;
14085 #endif
14086
14087                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
14088                     case CORINFO_FIELD_INSTANCE_HELPER:
14089                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14090                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14091                                                clsHnd, op2);
14092                         goto SPILL_APPEND;
14093
14094                     case CORINFO_FIELD_STATIC_ADDRESS:
14095                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14096                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14097                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14098                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14099                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14100                                                          lclTyp);
14101                         break;
14102
14103                     default:
14104                         assert(!"Unexpected fieldAccessor");
14105                 }
14106
14107                 // Create the member assignment, unless we have a struct.
14108                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
14109                 bool deferStructAssign = varTypeIsStruct(lclTyp);
14110
14111                 if (!deferStructAssign)
14112                 {
14113                     if (prefixFlags & PREFIX_VOLATILE)
14114                     {
14115                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14116                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
14117                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14118                         op1->gtFlags |= GTF_IND_VOLATILE;
14119                     }
14120                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14121                     {
14122                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14123                         op1->gtFlags |= GTF_IND_UNALIGNED;
14124                     }
14125
14126                     /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
14127                        trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during
14128                        importation and reads from the union as if it were a long during code generation. Though this
14129                        can potentially read garbage, one can get lucky and have this work correctly.
14130
14131                        This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
14132                        /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
14133                        dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
14134                        it works correctly always.
14135
14136                        Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
14137                        for V4.0.
14138                     */
14139                     CLANG_FORMAT_COMMENT_ANCHOR;
14140
14141 #ifndef _TARGET_64BIT_
14142                     // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
14143                     // generated for ARM as well as x86, so the following IR will be accepted:
14144                     //     *  STMT      void
14145                     //         |  /--*  CNS_INT   int    2
14146                     //         \--*  ASG       long
14147                     //            \--*  CLS_VAR   long
14148
14149                     if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
14150                         varTypeIsLong(op1->TypeGet()))
14151                     {
14152                         op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14153                     }
14154 #endif
14155
14156 #ifdef _TARGET_64BIT_
14157                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
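                          // e.g. assigning the int constant 0 to an IntPtr-typed field: the constant
                          // node is simply retyped to TYP_I_IMPL rather than being wrapped in a cast.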
14158                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
14159                     {
14160                         op2->gtType = TYP_I_IMPL;
14161                     }
14162                     else
14163                     {
14164                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
14165                         //
14166                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
14167                         {
14168                             op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
14169                         }
14170                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
14171                         //
14172                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
14173                         {
14174                             op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
14175                         }
14176                     }
14177 #endif
14178
14179 #if !FEATURE_X87_DOUBLES
14180                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
14181                     // We insert a cast to the dest 'op1' type
14182                     //
14183                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
14184                         varTypeIsFloating(op2->gtType))
14185                     {
14186                         op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14187                     }
14188 #endif // !FEATURE_X87_DOUBLES
14189
14190                     op1 = gtNewAssignNode(op1, op2);
14191
14192                     /* Mark the expression as containing an assignment */
14193
14194                     op1->gtFlags |= GTF_ASG;
14195                 }
14196
14197                 /* Check if the class needs explicit initialization */
14198
14199                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14200                 {
14201                     GenTree* helperNode = impInitClass(&resolvedToken);
14202                     if (compDonotInline())
14203                     {
14204                         return;
14205                     }
14206                     if (helperNode != nullptr)
14207                     {
14208                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14209                     }
14210                 }
14211
14212                 /* stfld can interfere with value classes (consider the sequence
14213                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
14214                    spill all value class references from the stack. */
14215
14216                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
14217                 {
14218                     assert(tiObj);
14219
14220                     if (impIsValueType(tiObj))
14221                     {
14222                         impSpillEvalStack();
14223                     }
14224                     else
14225                     {
14226                         impSpillValueClasses();
14227                     }
14228                 }
14229
14230                 /* Spill any refs to the same member from the stack */
14231
14232                 impSpillLclRefs((ssize_t)resolvedToken.hField);
14233
14234                 /* stsfld also interferes with indirect accesses (for aliased
14235                    statics) and calls. But we don't need to spill other statics
14236                    as we have explicitly spilled this particular static field. */
14237
14238                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
14239
14240                 if (deferStructAssign)
14241                 {
14242                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
14243                 }
14244             }
14245                 goto APPEND;
14246
14247             case CEE_NEWARR:
14248             {
14249
14250                 /* Get the class type index operand */
14251
14252                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
14253
14254                 JITDUMP(" %08X", resolvedToken.token);
14255
14256                 if (!opts.IsReadyToRun())
14257                 {
14258                     // Need to restore array classes before creating array objects on the heap
14259                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14260                     if (op1 == nullptr)
14261                     { // compDonotInline()
14262                         return;
14263                     }
14264                 }
14265
14266                 if (tiVerificationNeeded)
14267                 {
14268                     // As per ECMA, the specified 'numElems' can be either int32 or native int.
14269                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
14270
14271                     CORINFO_CLASS_HANDLE elemTypeHnd;
14272                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
14273                     Verify(elemTypeHnd == nullptr ||
14274                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
14275                            "array of byref-like type");
14276                 }
14277
14278                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14279
14280                 accessAllowedResult =
14281                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14282                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14283
14284                 /* Form the arglist: array class handle, size */
14285                 op2 = impPopStack().val;
14286                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14287
14288 #ifdef FEATURE_READYTORUN_COMPILER
14289                 if (opts.IsReadyToRun())
14290                 {
14291                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
14292                                                     gtNewArgList(op2));
14293                     usingReadyToRunHelper = (op1 != nullptr);
14294
14295                     if (!usingReadyToRunHelper)
14296                     {
14297                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14298                         // and the newarr call with a single call to a dynamic R2R cell that will:
14299                         //      1) Load the context
14300                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14301                         //      3) Allocate the new array
14302                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14303
14304                         // Need to restore array classes before creating array objects on the heap
14305                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14306                         if (op1 == nullptr)
14307                         { // compDonotInline()
14308                             return;
14309                         }
14310                     }
14311                 }
14312
14313                 if (!usingReadyToRunHelper)
14314 #endif
14315                 {
14316                     args = gtNewArgList(op1, op2);
14317
14318                     /* Create a call to 'new' */
14319
14320                     // Note that this only works for shared generic code because the same helper is used for all
14321                     // reference array types
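                          // For illustration, the resulting tree is roughly:
                          //     CALL help <newarr helper>(arrayClsHnd, numElems)
                          // where the EE picks the specific helper based on the element type.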
14322                     op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
14323                 }
14324
14325                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
14326
14327                 /* Remember that this basic block contains 'new' of an sd array */
14328
14329                 block->bbFlags |= BBF_HAS_NEWARRAY;
14330                 optMethodFlags |= OMF_HAS_NEWARRAY;
14331
14332                 /* Push the result of the call on the stack */
14333
14334                 impPushOnStack(op1, tiRetVal);
14335
14336                 callTyp = TYP_REF;
14337             }
14338             break;
14339
14340             case CEE_LOCALLOC:
14341                 if (tiVerificationNeeded)
14342                 {
14343                     Verify(false, "bad opcode");
14344                 }
14345
14346                 // We don't allow locallocs inside handlers
14347                 if (block->hasHndIndex())
14348                 {
14349                     BADCODE("Localloc can't be inside handler");
14350                 }
14351
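                      // Any localloc'd memory is treated as a potentially unsafe buffer, so make sure
                      // the method gets a GS security cookie.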
14352                 setNeedsGSSecurityCookie();
14353
14354                 // Get the size to allocate
14355
14356                 op2 = impPopStack().val;
14357                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14358
14359                 if (verCurrentState.esStackDepth != 0)
14360                 {
14361                     BADCODE("Localloc can only be used when the stack is empty");
14362                 }
14363
14364                 // If the localloc is not in a loop and its size is a small constant,
14365                 // create a new local var of TYP_BLK and return its address.
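                      // For example (illustrative C#), `Span<byte> buf = stackalloc byte[64];` produces a
                      // small constant-sized localloc that this path can turn into an ordinary TYP_BLK local.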
14366                 {
14367                     bool convertedToLocal = false;
14368
14369                     // Need to aggressively fold here, as even fixed-size locallocs
14370                     // will have casts in the way.
14371                     op2 = gtFoldExpr(op2);
14372
14373                     if (op2->IsIntegralConst())
14374                     {
14375                         const ssize_t allocSize = op2->AsIntCon()->IconValue();
14376
14377                         if (allocSize == 0)
14378                         {
14379                             // Result is nullptr
14380                             JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
14381                             op1              = gtNewIconNode(0, TYP_I_IMPL);
14382                             convertedToLocal = true;
14383                         }
14384                         else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
14385                         {
14386                             // Get the size threshold for local conversion
14387                             ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
14388
14389 #ifdef DEBUG
14390                             // Optionally allow this to be modified
14391                             maxSize = JitConfig.JitStackAllocToLocalSize();
14392 #endif // DEBUG
14393
14394                             if (allocSize <= maxSize)
14395                             {
14396                                 const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
14397                                 JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
14398                                         stackallocAsLocal);
14399                                 lvaTable[stackallocAsLocal].lvType           = TYP_BLK;
14400                                 lvaTable[stackallocAsLocal].lvExactSize      = (unsigned)allocSize;
14401                                 lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
14402                                 op1                      = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
14403                                 op1                      = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
14404                                 convertedToLocal         = true;
14405                                 compGSReorderStackLayout = true;
14406                             }
14407                         }
14408                     }
14409
14410                     if (!convertedToLocal)
14411                     {
14412                         // Bail out if inlining and the localloc was not converted.
14413                         //
14414                         // Note we might consider allowing the inline, if the call
14415                         // site is not in a loop.
14416                         if (compIsForInlining())
14417                         {
14418                             InlineObservation obs = op2->IsIntegralConst()
14419                                                         ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
14420                                                         : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
14421                             compInlineResult->NoteFatal(obs);
14422                             return;
14423                         }
14424
14425                         op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14426                         // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14427                         op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14428
14429                         /* The FP register may not be back to the original value at the end
14430                            of the method, even if the frame size is 0, as localloc may
14431                            have modified it. So we will HAVE to reset it */
14432                         compLocallocUsed = true;
14433                     }
14434                     else
14435                     {
14436                         compLocallocOptimized = true;
14437                     }
14438                 }
14439
14440                 impPushOnStack(op1, tiRetVal);
14441                 break;
14442
14443             case CEE_ISINST:
14444             {
14445                 /* Get the type token */
14446                 assertImp(sz == sizeof(unsigned));
14447
14448                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14449
14450                 JITDUMP(" %08X", resolvedToken.token);
14451
14452                 if (!opts.IsReadyToRun())
14453                 {
14454                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14455                     if (op2 == nullptr)
14456                     { // compDonotInline()
14457                         return;
14458                     }
14459                 }
14460
14461                 if (tiVerificationNeeded)
14462                 {
14463                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14464                     // Even if this is a value class, we know it is boxed.
14465                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14466                 }
14467                 accessAllowedResult =
14468                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14469                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14470
14471                 op1 = impPopStack().val;
14472
14473                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
14474
14475                 if (optTree != nullptr)
14476                 {
14477                     impPushOnStack(optTree, tiRetVal);
14478                 }
14479                 else
14480                 {
14481
14482 #ifdef FEATURE_READYTORUN_COMPILER
14483                     if (opts.IsReadyToRun())
14484                     {
14485                         GenTreeCall* opLookup =
14486                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14487                                                       gtNewArgList(op1));
14488                         usingReadyToRunHelper = (opLookup != nullptr);
14489                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
14490
14491                         if (!usingReadyToRunHelper)
14492                         {
14493                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14494                             // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14495                             //      1) Load the context
14496                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
14497                             //      stub
14498                             //      3) Perform the 'is instance' check on the input object
14499                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14500
14501                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14502                             if (op2 == nullptr)
14503                             { // compDonotInline()
14504                                 return;
14505                             }
14506                         }
14507                     }
14508
14509                     if (!usingReadyToRunHelper)
14510 #endif
14511                     {
14512                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
14513                     }
14514                     if (compDonotInline())
14515                     {
14516                         return;
14517                     }
14518
14519                     impPushOnStack(op1, tiRetVal);
14520                 }
14521                 break;
14522             }
14523
14524             case CEE_REFANYVAL:
14525
14526                 // get the class handle and make a ICON node out of it
14527
14528                 _impResolveToken(CORINFO_TOKENKIND_Class);
14529
14530                 JITDUMP(" %08X", resolvedToken.token);
14531
14532                 op2 = impTokenToHandle(&resolvedToken);
14533                 if (op2 == nullptr)
14534                 { // compDonotInline()
14535                     return;
14536                 }
14537
14538                 if (tiVerificationNeeded)
14539                 {
14540                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14541                            "need refany");
14542                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
14543                 }
14544
14545                 op1 = impPopStack().val;
14546                 // Make certain it is normalized.
14547                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14548
14549                 // Call helper GETREFANY(classHandle, op1);
14550                 args = gtNewArgList(op2, op1);
14551                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
14552
14553                 impPushOnStack(op1, tiRetVal);
14554                 break;
14555
14556             case CEE_REFANYTYPE:
14557
14558                 if (tiVerificationNeeded)
14559                 {
14560                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14561                            "need refany");
14562                 }
14563
14564                 op1 = impPopStack().val;
14565
14566                 // Make certain it is normalized.
14567                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14568
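                      // A refany (TypedReference) is roughly a two-field struct: a data pointer followed by
                      // a type handle (see CORINFO_RefAny); the code below extracts just the type handle.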
14569                 if (op1->gtOper == GT_OBJ)
14570                 {
14571                     // Get the address of the refany
14572                     op1 = op1->gtOp.gtOp1;
14573
14574                     // Fetch the type from the correct slot
14575                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14576                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
14577                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
14578                 }
14579                 else
14580                 {
14581                     assertImp(op1->gtOper == GT_MKREFANY);
14582
14583                     // The pointer may have side-effects
14584                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
14585                     {
14586                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14587 #ifdef DEBUG
14588                         impNoteLastILoffs();
14589 #endif
14590                     }
14591
14592                     // We already have the class handle
14593                     op1 = op1->gtOp.gtOp2;
14594                 }
14595
14596                 // convert native TypeHandle to RuntimeTypeHandle
14597                 {
14598                     GenTreeArgList* helperArgs = gtNewArgList(op1);
14599
14600                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, helperArgs);
14601
14602                     // The handle struct is returned in a register
14603                     op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14604
14605                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
14606                 }
14607
14608                 impPushOnStack(op1, tiRetVal);
14609                 break;
14610
14611             case CEE_LDTOKEN:
14612             {
14613                 /* Get the Class index */
14614                 assertImp(sz == sizeof(unsigned));
14615                 lastLoadToken = codeAddr;
14616                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
14617
14618                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
14619
14620                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14621                 if (op1 == nullptr)
14622                 { // compDonotInline()
14623                     return;
14624                 }
14625
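                      // The wrapping helper is chosen by what the token resolved to: a type token yields a
                      // RuntimeTypeHandle, a method token a (stub) RuntimeMethodHandle, and a field token a
                      // (stub) RuntimeFieldHandle.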
14626                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
14627                 assert(resolvedToken.hClass != nullptr);
14628
14629                 if (resolvedToken.hMethod != nullptr)
14630                 {
14631                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
14632                 }
14633                 else if (resolvedToken.hField != nullptr)
14634                 {
14635                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
14636                 }
14637
14638                 GenTreeArgList* helperArgs = gtNewArgList(op1);
14639
14640                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
14641
14642                 // The handle struct is returned in a register
14643                 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14644
14645                 tiRetVal = verMakeTypeInfo(tokenType);
14646                 impPushOnStack(op1, tiRetVal);
14647             }
14648             break;
14649
14650             case CEE_UNBOX:
14651             case CEE_UNBOX_ANY:
14652             {
14653                 /* Get the Class index */
14654                 assertImp(sz == sizeof(unsigned));
14655
14656                 _impResolveToken(CORINFO_TOKENKIND_Class);
14657
14658                 JITDUMP(" %08X", resolvedToken.token);
14659
14660                 BOOL runtimeLookup;
14661                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
14662                 if (op2 == nullptr)
14663                 {
14664                     assert(compDonotInline());
14665                     return;
14666                 }
14667
14668                 // Run this always so we can get access exceptions even with SkipVerification.
14669                 accessAllowedResult =
14670                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14671                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14672
14673                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
14674                 {
14675                     if (tiVerificationNeeded)
14676                     {
14677                         typeInfo tiUnbox = impStackTop().seTypeInfo;
14678                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
14679                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14680                         tiRetVal.NormaliseForStack();
14681                     }
14682                     JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
14683                     op1 = impPopStack().val;
14684                     goto CASTCLASS;
14685                 }
14686
14687                 /* Pop the object and create the unbox helper call */
14688                 /* You might think that for UNBOX_ANY we need to push a different */
14689                 /* (non-byref) type, but here we're making the tiRetVal that is used */
14690                 /* for the intermediate pointer which we then transfer onto the OBJ */
14691                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
14692                 if (tiVerificationNeeded)
14693                 {
14694                     typeInfo tiUnbox = impStackTop().seTypeInfo;
14695                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
14696
14697                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14698                     Verify(tiRetVal.IsValueClass(), "not value class");
14699                     tiRetVal.MakeByRef();
14700
14701                     // We always come from an objref, so this is a safe byref
14702                     tiRetVal.SetIsPermanentHomeByRef();
14703                     tiRetVal.SetIsReadonlyByRef();
14704                 }
14705
14706                 op1 = impPopStack().val;
14707                 assertImp(op1->gtType == TYP_REF);
14708
14709                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
14710                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
14711
14712                 // Check legality and profitability of inline expansion for unboxing.
14713                 const bool canExpandInline    = (helper == CORINFO_HELP_UNBOX);
14714                 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
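                      // Only the ordinary unbox helper has a cheap inline form (a type check plus a pointer
                      // adjustment past the vtable field); the nullable helper returns a struct and is
                      // always called.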
14715
14716                 if (canExpandInline && shouldExpandInline)
14717                 {
14718                     JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
14719                     // we are doing normal unboxing
14720                     // inline the common case of the unbox helper
14721                     // UNBOX(exp) morphs into
14722                     // clone = pop(exp);
14723                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14724                     // push(clone + TARGET_POINTER_SIZE)
14725                     //
14726                     GenTree* cloneOperand;
14727                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14728                                        nullptr DEBUGARG("inline UNBOX clone1"));
14729                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14730
14731                     GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14732
14733                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14734                                        nullptr DEBUGARG("inline UNBOX clone2"));
14735                     op2 = impTokenToHandle(&resolvedToken);
14736                     if (op2 == nullptr)
14737                     { // compDonotInline()
14738                         return;
14739                     }
14740                     args = gtNewArgList(op2, op1);
14741                     op1  = gtNewHelperCallNode(helper, TYP_VOID, args);
14742
14743                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14744                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14745                     condBox->gtFlags |= GTF_RELOP_QMARK;
14746
14747                     // QMARK nodes cannot reside on the evaluation stack. Because there
14748                     // may be other trees on the evaluation stack that side-effect the
14749                     // sources of the UNBOX operation we must spill the stack.
14750
14751                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14752
14753                     // Create the address-expression to reference past the object header
14754                     // to the beginning of the value-type. Today this means adjusting
14755                     // past the base of the object's vtable field, which is pointer sized.
14756
14757                     op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
14758                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14759                 }
14760                 else
14761                 {
14762                     JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
14763                             canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
14764
14765                     // Don't optimize, just call the helper and be done with it
14766                     args = gtNewArgList(op2, op1);
14767                     op1 =
14768                         gtNewHelperCallNode(helper,
14769                                             (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
14770                 }
14771
14772                 assert(((helper == CORINFO_HELP_UNBOX) && (op1->gtType == TYP_BYREF)) || // Unbox helper returns a byref.
14773                        ((helper == CORINFO_HELP_UNBOX_NULLABLE) &&
14774                         varTypeIsStruct(op1))); // UnboxNullable helper returns a struct.
14776
14777                 /*
14778                   ----------------------------------------------------------------------
14779                   | \ helper  |                         |                              |
14780                   |   \       |                         |                              |
14781                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14782                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14783                   | opcode  \ |                         |                              |
14784                   |---------------------------------------------------------------------
14785                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14786                   |           |                         | push the BYREF to this local |
14787                   |---------------------------------------------------------------------
14788                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14789                   |           | the BYREF               | For Linux when the           |
14790                   |           |                         |  struct is returned in two   |
14791                   |           |                         |  registers create a temp     |
14792                   |           |                         |  whose address is passed to  |
14793                   |           |                         |  the unbox_nullable helper.  |
14794                   |---------------------------------------------------------------------
14795                 */
14796
14797                 if (opcode == CEE_UNBOX)
14798                 {
14799                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14800                     {
14801                         // Unbox nullable helper returns a struct type.
14802                         // We need to spill it to a temp so that we can take the address of it.
14803                         // Here we need the unsafe value cls check, since the address of the struct is taken to be
14804                         // used further along and is potentially exploitable.
14805
14806                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14807                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14808
14809                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14810                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14811                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14812
14813                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14814                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14815                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14816                     }
14817
14818                     assert(op1->gtType == TYP_BYREF);
14819                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14820                 }
14821                 else
14822                 {
14823                     assert(opcode == CEE_UNBOX_ANY);
14824
14825                     if (helper == CORINFO_HELP_UNBOX)
14826                     {
14827                         // Normal unbox helper returns a TYP_BYREF.
14828                         impPushOnStack(op1, tiRetVal);
14829                         oper = GT_OBJ;
14830                         goto OBJ;
14831                     }
14832
14833                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14834
14835 #if FEATURE_MULTIREG_RET
14836
14837                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14838                     {
14839                         // Unbox nullable helper returns a TYP_STRUCT.
14840                         // For the multi-reg case we need to spill it to a temp so that
14841                         // we can pass the address to the unbox_nullable jit helper.
14842
14843                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14844                         lvaTable[tmp].lvIsMultiRegArg = true;
14845                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14846
14847                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14848                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14849                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14850
14851                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14852                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14853                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14854
14855                         // In this case the return value of the unbox helper is TYP_BYREF.
14856                         // Make sure the right type is placed on the operand type stack.
14857                         impPushOnStack(op1, tiRetVal);
14858
14859                         // Load the struct.
14860                         oper = GT_OBJ;
14861
14862                         assert(op1->gtType == TYP_BYREF);
14863                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14864
14865                         goto OBJ;
14866                     }
14867                     else
14868
14869 #endif // FEATURE_MULTIREG_RET
14870
14871                     {
14872                         // If the struct is not passable in registers, it has been materialized in the RetBuf.
14873                         assert(op1->gtType == TYP_STRUCT);
14874                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14875                         assert(tiRetVal.IsValueClass());
14876                     }
14877                 }
14878
14879                 impPushOnStack(op1, tiRetVal);
14880             }
14881             break;
14882
14883             case CEE_BOX:
14884             {
14885                 /* Get the Class index */
14886                 assertImp(sz == sizeof(unsigned));
14887
14888                 _impResolveToken(CORINFO_TOKENKIND_Box);
14889
14890                 JITDUMP(" %08X", resolvedToken.token);
14891
14892                 if (tiVerificationNeeded)
14893                 {
14894                     typeInfo tiActual = impStackTop().seTypeInfo;
14895                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14896
14897                     Verify(verIsBoxable(tiBox), "boxable type expected");
14898
14899                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14900                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14901                            "boxed type has unsatisfied class constraints");
14902
14903                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14904
14905                     // Observation: the following code introduces a boxed value class on the stack, but,
14906                     // according to the ECMA spec, one would simply expect: tiRetVal =
14907                     // typeInfo(TI_REF,impGetObjectClass());
14908
14909                     // Push the result back on the stack:
14910                     // even if clsHnd is a value class we want the TI_REF.
14911                     // We call back to the EE to find out what type we should push (for Nullable<T> we push T).
14912                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14913                 }
14914
14915                 accessAllowedResult =
14916                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14917                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14918
14919                 // Note BOX can be used on things that are not value classes, in which
14920                 // case we get a NOP.  However the verifier's view of the type on the
14921                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14922                 if (!eeIsValueClass(resolvedToken.hClass))
14923                 {
14924                     JITDUMP("\n Importing BOX(refClass) as NOP\n");
14925                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14926                     break;
14927                 }
14928
14929                 // Look ahead for unbox.any
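                      // For example (illustrative IL), a generic-code sequence such as:
                      //     box       !T
                      //     unbox.any !T
                      // leaves the stack value unchanged when both tokens resolve to the same type, so the
                      // pair can be imported as a no-op.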
14930                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14931                 {
14932                     CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14933
14934                     impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14935
14936                     // See if the resolved tokens describe types that are equal.
14937                     const TypeCompareState compare =
14938                         info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
14939
14940                     // If so, box/unbox.any is a nop.
14941                     if (compare == TypeCompareState::Must)
14942                     {
14943                         JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
14944                         // Skip the next unbox.any instruction
14945                         sz += sizeof(mdToken) + 1;
14946                         break;
14947                     }
14948                 }
14949
14950                 impImportAndPushBox(&resolvedToken);
14951                 if (compDonotInline())
14952                 {
14953                     return;
14954                 }
14955             }
14956             break;
14957
14958             case CEE_SIZEOF:
14959
14960                 /* Get the Class index */
14961                 assertImp(sz == sizeof(unsigned));
14962
14963                 _impResolveToken(CORINFO_TOKENKIND_Class);
14964
14965                 JITDUMP(" %08X", resolvedToken.token);
14966
14967                 if (tiVerificationNeeded)
14968                 {
14969                     tiRetVal = typeInfo(TI_INT);
14970                 }
14971
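                      // sizeof is resolved entirely at jit time: the class size is queried from the EE and
                      // pushed as an integer constant, so no code is generated for it at runtime.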
14972                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14973                 impPushOnStack(op1, tiRetVal);
14974                 break;
14975
14976             case CEE_CASTCLASS:
14977
14978                 /* Get the Class index */
14979
14980                 assertImp(sz == sizeof(unsigned));
14981
14982                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14983
14984                 JITDUMP(" %08X", resolvedToken.token);
14985
14986                 if (!opts.IsReadyToRun())
14987                 {
14988                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14989                     if (op2 == nullptr)
14990                     { // compDonotInline()
14991                         return;
14992                     }
14993                 }
14994
14995                 if (tiVerificationNeeded)
14996                 {
14997                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14998                     // box it
14999                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
15000                 }
15001
15002                 accessAllowedResult =
15003                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15004                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15005
15006                 op1 = impPopStack().val;
15007
15008             /* Pop the address and create the 'checked cast' helper call */
15009
15010             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
15011             // and op2 to contain code that creates the type handle corresponding to typeRef
15012             CASTCLASS:
15013             {
15014                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
15015
15016                 if (optTree != nullptr)
15017                 {
15018                     impPushOnStack(optTree, tiRetVal);
15019                 }
15020                 else
15021                 {
15022
15023 #ifdef FEATURE_READYTORUN_COMPILER
15024                     if (opts.IsReadyToRun())
15025                     {
15026                         GenTreeCall* opLookup =
15027                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
15028                                                       gtNewArgList(op1));
15029                         usingReadyToRunHelper = (opLookup != nullptr);
15030                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
15031
15032                         if (!usingReadyToRunHelper)
15033                         {
15034                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
15035                             // and the chkcastany call with a single call to a dynamic R2R cell that will:
15036                             //      1) Load the context
15037                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
15038                             //      stub
15039                             //      3) Check the object on the stack for the type-cast
15040                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15041
15042                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15043                             if (op2 == nullptr)
15044                             { // compDonotInline()
15045                                 return;
15046                             }
15047                         }
15048                     }
15049
15050                     if (!usingReadyToRunHelper)
15051 #endif
15052                     {
15053                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
15054                     }
15055                     if (compDonotInline())
15056                     {
15057                         return;
15058                     }
15059
15060                     /* Push the result back on the stack */
15061                     impPushOnStack(op1, tiRetVal);
15062                 }
15063             }
15064             break;
15065
15066             case CEE_THROW:
15067
15068                 if (compIsForInlining())
15069                 {
15070                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15071                     // TODO: Will this be too strict, given that we will inline many basic blocks?
15072                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15073
15074                     /* Do we have just the exception on the stack? */
15075
15076                     if (verCurrentState.esStackDepth != 1)
15077                     {
15078                         /* if not, just don't inline the method */
15079
15080                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
15081                         return;
15082                     }
15083                 }
15084
15085                 if (tiVerificationNeeded)
15086                 {
15087                     tiRetVal = impStackTop().seTypeInfo;
15088                     Verify(tiRetVal.IsObjRef(), "object ref expected");
15089                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15090                     {
15091                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
15092                     }
15093                 }
15094
15095                 block->bbSetRunRarely(); // any block with a throw is rare
15096                 /* Pop the exception object and create the 'throw' helper call */
15097
15098                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
15099
15100             EVAL_APPEND:
15101                 if (verCurrentState.esStackDepth > 0)
15102                 {
15103                     impEvalSideEffects();
15104                 }
15105
15106                 assert(verCurrentState.esStackDepth == 0);
15107
15108                 goto APPEND;
15109
15110             case CEE_RETHROW:
15111
15112                 assert(!compIsForInlining());
15113
15114                 if (info.compXcptnsCount == 0)
15115                 {
15116                     BADCODE("rethrow outside catch");
15117                 }
15118
15119                 if (tiVerificationNeeded)
15120                 {
15121                     Verify(block->hasHndIndex(), "rethrow outside catch");
15122                     if (block->hasHndIndex())
15123                     {
15124                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
15125                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
15126                         if (HBtab->HasFilter())
15127                         {
15128                             // We had better be in the handler clause part, not the filter part
15129                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
15130                                    "rethrow in filter");
15131                         }
15132                     }
15133                 }
15134
15135                 /* Create the 'rethrow' helper call */
15136
15137                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
15138
15139                 goto EVAL_APPEND;
15140
15141             case CEE_INITOBJ:
15142
15143                 assertImp(sz == sizeof(unsigned));
15144
15145                 _impResolveToken(CORINFO_TOKENKIND_Class);
15146
15147                 JITDUMP(" %08X", resolvedToken.token);
15148
15149                 if (tiVerificationNeeded)
15150                 {
15151                     typeInfo tiTo    = impStackTop().seTypeInfo;
15152                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15153
15154                     Verify(tiTo.IsByRef(), "byref expected");
15155                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15156
15157                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15158                            "type operand incompatible with type of address");
15159                 }
15160
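                      // initobj is imported as a block store of zeros over the value type's size, roughly
                      // equivalent to an initblk with a jit-time constant size.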
15161                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
15162                 op2  = gtNewIconNode(0);                                     // Value
15163                 op1  = impPopStack().val;                                    // Dest
15164                 op1  = gtNewBlockVal(op1, size);
15165                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15166                 goto SPILL_APPEND;
15167
15168             case CEE_INITBLK:
15169
15170                 if (tiVerificationNeeded)
15171                 {
15172                     Verify(false, "bad opcode");
15173                 }
15174
15175                 op3 = impPopStack().val; // Size
15176                 op2 = impPopStack().val; // Value
15177                 op1 = impPopStack().val; // Dest
15178
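                      // When the size is a jit-time constant we can build a fixed-size GT_BLK node;
                      // otherwise fall back to GT_DYN_BLK, which carries the size as a separate tree.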
15179                 if (op3->IsCnsIntOrI())
15180                 {
15181                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15182                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15183                 }
15184                 else
15185                 {
15186                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15187                     size = 0;
15188                 }
15189                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15190
15191                 goto SPILL_APPEND;
15192
15193             case CEE_CPBLK:
15194
15195                 if (tiVerificationNeeded)
15196                 {
15197                     Verify(false, "bad opcode");
15198                 }
15199                 op3 = impPopStack().val; // Size
15200                 op2 = impPopStack().val; // Src
15201                 op1 = impPopStack().val; // Dest
15202
15203                 if (op3->IsCnsIntOrI())
15204                 {
15205                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15206                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15207                 }
15208                 else
15209                 {
15210                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15211                     size = 0;
15212                 }
15213                 if (op2->OperGet() == GT_ADDR)
15214                 {
15215                     op2 = op2->gtOp.gtOp1;
15216                 }
15217                 else
15218                 {
15219                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
15220                 }
15221
15222                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
15223                 goto SPILL_APPEND;
15224
15225             case CEE_CPOBJ:
15226
15227                 assertImp(sz == sizeof(unsigned));
15228
15229                 _impResolveToken(CORINFO_TOKENKIND_Class);
15230
15231                 JITDUMP(" %08X", resolvedToken.token);
15232
15233                 if (tiVerificationNeeded)
15234                 {
15235                     typeInfo tiFrom  = impStackTop().seTypeInfo;
15236                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
15237                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15238
15239                     Verify(tiFrom.IsByRef(), "expected byref source");
15240                     Verify(tiTo.IsByRef(), "expected byref destination");
15241
15242                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
15243                            "type of source address incompatible with type operand");
15244                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15245                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15246                            "type operand incompatible with type of destination address");
15247                 }
15248
15249                 if (!eeIsValueClass(resolvedToken.hClass))
15250                 {
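                          // cpobj of a GC ref type degenerates to copying a single object reference:
                          // load the ref through the source address, then fall into the stind.ref path
                          // to store it through the destination address.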
15251                     op1 = impPopStack().val; // address to load from
15252
15253                     impBashVarAddrsToI(op1);
15254
15255                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
15256
15257                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
15258                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
15259
15260                     impPushOnStack(op1, typeInfo());
15261                     opcode = CEE_STIND_REF;
15262                     lclTyp = TYP_REF;
15263                     goto STIND_POST_VERIFY;
15264                 }
15265
15266                 op2 = impPopStack().val; // Src
15267                 op1 = impPopStack().val; // Dest
15268                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
15269                 goto SPILL_APPEND;
15270
15271             case CEE_STOBJ:
15272             {
15273                 assertImp(sz == sizeof(unsigned));
15274
15275                 _impResolveToken(CORINFO_TOKENKIND_Class);
15276
15277                 JITDUMP(" %08X", resolvedToken.token);
15278
15279                 if (eeIsValueClass(resolvedToken.hClass))
15280                 {
15281                     lclTyp = TYP_STRUCT;
15282                 }
15283                 else
15284                 {
15285                     lclTyp = TYP_REF;
15286                 }
15287
15288                 if (tiVerificationNeeded)
15289                 {
15290
15291                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
15292
15293                     // Make sure we have a good looking byref
15294                     Verify(tiPtr.IsByRef(), "pointer not byref");
15295                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
15296                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
15297                     {
15298                         compUnsafeCastUsed = true;
15299                     }
15300
15301                     typeInfo ptrVal = DereferenceByRef(tiPtr);
15302                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
15303
15304                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
15305                     {
15306                         Verify(false, "type of value incompatible with type operand");
15307                         compUnsafeCastUsed = true;
15308                     }
15309
15310                     if (!tiCompatibleWith(argVal, ptrVal, false))
15311                     {
15312                         Verify(false, "type operand incompatible with type of address");
15313                         compUnsafeCastUsed = true;
15314                     }
15315                 }
15316                 else
15317                 {
15318                     compUnsafeCastUsed = true;
15319                 }
15320
15321                 if (lclTyp == TYP_REF)
15322                 {
15323                     opcode = CEE_STIND_REF;
15324                     goto STIND_POST_VERIFY;
15325                 }
15326
15327                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15328                 if (impIsPrimitive(jitTyp))
15329                 {
15330                     lclTyp = JITtype2varType(jitTyp);
15331                     goto STIND_POST_VERIFY;
15332                 }
15333
15334                 op2 = impPopStack().val; // Value
15335                 op1 = impPopStack().val; // Ptr
15336
15337                 assertImp(varTypeIsStruct(op2));
15338
15339                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15340
15341                 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
15342                 {
15343                     op1->gtFlags |= GTF_BLK_UNALIGNED;
15344                 }
15345                 goto SPILL_APPEND;
15346             }
15347
15348             case CEE_MKREFANY:
15349
15350                 assert(!compIsForInlining());
15351
15352                 // Being lazy here. Refanys are tricky in terms of gc tracking.
15353                 // Since mkrefany is uncommon, just don't perform struct promotion in any method that contains it.
15354
15355                 JITDUMP("disabling struct promotion because of mkrefany\n");
15356                 fgNoStructPromotion = true;
15357
15358                 oper = GT_MKREFANY;
15359                 assertImp(sz == sizeof(unsigned));
15360
15361                 _impResolveToken(CORINFO_TOKENKIND_Class);
15362
15363                 JITDUMP(" %08X", resolvedToken.token);
15364
15365                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15366                 if (op2 == nullptr)
15367                 { // compDonotInline()
15368                     return;
15369                 }
15370
15371                 if (tiVerificationNeeded)
15372                 {
15373                     typeInfo tiPtr   = impStackTop().seTypeInfo;
15374                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15375
15376                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
15377                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
15378                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
15379                 }
15380
15381                 accessAllowedResult =
15382                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15383                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15384
15385                 op1 = impPopStack().val;
15386
15387                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
15388                 // But JIT32 allowed it, so we continue to allow it.
15389                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
15390
15391                 // MKREFANY returns a struct.  op2 is the class token.
15392                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
15393
15394                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
15395                 break;
15396
15397             case CEE_LDOBJ:
15398             {
15399                 oper = GT_OBJ;
15400                 assertImp(sz == sizeof(unsigned));
15401
15402                 _impResolveToken(CORINFO_TOKENKIND_Class);
15403
15404                 JITDUMP(" %08X", resolvedToken.token);
15405
15406             OBJ:
15407
15408                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15409
15410                 if (tiVerificationNeeded)
15411                 {
15412                     typeInfo tiPtr = impStackTop().seTypeInfo;
15413
15414                     // Make sure we have a byref
15415                     if (!tiPtr.IsByRef())
15416                     {
15417                         Verify(false, "pointer not byref");
15418                         compUnsafeCastUsed = true;
15419                     }
15420                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
15421
15422                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
15423                     {
15424                         Verify(false, "type of address incompatible with type operand");
15425                         compUnsafeCastUsed = true;
15426                     }
15427                     tiRetVal.NormaliseForStack();
15428                 }
15429                 else
15430                 {
15431                     compUnsafeCastUsed = true;
15432                 }
15433
15434                 if (eeIsValueClass(resolvedToken.hClass))
15435                 {
15436                     lclTyp = TYP_STRUCT;
15437                 }
15438                 else
15439                 {
15440                     lclTyp = TYP_REF;
15441                     opcode = CEE_LDIND_REF;
15442                     goto LDIND_POST_VERIFY;
15443                 }
15444
15445                 op1 = impPopStack().val;
15446
15447                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
15448
15449                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15450                 if (impIsPrimitive(jitTyp))
15451                 {
15452                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
15453
15454                     // Could point anywhere, for example a boxed class static int
15455                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15456                     assertImp(varTypeIsArithmetic(op1->gtType));
15457                 }
15458                 else
15459                 {
15460                     // OBJ returns a struct
15461                     // and an inline argument which is the class token of the loaded obj
15462                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
15463                 }
15464                 op1->gtFlags |= GTF_EXCEPT;
15465
15466                 if (prefixFlags & PREFIX_UNALIGNED)
15467                 {
15468                     op1->gtFlags |= GTF_IND_UNALIGNED;
15469                 }
15470
15471                 impPushOnStack(op1, tiRetVal);
15472                 break;
15473             }
15474
15475             case CEE_LDLEN:
15476                 if (tiVerificationNeeded)
15477                 {
15478                     typeInfo tiArray = impStackTop().seTypeInfo;
15479                     Verify(verIsSDArray(tiArray), "bad array");
15480                     tiRetVal = typeInfo(TI_INT);
15481                 }
15482
15483                 op1 = impPopStack().val;
15484                 if (!opts.MinOpts() && !opts.compDbgCode)
15485                 {
15486                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
15487                     GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
15488
15489                     /* Mark the block as containing a length expression */
15490
15491                     if (op1->gtOper == GT_LCL_VAR)
15492                     {
15493                         block->bbFlags |= BBF_HAS_IDX_LEN;
15494                     }
15495
15496                     op1 = arrLen;
15497                 }
15498                 else
15499                 {
15500                     /* Create the expression "*(array_addr + ArrLenOffs)" */
15501                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15502                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
15503                     op1 = gtNewIndir(TYP_INT, op1);
15504                     op1->gtFlags |= GTF_IND_ARR_LEN;
15505                 }
15506
15507                 /* Push the result back on the stack */
15508                 impPushOnStack(op1, tiRetVal);
15509                 break;
15510
15511             case CEE_BREAK:
15512                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
15513                 goto SPILL_APPEND;
15514
15515             case CEE_NOP:
15516                 if (opts.compDbgCode)
15517                 {
15518                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
15519                     goto SPILL_APPEND;
15520                 }
15521                 break;
15522
15523             /******************************** NYI *******************************/
15524
15525             case 0xCC:
15526                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
15527
15528             case CEE_ILLEGAL:
15529             case CEE_MACRO_END:
15530
15531             default:
15532                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
15533         }
15534
15535         codeAddr += sz;
15536         prevOpcode = opcode;
15537
15538         prefixFlags = 0;
15539     }
15540
15541     return;
15542 #undef _impResolveToken
15543 }
15544 #ifdef _PREFAST_
15545 #pragma warning(pop)
15546 #endif
15547
15548 // Push a local/argument tree on the operand stack
15549 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
15550 {
15551     tiRetVal.NormaliseForStack();
15552
15553     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
15554     {
15555         tiRetVal.SetUninitialisedObjRef();
15556     }
15557
15558     impPushOnStack(op, tiRetVal);
15559 }
15560
15561 // Load a local/argument on the operand stack
15562 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
15563 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
15564 {
15565     var_types lclTyp;
15566
15567     if (lvaTable[lclNum].lvNormalizeOnLoad())
15568     {
15569         lclTyp = lvaGetRealType(lclNum);
15570     }
15571     else
15572     {
15573         lclTyp = lvaGetActualType(lclNum);
15574     }
15575
15576     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
15577 }
15578
15579 // Load an argument on the operand stack
15580 // Shared by the various CEE_LDARG opcodes
15581 // ilArgNum is the argument index as specified in IL.
15582 // It will be mapped to the correct lvaTable index
15583 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
15584 {
15585     Verify(ilArgNum < info.compILargsCount, "bad arg num");
15586
15587     if (compIsForInlining())
15588     {
15589         if (ilArgNum >= info.compArgsCount)
15590         {
15591             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
15592             return;
15593         }
15594
15595         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
15596                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
15597     }
15598     else
15599     {
15600         if (ilArgNum >= info.compArgsCount)
15601         {
15602             BADCODE("Bad IL");
15603         }
15604
15605         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
15606
15607         if (lclNum == info.compThisArg)
15608         {
15609             lclNum = lvaArg0Var;
15610         }
15611
15612         impLoadVar(lclNum, offset);
15613     }
15614 }
15615
15616 // Load a local on the operand stack
15617 // Shared by the various CEE_LDLOC opcodes
15618 // ilLclNum is the local index as specified in IL.
15619 // It will be mapped to the correct lvaTable index
15620 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
15621 {
15622     if (tiVerificationNeeded)
15623     {
15624         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
15625         Verify(info.compInitMem, "initLocals not set");
15626     }
15627
15628     if (compIsForInlining())
15629     {
15630         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15631         {
15632             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
15633             return;
15634         }
15635
15636         // Get the local type
15637         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
15638
15639         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
15640
15641         /* Have we allocated a temp for this local? */
15642
15643         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
15644
15645         // All vars of inlined methods should be !lvNormalizeOnLoad()
15646
15647         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
15648         lclTyp = genActualType(lclTyp);
15649
15650         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
15651     }
15652     else
15653     {
15654         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15655         {
15656             BADCODE("Bad IL");
15657         }
15658
15659         unsigned lclNum = info.compArgsCount + ilLclNum;
15660
15661         impLoadVar(lclNum, offset);
15662     }
15663 }
15664
15665 #ifdef _TARGET_ARM_
15666 /**************************************************************************************
15667  *
15668  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
15669  *  dst struct, because struct promotion will turn it into a float/double variable while
15670  *  the rhs will be an int/long variable. We do not generate code for assigning an int into
15671  *  a float, yet nothing would stop struct promotion from producing such a tree. The tree
15672  *  would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
15673  *
15674  *  tmpNum - the lcl dst variable num that is a struct.
15675  *  src    - the src tree assigned to the dest that is a struct/int (for a varargs call).
15676  *  hClass - the type handle for the struct variable.
15677  *
15678  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
15679  *        however, we could do a codegen of transferring from int to float registers
15680  *        (transfer, not a cast.)
15681  *
15682  */
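// Illustrative sketch (editorial; hypothetical names, not from any real test) of the situation the
// comment above describes: a varargs call whose HFA result is assigned to a promotable struct local.
//
//   struct Hfa2 { float a; float b; };      // an HFA composed of two floats
//   Hfa2 local = GetHfa(__arglist(...));    // src is a varargs GT_CALL, dst is a struct lcl
//
// The varargs ABI hands the call result back in integer registers, so promoting 'local' into float
// fields would create float = int assignments with no cast. This routine keeps the local as a struct
// (lvIsMultiRegRet) instead.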
15683 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
15684 {
15685     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15686     {
15687         int       hfaSlots = GetHfaCount(hClass);
15688         var_types hfaType  = GetHfaType(hClass);
15689
15690         // If we have varargs, the importer morphs the method's return type to "int" irrespective of its
15691         // original struct/float type, because the ABI returns the value in integer registers.
15692         // We don't want struct promotion to rewrite an expression like
15693         //   lclFld_int = callvar_int();   into   lclFld_float = callvar_int();
15694         // That would assign an int to a float without a cast. Prevent the promotion.
15695         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15696             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15697         {
15698             // Make sure this struct type stays as struct so we can receive the call in a struct.
15699             lvaTable[tmpNum].lvIsMultiRegRet = true;
15700         }
15701     }
15702 }
15703 #endif // _TARGET_ARM_
15704
15705 #if FEATURE_MULTIREG_RET
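// Usage sketch (editorial): importer paths that see a multi-reg struct value typically do
//   op = impAssignMultiRegTypeToVar(op, retClsHnd);
// so the value is spilled into a single temp that is marked lvIsMultiRegRet (not field-promoted)
// and a GT_LCL_VAR for that temp is what flows onward.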
15706 GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
15707 {
15708     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
15709     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
15710     GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
15711
15712     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15713     ret->gtFlags |= GTF_DONT_CSE;
15714
15715     assert(IsMultiRegReturnedType(hClass));
15716
15717     // Mark the var so that fields are not promoted and stay together.
15718     lvaTable[tmpNum].lvIsMultiRegRet = true;
15719
15720     return ret;
15721 }
15722 #endif // FEATURE_MULTIREG_RET
15723
15724 // Do the import for a return instruction.
15725 // Returns false if inlining was aborted.
15726 // 'opcode' can be a ret or a call opcode in the case of a tail.call.
15727 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15728 {
15729     if (tiVerificationNeeded)
15730     {
15731         verVerifyThisPtrInitialised();
15732
15733         unsigned expectedStack = 0;
15734         if (info.compRetType != TYP_VOID)
15735         {
15736             typeInfo tiVal = impStackTop().seTypeInfo;
15737             typeInfo tiDeclared =
15738                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15739
15740             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15741
15742             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15743             expectedStack = 1;
15744         }
15745         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15746     }
15747
15748 #ifdef DEBUG
15749     // If we are importing an inlinee and have GC ref locals we always
15750     // need to have a spill temp for the return value.  This temp
15751     // should have been set up in advance, over in fgFindBasicBlocks.
15752     if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
15753     {
15754         assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
15755     }
15756 #endif // DEBUG
15757
15758     GenTree*             op2       = nullptr;
15759     GenTree*             op1       = nullptr;
15760     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15761
15762     if (info.compRetType != TYP_VOID)
15763     {
15764         StackEntry se = impPopStack();
15765         retClsHnd     = se.seTypeInfo.GetClassHandle();
15766         op2           = se.val;
15767
15768         if (!compIsForInlining())
15769         {
15770             impBashVarAddrsToI(op2);
15771             op2 = impImplicitIorI4Cast(op2, info.compRetType);
15772             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15773             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15774                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15775                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15776                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15777                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15778
15779 #ifdef DEBUG
15780             if (opts.compGcChecks && info.compRetType == TYP_REF)
15781             {
15782                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
15783                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15784                 // one-return BB.
15785
15786                 assert(op2->gtType == TYP_REF);
15787
15788                 // confirm that the argument is a GC pointer (for debugging (GC stress))
15789                 GenTreeArgList* args = gtNewArgList(op2);
15790                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
15791
15792                 if (verbose)
15793                 {
15794                     printf("\ncompGcChecks tree:\n");
15795                     gtDispTree(op2);
15796                 }
15797             }
15798 #endif
15799         }
15800         else
15801         {
15802             // inlinee's stack should be empty now.
15803             assert(verCurrentState.esStackDepth == 0);
15804
15805 #ifdef DEBUG
15806             if (verbose)
15807             {
15808                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15809                 gtDispTree(op2);
15810             }
15811 #endif
15812
15813             // Make sure the type matches the original call.
15814
15815             var_types returnType       = genActualType(op2->gtType);
15816             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15817             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15818             {
15819                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15820             }
15821
15822             if (returnType != originalCallType)
15823             {
15824                 JITDUMP("Return type mismatch, have %s, needed %s\n", varTypeName(returnType),
15825                         varTypeName(originalCallType));
15826                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15827                 return false;
15828             }
15829
15830             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15831             // expression. At this point, retExpr could already be set if there are multiple
15832             // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
15833             // the other blocks already set it. If there is only a single return block,
15834             // retExpr shouldn't be set. However, this is not true if we reimport a block
15835             // with a return. In that case, retExpr will be set, then the block will be
15836             // reimported, but retExpr won't get cleared as part of setting the block to
15837             // be reimported. The reimported retExpr value should be the same, so even if
15838             // we don't unconditionally overwrite it, it shouldn't matter.
15839             if (info.compRetNativeType != TYP_STRUCT)
15840             {
15841                 // compRetNativeType is not TYP_STRUCT.
15842                 // This implies it could be either a scalar type or SIMD vector type or
15843                 // a struct type that can be normalized to a scalar type.
15844
15845                 if (varTypeIsStruct(info.compRetType))
15846                 {
15847                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15848                     // adjust the type away from struct to integral
15849                     // and no normalizing
15850                     op2 = impFixupStructReturnType(op2, retClsHnd);
15851                 }
15852                 else
15853                 {
15854                     // Do we have to normalize?
15855                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15856                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15857                         fgCastNeeded(op2, fncRealRetType))
15858                     {
15859                         // Small-typed return values are normalized by the callee
15860                         op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
15861                     }
15862                 }
15863
15864                 if (fgNeedReturnSpillTemp())
15865                 {
15866                     assert(info.compRetNativeType != TYP_VOID &&
15867                            (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
15868
15869                     // If this method returns a ref type, track the actual types seen
15870                     // in the returns.
15871                     if (info.compRetType == TYP_REF)
15872                     {
15873                         bool                 isExact      = false;
15874                         bool                 isNonNull    = false;
15875                         CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
15876
15877                         if (impInlineInfo->retExpr == nullptr)
15878                         {
15879                             // This is the first return, so best known type is the type
15880                             // of this return value.
15881                             impInlineInfo->retExprClassHnd        = returnClsHnd;
15882                             impInlineInfo->retExprClassHndIsExact = isExact;
15883                         }
15884                         else if (impInlineInfo->retExprClassHnd != returnClsHnd)
15885                         {
15886                             // This return site type differs from earlier seen sites,
15887                             // so reset the info and we'll fall back to using the method's
15888                             // declared return type for the return spill temp.
15889                             impInlineInfo->retExprClassHnd        = nullptr;
15890                             impInlineInfo->retExprClassHndIsExact = false;
15891                         }
15892                     }
15893
15894                     // This is a bit of a workaround...
15895                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15896                     // not a struct (for example, the struct is composed of exactly one int, and the native
15897                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15898                     // fgNeedReturnSpillTemp() == true, and is the index of a local var that is set
15899                     // fgNeedReturnSpillTemp() == true, and lvaInlineeReturnSpillTemp is the index of a local var
15900                     // a call, then we have a problem. The situation is like this (from a failed test case):
15901                     //
15902                     // inliner:
15903                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15904                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15905                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15906                     //
15907                     // inlinee:
15908                     //      ...
15909                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15910                     //      ret
15911                     //      ...
15912                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15913                     //      object&, class System.Func`1<!!0>)
15914                     //      ret
15915                     //
15916                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15917                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15918                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15919                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15920                     //
15921                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15922                     // native return type, which is what it will be set to eventually. We generate the
15923                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15924                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15925
15926                     bool restoreType = false;
15927                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15928                     {
15929                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15930                         op2->gtType = info.compRetNativeType;
15931                         restoreType = true;
15932                     }
15933
15934                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15935                                      (unsigned)CHECK_SPILL_ALL);
15936
15937                     GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15938
15939                     if (restoreType)
15940                     {
15941                         op2->gtType = TYP_STRUCT; // restore it to what it was
15942                     }
15943
15944                     op2 = tmpOp2;
15945
15946 #ifdef DEBUG
15947                     if (impInlineInfo->retExpr)
15948                     {
15949                         // Some other block(s) have seen the CEE_RET first.
15950                         // Better they spilled to the same temp.
15951                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15952                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15953                     }
15954 #endif
15955                 }
15956
15957 #ifdef DEBUG
15958                 if (verbose)
15959                 {
15960                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15961                     gtDispTree(op2);
15962                 }
15963 #endif
15964
15965                 // Report the return expression
15966                 impInlineInfo->retExpr = op2;
15967             }
15968             else
15969             {
15970                 // compRetNativeType is TYP_STRUCT.
15971                 // This implies that the struct is returned via a RetBuf arg or as a multi-reg struct return.
15972
15973                 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
15974
15975                 // Assign the inlinee return into a spill temp.
15976                 // spill temp only exists if there are multiple return points
15977                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15978                 {
15979                     // in this case we have to insert multiple struct copies to the temp
15980                     // and the retexpr is just the temp.
15981                     assert(info.compRetNativeType != TYP_VOID);
15982                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
15983
15984                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15985                                      (unsigned)CHECK_SPILL_ALL);
15986                 }
15987
15988 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15989 #if defined(_TARGET_ARM_)
15990                 // TODO-ARM64-NYI: HFA
15991                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15992                 // next ifdefs could be refactored into a single method with the ifdef inside.
15993                 if (IsHfa(retClsHnd))
15994                 {
15995 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15996 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15997                 ReturnTypeDesc retTypeDesc;
15998                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15999                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16000
16001                 if (retRegCount != 0)
16002                 {
16003                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
16004                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
16005                     // max allowed.)
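                    // For illustration (editorial): a struct such as { long a; long b; } occupies two
                    // eightbytes and is returned in two registers, whereas a struct such as { int a; }
                    // would have been normalized to a plain int return and never reaches this path.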
16006                     assert(retRegCount == MAX_RET_REG_COUNT);
16007                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
16008                     CLANG_FORMAT_COMMENT_ANCHOR;
16009 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16010
16011                     if (fgNeedReturnSpillTemp())
16012                     {
16013                         if (!impInlineInfo->retExpr)
16014                         {
16015 #if defined(_TARGET_ARM_)
16016                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
16017 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16018                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16019                             impInlineInfo->retExpr =
16020                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16021 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16022                         }
16023                     }
16024                     else
16025                     {
16026                         impInlineInfo->retExpr = op2;
16027                     }
16028                 }
16029                 else
16030 #elif defined(_TARGET_ARM64_)
16031                 ReturnTypeDesc retTypeDesc;
16032                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16033                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16034
16035                 if (retRegCount != 0)
16036                 {
16037                     assert(!iciCall->HasRetBufArg());
16038                     assert(retRegCount >= 2);
16039                     if (fgNeedReturnSpillTemp())
16040                     {
16041                         if (!impInlineInfo->retExpr)
16042                         {
16043                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16044                             impInlineInfo->retExpr =
16045                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16046                         }
16047                     }
16048                     else
16049                     {
16050                         impInlineInfo->retExpr = op2;
16051                     }
16052                 }
16053                 else
16054 #endif // defined(_TARGET_ARM64_)
16055                 {
16056                     assert(iciCall->HasRetBufArg());
16057                     GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
16058                     // spill temp only exists if there are multiple return points
16059                     if (fgNeedReturnSpillTemp())
16060                     {
16061                         // if this is the first return we have seen, set the retExpr
16062                         if (!impInlineInfo->retExpr)
16063                         {
16064                             impInlineInfo->retExpr =
16065                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
16066                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
16067                         }
16068                     }
16069                     else
16070                     {
16071                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16072                     }
16073                 }
16074             }
16075         }
16076     }
16077
16078     if (compIsForInlining())
16079     {
16080         return true;
16081     }
16082
16083     if (info.compRetType == TYP_VOID)
16084     {
16085         // return void
16086         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16087     }
16088     else if (info.compRetBuffArg != BAD_VAR_NUM)
16089     {
16090         // Assign value to return buff (first param)
16091         GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
16092
16093         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16094         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16095
16096         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
16097         CLANG_FORMAT_COMMENT_ANCHOR;
16098
16099 #if defined(_TARGET_AMD64_)
16100
16101         // The x64 (System V and Win64) calling convention requires us to
16102         // return the implicit return buffer explicitly (in RAX).
16103         // Change the return type to be BYREF.
16104         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16105 #else  // !defined(_TARGET_AMD64_)
16106         // On non-AMD64 targets the profiler hook requires us to return the implicit RetBuf explicitly (in RAX).
16107         // In that case the return value of the function is changed to BYREF.
16108         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
16109         if (compIsProfilerHookNeeded())
16110         {
16111             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16112         }
16113         else
16114         {
16115             // return void
16116             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16117         }
16118 #endif // !defined(_TARGET_AMD64_)
16119     }
16120     else if (varTypeIsStruct(info.compRetType))
16121     {
16122 #if !FEATURE_MULTIREG_RET
16123         // For both ARM architectures the HFA native types are maintained as structs.
16124         // On System V AMD64, multireg struct returns are also left as structs.
16125         noway_assert(info.compRetNativeType != TYP_STRUCT);
16126 #endif
16127         op2 = impFixupStructReturnType(op2, retClsHnd);
16128         // return op2
16129         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
16130     }
16131     else
16132     {
16133         // return op2
16134         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
16135     }
16136
16137     // We must have imported a tailcall and jumped to RET
16138     if (prefixFlags & PREFIX_TAILCALL)
16139     {
16140 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
16141         // Jit64 compat:
16142         // This cannot be asserted on Amd64 since we permit the following IL pattern:
16143         //      tail.call
16144         //      pop
16145         //      ret
16146         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
16147 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
16148
16149         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
16150
16151         // impImportCall() would have already appended TYP_VOID calls
16152         if (info.compRetType == TYP_VOID)
16153         {
16154             return true;
16155         }
16156     }
16157
16158     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16159 #ifdef DEBUG
16160     // Remember at which BC offset the tree was finished
16161     impNoteLastILoffs();
16162 #endif
16163     return true;
16164 }
16165
16166 /*****************************************************************************
16167  *  Mark the block as unimported.
16168  *  Note that the caller is responsible for calling impImportBlockPending(),
16169  *  with the appropriate stack-state
16170  */
16171
16172 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
16173 {
16174 #ifdef DEBUG
16175     if (verbose && (block->bbFlags & BBF_IMPORTED))
16176     {
16177         printf("\nBB%02u will be reimported\n", block->bbNum);
16178     }
16179 #endif
16180
16181     block->bbFlags &= ~BBF_IMPORTED;
16182 }
16183
16184 /*****************************************************************************
16185  *  Mark the successors of the given block as unimported.
16186  *  Note that the caller is responsible for calling impImportBlockPending()
16187  *  for all the successors, with the appropriate stack-state.
16188  */
16189
16190 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
16191 {
16192     const unsigned numSuccs = block->NumSucc();
16193     for (unsigned i = 0; i < numSuccs; i++)
16194     {
16195         impReimportMarkBlock(block->GetSucc(i));
16196     }
16197 }
16198
16199 /*****************************************************************************
16200  *
16201  *  Filter wrapper that handles only the passed-in exception code
16202  *  (verification exceptions) and lets all other exceptions continue the search.
16203  */
16204
16205 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
16206 {
16207     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
16208     {
16209         return EXCEPTION_EXECUTE_HANDLER;
16210     }
16211
16212     return EXCEPTION_CONTINUE_SEARCH;
16213 }
16214
16215 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
16216 {
16217     assert(block->hasTryIndex());
16218     assert(!compIsForInlining());
16219
16220     unsigned  tryIndex = block->getTryIndex();
16221     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
16222
16223     if (isTryStart)
16224     {
16225         assert(block->bbFlags & BBF_TRY_BEG);
16226
16227         // The Stack must be empty
16228         //
16229         if (block->bbStkDepth != 0)
16230         {
16231             BADCODE("Evaluation stack must be empty on entry into a try block");
16232         }
16233     }
16234
16235     // Save the stack contents, we'll need to restore it later
16236     //
16237     SavedStack blockState;
16238     impSaveStackState(&blockState, false);
16239
16240     while (HBtab != nullptr)
16241     {
16242         if (isTryStart)
16243         {
16244             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
16245             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
16246             //
16247             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
16248             {
16249                 // We  trigger an invalid program exception here unless we have a try/fault region.
16250                 //
16251                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
16252                 {
16253                     BADCODE(
16254                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
16255                 }
16256                 else
16257                 {
16258                     // Allow a try/fault region to proceed.
16259                     assert(HBtab->HasFaultHandler());
16260                 }
16261             }
16262
16263             /* Recursively process the handler block */
16264             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
16265
16266             //  Construct the proper verification stack state
16267             //   either empty or one that contains just
16268             //   the Exception Object that we are dealing with
16269             //
16270             verCurrentState.esStackDepth = 0;
16271
16272             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
16273             {
16274                 CORINFO_CLASS_HANDLE clsHnd;
16275
16276                 if (HBtab->HasFilter())
16277                 {
16278                     clsHnd = impGetObjectClass();
16279                 }
16280                 else
16281                 {
16282                     CORINFO_RESOLVED_TOKEN resolvedToken;
16283
16284                     resolvedToken.tokenContext = impTokenLookupContextHandle;
16285                     resolvedToken.tokenScope   = info.compScopeHnd;
16286                     resolvedToken.token        = HBtab->ebdTyp;
16287                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
16288                     info.compCompHnd->resolveToken(&resolvedToken);
16289
16290                     clsHnd = resolvedToken.hClass;
16291                 }
16292
16293                 // push the catch arg on the stack, spill to a temp if necessary
16294                 // Note: can update HBtab->ebdHndBeg!
16295                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
16296             }
16297
16298             // Queue up the handler for importing
16299             //
16300             impImportBlockPending(hndBegBB);
16301
16302             if (HBtab->HasFilter())
16303             {
16304                 /* @VERIFICATION : Ideally the end of filter state should get
16305                    propagated to the catch handler; this is an incompleteness,
16306                    but is not a security/compliance issue, since the only
16307                    interesting state is the 'thisInit' state.
16308                    */
16309
16310                 verCurrentState.esStackDepth = 0;
16311
16312                 BasicBlock* filterBB = HBtab->ebdFilter;
16313
16314                 // push the catch arg on the stack, spill to a temp if necessary
16315                 // Note: can update HBtab->ebdFilter!
16316                 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
16317                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
16318
16319                 impImportBlockPending(filterBB);
16320             }
16321         }
16322         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
16323         {
16324             /* Recursively process the handler block */
16325
16326             verCurrentState.esStackDepth = 0;
16327
16328             // Queue up the fault handler for importing
16329             //
16330             impImportBlockPending(HBtab->ebdHndBeg);
16331         }
16332
16333         // Now process our enclosing try index (if any)
16334         //
16335         tryIndex = HBtab->ebdEnclosingTryIndex;
16336         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
16337         {
16338             HBtab = nullptr;
16339         }
16340         else
16341         {
16342             HBtab = ehGetDsc(tryIndex);
16343         }
16344     }
16345
16346     // Restore the stack contents
16347     impRestoreStackState(&blockState);
16348 }
16349
16350 //***************************************************************
16351 // Import the instructions for the given basic block.  Perform
16352 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
16353 // time, or whose verification pre-state is changed.
16354
16355 #ifdef _PREFAST_
16356 #pragma warning(push)
16357 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
16358 #endif
16359 void Compiler::impImportBlock(BasicBlock* block)
16360 {
16361     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
16362     // handle them specially. In particular, there is no IL to import for them, but we do need
16363     // to mark them as imported and put their successors on the pending import list.
16364     if (block->bbFlags & BBF_INTERNAL)
16365     {
16366         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
16367         block->bbFlags |= BBF_IMPORTED;
16368
16369         const unsigned numSuccs = block->NumSucc();
16370         for (unsigned i = 0; i < numSuccs; i++)
16371         {
16372             impImportBlockPending(block->GetSucc(i));
16373         }
16374
16375         return;
16376     }
16377
16378     bool markImport;
16379
16380     assert(block);
16381
16382     /* Make the block globally available */
16383
16384     compCurBB = block;
16385
16386 #ifdef DEBUG
16387     /* Initialize the debug variables */
16388     impCurOpcName = "unknown";
16389     impCurOpcOffs = block->bbCodeOffs;
16390 #endif
16391
16392     /* Set the current stack state to the merged result */
16393     verResetCurrentState(block, &verCurrentState);
16394
16395     /* Now walk the code and import the IL into GenTrees */
16396
16397     struct FilterVerificationExceptionsParam
16398     {
16399         Compiler*   pThis;
16400         BasicBlock* block;
16401     };
16402     FilterVerificationExceptionsParam param;
16403
16404     param.pThis = this;
16405     param.block = block;
16406
16407     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
16408     {
16409         /* @VERIFICATION : For now, the only state propagation from try
16410            to its handler is the "thisInit" state (the stack is empty at the start of a try).
16411            In general, for state that we track in verification, we need to
16412            model the possibility that an exception might happen at any IL
16413            instruction, so we really need to merge all states that obtain
16414            between IL instructions in a try block into the start states of
16415            all handlers.
16416
16417            However we do not allow the 'this' pointer to be uninitialized when
16418            entering most kinds of try regions (only try/fault are allowed to have
16419            an uninitialized this pointer on entry to the try)
16420
16421            Fortunately, the stack is thrown away when an exception
16422            leads to a handler, so we don't have to worry about that.
16423            We DO, however, have to worry about the "thisInit" state.
16424            But only for the try/fault case.
16425
16426            The only allowed transition is from TIS_Uninit to TIS_Init.
16427
16428            So for a try/fault region for the fault handler block
16429            we will merge the start state of the try begin
16430            and the post-state of each block that is part of this try region
16431         */
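        // Editorial sketch (hypothetical IL, not from a real test) of the try/fault case described above:
        // a .ctor whose 'this' pointer is still uninitialized when the try region is entered.
        //
        //   .method instance void .ctor() {
        //     .try {
        //       ldarg.0                            // 'this' is TIS_Uninit here (allowed only for try/fault)
        //       call instance void Base::.ctor()   // thisInit transitions TIS_Uninit -> TIS_Init
        //       ...
        //     } fault { ... }                      // the fault handler is verified with the merged state
        //   }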
16432
16433         // merge the start state of the try begin
16434         //
16435         if (pParam->block->bbFlags & BBF_TRY_BEG)
16436         {
16437             pParam->pThis->impVerifyEHBlock(pParam->block, true);
16438         }
16439
16440         pParam->pThis->impImportBlockCode(pParam->block);
16441
16442         // As discussed above:
16443         // merge the post-state of each block that is part of this try region
16444         //
16445         if (pParam->block->hasTryIndex())
16446         {
16447             pParam->pThis->impVerifyEHBlock(pParam->block, false);
16448         }
16449     }
16450     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
16451     {
16452         verHandleVerificationFailure(block DEBUGARG(false));
16453     }
16454     PAL_ENDTRY
16455
16456     if (compDonotInline())
16457     {
16458         return;
16459     }
16460
16461     assert(!compDonotInline());
16462
16463     markImport = false;
16464
16465 SPILLSTACK:
16466
16467     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
16468     bool        reimportSpillClique = false;
16469     BasicBlock* tgtBlock            = nullptr;
16470
16471     /* If the stack is non-empty, we might have to spill its contents */
16472
16473     if (verCurrentState.esStackDepth != 0)
16474     {
16475         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
16476                                   // on the stack, its lifetime is hard to determine, simply
16477                                   // don't reuse such temps.
16478
16479         GenTree* addStmt = nullptr;
16480
16481         /* Do the successors of 'block' have any other predecessors ?
16482            We do not want to do some of the optimizations related to multiRef
16483            if we can reimport blocks */
16484
16485         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
16486
16487         switch (block->bbJumpKind)
16488         {
16489             case BBJ_COND:
16490
16491                 /* Temporarily remove the 'jtrue' from the end of the tree list */
16492
16493                 assert(impTreeLast);
16494                 assert(impTreeLast->gtOper == GT_STMT);
16495                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
16496
16497                 addStmt     = impTreeLast;
16498                 impTreeLast = impTreeLast->gtPrev;
16499
16500                 /* Note if the next block has more than one ancestor */
16501
16502                 multRef |= block->bbNext->bbRefs;
16503
16504                 /* Does the next block have temps assigned? */
16505
16506                 baseTmp  = block->bbNext->bbStkTempsIn;
16507                 tgtBlock = block->bbNext;
16508
16509                 if (baseTmp != NO_BASE_TMP)
16510                 {
16511                     break;
16512                 }
16513
16514                 /* Try the target of the jump then */
16515
16516                 multRef |= block->bbJumpDest->bbRefs;
16517                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16518                 tgtBlock = block->bbJumpDest;
16519                 break;
16520
16521             case BBJ_ALWAYS:
16522                 multRef |= block->bbJumpDest->bbRefs;
16523                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16524                 tgtBlock = block->bbJumpDest;
16525                 break;
16526
16527             case BBJ_NONE:
16528                 multRef |= block->bbNext->bbRefs;
16529                 baseTmp  = block->bbNext->bbStkTempsIn;
16530                 tgtBlock = block->bbNext;
16531                 break;
16532
16533             case BBJ_SWITCH:
16534
16535                 BasicBlock** jmpTab;
16536                 unsigned     jmpCnt;
16537
16538                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
16539
16540                 assert(impTreeLast);
16541                 assert(impTreeLast->gtOper == GT_STMT);
16542                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
16543
16544                 addStmt     = impTreeLast;
16545                 impTreeLast = impTreeLast->gtPrev;
16546
16547                 jmpCnt = block->bbJumpSwt->bbsCount;
16548                 jmpTab = block->bbJumpSwt->bbsDstTab;
16549
16550                 do
16551                 {
16552                     tgtBlock = (*jmpTab);
16553
16554                     multRef |= tgtBlock->bbRefs;
16555
16556                     // Thanks to spill cliques, we should have assigned all or none
16557                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
16558                     baseTmp = tgtBlock->bbStkTempsIn;
16559                     if (multRef > 1)
16560                     {
16561                         break;
16562                     }
16563                 } while (++jmpTab, --jmpCnt);
16564
16565                 break;
16566
16567             case BBJ_CALLFINALLY:
16568             case BBJ_EHCATCHRET:
16569             case BBJ_RETURN:
16570             case BBJ_EHFINALLYRET:
16571             case BBJ_EHFILTERRET:
16572             case BBJ_THROW:
16573                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
16574                 break;
16575
16576             default:
16577                 noway_assert(!"Unexpected bbJumpKind");
16578                 break;
16579         }
16580
16581         assert(multRef >= 1);
16582
16583         /* Do we have a base temp number? */
16584
16585         bool newTemps = (baseTmp == NO_BASE_TMP);
16586
16587         if (newTemps)
16588         {
16589             /* Grab enough temps for the whole stack */
16590             baseTmp = impGetSpillTmpBase(block);
16591         }
16592
16593         /* Spill all stack entries into temps */
16594         unsigned level, tempNum;
16595
16596         JITDUMP("\nSpilling stack entries into temps\n");
16597         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
16598         {
16599             GenTree* tree = verCurrentState.esStack[level].val;
16600
16601             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
16602                the other. This should merge to a byref in unverifiable code.
16603                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
16604                successor would be imported assuming there was a TYP_I_IMPL on
16605                the stack. Thus the value would not get GC-tracked. Hence,
16606                change the temp to TYP_BYREF and reimport the successors.
16607                Note: We should only allow this in unverifiable code.
16608             */
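            // Editorial sketch (hypothetical IL) of the pattern described above:
            //
            //          brtrue    TAKE_BYREF
            //          ldc.i4.0                // one branch pushes an int (ldc.i4 0)
            //          br        JOIN
            //   TAKE_BYREF:
            //          ldloca.s  0             // the other branch pushes a byref
            //   JOIN:  ...                     // the spill temp must become TYP_BYREF so the value stays GC-tracked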
16609             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
16610             {
16611                 lvaTable[tempNum].lvType = TYP_BYREF;
16612                 impReimportMarkSuccessors(block);
16613                 markImport = true;
16614             }
16615
16616 #ifdef _TARGET_64BIT_
16617             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
16618             {
16619                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
16620                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
16621                 {
16622                     // Merge the current state into the entry state of block;
16623                     // the call to verMergeEntryStates must have changed
16624                     // the entry state of the block by merging the int local var
16625                     // and the native-int stack entry.
16626                     bool changed = false;
16627                     if (verMergeEntryStates(tgtBlock, &changed))
16628                     {
16629                         impRetypeEntryStateTemps(tgtBlock);
16630                         impReimportBlockPending(tgtBlock);
16631                         assert(changed);
16632                     }
16633                     else
16634                     {
16635                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
16636                         break;
16637                     }
16638                 }
16639
16640                 // Some other block in the spill clique set this to "int", but now we have "native int".
16641                 // Change the type and go back to re-import any blocks that used the wrong type.
16642                 lvaTable[tempNum].lvType = TYP_I_IMPL;
16643                 reimportSpillClique      = true;
16644             }
16645             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
16646             {
16647                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
16648                 // Insert a sign-extension to "native int" so we match the clique.
16649                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
16650             }
16651
16652             // Consider the case where one branch left a 'byref' on the stack and the other leaves
16653             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
16654             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
16655             // behavior instead of asserting and then generating bad code (where we save/restore the
16656             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
16657             // imported already, we need to change the type of the local and reimport the spill clique.
16658             // If the 'byref' side has been imported, we insert a cast from int to 'native int' to match
16659             // the 'byref' size.
16660             if (!tiVerificationNeeded)
16661             {
16662                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
16663                 {
16664                     // Some other block in the spill clique set this to "int", but now we have "byref".
16665                     // Change the type and go back to re-import any blocks that used the wrong type.
16666                     lvaTable[tempNum].lvType = TYP_BYREF;
16667                     reimportSpillClique      = true;
16668                 }
16669                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
16670                 {
16671                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
16672                     // Insert a sign-extension to "native int" so we match the clique size.
16673                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
16674                 }
16675             }
16676 #endif // _TARGET_64BIT_
16677
16678 #if FEATURE_X87_DOUBLES
16679             // X87 stack doesn't differentiate between float/double
16680             // so promoting is no big deal.
16681             // For everybody else keep it as float until we have a collision and then promote
16682             // Just like for x64's TYP_INT<->TYP_I_IMPL
16683
16684             if (multRef > 1 && tree->gtType == TYP_FLOAT)
16685             {
16686                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16687             }
16688
16689 #else // !FEATURE_X87_DOUBLES
16690
16691             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
16692             {
16693                 // Some other block in the spill clique set this to "float", but now we have "double".
16694                 // Change the type and go back to re-import any blocks that used the wrong type.
16695                 lvaTable[tempNum].lvType = TYP_DOUBLE;
16696                 reimportSpillClique      = true;
16697             }
16698             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
16699             {
16700                 // Spill clique has decided this should be "double", but this block only pushes a "float".
16701                 // Insert a cast to "double" so we match the clique.
16702                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
16703             }
16704
16705 #endif // FEATURE_X87_DOUBLES
16706
16707             /* If addStmt has a reference to tempNum (can only happen if we
16708                are spilling to the temps already used by a previous block),
16709                we need to spill addStmt */
16710
16711             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
16712             {
16713                 GenTree* addTree = addStmt->gtStmt.gtStmtExpr;
16714
16715                 if (addTree->gtOper == GT_JTRUE)
16716                 {
16717                     GenTree* relOp = addTree->gtOp.gtOp1;
16718                     assert(relOp->OperIsCompare());
16719
16720                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
16721
16722                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
16723                     {
16724                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
16725                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
16726                         type              = genActualType(lvaTable[temp].TypeGet());
16727                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
16728                     }
16729
16730                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
16731                     {
16732                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
16733                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
16734                         type              = genActualType(lvaTable[temp].TypeGet());
16735                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
16736                     }
16737                 }
16738                 else
16739                 {
16740                     assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
16741
16742                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
16743                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
16744                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
16745                 }
16746             }
16747
16748             /* Spill the stack entry, and replace with the temp */
16749
16750             if (!impSpillStackEntry(level, tempNum
16751 #ifdef DEBUG
16752                                     ,
16753                                     true, "Spill Stack Entry"
16754 #endif
16755                                     ))
16756             {
16757                 if (markImport)
16758                 {
16759                     BADCODE("bad stack state");
16760                 }
16761
16762                 // Oops. Something went wrong when spilling. Bad code.
16763                 verHandleVerificationFailure(block DEBUGARG(true));
16764
16765                 goto SPILLSTACK;
16766             }
16767         }
16768
16769         /* Put back the 'jtrue'/'switch' if we removed it earlier */
16770
16771         if (addStmt)
16772         {
16773             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16774         }
16775     }
16776
16777     // Some of the append/spill logic works on compCurBB
16778
16779     assert(compCurBB == block);
16780
16781     /* Save the tree list in the block */
16782     impEndTreeList(block);
16783
16784     // impEndTreeList sets BBF_IMPORTED on the block
16785     // We do *NOT* want to set it later than this because
16786     // impReimportSpillClique might clear it if this block is both a
16787     // predecessor and successor in the current spill clique
16788     assert(block->bbFlags & BBF_IMPORTED);
16789
16790     // If we had a int/native int, or float/double collision, we need to re-import
16791     if (reimportSpillClique)
16792     {
16793         // This will re-import all the successors of block (as well as each of their predecessors)
16794         impReimportSpillClique(block);
16795
16796         // For blocks that haven't been imported yet, we still need to mark them as pending import.
16797         const unsigned numSuccs = block->NumSucc();
16798         for (unsigned i = 0; i < numSuccs; i++)
16799         {
16800             BasicBlock* succ = block->GetSucc(i);
16801             if ((succ->bbFlags & BBF_IMPORTED) == 0)
16802             {
16803                 impImportBlockPending(succ);
16804             }
16805         }
16806     }
16807     else // the normal case
16808     {
16809         // otherwise just import the successors of block
16810
16811         /* Does this block jump to any other blocks? */
16812         const unsigned numSuccs = block->NumSucc();
16813         for (unsigned i = 0; i < numSuccs; i++)
16814         {
16815             impImportBlockPending(block->GetSucc(i));
16816         }
16817     }
16818 }
16819 #ifdef _PREFAST_
16820 #pragma warning(pop)
16821 #endif
16822
16823 /*****************************************************************************/
16824 //
16825 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16826 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16827 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16828 // (its "pre-state").
16829
16830 void Compiler::impImportBlockPending(BasicBlock* block)
16831 {
16832 #ifdef DEBUG
16833     if (verbose)
16834     {
16835         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16836     }
16837 #endif
16838
16839     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16840     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16841     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16842
16843     // If the block has not been imported, add to pending set.
16844     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16845
16846     // Initialize bbEntryState just the first time we try to add this block to the pending list
16847     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set.
16848     // We use NULL to indicate the 'common' state to avoid memory allocation
16849     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16850         (impGetPendingBlockMember(block) == 0))
16851     {
16852         verInitBBEntryState(block, &verCurrentState);
16853         assert(block->bbStkDepth == 0);
16854         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16855         assert(addToPending);
16856         assert(impGetPendingBlockMember(block) == 0);
16857     }
16858     else
16859     {
16860         // The stack should have the same height on entry to the block from all its predecessors.
16861         if (block->bbStkDepth != verCurrentState.esStackDepth)
16862         {
16863 #ifdef DEBUG
16864             char buffer[400];
16865             sprintf_s(buffer, sizeof(buffer),
16866                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16867                       "Previous depth was %d, current depth is %d",
16868                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16869                       verCurrentState.esStackDepth);
16870             buffer[400 - 1] = 0;
16871             NO_WAY(buffer);
16872 #else
16873             NO_WAY("Block entered with different stack depths");
16874 #endif
16875         }
16876
16877         // Additionally, if we need to verify, merge the verification state.
16878         if (tiVerificationNeeded)
16879         {
16880             // Merge the current state into the entry state of block; if this does not change the entry state
16881             // by merging, do not add the block to the pending-list.
16882             bool changed = false;
16883             if (!verMergeEntryStates(block, &changed))
16884             {
16885                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16886                 addToPending = true; // We will pop it off, and check the flag set above.
16887             }
16888             else if (changed)
16889             {
16890                 addToPending = true;
16891
16892                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16893             }
16894         }
16895
16896         if (!addToPending)
16897         {
16898             return;
16899         }
16900
16901         if (block->bbStkDepth > 0)
16902         {
16903             // We need to fix the types of any spill temps that might have changed:
16904             //   int->native int, float->double, int->byref, etc.
16905             impRetypeEntryStateTemps(block);
16906         }
16907
16908         // OK, we must add to the pending list, if it's not already in it.
16909         if (impGetPendingBlockMember(block) != 0)
16910         {
16911             return;
16912         }
16913     }
16914
16915     // Get an entry to add to the pending list
16916
16917     PendingDsc* dsc;
16918
16919     if (impPendingFree)
16920     {
16921         // We can reuse one of the freed up dscs.
16922         dsc            = impPendingFree;
16923         impPendingFree = dsc->pdNext;
16924     }
16925     else
16926     {
16927         // We have to create a new dsc
16928         dsc = new (this, CMK_Unknown) PendingDsc;
16929     }
16930
16931     dsc->pdBB                 = block;
16932     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16933     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16934
16935     // Save the stack trees for later
16936
16937     if (verCurrentState.esStackDepth)
16938     {
16939         impSaveStackState(&dsc->pdSavedStack, false);
16940     }
16941
16942     // Add the entry to the pending list
16943
16944     dsc->pdNext    = impPendingList;
16945     impPendingList = dsc;
16946     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16947
16948     // Various assertions require us to now consider the block as not imported (at least for
16949     // the final time...)
16950     block->bbFlags &= ~BBF_IMPORTED;
16951
16952 #ifdef DEBUG
16953     if (verbose && 0)
16954     {
16955         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16956     }
16957 #endif
16958 }
16959
16960 /*****************************************************************************/
16961 //
16962 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16963 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16964 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16965
16966 void Compiler::impReimportBlockPending(BasicBlock* block)
16967 {
16968     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16969
16970     assert(block->bbFlags & BBF_IMPORTED);
16971
16972     // OK, we must add to the pending list, if it's not already in it.
16973     if (impGetPendingBlockMember(block) != 0)
16974     {
16975         return;
16976     }
16977
16978     // Get an entry to add to the pending list
16979
16980     PendingDsc* dsc;
16981
16982     if (impPendingFree)
16983     {
16984         // We can reuse one of the freed up dscs.
16985         dsc            = impPendingFree;
16986         impPendingFree = dsc->pdNext;
16987     }
16988     else
16989     {
16990         // We have to create a new dsc
16991         dsc = new (this, CMK_ImpStack) PendingDsc;
16992     }
16993
16994     dsc->pdBB = block;
16995
16996     if (block->bbEntryState)
16997     {
16998         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16999         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
17000         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
17001     }
17002     else
17003     {
17004         dsc->pdThisPtrInit        = TIS_Bottom;
17005         dsc->pdSavedStack.ssDepth = 0;
17006         dsc->pdSavedStack.ssTrees = nullptr;
17007     }
17008
17009     // Add the entry to the pending list
17010
17011     dsc->pdNext    = impPendingList;
17012     impPendingList = dsc;
17013     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17014
17015     // Various assertions require us to now consider the block as not imported (at least for
17016     // the final time...)
17017     block->bbFlags &= ~BBF_IMPORTED;
17018
17019 #ifdef DEBUG
17020     if (verbose && 0)
17021     {
17022         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
17023     }
17024 #endif
17025 }
17026
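      // BlockListNode allocations are pooled: operator new first tries the compiler's free list and only
      // calls compGetMem when the list is empty; FreeBlockListNode pushes nodes back onto the list for reuse.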
17027 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
17028 {
17029     if (comp->impBlockListNodeFreeList == nullptr)
17030     {
17031         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
17032     }
17033     else
17034     {
17035         BlockListNode* res             = comp->impBlockListNodeFreeList;
17036         comp->impBlockListNodeFreeList = res->m_next;
17037         return res;
17038     }
17039 }
17040
17041 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
17042 {
17043     node->m_next             = impBlockListNodeFreeList;
17044     impBlockListNodeFreeList = node;
17045 }
17046
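      // Walk the spill clique that "block" belongs to as a predecessor, invoking "callback" once for each
      // member in each role (SpillCliqueSucc / SpillCliquePred). The clique is computed as a closure by
      // alternating between the two worklists until neither grows; roughly:
      //
      //     predCliqueToDo = { block }
      //     repeat
      //         for each blk taken off predCliqueToDo: visit and enqueue its unvisited successors
      //         for each blk taken off succCliqueToDo: visit and enqueue its unvisited (cheap) predecessors
      //     until no new members were added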
17047 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
17048 {
17049     bool toDo = true;
17050
17051     noway_assert(!fgComputePredsDone);
17052     if (!fgCheapPredsValid)
17053     {
17054         fgComputeCheapPreds();
17055     }
17056
17057     BlockListNode* succCliqueToDo = nullptr;
17058     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
17059     while (toDo)
17060     {
17061         toDo = false;
17062         // Look at the successors of every member of the predecessor to-do list.
17063         while (predCliqueToDo != nullptr)
17064         {
17065             BlockListNode* node = predCliqueToDo;
17066             predCliqueToDo      = node->m_next;
17067             BasicBlock* blk     = node->m_blk;
17068             FreeBlockListNode(node);
17069
17070             const unsigned numSuccs = blk->NumSucc();
17071             for (unsigned succNum = 0; succNum < numSuccs; succNum++)
17072             {
17073                 BasicBlock* succ = blk->GetSucc(succNum);
17074                 // If it's not already in the clique, add it, and also add it
17075                 // as a member of the successor "toDo" set.
17076                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
17077                 {
17078                     callback->Visit(SpillCliqueSucc, succ);
17079                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
17080                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
17081                     toDo           = true;
17082                 }
17083             }
17084         }
17085         // Look at the predecessors of every member of the successor to-do list.
17086         while (succCliqueToDo != nullptr)
17087         {
17088             BlockListNode* node = succCliqueToDo;
17089             succCliqueToDo      = node->m_next;
17090             BasicBlock* blk     = node->m_blk;
17091             FreeBlockListNode(node);
17092
17093             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
17094             {
17095                 BasicBlock* predBlock = pred->block;
17096                 // If it's not already in the clique, add it, and also add it
17097                 // as a member of the predecessor "toDo" set.
17098                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
17099                 {
17100                     callback->Visit(SpillCliquePred, predBlock);
17101                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
17102                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
17103                     toDo           = true;
17104                 }
17105             }
17106         }
17107     }
17108
17109     // If this fails, it means we didn't walk the spill clique properly and somehow managed
17110     // to miss walking back to include the predecessor we started from.
17111     // The most likely cause is missing or out-of-date bbPreds.
17112     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
17113 }
17114
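      // Record the chosen spill temp base on each clique member: successors read the temps on entry
      // (bbStkTempsIn), predecessors write them on exit (bbStkTempsOut).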
17115 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17116 {
17117     if (predOrSucc == SpillCliqueSucc)
17118     {
17119         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
17120         blk->bbStkTempsIn = m_baseTmp;
17121     }
17122     else
17123     {
17124         assert(predOrSucc == SpillCliquePred);
17125         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
17126         blk->bbStkTempsOut = m_baseTmp;
17127     }
17128 }
17129
17130 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17131 {
17132     // For Preds we could be a little smarter and just find the existing store
17133     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
17134     // just re-import the whole block (just like we do for successors)
17135
17136     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
17137     {
17138         // If we haven't imported this block and we're not going to (because it isn't on
17139         // the pending list) then just ignore it for now.
17140
17141         // This block has either never been imported (EntryState == NULL) or it failed
17142         // verification. Neither state requires us to force it to be imported now.
17143         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
17144         return;
17145     }
17146
17147     // For successors we have a valid verCurrentState, so just mark them for reimport
17148     // the 'normal' way
17149     // Unlike predecessors, we *DO* need to reimport the current block because the
17150     // initial import had the wrong entry state types.
17151     // Similarly, blocks that are currently on the pending list still need to call
17152     // impImportBlockPending to fixup their entry state.
17153     if (predOrSucc == SpillCliqueSucc)
17154     {
17155         m_pComp->impReimportMarkBlock(blk);
17156
17157         // Set the current stack state to that of the blk->bbEntryState
17158         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
17159         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
17160
17161         m_pComp->impImportBlockPending(blk);
17162     }
17163     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
17164     {
17165         // As described above, we are only visiting predecessors so they can
17166     // add the appropriate casts. Since we have already done that for the current
17167     // block, it does not need to be reimported.
17168         // Nor do we need to reimport blocks that are still pending, but not yet
17169         // imported.
17170         //
17171         // For predecessors, we have no state to seed the EntryState, so we just have
17172         // to assume the existing one is correct.
17173         // If the block is also a successor, it will get the EntryState properly
17174         // updated when it is visited as a successor in the above "if" block.
17175         assert(predOrSucc == SpillCliquePred);
17176         m_pComp->impReimportBlockPending(blk);
17177     }
17178 }
17179
17180 // Re-type the incoming lclVar nodes to match the varDsc.
17181 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
17182 {
17183     if (blk->bbEntryState != nullptr)
17184     {
17185         EntryState* es = blk->bbEntryState;
17186         for (unsigned level = 0; level < es->esStackDepth; level++)
17187         {
17188             GenTree* tree = es->esStack[level].val;
17189             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
17190             {
17191                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
17192                 noway_assert(lclNum < lvaCount);
17193                 LclVarDsc* varDsc              = lvaTable + lclNum;
17194                 es->esStack[level].val->gtType = varDsc->TypeGet();
17195             }
17196         }
17197     }
17198 }
17199
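      // Return the base local number to use for spilling this block's outgoing stack entries.
      // If the block already belongs to a spill clique as a predecessor (bbStkTempsOut is set), reuse
      // that base; otherwise grab enough temps for the current stack depth and propagate the new base
      // to every member of the clique via the SetSpillTempsBase callback below.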
17200 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
17201 {
17202     if (block->bbStkTempsOut != NO_BASE_TMP)
17203     {
17204         return block->bbStkTempsOut;
17205     }
17206
17207 #ifdef DEBUG
17208     if (verbose)
17209     {
17210         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
17211     }
17212 #endif // DEBUG
17213
17214     // Otherwise, choose one, and propagate to all members of the spill clique.
17215     // Grab enough temps for the whole stack.
17216     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
17217     SetSpillTempsBase callback(baseTmp);
17218
17219     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
17220     // to one spill clique, and similarly can only be the successor to one spill clique
17221     impWalkSpillCliqueFromPred(block, &callback);
17222
17223     return baseTmp;
17224 }
17225
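      // Mark every member of the spill clique containing "block" for (re)import so that all
      // predecessors and successors observe the corrected spill temp types.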
17226 void Compiler::impReimportSpillClique(BasicBlock* block)
17227 {
17228 #ifdef DEBUG
17229     if (verbose)
17230     {
17231         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
17232     }
17233 #endif // DEBUG
17234
17235     // If we get here, it is because this block is already part of a spill clique
17236     // and one predecessor had an outgoing live stack slot of type int, and this
17237     // block has an outgoing live stack slot of type native int.
17238     // We need to reset these before traversal because they have already been set
17239     // by the previous walk to determine all the members of the spill clique.
17240     impInlineRoot()->impSpillCliquePredMembers.Reset();
17241     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
17242
17243     ReimportSpillClique callback(this);
17244
17245     impWalkSpillCliqueFromPred(block, &callback);
17246 }
17247
17248 // Set the pre-state of "block" (which should not have a pre-state allocated) to
17249 // a copy of "srcState", cloning tree pointers as required.
17250 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
17251 {
17252     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
17253     {
17254         block->bbEntryState = nullptr;
17255         return;
17256     }
17257
17258     block->bbEntryState = (EntryState*)compGetMem(sizeof(EntryState));
17259
17260     // block->bbEntryState.esRefcount = 1;
17261
17262     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
17263     block->bbEntryState->thisInitialized = TIS_Bottom;
17264
17265     if (srcState->esStackDepth > 0)
17266     {
17267         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
17268         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
17269
17270         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
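              // The memcpy above copies each entry's typeInfo as-is; the loop below replaces each tree
              // pointer with a clone so the saved entry state owns its own trees.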
17271         for (unsigned level = 0; level < srcState->esStackDepth; level++)
17272         {
17273             GenTree* tree                           = srcState->esStack[level].val;
17274             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
17275         }
17276     }
17277
17278     if (verTrackObjCtorInitState)
17279     {
17280         verSetThisInit(block, srcState->thisInitialized);
17281     }
17282
17283     return;
17284 }
17285
17286 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
17287 {
17288     assert(tis != TIS_Bottom); // Precondition.
17289     if (block->bbEntryState == nullptr)
17290     {
17291         block->bbEntryState = new (this, CMK_Unknown) EntryState();
17292     }
17293
17294     block->bbEntryState->thisInitialized = tis;
17295 }
17296
17297 /*
17298  * Resets the current state to the state at the start of the basic block
17299  */
17300 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
17301 {
17302
17303     if (block->bbEntryState == nullptr)
17304     {
17305         destState->esStackDepth    = 0;
17306         destState->thisInitialized = TIS_Bottom;
17307         return;
17308     }
17309
17310     destState->esStackDepth = block->bbEntryState->esStackDepth;
17311
17312     if (destState->esStackDepth > 0)
17313     {
17314         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
17315
17316         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
17317     }
17318
17319     destState->thisInitialized = block->bbThisOnEntry();
17320
17321     return;
17322 }
17323
17324 ThisInitState BasicBlock::bbThisOnEntry()
17325 {
17326     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
17327 }
17328
17329 unsigned BasicBlock::bbStackDepthOnEntry()
17330 {
17331     return (bbEntryState ? bbEntryState->esStackDepth : 0);
17332 }
17333
17334 void BasicBlock::bbSetStack(void* stackBuffer)
17335 {
17336     assert(bbEntryState);
17337     assert(stackBuffer);
17338     bbEntryState->esStack = (StackEntry*)stackBuffer;
17339 }
17340
17341 StackEntry* BasicBlock::bbStackOnEntry()
17342 {
17343     assert(bbEntryState);
17344     return bbEntryState->esStack;
17345 }
17346
17347 void Compiler::verInitCurrentState()
17348 {
17349     verTrackObjCtorInitState        = FALSE;
17350     verCurrentState.thisInitialized = TIS_Bottom;
17351
17352     if (tiVerificationNeeded)
17353     {
17354         // Track this ptr initialization
17355         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
17356         {
17357             verTrackObjCtorInitState        = TRUE;
17358             verCurrentState.thisInitialized = TIS_Uninit;
17359         }
17360     }
17361
17362     // initialize stack info
17363
17364     verCurrentState.esStackDepth = 0;
17365     assert(verCurrentState.esStack != nullptr);
17366
17367     // copy current state to entry state of first BB
17368     verInitBBEntryState(fgFirstBB, &verCurrentState);
17369 }
17370
17371 Compiler* Compiler::impInlineRoot()
17372 {
17373     if (impInlineInfo == nullptr)
17374     {
17375         return this;
17376     }
17377     else
17378     {
17379         return impInlineInfo->InlineRoot;
17380     }
17381 }
17382
17383 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
17384 {
17385     if (predOrSucc == SpillCliquePred)
17386     {
17387         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
17388     }
17389     else
17390     {
17391         assert(predOrSucc == SpillCliqueSucc);
17392         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
17393     }
17394 }
17395
17396 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
17397 {
17398     if (predOrSucc == SpillCliquePred)
17399     {
17400         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
17401     }
17402     else
17403     {
17404         assert(predOrSucc == SpillCliqueSucc);
17405         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
17406     }
17407 }
17408
17409 /*****************************************************************************
17410  *
17411  *  Convert the instrs ("import") into our internal format (trees). The
17412  *  basic flowgraph has already been constructed and is passed in.
17413  */
17414
17415 void Compiler::impImport(BasicBlock* method)
17416 {
17417 #ifdef DEBUG
17418     if (verbose)
17419     {
17420         printf("*************** In impImport() for %s\n", info.compFullName);
17421     }
17422 #endif
17423
17424     /* Allocate the stack contents */
17425
17426     if (info.compMaxStack <= _countof(impSmallStack))
17427     {
17428         /* Use local variable, don't waste time allocating on the heap */
17429
17430         impStkSize              = _countof(impSmallStack);
17431         verCurrentState.esStack = impSmallStack;
17432     }
17433     else
17434     {
17435         impStkSize              = info.compMaxStack;
17436         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17437     }
17438
17439     // initialize the entry state at start of method
17440     verInitCurrentState();
17441
17442     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
17443     Compiler* inlineRoot = impInlineRoot();
17444     if (this == inlineRoot) // These are only used on the root of the inlining tree.
17445     {
17446         // We have initialized these previously, but to size 0.  Make them larger.
17447         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
17448         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
17449         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
17450     }
17451     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
17452     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
17453     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
17454     impBlockListNodeFreeList = nullptr;
17455
17456 #ifdef DEBUG
17457     impLastILoffsStmt   = nullptr;
17458     impNestedStackSpill = false;
17459 #endif
17460     impBoxTemp = BAD_VAR_NUM;
17461
17462     impPendingList = impPendingFree = nullptr;
17463
17464     /* Add the entry-point to the worker-list */
17465
17466     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
17467     // from EH normalization.
17468     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
17469     // out.
17470     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
17471     {
17472         // Treat these as imported.
17473         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
17474         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
17475         method->bbFlags |= BBF_IMPORTED;
17476     }
17477
17478     impImportBlockPending(method);
17479
17480     /* Import blocks in the worker-list until there are no more */
17481
17482     while (impPendingList)
17483     {
17484         /* Remove the entry at the front of the list */
17485
17486         PendingDsc* dsc = impPendingList;
17487         impPendingList  = impPendingList->pdNext;
17488         impSetPendingBlockMember(dsc->pdBB, 0);
17489
17490         /* Restore the stack state */
17491
17492         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
17493         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
17494         if (verCurrentState.esStackDepth)
17495         {
17496             impRestoreStackState(&dsc->pdSavedStack);
17497         }
17498
17499         /* Add the entry to the free list for reuse */
17500
17501         dsc->pdNext    = impPendingFree;
17502         impPendingFree = dsc;
17503
17504         /* Now import the block */
17505
17506         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
17507         {
17508
17509 #ifdef _TARGET_64BIT_
17510             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
17511             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
17512             // method for further explanation on why we raise this exception instead of making the jitted
17513             // code throw the verification exception during execution.
17514             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
17515             {
17516                 BADCODE("Basic block marked as not verifiable");
17517             }
17518             else
17519 #endif // _TARGET_64BIT_
17520             {
17521                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
17522                 impEndTreeList(dsc->pdBB);
17523             }
17524         }
17525         else
17526         {
17527             impImportBlock(dsc->pdBB);
17528
17529             if (compDonotInline())
17530             {
17531                 return;
17532             }
17533             if (compIsForImportOnly() && !tiVerificationNeeded)
17534             {
17535                 return;
17536             }
17537         }
17538     }
17539
17540 #ifdef DEBUG
17541     if (verbose && info.compXcptnsCount)
17542     {
17543         printf("\nAfter impImport() added block for try,catch,finally");
17544         fgDispBasicBlocks();
17545         printf("\n");
17546     }
17547
17548     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
17549     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
17550     {
17551         block->bbFlags &= ~BBF_VISITED;
17552     }
17553 #endif
17554
17555     assert(!compIsForInlining() || !tiVerificationNeeded);
17556 }
17557
17558 // Checks if a typeinfo (usually stored in the type stack) is a struct.
17559 // The invariant here is that if it's not a ref or a method and has a class handle
17560 // it's a valuetype
17561 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
17562 {
17563     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
17564     {
17565         return true;
17566     }
17567     else
17568     {
17569         return false;
17570     }
17571 }
17572
17573 /*****************************************************************************
17574  *  Check to see if the tree is the address of a local or
17575  *  the address of a field in a local.
17576  *
17577  *  *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
17578
17579  */
17580
17581 BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
17582 {
17583     if (tree->gtOper != GT_ADDR)
17584     {
17585         return FALSE;
17586     }
17587
17588     GenTree* op = tree->gtOp.gtOp1;
17589     while (op->gtOper == GT_FIELD)
17590     {
17591         op = op->gtField.gtFldObj;
17592         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
17593         {
17594             op = op->gtOp.gtOp1;
17595         }
17596         else
17597         {
17598             return false;
17599         }
17600     }
17601
17602     if (op->gtOper == GT_LCL_VAR)
17603     {
17604         *lclVarTreeOut = op;
17605         return TRUE;
17606     }
17607     else
17608     {
17609         return FALSE;
17610     }
17611 }
17612
17613 //------------------------------------------------------------------------
17614 // impMakeDiscretionaryInlineObservations: make observations that help
17615 // determine the profitability of a discretionary inline
17616 //
17617 // Arguments:
17618 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
17619 //    inlineResult -- InlineResult accumulating information about this inline
17620 //
17621 // Notes:
17622 //    If inlining or prejitting the root, this method also makes
17623 //    various observations about the method that factor into inline
17624 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
17625
17626 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
17627 {
17628     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
17629            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
17630            );
17631
17632     // If we're really inlining, we should just have one result in play.
17633     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
17634
17635     // If this is a "forceinline" method, the JIT probably shouldn't have gone
17636     // to the trouble of estimating the native code size. Even if it did, it
17637     // shouldn't be relying on the result of this method.
17638     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
17639
17640     // Note if the caller contains NEWOBJ or NEWARR.
17641     Compiler* rootCompiler = impInlineRoot();
17642
17643     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
17644     {
17645         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
17646     }
17647
17648     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
17649     {
17650         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
17651     }
17652
17653     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
17654     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
17655
17656     if (isSpecialMethod)
17657     {
17658         if (calleeIsStatic)
17659         {
17660             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
17661         }
17662         else
17663         {
17664             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
17665         }
17666     }
17667     else if (!calleeIsStatic)
17668     {
17669         // Callee is an instance method.
17670         //
17671         // Check if the callee has the same 'this' as the root.
17672         if (pInlineInfo != nullptr)
17673         {
17674             GenTree* thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
17675             assert(thisArg);
17676             bool isSameThis = impIsThis(thisArg);
17677             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
17678         }
17679     }
17680
17681     // Note if the callee's class is a promotable struct
17682     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
17683     {
17684         lvaStructPromotionInfo structPromotionInfo;
17685         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
17686         if (structPromotionInfo.canPromote)
17687         {
17688             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
17689         }
17690     }
17691
17692 #ifdef FEATURE_SIMD
17693
17694     // Note if this method has SIMD args or a SIMD return value
17695     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
17696     {
17697         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
17698     }
17699
17700 #endif // FEATURE_SIMD
17701
17702     // Roughly classify callsite frequency.
17703     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
17704
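          // The classification below is ordered from most to least favorable: HOT (prejit root or a
          // maximally hot block), LOOP (call site sits on a backward-jump path and is not direct
          // recursion), WARM (profiled with non-zero weight), RARE (run-rarely block, or the callee is
          // a class constructor), otherwise BORING.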
17705     // If this is a prejit root, or a maximally hot block...
17706     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
17707     {
17708         frequency = InlineCallsiteFrequency::HOT;
17709     }
17710     // No training data.  Look for loop-like things.
17711     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
17712     // However, give it to things nearby.
17713     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
17714              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
17715     {
17716         frequency = InlineCallsiteFrequency::LOOP;
17717     }
17718     else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
17719     {
17720         frequency = InlineCallsiteFrequency::WARM;
17721     }
17722     // Now modify the multiplier based on where we're called from.
17723     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
17724     {
17725         frequency = InlineCallsiteFrequency::RARE;
17726     }
17727     else
17728     {
17729         frequency = InlineCallsiteFrequency::BORING;
17730     }
17731
17732     // Also capture the block weight of the call site.  In the prejit
17733     // root case, assume there's some hot call site for this method.
17734     unsigned weight = 0;
17735
17736     if (pInlineInfo != nullptr)
17737     {
17738         weight = pInlineInfo->iciBlock->bbWeight;
17739     }
17740     else
17741     {
17742         weight = BB_MAX_WEIGHT;
17743     }
17744
17745     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
17746     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
17747 }
17748
17749 /*****************************************************************************
17750  This method makes a STATIC inlining decision based on the IL code.
17751  It should not make any inlining decision based on the context.
17752  If forceInline is true, then the inlining decision should not depend on
17753  performance heuristics (code size, etc.).
17754  */
17755
17756 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
17757                               CORINFO_METHOD_INFO*  methInfo,
17758                               bool                  forceInline,
17759                               InlineResult*         inlineResult)
17760 {
17761     unsigned codeSize = methInfo->ILCodeSize;
17762
17763     // We shouldn't have made up our minds yet...
17764     assert(!inlineResult->IsDecided());
17765
17766     if (methInfo->EHcount)
17767     {
17768         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
17769         return;
17770     }
17771
17772     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
17773     {
17774         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17775         return;
17776     }
17777
17778     // For now we don't inline varargs (import code can't handle it)
17779
17780     if (methInfo->args.isVarArg())
17781     {
17782         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17783         return;
17784     }
17785
17786     // Reject if it has too many locals.
17787     // This is currently an implementation limit due to fixed-size arrays in the
17788     // inline info, rather than a performance heuristic.
17789
17790     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17791
17792     if (methInfo->locals.numArgs > MAX_INL_LCLS)
17793     {
17794         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17795         return;
17796     }
17797
17798     // Make sure there aren't too many arguments.
17799     // This is currently an implementation limit due to fixed-size arrays in the
17800     // inline info, rather than a performance heuristic.
17801
17802     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17803
17804     if (methInfo->args.numArgs > MAX_INL_ARGS)
17805     {
17806         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17807         return;
17808     }
17809
17810     // Note force inline state
17811
17812     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17813
17814     // Note IL code size
17815
17816     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17817
17818     if (inlineResult->IsFailure())
17819     {
17820         return;
17821     }
17822
17823     // Make sure maxstack is not too big
17824
17825     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17826
17827     if (inlineResult->IsFailure())
17828     {
17829         return;
17830     }
17831 }
17832
17833 /*****************************************************************************
17834  */
17835
17836 void Compiler::impCheckCanInline(GenTree*               call,
17837                                  CORINFO_METHOD_HANDLE  fncHandle,
17838                                  unsigned               methAttr,
17839                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17840                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17841                                  InlineResult*          inlineResult)
17842 {
17843     // Either EE or JIT might throw exceptions below.
17844     // If that happens, just don't inline the method.
17845
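          // All state needed by the body below is passed through a POD "Param" struct because
          // eeRunWithErrorTrap runs the body as a callback under an error trap; if the EE or JIT
          // reports an error, "success" comes back false and the inline is simply rejected with
          // CALLSITE_COMPILATION_ERROR.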
17846     struct Param
17847     {
17848         Compiler*              pThis;
17849         GenTree*               call;
17850         CORINFO_METHOD_HANDLE  fncHandle;
17851         unsigned               methAttr;
17852         CORINFO_CONTEXT_HANDLE exactContextHnd;
17853         InlineResult*          result;
17854         InlineCandidateInfo**  ppInlineCandidateInfo;
17855     } param;
17856     memset(&param, 0, sizeof(param));
17857
17858     param.pThis                 = this;
17859     param.call                  = call;
17860     param.fncHandle             = fncHandle;
17861     param.methAttr              = methAttr;
17862     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17863     param.result                = inlineResult;
17864     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17865
17866     bool success = eeRunWithErrorTrap<Param>(
17867         [](Param* pParam) {
17868             DWORD                  dwRestrictions = 0;
17869             CorInfoInitClassResult initClassResult;
17870
17871 #ifdef DEBUG
17872             const char* methodName;
17873             const char* className;
17874             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17875
17876             if (JitConfig.JitNoInline())
17877             {
17878                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17879                 goto _exit;
17880             }
17881 #endif
17882
17883             /* Try to get the code address/size for the method */
17884
17885             CORINFO_METHOD_INFO methInfo;
17886             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17887             {
17888                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17889                 goto _exit;
17890             }
17891
17892             bool forceInline;
17893             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17894
17895             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17896
17897             if (pParam->result->IsFailure())
17898             {
17899                 assert(pParam->result->IsNever());
17900                 goto _exit;
17901             }
17902
17903             // Speculatively check if initClass() can be done.
17904             // If it can be done, we will try to inline the method. If inlining
17905             // succeeds, then we will do the non-speculative initClass() and commit it.
17906             // If this speculative call to initClass() fails, there is no point
17907             // trying to inline this method.
17908             initClassResult =
17909                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17910                                                            pParam->exactContextHnd /* context */,
17911                                                            TRUE /* speculative */);
17912
17913             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17914             {
17915                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17916                 goto _exit;
17917             }
17918
17919             // Give the EE the final say in whether to inline or not.
17920             // This should be last since for verifiable code, this can be expensive
17921
17922             /* VM Inline check also ensures that the method is verifiable if needed */
17923             CorInfoInline vmResult;
17924             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17925                                                                   &dwRestrictions);
17926
17927             if (vmResult == INLINE_FAIL)
17928             {
17929                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17930             }
17931             else if (vmResult == INLINE_NEVER)
17932             {
17933                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17934             }
17935
17936             if (pParam->result->IsFailure())
17937             {
17938                 // Make sure not to report this one.  It was already reported by the VM.
17939                 pParam->result->SetReported();
17940                 goto _exit;
17941             }
17942
17943             // check for unsupported inlining restrictions
17944             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17945
17946             if (dwRestrictions & INLINE_SAME_THIS)
17947             {
17948                 GenTree* thisArg = pParam->call->gtCall.gtCallObjp;
17949                 assert(thisArg);
17950
17951                 if (!pParam->pThis->impIsThis(thisArg))
17952                 {
17953                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17954                     goto _exit;
17955                 }
17956             }
17957
17958             /* Get the method properties */
17959
17960             CORINFO_CLASS_HANDLE clsHandle;
17961             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17962             unsigned clsAttr;
17963             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17964
17965             /* Get the return type */
17966
17967             var_types fncRetType;
17968             fncRetType = pParam->call->TypeGet();
17969
17970 #ifdef DEBUG
17971             var_types fncRealRetType;
17972             fncRealRetType = JITtype2varType(methInfo.args.retType);
17973
17974             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17975                    // <BUGNUM> VSW 288602 </BUGNUM>
17976                    // In case of IJW, we allow to assign a native pointer to a BYREF.
17977                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17978                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17979 #endif
17980
17981             //
17982             // Allocate an InlineCandidateInfo structure
17983             //
17984             InlineCandidateInfo* pInfo;
17985             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17986
17987             pInfo->dwRestrictions  = dwRestrictions;
17988             pInfo->methInfo        = methInfo;
17989             pInfo->methAttr        = pParam->methAttr;
17990             pInfo->clsHandle       = clsHandle;
17991             pInfo->clsAttr         = clsAttr;
17992             pInfo->fncRetType      = fncRetType;
17993             pInfo->exactContextHnd = pParam->exactContextHnd;
17994             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17995             pInfo->initClassResult = initClassResult;
17996
17997             *(pParam->ppInlineCandidateInfo) = pInfo;
17998
17999         _exit:;
18000         },
18001         &param);
18002     if (!success)
18003     {
18004         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
18005     }
18006 }
18007
18008 //------------------------------------------------------------------------
18009 // impInlineRecordArgInfo: record information about an inline candidate argument
18010 //
18011 // Arguments:
18012 //   pInlineInfo - inline info for the inline candidate
18013 //   curArgVal - tree for the caller actual argument value
18014 //   argNum - logical index of this argument
18015 //   inlineResult - result of ongoing inline evaluation
18016 //
18017 // Notes:
18018 //
18019 //   Checks for various inline blocking conditions and makes notes in
18020 //   the inline info arg table about the properties of the actual. These
18021 //   properties are used later by impFetchArg to determine how best to
18022 //   pass the argument into the inlinee.
18023
18024 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
18025                                       GenTree*      curArgVal,
18026                                       unsigned      argNum,
18027                                       InlineResult* inlineResult)
18028 {
18029     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
18030
18031     if (curArgVal->gtOper == GT_MKREFANY)
18032     {
18033         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
18034         return;
18035     }
18036
18037     inlCurArgInfo->argNode = curArgVal;
18038
18039     GenTree* lclVarTree;
18040     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
18041     {
18042         inlCurArgInfo->argIsByRefToStructLocal = true;
18043 #ifdef FEATURE_SIMD
18044         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
18045         {
18046             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
18047         }
18048 #endif // FEATURE_SIMD
18049     }
18050
18051     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
18052     {
18053         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
18054         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
18055     }
18056
18057     if (curArgVal->gtOper == GT_LCL_VAR)
18058     {
18059         inlCurArgInfo->argIsLclVar = true;
18060
18061         /* Remember the "original" argument number */
18062         curArgVal->gtLclVar.gtLclILoffs = argNum;
18063     }
18064
18065     if ((curArgVal->OperKind() & GTK_CONST) ||
18066         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
18067     {
18068         inlCurArgInfo->argIsInvariant = true;
18069         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
18070         {
18071             // Abort inlining at this call site
18072             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
18073             return;
18074         }
18075     }
18076
18077     // If the arg is a local that is address-taken, we can't safely
18078     // directly substitute it into the inlinee.
18079     //
18080     // Previously we'd accomplish this by setting "argHasLdargaOp" but
18081     // that has a stronger meaning: that the arg value can change in
18082     // the method body. Using that flag prevents type propagation,
18083     // which is safe in this case.
18084     //
18085     // Instead mark the arg as having a caller local ref.
18086     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
18087     {
18088         inlCurArgInfo->argHasCallerLocalRef = true;
18089     }
18090
18091 #ifdef DEBUG
18092     if (verbose)
18093     {
18094         if (inlCurArgInfo->argIsThis)
18095         {
18096             printf("thisArg:");
18097         }
18098         else
18099         {
18100             printf("\nArgument #%u:", argNum);
18101         }
18102         if (inlCurArgInfo->argIsLclVar)
18103         {
18104             printf(" is a local var");
18105         }
18106         if (inlCurArgInfo->argIsInvariant)
18107         {
18108             printf(" is a constant");
18109         }
18110         if (inlCurArgInfo->argHasGlobRef)
18111         {
18112             printf(" has global refs");
18113         }
18114         if (inlCurArgInfo->argHasCallerLocalRef)
18115         {
18116             printf(" has caller local ref");
18117         }
18118         if (inlCurArgInfo->argHasSideEff)
18119         {
18120             printf(" has side effects");
18121         }
18122         if (inlCurArgInfo->argHasLdargaOp)
18123         {
18124             printf(" has ldarga effect");
18125         }
18126         if (inlCurArgInfo->argHasStargOp)
18127         {
18128             printf(" has starg effect");
18129         }
18130         if (inlCurArgInfo->argIsByRefToStructLocal)
18131         {
18132             printf(" is byref to a struct local");
18133         }
18134
18135         printf("\n");
18136         gtDispTree(curArgVal);
18137         printf("\n");
18138     }
18139 #endif
18140 }
18141
18142 //------------------------------------------------------------------------
18143 // impInlineInitVars: setup inline information for inlinee args and locals
18144 //
18145 // Arguments:
18146 //    pInlineInfo - inline info for the inline candidate
18147 //
18148 // Notes:
18149 //    This method primarily adds caller-supplied info to the inlArgInfo
18150 //    and sets up the lclVarInfo table.
18151 //
18152 //    For args, the inlArgInfo records properties of the actual argument
18153 //    including the tree node that produces the arg value. This node is
18154 //    usually the tree node present at the call, but may also differ in
18155 //    various ways:
18156 //    - when the call arg is a GT_RET_EXPR, we search back through the ret
18157 //      expr chain for the actual node. Note this will either be the original
18158 //      call (which will be a failed inline by this point), or the return
18159 //      expression from some set of inlines.
18160 //    - when argument type casting is needed the necessary casts are added
18161 //      around the argument node.
18162 //    - if an argument can be simplified by folding then the node here is the
18163 //      folded value.
18164 //
18165 //   The method may make observations that lead to marking this candidate as
18166 //   a failed inline. If this happens the initialization is abandoned immediately
18167 //   to try and reduce the jit time cost for a failed inline.
18168
18169 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
18170 {
18171     assert(!compIsForInlining());
18172
18173     GenTree*             call         = pInlineInfo->iciCall;
18174     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
18175     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
18176     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
18177     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
18178     InlineResult*        inlineResult = pInlineInfo->inlineResult;
18179
18180     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
18181
18182     /* init the argument struct */
18183
18184     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
18185
18186     /* Get hold of the 'this' pointer and the argument list proper */
18187
18188     GenTree* thisArg = call->gtCall.gtCallObjp;
18189     GenTree* argList = call->gtCall.gtCallArgs;
18190     unsigned argCnt  = 0; // Count of the arguments
18191
18192     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
18193
18194     if (thisArg)
18195     {
18196         inlArgInfo[0].argIsThis = true;
18197         GenTree* actualThisArg  = thisArg->gtRetExprVal();
18198         impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, inlineResult);
18199
18200         if (inlineResult->IsFailure())
18201         {
18202             return;
18203         }
18204
18205         /* Increment the argument count */
18206         argCnt++;
18207     }
18208
18209     /* Record some information about each of the arguments */
18210     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
18211
18212 #if USER_ARGS_COME_LAST
18213     unsigned typeCtxtArg = thisArg ? 1 : 0;
18214 #else  // USER_ARGS_COME_LAST
18215     unsigned typeCtxtArg = methInfo->args.totalILArgs();
18216 #endif // USER_ARGS_COME_LAST
18217
18218     for (GenTree* argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
18219     {
18220         if (argTmp == argList && hasRetBuffArg)
18221         {
18222             continue;
18223         }
18224
18225         // Ignore the type context argument
18226         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
18227         {
18228             pInlineInfo->typeContextArg = typeCtxtArg;
18229             typeCtxtArg                 = 0xFFFFFFFF;
18230             continue;
18231         }
18232
18233         assert(argTmp->gtOper == GT_LIST);
18234         GenTree* arg       = argTmp->gtOp.gtOp1;
18235         GenTree* actualArg = arg->gtRetExprVal();
18236         impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
18237
18238         if (inlineResult->IsFailure())
18239         {
18240             return;
18241         }
18242
18243         /* Increment the argument count */
18244         argCnt++;
18245     }
18246
18247     /* Make sure we got the arg number right */
18248     assert(argCnt == methInfo->args.totalILArgs());
18249
18250 #ifdef FEATURE_SIMD
18251     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
18252 #endif // FEATURE_SIMD
18253
18254     /* We have typeless opcodes, get type information from the signature */
18255
18256     if (thisArg)
18257     {
18258         var_types sigType;
18259
18260         if (clsAttr & CORINFO_FLG_VALUECLASS)
18261         {
18262             sigType = TYP_BYREF;
18263         }
18264         else
18265         {
18266             sigType = TYP_REF;
18267         }
18268
18269         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
18270         lclVarInfo[0].lclHasLdlocaOp = false;
18271
18272 #ifdef FEATURE_SIMD
18273         // We always want to check isSIMDorHWSIMDClass, since we want to set foundSIMDType (to increase
18274         // the inlining multiplier) for anything in that assembly.
18275         // But we only need to normalize it if it is a TYP_STRUCT
18276         // (which we need to do even if we have already set foundSIMDType).
18277         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
18278         {
18279             if (sigType == TYP_STRUCT)
18280             {
18281                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
18282             }
18283             foundSIMDType = true;
18284         }
18285 #endif // FEATURE_SIMD
18286         lclVarInfo[0].lclTypeInfo = sigType;
18287
18288         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
18289                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
18290                 (clsAttr & CORINFO_FLG_VALUECLASS)));
18291
18292         if (genActualType(thisArg->gtType) != genActualType(sigType))
18293         {
18294             if (sigType == TYP_REF)
18295             {
18296                 /* The argument cannot be bashed into a ref (see bug 750871) */
18297                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
18298                 return;
18299             }
18300
18301             /* This can only happen with byrefs <-> ints/shorts */
18302
18303             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
18304             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
18305
18306             if (sigType == TYP_BYREF)
18307             {
18308                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18309             }
18310             else if (thisArg->gtType == TYP_BYREF)
18311             {
18312                 assert(sigType == TYP_I_IMPL);
18313
18314                 /* If possible change the BYREF to an int */
18315                 if (thisArg->IsVarAddr())
18316                 {
18317                     thisArg->gtType              = TYP_I_IMPL;
18318                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18319                 }
18320                 else
18321                 {
18322                     /* Arguments 'int <- byref' cannot be bashed */
18323                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18324                     return;
18325                 }
18326             }
18327         }
18328     }
18329
18330     /* Init the types of the arguments and make sure the types
18331      * from the trees match the types in the signature */
18332
18333     CORINFO_ARG_LIST_HANDLE argLst;
18334     argLst = methInfo->args.args;
18335
18336     unsigned i;
18337     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
18338     {
18339         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
18340
18341         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
18342
18343 #ifdef FEATURE_SIMD
18344         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
18345         {
18346             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
18347             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
18348             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
18349             foundSIMDType = true;
18350             if (sigType == TYP_STRUCT)
18351             {
18352                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
18353                 sigType              = structType;
18354             }
18355         }
18356 #endif // FEATURE_SIMD
18357
18358         lclVarInfo[i].lclTypeInfo    = sigType;
18359         lclVarInfo[i].lclHasLdlocaOp = false;
18360
18361         /* Does the tree type match the signature type? */
18362
18363         GenTree* inlArgNode = inlArgInfo[i].argNode;
18364
18365         if (sigType != inlArgNode->gtType)
18366         {
18367             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
18368                but in bad IL cases with caller-callee signature mismatches we can see other types.
18369                We intentionally reject mismatched cases (rather than asserting) so that the
18370                jit remains robust when encountering bad IL. */
18371
18372             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
18373                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
18374                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
18375
18376             if (!isPlausibleTypeMatch)
18377             {
18378                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
18379                 return;
18380             }
18381
18382             /* Is it a narrowing or widening cast?
18383              * Widening casts are ok since the value computed is already
18384              * normalized to an int (on the IL stack) */
18385
18386             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
18387             {
18388                 if (sigType == TYP_BYREF)
18389                 {
18390                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18391                 }
18392                 else if (inlArgNode->gtType == TYP_BYREF)
18393                 {
18394                     assert(varTypeIsIntOrI(sigType));
18395
18396                     /* If possible bash the BYREF to an int */
18397                     if (inlArgNode->IsVarAddr())
18398                     {
18399                         inlArgNode->gtType           = TYP_I_IMPL;
18400                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18401                     }
18402                     else
18403                     {
18404                         /* Arguments 'int <- byref' cannot be changed */
18405                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18406                         return;
18407                     }
18408                 }
18409                 else if (genTypeSize(sigType) < EA_PTRSIZE)
18410                 {
18411                     /* Narrowing cast */
18412
18413                     if (inlArgNode->gtOper == GT_LCL_VAR &&
18414                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
18415                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
18416                     {
18417                         /* We don't need to insert a cast here as the variable
18418                            was assigned a normalized value of the right type */
18419
18420                         continue;
18421                     }
18422
18423                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
18424
18425                     inlArgInfo[i].argIsLclVar = false;
18426
18427                     /* Try to fold the node in case we have constant arguments */
18428
18429                     if (inlArgInfo[i].argIsInvariant)
18430                     {
18431                         inlArgNode            = gtFoldExprConst(inlArgNode);
18432                         inlArgInfo[i].argNode = inlArgNode;
18433                         assert(inlArgNode->OperIsConst());
18434                     }
18435                 }
18436 #ifdef _TARGET_64BIT_
18437                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
18438                 {
18439                     // This should only happen for int -> native int widening
18440                     inlArgNode = inlArgInfo[i].argNode =
18441                         gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
18442
18443                     inlArgInfo[i].argIsLclVar = false;
18444
18445                     /* Try to fold the node in case we have constant arguments */
18446
18447                     if (inlArgInfo[i].argIsInvariant)
18448                     {
18449                         inlArgNode            = gtFoldExprConst(inlArgNode);
18450                         inlArgInfo[i].argNode = inlArgNode;
18451                         assert(inlArgNode->OperIsConst());
18452                     }
18453                 }
18454 #endif // _TARGET_64BIT_
18455             }
18456         }
18457     }
18458
18459     /* Init the types of the local variables */
18460
18461     CORINFO_ARG_LIST_HANDLE localsSig;
18462     localsSig = methInfo->locals.args;
18463
18464     for (i = 0; i < methInfo->locals.numArgs; i++)
18465     {
18466         bool      isPinned;
18467         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
18468
18469         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
18470         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
18471         lclVarInfo[i + argCnt].lclTypeInfo    = type;
18472
18473         if (varTypeIsGC(type))
18474         {
18475             pInlineInfo->numberOfGcRefLocals++;
18476         }
18477
18478         if (isPinned)
18479         {
18480             // Pinned locals may cause inlines to fail.
18481             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
18482             if (inlineResult->IsFailure())
18483             {
18484                 return;
18485             }
18486         }
18487
18488         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
18489
18490         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
18491         // out on the inline.
18492         if (type == TYP_STRUCT)
18493         {
18494             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
18495             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
18496             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
18497             {
18498                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
18499                 if (inlineResult->IsFailure())
18500                 {
18501                     return;
18502                 }
18503
18504                 // Do further notification in the case where the call site is rare; some policies do
18505                 // not track the relative hotness of call sites for "always" inline cases.
18506                 if (pInlineInfo->iciBlock->isRunRarely())
18507                 {
18508                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
18509                     if (inlineResult->IsFailure())
18510                     {
18511
18512                         return;
18513                     }
18514                 }
18515             }
18516         }
18517
18518         localsSig = info.compCompHnd->getArgNext(localsSig);
18519
18520 #ifdef FEATURE_SIMD
18521         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
18522         {
18523             foundSIMDType = true;
18524             if (featureSIMD && type == TYP_STRUCT)
18525             {
18526                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
18527                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
18528             }
18529         }
18530 #endif // FEATURE_SIMD
18531     }
18532
18533 #ifdef FEATURE_SIMD
18534     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
18535     {
18536         foundSIMDType = true;
18537     }
18538     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
18539 #endif // FEATURE_SIMD
18540 }
18541
18542 //------------------------------------------------------------------------
18543 // impInlineFetchLocal: get a local var that represents an inlinee local
18544 //
18545 // Arguments:
18546 //    lclNum -- number of the inlinee local
18547 //    reason -- debug string describing purpose of the local var
18548 //
18549 // Returns:
18550 //    Number of the local to use
18551 //
18552 // Notes:
18553 //    This method is invoked only for locals actually used in the
18554 //    inlinee body.
18555 //
18556 //    Allocates a new temp if necessary, and copies key properties
18557 //    over from the inlinee local var info.
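//
//    Hedged usage sketch (an assumed call site, not from this file): when importing
//    an inlinee 'ldloc.1', the importer might do something like
//
//        unsigned tmpNum = impInlineFetchLocal(1 DEBUGARG("inlinee local use"));
//        GenTree* op     = gtNewLclvNode(tmpNum, lvaGetActualType(tmpNum));
//
//    Repeated calls with the same lclNum return the temp cached in
//    impInlineInfo->lclTmpNum[].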
18558
18559 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
18560 {
18561     assert(compIsForInlining());
18562
18563     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
18564
18565     if (tmpNum == BAD_VAR_NUM)
18566     {
18567         const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
18568         const var_types      lclTyp       = inlineeLocal.lclTypeInfo;
18569
18570         // The lifetime of this local might span multiple BBs,
18571         // so it is a long-lifetime local.
18572         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
18573
18574         // Copy over key info
18575         lvaTable[tmpNum].lvType                 = lclTyp;
18576         lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
18577         lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
18578         lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
18579         lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
18580
18581         // Copy over class handle for ref types. Note this may be a
18582         // shared type -- someday perhaps we can get the exact
18583         // signature and pass in a more precise type.
18584         if (lclTyp == TYP_REF)
18585         {
18586             lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
18587         }
18588
18589         if (inlineeLocal.lclVerTypeInfo.IsStruct())
18590         {
18591             if (varTypeIsStruct(lclTyp))
18592             {
18593                 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18594             }
18595             else
18596             {
18597                 // This is a wrapped primitive.  Make sure the verstate knows that
18598                 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
18599             }
18600         }
18601
18602 #ifdef DEBUG
18603         // Sanity check that we're properly prepared for gc ref locals.
18604         if (varTypeIsGC(lclTyp))
18605         {
18606             // Since there are gc locals we should have seen them earlier
18607             // and if there was a return value, set up the spill temp.
18608             assert(impInlineInfo->HasGcRefLocals());
18609             assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
18610         }
18611         else
18612         {
18613             // Make sure all pinned locals count as gc refs.
18614             assert(!inlineeLocal.lclIsPinned);
18615         }
18616 #endif // DEBUG
18617     }
18618
18619     return tmpNum;
18620 }
18621
18622 //------------------------------------------------------------------------
18623 // impInlineFetchArg: return tree node for argument value in an inlinee
18624 //
18625 // Arguments:
18626 //    lclNum -- argument number in inlinee IL
18627 //    inlArgInfo -- argument info for inlinee
18628 //    lclVarInfo -- var info for inlinee
18629 //
18630 // Returns:
18631 //    Tree for the argument's value. Often an inlinee-scoped temp
18632 //    GT_LCL_VAR but can be other tree kinds, if the argument
18633 //    expression from the caller can be directly substituted into the
18634 //    inlinee body.
18635 //
18636 // Notes:
18637 //    Must be used only for arguments -- use impInlineFetchLocal for
18638 //    inlinee locals.
18639 //
18640 //    Direct substitution is performed when the formal argument cannot
18641 //    change value in the inlinee body (no starg or ldarga), and the
18642 //    actual argument expression's value cannot be changed if it is
18643 //    substituted it into the inlinee body.
18644 //
18645 //    Even if an inlinee-scoped temp is returned here, it may later be
18646 //    "bashed" to a caller-supplied tree when arguments are actually
18647 //    passed (see fgInlinePrependStatements). Bashing can happen if
18648 //    the argument ends up being single use and other conditions are
18649 //    met. So the contents of the tree returned here may not end up
18650 //    being the ones ultimately used for the argument.
18651 //
18652 //    This method will side effect inlArgInfo. It should only be called
18653 //    for actual uses of the argument in the inlinee.
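//
//    Hedged example (hypothetical C#, not from this source):
//
//        static int Add(int a, int b) => a + b;
//        ...
//        int r = Add(x, 10);
//
//    Inside the inlinee body, uses of 'b' can substitute the constant 10 directly
//    (argIsInvariant), and uses of 'a' may substitute the caller local x directly,
//    provided Add contains no starg/ldarga for it and x is not otherwise aliased.
//    All other cases evaluate the argument into a temp as described above.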
18654
18655 GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
18656 {
18657     // Cache the relevant arg and lcl info for this argument.
18658     // We will modify argInfo but not lclVarInfo.
18659     InlArgInfo&          argInfo          = inlArgInfo[lclNum];
18660     const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
18661     const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
18662     const var_types      lclTyp           = lclInfo.lclTypeInfo;
18663     GenTree*             op1              = nullptr;
18664
18665     if (argInfo.argIsInvariant && !argCanBeModified)
18666     {
18667         // Directly substitute constants or addresses of locals
18668         //
18669         // Clone the constant. Note that we cannot directly use
18670         // argNode in the trees even if !argInfo.argIsUsed as this
18671         // would introduce aliasing between inlArgInfo[].argNode and
18672         // impInlineExpr. Then gtFoldExpr() could change it, causing
18673         // further references to the argument working off of the
18674         // bashed copy.
18675         op1 = gtCloneExpr(argInfo.argNode);
18676         PREFIX_ASSUME(op1 != nullptr);
18677         argInfo.argTmpNum = BAD_VAR_NUM;
18678
18679         // We may need to retype to ensure we match the callee's view of the type.
18680         // Otherwise callee-pass throughs of arguments can create return type
18681         // mismatches that block inlining.
18682         //
18683         // Note argument type mismatches that prevent inlining should
18684         // have been caught in impInlineInitVars.
18685         if (op1->TypeGet() != lclTyp)
18686         {
18687             op1->gtType = genActualType(lclTyp);
18688         }
18689     }
18690     else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
18691     {
18692         // Directly substitute unaliased caller locals for args that cannot be modified
18693         //
18694         // Use the caller-supplied node if this is the first use.
18695         op1               = argInfo.argNode;
18696         argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
18697
18698         // Use an equivalent copy if this is the second or subsequent
18699         // use, or if we need to retype.
18700         //
18701         // Note argument type mismatches that prevent inlining should
18702         // have been caught in impInlineInitVars.
18703         if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
18704         {
18705             assert(op1->gtOper == GT_LCL_VAR);
18706             assert(lclNum == op1->gtLclVar.gtLclILoffs);
18707
18708             var_types newTyp = lclTyp;
18709
18710             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
18711             {
18712                 newTyp = genActualType(lclTyp);
18713             }
18714
18715             // Create a new lcl var node - remember the argument lclNum
18716             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
18717         }
18718     }
18719     else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
18720     {
18721         /* Argument is a by-ref address to a struct, a normed struct, or its field.
18722            In these cases, don't spill the byref to a local, simply clone the tree and use it.
18723            This way we will increase the chance for this byref to be optimized away by
18724            a subsequent "dereference" operation.
18725
18726            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
18727            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
18728            For example, if the caller is:
18729                 ldloca.s   V_1  // V_1 is a local struct
18730                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
18731            and the callee being inlined has:
18732                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
18733                     ldarga.s   ptrToInts
18734                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
18735            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
18736            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
18737         */
18738         assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
18739         op1 = gtCloneExpr(argInfo.argNode);
18740     }
18741     else
18742     {
18743         /* Argument is a complex expression - it must be evaluated into a temp */
18744
18745         if (argInfo.argHasTmp)
18746         {
18747             assert(argInfo.argIsUsed);
18748             assert(argInfo.argTmpNum < lvaCount);
18749
18750             /* Create a new lcl var node - remember the argument lclNum */
18751             op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
18752
18753             /* This is the second or later use of this argument,
18754             so we have to use the temp (instead of the actual arg) */
18755             argInfo.argBashTmpNode = nullptr;
18756         }
18757         else
18758         {
18759             /* First time use */
18760             assert(!argInfo.argIsUsed);
18761
18762             /* Reserve a temp for the expression.
18763             * Use a large size node as we may change it later */
18764
18765             const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
18766
18767             lvaTable[tmpNum].lvType = lclTyp;
18768
18769             // For ref types, determine the type of the temp.
18770             if (lclTyp == TYP_REF)
18771             {
18772                 if (!argCanBeModified)
18773                 {
18774                     // If the arg can't be modified in the method
18775                     // body, use the type of the value, if
18776                     // known. Otherwise, use the declared type.
18777                     lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18778                 }
18779                 else
18780                 {
18781                     // Arg might be modified, use the declared type of
18782                     // the argument.
18783                     lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18784                 }
18785             }
18786
18787             assert(lvaTable[tmpNum].lvAddrExposed == 0);
18788             if (argInfo.argHasLdargaOp)
18789             {
18790                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
18791             }
18792
18793             if (lclInfo.lclVerTypeInfo.IsStruct())
18794             {
18795                 if (varTypeIsStruct(lclTyp))
18796                 {
18797                     lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18798                 }
18799                 else
18800                 {
18801                     // This is a wrapped primitive.  Make sure the verstate knows that
18802                     lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
18803                 }
18804             }
18805
18806             argInfo.argHasTmp = true;
18807             argInfo.argTmpNum = tmpNum;
18808
18809             // If we require strict exception order, then arguments must
18810             // be evaluated in sequence before the body of the inlined method.
18811             // So we need to evaluate them to a temp.
18812             // Also, if arguments have global or local references, we need to
18813             // evaluate them to a temp before the inlined body as the
18814             // inlined body may be modifying the global ref.
18815             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
18816             // if it is a struct, because it requires some additional handling.
18817
18818             if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
18819                 !argInfo.argHasCallerLocalRef)
18820             {
18821                 /* Get a *LARGE* LCL_VAR node */
18822                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
18823
18824                 /* Record op1 as the very first use of this argument.
18825                 If there are no further uses of the arg, we may be
18826                 able to use the actual arg node instead of the temp.
18827                 If we do see any further uses, we will clear this. */
18828                 argInfo.argBashTmpNode = op1;
18829             }
18830             else
18831             {
18832                 /* Get a small LCL_VAR node */
18833                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
18834                 /* No bashing of this argument */
18835                 argInfo.argBashTmpNode = nullptr;
18836             }
18837         }
18838     }
18839
18840     // Mark this argument as used.
18841     argInfo.argIsUsed = true;
18842
18843     return op1;
18844 }
18845
18846 /******************************************************************************
18847  Is this the original "this" argument to the call being inlined?
18848
18849  Note that we do not inline methods with "starg 0", and so we do not need to
18850  worry about it.
18851 */
18852
18853 BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
18854 {
18855     assert(compIsForInlining());
18856     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
18857 }
18858
18859 //-----------------------------------------------------------------------------
18860 // This function checks if a dereference in the inlinee can guarantee that
18861 // the "this" is non-NULL.
18862 // If we haven't hit a branch or a side effect, and we are dereferencing
18863 // from 'this' to access a field or to make a GTF_CALL_NULLCHECK call,
18864 // then we can avoid a separate null pointer check.
18865 //
18866 // "additionalTreesToBeEvaluatedBefore"
18867 // is the set of pending trees that have not yet been added to the statement list,
18868 // and which have been removed from verCurrentState.esStack[]
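//
// Hedged example (hypothetical C#): when inlining
//
//     int GetField() => this.m_field;
//
// the inlinee dereferences 'this' before any branch or other side effect, so the
// explicit null check that would otherwise guard the inlined body can be omitted.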
18869
18870 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree*    additionalTreesToBeEvaluatedBefore,
18871                                                                   GenTree*    variableBeingDereferenced,
18872                                                                   InlArgInfo* inlArgInfo)
18873 {
18874     assert(compIsForInlining());
18875     assert(opts.OptEnabled(CLFLG_INLINING));
18876
18877     BasicBlock* block = compCurBB;
18878
18879     GenTree* stmt;
18880     GenTree* expr;
18881
18882     if (block != fgFirstBB)
18883     {
18884         return FALSE;
18885     }
18886
18887     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
18888     {
18889         return FALSE;
18890     }
18891
18892     if (additionalTreesToBeEvaluatedBefore &&
18893         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
18894     {
18895         return FALSE;
18896     }
18897
18898     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
18899     {
18900         expr = stmt->gtStmt.gtStmtExpr;
18901
18902         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
18903         {
18904             return FALSE;
18905         }
18906     }
18907
18908     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
18909     {
18910         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
18911         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
18912         {
18913             return FALSE;
18914         }
18915     }
18916
18917     return TRUE;
18918 }
18919
18920 //------------------------------------------------------------------------
18921 // impMarkInlineCandidate: determine if this call can be subsequently inlined
18922 //
18923 // Arguments:
18924 //    callNode -- call under scrutiny
18925 //    exactContextHnd -- context handle for inlining
18926 //    exactContextNeedsRuntimeLookup -- true if context required runtime lookup
18927 //    callInfo -- call info from VM
18928 //
18929 // Notes:
18930 //    If callNode is an inline candidate, this method sets the flag
18931 //    GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
18932 //    filled in the associated InlineCandidateInfo.
18933 //
18934 //    If callNode is not an inline candidate, and the reason is
18935 //    something that is inherent to the method being called, the
18936 //    method may be marked as "noinline" to short-circuit any
18937 //    future assessments of calls to this method.
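//
//    Hedged sketch of how a later phase consumes the flag (an assumed shape; see
//    fgInline/fgMorphCallInline for the actual logic):
//
//        if (call->IsInlineCandidate())
//        {
//            // the InlineCandidateInfo stored in gtInlineCandidateInfo drives
//            // the actual inline attempt
//        }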
18938
18939 void Compiler::impMarkInlineCandidate(GenTree*               callNode,
18940                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
18941                                       bool                   exactContextNeedsRuntimeLookup,
18942                                       CORINFO_CALL_INFO*     callInfo)
18943 {
18944     // Let the strategy know there's another call
18945     impInlineRoot()->m_inlineStrategy->NoteCall();
18946
18947     if (!opts.OptEnabled(CLFLG_INLINING))
18948     {
18949         /* XXX Mon 8/18/2008
18950          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
18951          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
18952          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
18953          * figure out why we did not set MAXOPT for this compile.
18954          */
18955         assert(!compIsForInlining());
18956         return;
18957     }
18958
18959     if (compIsForImportOnly())
18960     {
18961         // Don't bother creating the inline candidate during verification.
18962         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18963         // that leads to the creation of multiple instances of Compiler.
18964         return;
18965     }
18966
18967     GenTreeCall* call = callNode->AsCall();
18968     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18969
18970     // Don't inline if not optimizing root method
18971     if (opts.compDbgCode)
18972     {
18973         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18974         return;
18975     }
18976
18977     // Don't inline if inlining into root method is disabled.
18978     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18979     {
18980         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18981         return;
18982     }
18983
18984     // Inlining candidate determination needs to honor only IL tail prefix.
18985     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18986     if (call->IsTailPrefixedCall())
18987     {
18988         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18989         return;
18990     }
18991
18992     // Tail recursion elimination takes precedence over inlining.
18993     // TODO: We may want to do some of the additional checks from fgMorphCall
18994     // here to reduce the chance we don't inline a call that won't be optimized
18995     // as a fast tail call or turned into a loop.
18996     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18997     {
18998         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18999         return;
19000     }
19001
19002     if (call->IsVirtual())
19003     {
19004         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
19005         return;
19006     }
19007
19008     /* Ignore helper calls */
19009
19010     if (call->gtCallType == CT_HELPER)
19011     {
19012         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
19013         return;
19014     }
19015
19016     /* Ignore indirect calls */
19017     if (call->gtCallType == CT_INDIRECT)
19018     {
19019         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
19020         return;
19021     }
19022
19023     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
19024      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
19025      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
19026
19027     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
19028     unsigned              methAttr;
19029
19030     // Reuse method flags from the original callInfo if possible
19031     if (fncHandle == callInfo->hMethod)
19032     {
19033         methAttr = callInfo->methodFlags;
19034     }
19035     else
19036     {
19037         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
19038     }
19039
19040 #ifdef DEBUG
19041     if (compStressCompile(STRESS_FORCE_INLINE, 0))
19042     {
19043         methAttr |= CORINFO_FLG_FORCEINLINE;
19044     }
19045 #endif
19046
19047     // Check for COMPlus_AggressiveInlining
19048     if (compDoAggressiveInlining)
19049     {
19050         methAttr |= CORINFO_FLG_FORCEINLINE;
19051     }
19052
19053     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
19054     {
19055         /* Don't bother inline blocks that are in the filter region */
19056         if (bbInCatchHandlerILRange(compCurBB))
19057         {
19058 #ifdef DEBUG
19059             if (verbose)
19060             {
19061                 printf("\nWill not inline blocks that are in the catch handler region\n");
19062             }
19063
19064 #endif
19065
19066             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
19067             return;
19068         }
19069
19070         if (bbInFilterILRange(compCurBB))
19071         {
19072 #ifdef DEBUG
19073             if (verbose)
19074             {
19075                 printf("\nWill not inline blocks that are in the filter region\n");
19076             }
19077 #endif
19078
19079             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
19080             return;
19081         }
19082     }
19083
19084     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
19085
19086     if (opts.compNeedSecurityCheck)
19087     {
19088         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
19089         return;
19090     }
19091
19092     /* Check if we tried to inline this method before */
19093
19094     if (methAttr & CORINFO_FLG_DONT_INLINE)
19095     {
19096         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
19097         return;
19098     }
19099
19100     /* Cannot inline synchronized methods */
19101
19102     if (methAttr & CORINFO_FLG_SYNCH)
19103     {
19104         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
19105         return;
19106     }
19107
19108     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
19109
19110     if (methAttr & CORINFO_FLG_SECURITYCHECK)
19111     {
19112         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
19113         return;
19114     }
19115
19116     InlineCandidateInfo* inlineCandidateInfo = nullptr;
19117     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
19118
19119     if (inlineResult.IsFailure())
19120     {
19121         return;
19122     }
19123
19124     // The old value should be NULL
19125     assert(call->gtInlineCandidateInfo == nullptr);
19126
19127     // The new value should not be NULL.
19128     assert(inlineCandidateInfo != nullptr);
19129     inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
19130
19131     call->gtInlineCandidateInfo = inlineCandidateInfo;
19132
19133     // Mark the call node as inline candidate.
19134     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
19135
19136     // Let the strategy know there's another candidate.
19137     impInlineRoot()->m_inlineStrategy->NoteCandidate();
19138
19139     // Since we're not actually inlining yet, and this call site is
19140     // still just an inline candidate, there's nothing to report.
19141     inlineResult.SetReported();
19142 }
19143
19144 /******************************************************************************/
19145 // Returns true if the given intrinsic will be implemented by target-specific
19146 // instructions
19147
19148 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
19149 {
19150 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
19151     switch (intrinsicId)
19152     {
19153         // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
19154         // instructions to directly compute round/ceiling/floor.
19155         //
19156         // TODO: Because the x86 backend only targets SSE for floating-point code,
19157         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
19158         //       implemented those intrinsics as x87 instructions). If this poses
19159         //       a CQ problem, it may be necessary to change the implementation of
19160         //       the helper calls to decrease call overhead or switch back to the
19161         //       x87 instructions. This is tracked by #7097.
19162         case CORINFO_INTRINSIC_Sqrt:
19163         case CORINFO_INTRINSIC_Abs:
19164             return true;
19165
19166         case CORINFO_INTRINSIC_Round:
19167         case CORINFO_INTRINSIC_Ceiling:
19168         case CORINFO_INTRINSIC_Floor:
19169             return compSupports(InstructionSet_SSE41);
19170
19171         default:
19172             return false;
19173     }
19174 #elif defined(_TARGET_ARM64_)
19175     switch (intrinsicId)
19176     {
19177         case CORINFO_INTRINSIC_Sqrt:
19178         case CORINFO_INTRINSIC_Abs:
19179         case CORINFO_INTRINSIC_Round:
19180         case CORINFO_INTRINSIC_Floor:
19181         case CORINFO_INTRINSIC_Ceiling:
19182             return true;
19183
19184         default:
19185             return false;
19186     }
19187 #elif defined(_TARGET_ARM_)
19188     switch (intrinsicId)
19189     {
19190         case CORINFO_INTRINSIC_Sqrt:
19191         case CORINFO_INTRINSIC_Abs:
19192         case CORINFO_INTRINSIC_Round:
19193             return true;
19194
19195         default:
19196             return false;
19197     }
19198 #elif defined(_TARGET_X86_)
19199     switch (intrinsicId)
19200     {
19201         case CORINFO_INTRINSIC_Sin:
19202         case CORINFO_INTRINSIC_Cos:
19203         case CORINFO_INTRINSIC_Sqrt:
19204         case CORINFO_INTRINSIC_Abs:
19205         case CORINFO_INTRINSIC_Round:
19206             return true;
19207
19208         default:
19209             return false;
19210     }
19211 #else
19212     // TODO: This portion of the logic is not implemented for other architectures.
19213     // The reason for returning true is that, on all other architectures, the only
19214     // intrinsics enabled are target intrinsics.
19215     return true;
19216 #endif //_TARGET_AMD64_
19217 }
19218
19219 /******************************************************************************/
19220 // Returns true if the given intrinsic will be implemented by calling System.Math
19221 // methods.
19222
19223 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
19224 {
19225     // Currently, if a math intrinsic is not implemented by target-specific
19226     // instructions, it will be implemented by a System.Math call. In the
19227     // future, if we turn to implementing some of them with helper calls,
19228     // this predicate needs to be revisited.
19229     return !IsTargetIntrinsic(intrinsicId);
19230 }
19231
19232 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
19233 {
19234     switch (intrinsicId)
19235     {
19236         case CORINFO_INTRINSIC_Sin:
19237         case CORINFO_INTRINSIC_Cbrt:
19238         case CORINFO_INTRINSIC_Sqrt:
19239         case CORINFO_INTRINSIC_Abs:
19240         case CORINFO_INTRINSIC_Cos:
19241         case CORINFO_INTRINSIC_Round:
19242         case CORINFO_INTRINSIC_Cosh:
19243         case CORINFO_INTRINSIC_Sinh:
19244         case CORINFO_INTRINSIC_Tan:
19245         case CORINFO_INTRINSIC_Tanh:
19246         case CORINFO_INTRINSIC_Asin:
19247         case CORINFO_INTRINSIC_Asinh:
19248         case CORINFO_INTRINSIC_Acos:
19249         case CORINFO_INTRINSIC_Acosh:
19250         case CORINFO_INTRINSIC_Atan:
19251         case CORINFO_INTRINSIC_Atan2:
19252         case CORINFO_INTRINSIC_Atanh:
19253         case CORINFO_INTRINSIC_Log10:
19254         case CORINFO_INTRINSIC_Pow:
19255         case CORINFO_INTRINSIC_Exp:
19256         case CORINFO_INTRINSIC_Ceiling:
19257         case CORINFO_INTRINSIC_Floor:
19258             return true;
19259         default:
19260             return false;
19261     }
19262 }
19263
19264 bool Compiler::IsMathIntrinsic(GenTree* tree)
19265 {
19266     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
19267 }
19268
19269 //------------------------------------------------------------------------
19270 // impDevirtualizeCall: Attempt to change a virtual vtable call into a
19271 //   normal call
19272 //
19273 // Arguments:
19274 //     call -- the call node to examine/modify
19275 //     method   -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
19276 //     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
19277 //     contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
19278 //     exactContextHnd -- [OUT] updated context handle iff call devirtualized
19279 //
19280 // Notes:
19281 //     Virtual calls in IL will always "invoke" the base class method.
19282 //
19283 //     This transformation looks for evidence that the type of 'this'
19284 //     in the call is exactly known, is a final class or would invoke
19285 //     a final method, and if that and other safety checks pan out,
19286 //     modifies the call and the call info to create a direct call.
19287 //
19288 //     This transformation is initially done in the importer and not
19289 //     in some subsequent optimization pass because we want it to be
19290 //     upstream of inline candidate identification.
19291 //
19292 //     However, later phases may supply improved type information that
19293 //     can enable further devirtualization. We currently reinvoke this
19294 //     code after inlining, if the return value of the inlined call is
19295 //     the 'this obj' of a subsequent virtual call.
19296 //
19297 //     If devirtualization succeeds and the call's this object is the
19298 //     result of a box, the jit will ask the EE for the unboxed entry
19299 //     point. If this exists, the jit will see if it can rework the box
19300 //     to instead make a local copy. If that is doable, the call is
19301 //     updated to invoke the unboxed entry on the local copy.
19302 //
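//     Hedged illustration (hypothetical C#, not from this source):
//
//         class Base                  { public virtual  int F() => 0; }
//         sealed class Derived : Base { public override int F() => 1; }
//
//         static int Use(Derived d) => d.F();   // still emitted as a callvirt
//
//     Because Derived is sealed (final), the callvirt can be rewritten here as a
//     direct call to Derived::F, which can then be evaluated as an ordinary
//     inline candidate.
//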
19303 void Compiler::impDevirtualizeCall(GenTreeCall*            call,
19304                                    CORINFO_METHOD_HANDLE*  method,
19305                                    unsigned*               methodFlags,
19306                                    CORINFO_CONTEXT_HANDLE* contextHandle,
19307                                    CORINFO_CONTEXT_HANDLE* exactContextHandle)
19308 {
19309     assert(call != nullptr);
19310     assert(method != nullptr);
19311     assert(methodFlags != nullptr);
19312     assert(contextHandle != nullptr);
19313
19314     // This should be a virtual vtable or virtual stub call.
19315     assert(call->IsVirtual());
19316
19317     // Bail if not optimizing
19318     if (opts.MinOpts())
19319     {
19320         return;
19321     }
19322
19323     // Bail if debuggable codegen
19324     if (opts.compDbgCode)
19325     {
19326         return;
19327     }
19328
19329 #if defined(DEBUG)
19330     // Bail if devirt is disabled.
19331     if (JitConfig.JitEnableDevirtualization() == 0)
19332     {
19333         return;
19334     }
19335
19336     const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
19337 #endif // DEBUG
19338
19339     // Fetch information about the virtual method we're calling.
19340     CORINFO_METHOD_HANDLE baseMethod        = *method;
19341     unsigned              baseMethodAttribs = *methodFlags;
19342
19343     if (baseMethodAttribs == 0)
19344     {
19345         // For late devirt we may not have method attributes, so fetch them.
19346         baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19347     }
19348     else
19349     {
19350 #if defined(DEBUG)
19351         // Validate that callInfo has up to date method flags
19352         const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19353
19354         // All the base method attributes should agree, save that
19355         // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
19356         // because of concurrent jitting activity.
19357         //
19358         // Note we don't look at this particular flag bit below, and
19359         // later on (if we do try and inline) we will rediscover why
19360         // the method can't be inlined, so there's no danger here in
19361         // seeing this particular flag bit in different states between
19362         // the cached and fresh values.
19363         if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
19364         {
19365             assert(!"mismatched method attributes");
19366         }
19367 #endif // DEBUG
19368     }
19369
19370     // In R2R mode, we might see virtual stub calls to
19371     // non-virtuals. For instance, cases where the non-virtual method
19372     // is in a different assembly but is called via CALLVIRT. For
19373     // version resilience we must allow for the fact that the method
19374     // might become virtual in some update.
19375     //
19376     // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
19377     // regular call+nullcheck upstream, so we won't reach this
19378     // point.
19379     if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
19380     {
19381         assert(call->IsVirtualStub());
19382         assert(opts.IsReadyToRun());
19383         JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
19384         return;
19385     }
19386
19387     // See what we know about the type of 'this' in the call.
19388     GenTree*             thisObj       = call->gtCallObjp->gtEffectiveVal(false);
19389     GenTree*             actualThisObj = nullptr;
19390     bool                 isExact       = false;
19391     bool                 objIsNonNull  = false;
19392     CORINFO_CLASS_HANDLE objClass      = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
19393
19394     // See if we have special knowledge that can get us a type or a better type.
19395     if ((objClass == nullptr) || !isExact)
19396     {
19397         actualThisObj = thisObj;
19398
19399         // Walk back through any return expression placeholders
19400         while (actualThisObj->OperGet() == GT_RET_EXPR)
19401         {
19402             actualThisObj = actualThisObj->gtRetExpr.gtInlineCandidate;
19403         }
19404
19405         // See if we landed on a call to a special intrinsic method
19406         if (actualThisObj->IsCall())
19407         {
19408             GenTreeCall* thisObjCall = actualThisObj->AsCall();
19409             if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
19410             {
19411                 assert(thisObjCall->gtCallType == CT_USER_FUNC);
19412                 CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
19413                 CORINFO_CLASS_HANDLE  specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
19414                 if (specialObjClass != nullptr)
19415                 {
19416                     objClass     = specialObjClass;
19417                     isExact      = true;
19418                     objIsNonNull = true;
19419                 }
19420             }
19421         }
19422     }
19423
19424     // Bail if we know nothing.
19425     if (objClass == nullptr)
19426     {
19427         JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
19428         return;
19429     }
19430
19431     // Fetch information about the class that introduced the virtual method.
19432     CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
19433     const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
19434
19435 #if !defined(FEATURE_CORECLR)
19436     // If the base class is not beforefieldinit, then devirtualizing may
19437     // cause us to miss a base class init trigger. The spec says we don't
19438     // need a trigger for ref class callvirts, but desktop seems to
19439     // have one anyway. So defer.
19440     if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
19441     {
19442         JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
19443         return;
19444     }
19445 #endif // FEATURE_CORECLR
19446
19447     // Is the call an interface call?
19448     const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
19449
19450     // If the objClass is sealed (final), then we may be able to devirtualize.
19451     const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
19452     const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
19453
19454 #if defined(DEBUG)
19455     const char* callKind       = isInterface ? "interface" : "virtual";
19456     const char* objClassNote   = "[?]";
19457     const char* objClassName   = "?objClass";
19458     const char* baseClassName  = "?baseClass";
19459     const char* baseMethodName = "?baseMethod";
19460
19461     if (verbose || doPrint)
19462     {
19463         objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
19464         objClassName   = info.compCompHnd->getClassName(objClass);
19465         baseClassName  = info.compCompHnd->getClassName(baseClass);
19466         baseMethodName = eeGetMethodName(baseMethod, nullptr);
19467
19468         if (verbose)
19469         {
19470             printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
19471                    "    class for 'this' is %s%s (attrib %08x)\n"
19472                    "    base method is %s::%s\n",
19473                    callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
19474         }
19475     }
19476 #endif // defined(DEBUG)
19477
19478     // Bail if obj class is an interface.
19479     // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
19480     //   IL_021d:  ldloc.0
19481     //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
19482     if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
19483     {
19484         JITDUMP("--- obj class is interface, sorry\n");
19485         return;
19486     }
19487
19488     if (isInterface)
19489     {
19490         assert(call->IsVirtualStub());
19491         JITDUMP("--- base class is interface\n");
19492     }
19493
19494     // Fetch the method that would be called based on the declared type of 'this'
19495     CORINFO_CONTEXT_HANDLE ownerType     = *contextHandle;
19496     CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
19497
19498     // If we failed to get a handle, we can't devirtualize.  This can
19499     // happen when prejitting, if the devirtualization crosses
19500     // servicing bubble boundaries.
19501     if (derivedMethod == nullptr)
19502     {
19503         JITDUMP("--- no derived method, sorry\n");
19504         return;
19505     }
19506
19507     // Fetch method attributes to see if method is marked final.
19508     DWORD      derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
19509     const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
19510
19511 #if defined(DEBUG)
19512     const char* derivedClassName  = "?derivedClass";
19513     const char* derivedMethodName = "?derivedMethod";
19514
19515     const char* note = "speculative";
19516     if (isExact)
19517     {
19518         note = "exact";
19519     }
19520     else if (objClassIsFinal)
19521     {
19522         note = "final class";
19523     }
19524     else if (derivedMethodIsFinal)
19525     {
19526         note = "final method";
19527     }
19528
19529     if (verbose || doPrint)
19530     {
19531         derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
19532         if (verbose)
19533         {
19534             printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
19535             gtDispTree(call);
19536         }
19537     }
19538 #endif // defined(DEBUG)
19539
19540     if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
19541     {
19542         // Type is not exact, and neither the class nor the method is final.
19543         //
19544         // We could speculatively devirtualize, but there's no
19545         // reason to believe the derived method is the one that
19546         // is likely to be invoked.
19547         //
19548         // If there's currently no further overriding (that is, at
19549         // the time of jitting, objClass has no subclasses that
19550         // override this method), then perhaps we'd be willing to
19551         // make a bet...?
19552         JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
19553         return;
19554     }
19555
19556     // For interface calls we must have an exact type or final class.
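    // (A final derived method alone is not sufficient here, since a subclass of
    // objClass could still reimplement the interface.)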
19557     if (isInterface && !isExact && !objClassIsFinal)
19558     {
19559         JITDUMP("    Class not final or exact for interface, no devirtualization\n");
19560         return;
19561     }
19562
19563     JITDUMP("    %s; can devirtualize\n", note);
19564
19565     // Make the updates.
19566     call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
19567     call->gtFlags &= ~GTF_CALL_VIRT_STUB;
19568     call->gtCallMethHnd = derivedMethod;
19569     call->gtCallType    = CT_USER_FUNC;
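    // The call is now an ordinary direct call to the derived method.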
19570
19571     // Virtual calls include an implicit null check, which we may
19572     // now need to make explicit.
19573     if (!objIsNonNull)
19574     {
19575         call->gtFlags |= GTF_CALL_NULLCHECK;
19576     }
19577
19578     // Clear the inline candidate info (may be non-null since
19579     // it's a union field used for other things by virtual
19580     // stubs)
19581     call->gtInlineCandidateInfo = nullptr;
19582
19583 #if defined(DEBUG)
19584     if (verbose)
19585     {
19586         printf("... after devirt...\n");
19587         gtDispTree(call);
19588     }
19589
19590     if (doPrint)
19591     {
19592         printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
19593                baseMethodName, derivedClassName, derivedMethodName, note);
19594     }
19595 #endif // defined(DEBUG)
19596
19597     // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
19598     if (thisObj->IsBoxedValue())
19599     {
19600         JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
19601
19602         // Note for some shared methods the unboxed entry point requires an extra parameter.
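        // (The extra parameter is the value's method table, which supplies the
        // generic context for the shared unboxed code.)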
19603         bool                  requiresInstMethodTableArg = false;
19604         CORINFO_METHOD_HANDLE unboxedEntryMethod =
19605             info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
19606
19607         if (unboxedEntryMethod != nullptr)
19608         {
19609             // Since the call is the only consumer of the box and it receives only an
19610             // interior pointer, we know the box can't escape.
19611             //
19612             // So, revise the box to simply create a local copy, use the address of that copy
19613             // as the this pointer, and update the entry point to the unboxed entry.
19614             //
19615             // Ideally, we then inline the boxed method, and if it turns out not to modify
19616             // the copy, we can undo the copy too.
19617             if (requiresInstMethodTableArg)
19618             {
19619                 // Perform a trial box removal and ask for the type handle tree.
19620                 JITDUMP("Unboxed entry needs method table arg...\n");
19621                 GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);
19622
19623                 if (methodTableArg != nullptr)
19624                 {
19625                     // If that worked, turn the box into a copy to a local var
19626                     JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
19627                     GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
19628
19629                     if (localCopyThis != nullptr)
19630                     {
19631                         // Pass the local var as this and the type handle as a new arg
19632                         JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table arg\n");
19633                         call->gtCallObjp = localCopyThis;
19634
19635                         // Prepend for R2L arg passing or empty L2R passing
19636                         if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
19637                         {
19638                             call->gtCallArgs = gtNewListNode(methodTableArg, call->gtCallArgs);
19639                         }
19640                         // Append for non-empty L2R
19641                         else
19642                         {
19643                             GenTreeArgList* beforeArg = call->gtCallArgs;
19644                             while (beforeArg->Rest() != nullptr)
19645                             {
19646                                 beforeArg = beforeArg->Rest();
19647                             }
19648
19649                             beforeArg->Rest() = gtNewListNode(methodTableArg, nullptr);
19650                         }
19651
19652                         call->gtCallMethHnd = unboxedEntryMethod;
19653                         derivedMethod       = unboxedEntryMethod;
19654
19655                         // Method attributes will differ because the unboxed entry point is shared
19656                         const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
19657                         JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
19658                                 unboxedMethodAttribs);
19659                         derivedMethodAttribs = unboxedMethodAttribs;
19660                     }
19661                     else
19662                     {
19663                         JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
19664                     }
19665                 }
19666                 else
19667                 {
19668                     JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
19669                 }
19670             }
19671             else
19672             {
19673                 JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
19674                 GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
19675
19676                 if (localCopyThis != nullptr)
19677                 {
19678                     JITDUMP("Success! invoking unboxed entry point on local copy\n");
19679                     call->gtCallObjp    = localCopyThis;
19680                     call->gtCallMethHnd = unboxedEntryMethod;
19681                     derivedMethod       = unboxedEntryMethod;
19682                 }
19683                 else
19684                 {
19685                     JITDUMP("Sorry, failed to undo the box\n");
19686                 }
19687             }
19688         }
19689         else
19690         {
19691             // Many of the low-level methods on value classes won't have unboxed entries,
19692             // as they need access to the type of the object.
19693             //
19694             // Note this may be a cue for us to stack allocate the boxed object, since
19695             // we probably know that these objects don't escape.
19696             JITDUMP("Sorry, failed to find unboxed entry point\n");
19697         }
19698     }
19699
19700     // Fetch the class that introduced the derived method.
19701     //
19702     // Note this may not equal objClass, if there is a
19703     // final method that objClass inherits.
19704     CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
19705
19706     // Need to update call info too. This is fragile
19707     // but hopefully the derived method conforms to
19708     // the base in most other ways.
19709     *method        = derivedMethod;
19710     *methodFlags   = derivedMethodAttribs;
19711     *contextHandle = MAKE_METHODCONTEXT(derivedMethod);
19712
19713     // Update context handle.
19714     if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
19715     {
19716         *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
19717     }
19718
19719 #ifdef FEATURE_READYTORUN_COMPILER
19720     if (opts.IsReadyToRun())
19721     {
19722         // For R2R, getCallInfo triggers bookkeeping on the zap
19723         // side so we need to call it here.
19724         //
19725         // First, cons up a suitable resolved token.
19726         CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
19727
19728         derivedResolvedToken.tokenScope   = info.compScopeHnd;
19729         derivedResolvedToken.tokenContext = *contextHandle;
19730         derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
19731         derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
19732         derivedResolvedToken.hClass       = derivedClass;
19733         derivedResolvedToken.hMethod      = derivedMethod;
19734
19735         // Look up the new call info.
19736         CORINFO_CALL_INFO derivedCallInfo;
19737         eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
19738
19739         // Update the call.
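        // The relative-indirect flags described the original stub/R2R lookup and
        // no longer apply once the direct entry point is installed below.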
19740         call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
19741         call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
19742         call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
19743     }
19744 #endif // FEATURE_READYTORUN_COMPILER
19745 }
19746
19747 //------------------------------------------------------------------------
19748 // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
19749 //   to an intrinsic returns an exact type
19750 //
19751 // Arguments:
19752 //     methodHnd -- handle for the special intrinsic method
19753 //
19754 // Returns:
19755 //     Exact class handle returned by the intrinsic call, if known.
19756 //     Nullptr if not known, or not likely to lead to beneficial optimization.
19757
19758 CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
19759 {
19760     JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
19761
19762     CORINFO_CLASS_HANDLE result = nullptr;
19763
19764     // See what intrinsic we have...
19765     const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
19766     switch (ni)
19767     {
19768         case NI_System_Collections_Generic_EqualityComparer_get_Default:
19769         {
19770             // Expect one class generic parameter; figure out which it is.
19771             CORINFO_SIG_INFO sig;
19772             info.compCompHnd->getMethodSig(methodHnd, &sig);
19773             assert(sig.sigInst.classInstCount == 1);
19774             CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
19775             assert(typeHnd != nullptr);
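            // For example, for EqualityComparer<string>.Default there is a single
            // class type argument and typeHnd is System.String.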
19776
19777             // Lookup can be incorrect when we have __Canon, as it won't appear
19778             // to implement any interface types.
19779             //
19780             // And if we do not have a final type, devirt & inlining is
19781             // unlikely to result in much simplification.
19782             //
19783             // We can use CORINFO_FLG_FINAL to screen out both of these cases.
19784             const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
19785             const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
19786
19787             if (isFinalType)
19788             {
19789                 result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
19790                 JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
19791                         result != nullptr ? eeGetClassName(result) : "unknown");
19792             }
19793             else
19794             {
19795                 JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
19796             }
19797
19798             break;
19799         }
19800
19801         default:
19802         {
19803             JITDUMP("This special intrinsic not handled, sorry...\n");
19804             break;
19805         }
19806     }
19807
19808     return result;
19809 }
19810
19811 //------------------------------------------------------------------------
19812 // impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it.
19813 //
19814 // Arguments:
19815 //    token - init value for the allocated token.
19816 //
19817 // Return Value:
19818 //    pointer to token into jit-allocated memory.
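//
// Notes:
//    The copy lives in memory obtained from the compiler's allocator, so it
//    remains valid after the caller's local token goes out of scope.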
19819 CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
19820 {
19821     CORINFO_RESOLVED_TOKEN* memory = (CORINFO_RESOLVED_TOKEN*)compGetMem(sizeof(token));
19822     *memory                        = token;
19823     return memory;
19824 }
19825
19826 //------------------------------------------------------------------------
19827 // SpillRetExprHelper: iterate through the argument trees and spill ret_exprs to local variables.
19828 //
19829 class SpillRetExprHelper
19830 {
19831 public:
19832     SpillRetExprHelper(Compiler* comp) : comp(comp)
19833     {
19834     }
19835
19836     void StoreRetExprResultsInArgs(GenTreeCall* call)
19837     {
19838         GenTree* args = call->gtCallArgs;
19839         if (args != nullptr)
19840         {
19841             comp->fgWalkTreePre(&args, SpillRetExprVisitor, this);
19842         }
19843         GenTree* thisArg = call->gtCallObjp;
19844         if (thisArg != nullptr)
19845         {
19846             comp->fgWalkTreePre(&thisArg, SpillRetExprVisitor, this);
19847         }
19848     }
19849
19850 private:
19851     static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
19852     {
19853         assert((pTree != nullptr) && (*pTree != nullptr));
19854         GenTree* tree = *pTree;
19855         if ((tree->gtFlags & GTF_CALL) == 0)
19856         {
19857             // Trees with ret_expr are marked as GTF_CALL.
19858             return Compiler::WALK_SKIP_SUBTREES;
19859         }
19860         if (tree->OperGet() == GT_RET_EXPR)
19861         {
19862             SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
19863             walker->StoreRetExprAsLocalVar(pTree);
19864         }
19865         return Compiler::WALK_CONTINUE;
19866     }
19867
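    // Spill the given GT_RET_EXPR to a new temp and replace the placeholder with
    // a use of that temp, so the enclosing call tree can later be cloned safely.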
19868     void StoreRetExprAsLocalVar(GenTree** pRetExpr)
19869     {
19870         GenTree* retExpr = *pRetExpr;
19871         assert(retExpr->OperGet() == GT_RET_EXPR);
19872         JITDUMP("Store return expression %u  as a local var.\n", retExpr->gtTreeID);
19873         unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
19874         comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
19875         *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
19876     }
19877
19878 private:
19879     Compiler* comp;
19880 };
19881
19882 //------------------------------------------------------------------------
19883 // addFatPointerCandidate: mark the call and the method as having a fat pointer candidate.
19884 //                         Spill ret_exprs in the call node, because they can't be cloned.
19885 //
19886 // Arguments:
19887 //    call - fat calli candidate
19888 //
19889 void Compiler::addFatPointerCandidate(GenTreeCall* call)
19890 {
19891     setMethodHasFatPointer();
19892     call->SetFatPointerCandidate();
19893     SpillRetExprHelper helper(this);
19894     helper.StoreRetExprResultsInArgs(call);
19895 }