src/jit/importer.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
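
// Illustrative sketch (hypothetical, not a call site from this file): a
// verification helper returning void might use these macros like so, where
// 'tiActual' is an assumed typeInfo value being checked:
//
//     void Compiler::verCheckSomething(const typeInfo& tiActual)
//     {
//         VerifyOrReturn(tiActual.IsObjRef(), "object reference expected");
//         // ... reached only if the check passed
//     }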
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 inline void Compiler::impPushNullObjRefOnStack()
149 {
150     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
151 }
152
153 // This method gets called when we run into unverifiable code
154 // (and we are verifying the method)
155
156 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
157                                                           DEBUGARG(unsigned line))
158 {
159     // Remember that the code is not verifiable
160     // Note that the method may yet pass canSkipMethodVerification(),
161     // and so the presence of unverifiable code may not be an issue.
162     tiIsVerifiableCode = FALSE;
163
164 #ifdef DEBUG
165     const char* tail = strrchr(file, '\\');
166     if (tail)
167     {
168         file = tail + 1;
169     }
170
171     if (JitConfig.JitBreakOnUnsafeCode())
172     {
173         assert(!"Unsafe code detected");
174     }
175 #endif
176
177     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
178             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
179
180     if (verNeedsVerification() || compIsForImportOnly())
181     {
182         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
183                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
184         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
185     }
186 }
187
188 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
189                                                                     DEBUGARG(unsigned line))
190 {
191     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
192             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
193
194 #ifdef DEBUG
195     //    BreakIfDebuggerPresent();
196     if (getBreakOnBadCode())
197     {
198         assert(!"Typechecking error");
199     }
200 #endif
201
202     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
203     UNREACHABLE();
204 }
205
206 // Helper function that tells us whether the IL instruction at the given address
207 // consumes an address at the top of the stack. We use it to avoid unnecessarily
208 // marking locals as address-taken (lvAddrTaken).
209 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
210 {
211     assert(!compIsForInlining());
212
213     OPCODE opcode;
214
215     opcode = (OPCODE)getU1LittleEndian(codeAddr);
216
217     switch (opcode)
218     {
219         // case CEE_LDFLDA: We're taking this one out because, if you have a sequence
220         // like
221         //
222         //          ldloca.0
223         //          ldflda whatever
224         //
225         // on a primitive-like struct, you end up after morphing with the address of a local
226         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
227         // for structs that contain other structs, which isn't a case we handle very
228         // well now, for other reasons.
229
230         case CEE_LDFLD:
231         {
232             // We won't collapse small fields. This is probably not the right place for this
233             // check, but we're only using the function for this purpose, and it is easy to factor
234             // out if we need to do so.
235
236             CORINFO_RESOLVED_TOKEN resolvedToken;
237             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
238
239             CORINFO_CLASS_HANDLE clsHnd;
240             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
241
242             // Preserve 'small' int types
243             if (!varTypeIsSmall(lclTyp))
244             {
245                 lclTyp = genActualType(lclTyp);
246             }
247
248             if (varTypeIsSmall(lclTyp))
249             {
250                 return false;
251             }
252
253             return true;
254         }
255         default:
256             break;
257     }
258
259     return false;
260 }
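
// Illustrative sketch of a hypothetical call site: when deciding whether a
// ldloca/ldarga forces its local to be treated as address-exposed, a caller
// could consult this helper ('codeAddr' here is assumed to point at the
// instruction following the ldloca/ldarga, and 'lclNum' at the local):
//
//     if (!impILConsumesAddr(codeAddr, info.compMethodHnd, info.compScopeHnd))
//     {
//         lvaSetVarAddrExposed(lclNum); // conservatively mark the local
//     }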
261
262 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
263 {
264     pResolvedToken->tokenContext = impTokenLookupContextHandle;
265     pResolvedToken->tokenScope   = info.compScopeHnd;
266     pResolvedToken->token        = getU4LittleEndian(addr);
267     pResolvedToken->tokenType    = kind;
268
269     if (!tiVerificationNeeded)
270     {
271         info.compCompHnd->resolveToken(pResolvedToken);
272     }
273     else
274     {
275         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
276     }
277 }
278
279 /*****************************************************************************
280  *
281  *  Pop one tree from the stack.
282  */
283
284 StackEntry Compiler::impPopStack()
285 {
286     if (verCurrentState.esStackDepth == 0)
287     {
288         BADCODE("stack underflow");
289     }
290
291 #ifdef DEBUG
292 #if VERBOSE_VERIFY
293     if (VERBOSE && tiVerificationNeeded)
294     {
295         JITDUMP("\n");
296         printf(TI_DUMP_PADDING);
297         printf("About to pop from the stack: ");
298         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
299         ti.Dump();
300     }
301 #endif // VERBOSE_VERIFY
302 #endif // DEBUG
303
304     return verCurrentState.esStack[--verCurrentState.esStackDepth];
305 }
306
307 /*****************************************************************************
308  *
309  *  Peek at the n'th (0-based) tree from the top of the stack.
310  */
311
312 StackEntry& Compiler::impStackTop(unsigned n)
313 {
314     if (verCurrentState.esStackDepth <= n)
315     {
316         BADCODE("stack underflow");
317     }
318
319     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
320 }
321
322 unsigned Compiler::impStackHeight()
323 {
324     return verCurrentState.esStackDepth;
325 }
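
// Illustrative sketch (hypothetical) of the usual consumption pattern for these
// stack helpers when importing a binary operator; the variable names are assumed:
//
//     GenTree* op2 = impPopStack().val; // the first pop yields the most recently pushed value
//     GenTree* op1 = impPopStack().val;
//     impPushOnStack(gtNewOperNode(GT_ADD, genActualType(op1->TypeGet()), op1, op2), typeInfo());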
326
327 /*****************************************************************************
328  *  Some of the trees are spilled specially. While unspilling them, or
329  *  making a copy, these need to be handled specially. The function below
330  *  checks for the operators that are possible after spilling.
331  */
332
333 #ifdef DEBUG // only used in asserts
334 static bool impValidSpilledStackEntry(GenTree* tree)
335 {
336     if (tree->gtOper == GT_LCL_VAR)
337     {
338         return true;
339     }
340
341     if (tree->OperIsConst())
342     {
343         return true;
344     }
345
346     return false;
347 }
348 #endif
349
350 /*****************************************************************************
351  *
352  *  The following logic is used to save/restore stack contents.
353  *  If 'copy' is true, then we make a copy of the trees on the stack. These
354  *  have to all be cloneable/spilled values.
355  */
356
357 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
358 {
359     savePtr->ssDepth = verCurrentState.esStackDepth;
360
361     if (verCurrentState.esStackDepth)
362     {
363         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
364         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
365
366         if (copy)
367         {
368             StackEntry* table = savePtr->ssTrees;
369
370             /* Make a fresh copy of all the stack entries */
371
372             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
373             {
374                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
375                 GenTree* tree     = verCurrentState.esStack[level].val;
376
377                 assert(impValidSpilledStackEntry(tree));
378
379                 switch (tree->gtOper)
380                 {
381                     case GT_CNS_INT:
382                     case GT_CNS_LNG:
383                     case GT_CNS_DBL:
384                     case GT_CNS_STR:
385                     case GT_LCL_VAR:
386                         table->val = gtCloneExpr(tree);
387                         break;
388
389                     default:
390                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
391                         break;
392                 }
393             }
394         }
395         else
396         {
397             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
398         }
399     }
400 }
401
402 void Compiler::impRestoreStackState(SavedStack* savePtr)
403 {
404     verCurrentState.esStackDepth = savePtr->ssDepth;
405
406     if (verCurrentState.esStackDepth)
407     {
408         memcpy(verCurrentState.esStack, savePtr->ssTrees,
409                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
410     }
411 }
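
// Illustrative sketch (hypothetical usage): a caller that needs to look ahead or
// import something tentatively can snapshot and later rewind the stack contents:
//
//     SavedStack savedState;
//     impSaveStackState(&savedState, true); // 'true' => clone the stack entries
//     // ... tentative work that may push/pop stack entries ...
//     impRestoreStackState(&savedState);    // rewind to the snapshot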
412
413 /*****************************************************************************
414  *
415  *  Get the tree list started for a new basic block.
416  */
417 inline void Compiler::impBeginTreeList()
418 {
419     assert(impTreeList == nullptr && impTreeLast == nullptr);
420
421     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
422 }
423
424 /*****************************************************************************
425  *
426  *  Store the given start and end stmt in the given basic block. This is
427  *  mostly called by impEndTreeList(BasicBlock *block). It is called
428  *  directly only for handling CEE_LEAVEs out of finally-protected try regions.
429  */
430
431 inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
432 {
433     assert(firstStmt->gtOper == GT_STMT);
434     assert(lastStmt->gtOper == GT_STMT);
435
436     /* Make the list circular, so that we can easily walk it backwards */
437
438     firstStmt->gtPrev = lastStmt;
439
440     /* Store the tree list in the basic block */
441
442     block->bbTreeList = firstStmt;
443
444     /* The block should not already be marked as imported */
445     assert((block->bbFlags & BBF_IMPORTED) == 0);
446
447     block->bbFlags |= BBF_IMPORTED;
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the current tree list in the given basic block.
453  */
454
455 inline void Compiler::impEndTreeList(BasicBlock* block)
456 {
457     assert(impTreeList->gtOper == GT_BEG_STMTS);
458
459     GenTree* firstTree = impTreeList->gtNext;
460
461     if (!firstTree)
462     {
463         /* The block should not already be marked as imported */
464         assert((block->bbFlags & BBF_IMPORTED) == 0);
465
466         // Empty block. Just mark it as imported
467         block->bbFlags |= BBF_IMPORTED;
468     }
469     else
470     {
471         // Ignore the GT_BEG_STMTS
472         assert(firstTree->gtPrev == impTreeList);
473
474         impEndTreeList(block, firstTree, impTreeLast);
475     }
476
477 #ifdef DEBUG
478     if (impLastILoffsStmt != nullptr)
479     {
480         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
481         impLastILoffsStmt                          = nullptr;
482     }
483
484     impTreeList = impTreeLast = nullptr;
485 #endif
486 }
487
488 /*****************************************************************************
489  *
490  *  Check that appending the given statement doesn't mess up the semantic order. Note
491  *  that this has only limited value as we can only check [0..chkLevel).
492  */
493
494 inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
495 {
496 #ifndef DEBUG
497     return;
498 #else
499     assert(stmt->gtOper == GT_STMT);
500
501     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
502     {
503         chkLevel = verCurrentState.esStackDepth;
504     }
505
506     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
507     {
508         return;
509     }
510
511     GenTree* tree = stmt->gtStmt.gtStmtExpr;
512
513     // Calls can only be appended if there are no GTF_GLOB_EFFECT trees on the stack
514
515     if (tree->gtFlags & GTF_CALL)
516     {
517         for (unsigned level = 0; level < chkLevel; level++)
518         {
519             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
520         }
521     }
522
523     if (tree->gtOper == GT_ASG)
524     {
525         // For an assignment to a local variable, all references to that
526         // variable have to be spilled. If it is aliased, all calls and
527         // indirect accesses have to be spilled.
528
529         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
530         {
531             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
532             for (unsigned level = 0; level < chkLevel; level++)
533             {
534                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
535                 assert(!lvaTable[lclNum].lvAddrExposed ||
536                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
537             }
538         }
539
540         // If the access may be to global memory, all side effects have to be spilled.
541
542         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
543         {
544             for (unsigned level = 0; level < chkLevel; level++)
545             {
546                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
547             }
548         }
549     }
550 #endif
551 }
552
553 /*****************************************************************************
554  *
555  *  Append the given GT_STMT node to the current block's tree list.
556  *  [0..chkLevel) is the portion of the stack which we will check for
557  *    interference with stmt and spill if needed.
558  */
559
560 inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
561 {
562     assert(stmt->gtOper == GT_STMT);
563     noway_assert(impTreeLast != nullptr);
564
565     /* If the statement being appended has any side-effects, check the stack
566        to see if anything needs to be spilled to preserve correct ordering. */
567
568     GenTree* expr  = stmt->gtStmt.gtStmtExpr;
569     unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
570
571     // Assignments to (unaliased) locals don't count as a side-effect, as
572     // we handle them specially using impSpillLclRefs(). Temp locals should
573     // be fine too.
574
575     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
576         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
577     {
578         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
579         assert(flags == (op2Flags | GTF_ASG));
580         flags = op2Flags;
581     }
582
583     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
584     {
585         chkLevel = verCurrentState.esStackDepth;
586     }
587
588     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
589     {
590         assert(chkLevel <= verCurrentState.esStackDepth);
591
592         if (flags)
593         {
594             // If there is a call, we have to spill global refs
595             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
596
597             if (expr->gtOper == GT_ASG)
598             {
599                 GenTree* lhs = expr->gtGetOp1();
600                 // If we are assigning to a global ref, we have to spill global refs on stack.
601                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
602                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
603                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
604                 if (!expr->OperIsBlkOp())
605                 {
606                     // If we are assigning to a global ref, we have to spill global refs on stack
607                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
608                     {
609                         spillGlobEffects = true;
610                     }
611                 }
612                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
613                          ((lhs->OperGet() == GT_LCL_VAR) &&
614                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
615                 {
616                     spillGlobEffects = true;
617                 }
618             }
619
620             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
621         }
622         else
623         {
624             impSpillSpecialSideEff();
625         }
626     }
627
628     impAppendStmtCheck(stmt, chkLevel);
629
630     /* Point 'prev' at the previous node, so that we can walk backwards */
631
632     stmt->gtPrev = impTreeLast;
633
634     /* Append the expression statement to the list */
635
636     impTreeLast->gtNext = stmt;
637     impTreeLast         = stmt;
638
639 #ifdef FEATURE_SIMD
640     impMarkContiguousSIMDFieldAssignments(stmt);
641 #endif
642
643     /* Once we set impCurStmtOffs in an appended tree, we are ready to
644        report the following offsets. So reset impCurStmtOffs */
645
646     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
647     {
648         impCurStmtOffsSet(BAD_IL_OFFSET);
649     }
650
651 #ifdef DEBUG
652     if (impLastILoffsStmt == nullptr)
653     {
654         impLastILoffsStmt = stmt;
655     }
656
657     if (verbose)
658     {
659         printf("\n\n");
660         gtDispTree(stmt);
661     }
662 #endif
663 }
664
665 /*****************************************************************************
666  *
667  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
668  */
669
670 inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
671 {
672     assert(stmt->gtOper == GT_STMT);
673     assert(stmtBefore->gtOper == GT_STMT);
674
675     GenTree* stmtPrev  = stmtBefore->gtPrev;
676     stmt->gtPrev       = stmtPrev;
677     stmt->gtNext       = stmtBefore;
678     stmtPrev->gtNext   = stmt;
679     stmtBefore->gtPrev = stmt;
680 }
681
682 /*****************************************************************************
683  *
684  *  Append the given expression tree to the current block's tree list.
685  *  Return the newly created statement.
686  */
687
688 GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
689 {
690     assert(tree);
691
692     /* Allocate an 'expression statement' node */
693
694     GenTree* expr = gtNewStmt(tree, offset);
695
696     /* Append the statement to the current block's stmt list */
697
698     impAppendStmt(expr, chkLevel);
699
700     return expr;
701 }
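
// Illustrative sketch (hypothetical): appending a side-effecting tree to the
// current block's statement list, spilling the whole evaluation stack first if
// ordering requires it ('sideEffect' is an assumed tree, e.g. a call):
//
//     impAppendTree(sideEffect, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);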
702
703 /*****************************************************************************
704  *
705  *  Insert the given expression tree before GT_STMT "stmtBefore"
706  */
707
708 void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
709 {
710     assert(stmtBefore->gtOper == GT_STMT);
711
712     /* Allocate an 'expression statement' node */
713
714     GenTree* expr = gtNewStmt(tree, offset);
715
716     /* Insert the statement before "stmtBefore" in the current block's stmt list */
717
718     impInsertStmtBefore(expr, stmtBefore);
719 }
720
721 /*****************************************************************************
722  *
723  *  Append an assignment of the given value to a temp to the current tree list.
724  *  curLevel is the stack level for which the spill to the temp is being done.
725  */
726
727 void Compiler::impAssignTempGen(unsigned    tmp,
728                                 GenTree*    val,
729                                 unsigned    curLevel,
730                                 GenTree**   pAfterStmt, /* = NULL */
731                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
732                                 BasicBlock* block       /* = NULL */
733                                 )
734 {
735     GenTree* asg = gtNewTempAssign(tmp, val);
736
737     if (!asg->IsNothingNode())
738     {
739         if (pAfterStmt)
740         {
741             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
742             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
743         }
744         else
745         {
746             impAppendTree(asg, curLevel, impCurStmtOffs);
747         }
748     }
749 }
750
751 /*****************************************************************************
752  * same as above, but handle the valueclass case too
753  */
754
755 void Compiler::impAssignTempGen(unsigned             tmpNum,
756                                 GenTree*             val,
757                                 CORINFO_CLASS_HANDLE structType,
758                                 unsigned             curLevel,
759                                 GenTree**            pAfterStmt, /* = NULL */
760                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
761                                 BasicBlock*          block       /* = NULL */
762                                 )
763 {
764     GenTree* asg;
765
766     if (varTypeIsStruct(val))
767     {
768         assert(tmpNum < lvaCount);
769         assert(structType != NO_CLASS_HANDLE);
770
771         // If the method is non-verifiable the assert may not hold, so at least
772         // ignore it when verification is turned on, since any block that tries
773         // to use the temp would have failed verification.
774         var_types varType = lvaTable[tmpNum].lvType;
775         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
776         lvaSetStruct(tmpNum, structType, false);
777
778         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
779         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
780         // that has been passed in for the value being assigned to the temp, in which case we
781         // need to set 'val' to that same type.
782         // Note also that if we always normalized the types of any node that might be a struct
783         // type, this would not be necessary - but that requires additional JIT/EE interface
784         // calls that may not actually be required - e.g. if we only access a field of a struct.
785
786         val->gtType = lvaTable[tmpNum].lvType;
787
788         GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
789         asg          = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
790     }
791     else
792     {
793         asg = gtNewTempAssign(tmpNum, val);
794     }
795
796     if (!asg->IsNothingNode())
797     {
798         if (pAfterStmt)
799         {
800             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
801             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
802         }
803         else
804         {
805             impAppendTree(asg, curLevel, impCurStmtOffs);
806         }
807     }
808 }
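
// Illustrative sketch (hypothetical): spilling a struct value to a fresh temp and
// then referring to the temp, mirroring the pattern used later in impGetStructAddr
// ('structVal' and 'structHnd' are assumed to be in scope):
//
//     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("illustrative spill"));
//     impAssignTempGen(tmpNum, structVal, structHnd, (unsigned)CHECK_SPILL_ALL);
//     GenTree* use = gtNewLclvNode(tmpNum, lvaTable[tmpNum].TypeGet());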
809
810 /*****************************************************************************
811  *
812  *  Pop the given number of values from the stack and return a list node with
813  *  their values.
814  *  The 'prefixTree' argument may optionally contain an argument
815  *  list that is prepended to the list returned from this function.
816  *
817  *  The notion of prepended is a bit misleading in that the list is backwards
818  *  from the way I would expect: The first element popped is at the end of
819  *  the returned list, and prefixTree is 'before' that, meaning closer to
820  *  the end of the list.  To get to prefixTree, you have to walk to the
821  *  end of the list.
822  *
823  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
824  *  such we reverse its meaning such that returnValue has a reversed
825  *  prefixTree at the head of the list.
826  */
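
/*****************************************************************************
 *  Illustrative example (a sketch of the ordering, derived from the code below):
 *  if the IL pushed arguments a, b, c in that order and prefixTree is P1 -> P2,
 *  then ARG_ORDER_L2R yields
 *      a -> b -> c -> P1 -> P2
 *  (the first element popped, c, sits just before the prefix at the end), while
 *  ARG_ORDER_R2L yields the reversed prefix at the head:
 *      P2 -> P1 -> a -> b -> c
 */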
827
828 GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
829 {
830     assert(sig == nullptr || count == sig->numArgs);
831
832     CORINFO_CLASS_HANDLE structType;
833     GenTreeArgList*      treeList;
834
835     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
836     {
837         treeList = nullptr;
838     }
839     else
840     { // ARG_ORDER_L2R
841         treeList = prefixTree;
842     }
843
844     while (count--)
845     {
846         StackEntry se   = impPopStack();
847         typeInfo   ti   = se.seTypeInfo;
848         GenTree*   temp = se.val;
849
850         if (varTypeIsStruct(temp))
851         {
852             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
853             assert(ti.IsType(TI_STRUCT));
854             structType = ti.GetClassHandleForValueClass();
855 #ifdef DEBUG
856             if (verbose)
857             {
858                 printf("Calling impNormStructVal on:\n");
859                 gtDispTree(temp);
860             }
861 #endif
862             temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
863 #ifdef DEBUG
864             if (verbose)
865             {
866                 printf("resulting tree:\n");
867                 gtDispTree(temp);
868             }
869 #endif
870         }
871
872         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
873         treeList = gtNewListNode(temp, treeList);
874     }
875
876     if (sig != nullptr)
877     {
878         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
879             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
880         {
881             // Make sure that all valuetypes (including enums) that we push are loaded.
882             // This is to guarantee that if a GC is triggered from the prestub of this method,
883             // all valuetypes in the method signature are already loaded.
884             // We need to be able to find the size of the valuetypes, but we cannot
885             // do a class-load from within GC.
886             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
887         }
888
889         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
890         CORINFO_CLASS_HANDLE    argClass;
891         CORINFO_CLASS_HANDLE    argRealClass;
892         GenTreeArgList*         args;
893
894         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
895         {
896             PREFIX_ASSUME(args != nullptr);
897
898             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
899
900             // insert implied casts (from float to double or double to float)
901
902             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
903             {
904                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
905             }
906             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
907             {
908                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
909             }
910
911             // insert any widening or narrowing casts for backwards compatibility
912
913             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
914
915             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
916                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
917             {
918                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
919                 // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
920                 // primitive types.
921                 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
922                 // details).
923                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
924                 {
925                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
926                 }
927
928                 // Make sure that all valuetypes (including enums) that we push are loaded.
929                 // This is to guarantee that if a GC is triggered from the prestub of this method,
930                 // all valuetypes in the method signature are already loaded.
931                 // We need to be able to find the size of the valuetypes, but we cannot
932                 // do a class-load from within GC.
933                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
934             }
935
936             argLst = info.compCompHnd->getArgNext(argLst);
937         }
938     }
939
940     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
941     {
942         // Prepend the prefixTree
943
944         // Simple in-place reversal to place treeList
945         // at the end of a reversed prefixTree
946         while (prefixTree != nullptr)
947         {
948             GenTreeArgList* next = prefixTree->Rest();
949             prefixTree->Rest()   = treeList;
950             treeList             = prefixTree;
951             prefixTree           = next;
952         }
953     }
954     return treeList;
955 }
956
957 /*****************************************************************************
958  *
959  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
960  *  The first "skipReverseCount" items are not reversed.
961  */
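
/*****************************************************************************
 *  Illustrative example (derived from the code below): if impPopList returns
 *  n0 -> n1 -> n2 -> n3 and skipReverseCount is 1, the result is
 *  n0 -> n3 -> n2 -> n1: the first node stays in place and the remainder is
 *  reversed.
 */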
962
963 GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
964
965 {
966     assert(skipReverseCount <= count);
967
968     GenTreeArgList* list = impPopList(count, sig);
969
970     // reverse the list
971     if (list == nullptr || skipReverseCount == count)
972     {
973         return list;
974     }
975
976     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
977     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
978
979     if (skipReverseCount == 0)
980     {
981         ptr = list;
982     }
983     else
984     {
985         lastSkipNode = list;
986         // Get to the first node that needs to be reversed
987         for (unsigned i = 0; i < skipReverseCount - 1; i++)
988         {
989             lastSkipNode = lastSkipNode->Rest();
990         }
991
992         PREFIX_ASSUME(lastSkipNode != nullptr);
993         ptr = lastSkipNode->Rest();
994     }
995
996     GenTreeArgList* reversedList = nullptr;
997
998     do
999     {
1000         GenTreeArgList* tmp = ptr->Rest();
1001         ptr->Rest()         = reversedList;
1002         reversedList        = ptr;
1003         ptr                 = tmp;
1004     } while (ptr != nullptr);
1005
1006     if (skipReverseCount)
1007     {
1008         lastSkipNode->Rest() = reversedList;
1009         return list;
1010     }
1011     else
1012     {
1013         return reversedList;
1014     }
1015 }
1016
1017 /*****************************************************************************
1018    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1019    class of type 'structHnd'.  It returns the tree that should be appended to the
1020    statement list that represents the assignment.
1021    Temp assignments may be appended to impTreeList if spilling is necessary.
1022    curLevel is the stack level for which a spill may be done.
1023  */
1024
1025 GenTree* Compiler::impAssignStruct(GenTree*             dest,
1026                                    GenTree*             src,
1027                                    CORINFO_CLASS_HANDLE structHnd,
1028                                    unsigned             curLevel,
1029                                    GenTree**            pAfterStmt, /* = NULL */
1030                                    BasicBlock*          block       /* = NULL */
1031                                    )
1032 {
1033     assert(varTypeIsStruct(dest));
1034
1035     while (dest->gtOper == GT_COMMA)
1036     {
1037         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1038
1039         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1040         if (pAfterStmt)
1041         {
1042             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1043         }
1044         else
1045         {
1046             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1047         }
1048
1049         // set dest to the second thing
1050         dest = dest->gtOp.gtOp2;
1051     }
1052
1053     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1054            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1055
1056     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1057         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1058     {
1059         // Make this a NOP
1060         return gtNewNothingNode();
1061     }
1062
1063     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1064     // or re-creating a Blk node if it is.
1065     GenTree* destAddr;
1066
1067     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1068     {
1069         destAddr = dest->gtOp.gtOp1;
1070     }
1071     else
1072     {
1073         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1074     }
1075
1076     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1077 }
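
// Illustrative sketch (hypothetical): assigning a struct-valued call result into a
// struct local and appending the resulting assignment ('call', 'lclNum' and
// 'structHnd' are assumed to be in scope):
//
//     GenTree* dst = gtNewLclvNode(lclNum, lvaTable[lclNum].lvType);
//     GenTree* asg = impAssignStruct(dst, call, structHnd, (unsigned)CHECK_SPILL_ALL);
//     if (!asg->IsNothingNode())
//     {
//         impAppendTree(asg, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
//     }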
1078
1079 /*****************************************************************************/
1080
1081 GenTree* Compiler::impAssignStructPtr(GenTree*             destAddr,
1082                                       GenTree*             src,
1083                                       CORINFO_CLASS_HANDLE structHnd,
1084                                       unsigned             curLevel,
1085                                       GenTree**            pAfterStmt, /* = NULL */
1086                                       BasicBlock*          block       /* = NULL */
1087                                       )
1088 {
1089     var_types destType;
1090     GenTree*  dest      = nullptr;
1091     unsigned  destFlags = 0;
1092
1093 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1094     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1095     // TODO-ARM-BUG: Does ARM need this?
1096     // TODO-ARM64-BUG: Does ARM64 need this?
1097     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1098            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1099            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1100            (src->TypeGet() != TYP_STRUCT &&
1101             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1102 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1103     assert(varTypeIsStruct(src));
1104
1105     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1106            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1107            src->gtOper == GT_COMMA ||
1108            (src->TypeGet() != TYP_STRUCT &&
1109             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1110 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1111     if (destAddr->OperGet() == GT_ADDR)
1112     {
1113         GenTree* destNode = destAddr->gtGetOp1();
1114         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1115         // will be morphed, don't insert an OBJ(ADDR).
1116         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1117 #ifndef LEGACY_BACKEND
1118             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1119 #endif // !LEGACY_BACKEND
1120                 )
1121         {
1122             dest = destNode;
1123         }
1124         destType = destNode->TypeGet();
1125     }
1126     else
1127     {
1128         destType = src->TypeGet();
1129     }
1130
1131     var_types asgType = src->TypeGet();
1132
1133     if (src->gtOper == GT_CALL)
1134     {
1135         if (src->AsCall()->TreatAsHasRetBufArg(this))
1136         {
1137             // Case of call returning a struct via hidden retbuf arg
1138
1139             // insert the return value buffer into the argument list as first byref parameter
1140             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1141
1142             // now returns void, not a struct
1143             src->gtType = TYP_VOID;
1144
1145             // return the morphed call node
1146             return src;
1147         }
1148         else
1149         {
1150             // Case of call returning a struct in one or more registers.
1151
1152             var_types returnType = (var_types)src->gtCall.gtReturnType;
1153
1154             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1155             src->gtType = genActualType(returnType);
1156
1157             // First we try to change this to "LclVar/LclFld = call"
1158             //
1159             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1160             {
1161                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1162                 // That is, the IR will be of the form lclVar = call for multi-reg return
1163                 //
1164                 GenTree* lcl = destAddr->gtOp.gtOp1;
1165                 if (src->AsCall()->HasMultiRegRetVal())
1166                 {
1167                     // Mark the struct LclVar as used in a MultiReg return context
1168                     //  which currently makes it non promotable.
1169                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1170                     // handle multireg returns.
1171                     lcl->gtFlags |= GTF_DONT_CSE;
1172                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1173                 }
1174                 else // The call result is not a multireg return
1175                 {
1176                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1177                     lcl->ChangeOper(GT_LCL_FLD);
1178                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1179                     lcl->gtType = src->gtType;
1180                     asgType     = src->gtType;
1181                 }
1182
1183                 dest = lcl;
1184
1185 #if defined(_TARGET_ARM_)
1186                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1187                 // but that method has not been updated to include ARM.
1188                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1189                 lcl->gtFlags |= GTF_DONT_CSE;
1190 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1191                 // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
1192                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1193
1194                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1195                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1196                 // handle multireg returns.
1197                 // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
1198                 // non-multireg returns.
1199                 lcl->gtFlags |= GTF_DONT_CSE;
1200                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1201 #endif
1202             }
1203             else // we don't have a GT_ADDR of a GT_LCL_VAR
1204             {
1205                 // !!! The destination could be on stack. !!!
1206                 // This flag will let us choose the correct write barrier.
1207                 asgType   = returnType;
1208                 destFlags = GTF_IND_TGTANYWHERE;
1209             }
1210         }
1211     }
1212     else if (src->gtOper == GT_RET_EXPR)
1213     {
1214         GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1215         noway_assert(call->gtOper == GT_CALL);
1216
1217         if (call->HasRetBufArg())
1218         {
1219             // insert the return value buffer into the argument list as first byref parameter
1220             call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1221
1222             // now returns void, not a struct
1223             src->gtType  = TYP_VOID;
1224             call->gtType = TYP_VOID;
1225
1226             // We already have appended the write to 'dest' GT_CALL's args
1227             // So now we just return an empty node (pruning the GT_RET_EXPR)
1228             return src;
1229         }
1230         else
1231         {
1232             // Case of inline method returning a struct in one or more registers.
1233             //
1234             var_types returnType = (var_types)call->gtReturnType;
1235
1236             // We won't need a return buffer
1237             asgType      = returnType;
1238             src->gtType  = genActualType(returnType);
1239             call->gtType = src->gtType;
1240
1241             // If we've changed the type, and it no longer matches a local destination,
1242             // we must use an indirection.
1243             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1244             {
1245                 dest = nullptr;
1246             }
1247
1248             // !!! The destination could be on stack. !!!
1249             // This flag will let us choose the correct write barrier.
1250             destFlags = GTF_IND_TGTANYWHERE;
1251         }
1252     }
1253     else if (src->OperIsBlk())
1254     {
1255         asgType = impNormStructType(structHnd);
1256         if (src->gtOper == GT_OBJ)
1257         {
1258             assert(src->gtObj.gtClass == structHnd);
1259         }
1260     }
1261     else if (src->gtOper == GT_INDEX)
1262     {
1263         asgType = impNormStructType(structHnd);
1264         assert(src->gtIndex.gtStructElemClass == structHnd);
1265     }
1266     else if (src->gtOper == GT_MKREFANY)
1267     {
1268         // Since we are assigning the result of a GT_MKREFANY,
1269         // "destAddr" must point to a refany.
1270
1271         GenTree* destAddrClone;
1272         destAddr =
1273             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1274
1275         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1276         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1277         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1278         GenTree*       ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1279         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1280         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1281         GenTree* typeSlot =
1282             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1283
1284         // append the assign of the pointer value
1285         GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1286         if (pAfterStmt)
1287         {
1288             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1289         }
1290         else
1291         {
1292             impAppendTree(asg, curLevel, impCurStmtOffs);
1293         }
1294
1295         // return the assign of the type value, to be appended
1296         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1297     }
1298     else if (src->gtOper == GT_COMMA)
1299     {
1300         // The second thing is the struct or its address.
1301         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1302         if (pAfterStmt)
1303         {
1304             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1305         }
1306         else
1307         {
1308             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1309         }
1310
1311         // Evaluate the second thing using recursion.
1312         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1313     }
1314     else if (src->IsLocal())
1315     {
1316         asgType = src->TypeGet();
1317     }
1318     else if (asgType == TYP_STRUCT)
1319     {
1320         asgType     = impNormStructType(structHnd);
1321         src->gtType = asgType;
1322 #ifdef LEGACY_BACKEND
1323         if (asgType == TYP_STRUCT)
1324         {
1325             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1326             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1327         }
1328 #endif
1329     }
1330     if (dest == nullptr)
1331     {
1332         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1333         // if this is a known struct type.
1334         if (asgType == TYP_STRUCT)
1335         {
1336             dest = gtNewObjNode(structHnd, destAddr);
1337             gtSetObjGcInfo(dest->AsObj());
1338             // Although an obj as a call argument was always assumed to be a globRef
1339             // (which is itself overly conservative), that is not true of the operands
1340             // of a block assignment.
1341             dest->gtFlags &= ~GTF_GLOB_REF;
1342             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1343         }
1344         else if (varTypeIsStruct(asgType))
1345         {
1346             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1347         }
1348         else
1349         {
1350             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1351         }
1352     }
1353     else
1354     {
1355         dest->gtType = asgType;
1356     }
1357
1358     dest->gtFlags |= destFlags;
1359     destFlags = dest->gtFlags;
1360
1361     // return an assignment node, to be appended
1362     GenTree* asgNode = gtNewAssignNode(dest, src);
1363     gtBlockOpInit(asgNode, dest, src, false);
1364
1365     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1366     // of assignments.
1367     if ((destFlags & GTF_DONT_CSE) == 0)
1368     {
1369         dest->gtFlags &= ~(GTF_DONT_CSE);
1370     }
1371     return asgNode;
1372 }
1373
1374 /*****************************************************************************
1375    Given a struct value, and the class handle for that structure, return
1376    the expression for the address for that structure value.
1377
1378    willDeref - whether the caller guarantees to dereference the pointer.
1379 */
1380
1381 GenTree* Compiler::impGetStructAddr(GenTree*             structVal,
1382                                     CORINFO_CLASS_HANDLE structHnd,
1383                                     unsigned             curLevel,
1384                                     bool                 willDeref)
1385 {
1386     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1387
1388     var_types type = structVal->TypeGet();
1389
1390     genTreeOps oper = structVal->gtOper;
1391
1392     if (oper == GT_OBJ && willDeref)
1393     {
1394         assert(structVal->gtObj.gtClass == structHnd);
1395         return (structVal->gtObj.Addr());
1396     }
1397     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1398     {
1399         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1400
1401         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1402
1403         // The 'return value' is now the temp itself
1404
1405         type          = genActualType(lvaTable[tmpNum].TypeGet());
1406         GenTree* temp = gtNewLclvNode(tmpNum, type);
1407         temp          = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1408         return temp;
1409     }
1410     else if (oper == GT_COMMA)
1411     {
1412         assert(structVal->gtOp.gtOp2->gtType == type); // The second operand is the struct
1413
1414         GenTree* oldTreeLast  = impTreeLast;
1415         structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1416         structVal->gtType     = TYP_BYREF;
1417
1418         if (oldTreeLast != impTreeLast)
1419         {
1420             // Some temp assignment statement was placed on the statement list
1421             // for Op2, but that would be out of order with op1, so we need to
1422             // spill op1 onto the statement list after whatever was last
1423             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1424             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1425             structVal->gtOp.gtOp1 = gtNewNothingNode();
1426         }
1427
1428         return (structVal);
1429     }
1430
1431     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1432 }
1433
1434 //------------------------------------------------------------------------
1435 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1436 //                    and optionally determine the GC layout of the struct.
1437 //
1438 // Arguments:
1439 //    structHnd       - The class handle for the struct type of interest.
1440 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1441 //                      into which the gcLayout will be written.
1442 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1443 //                      which will be set to the number of GC fields in the struct.
1444 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1445 //                      type, set to the SIMD base type
1446 //
1447 // Return Value:
1448 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1449 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1450 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1451 //
1452 // Assumptions:
1453 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1454 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1455 //
1456 // Notes:
1457 //    Normalizing the type involves examining the struct type to determine if it should
1458 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1459 //    for full enregistration, e.g. TYP_SIMD16.
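//
//    Illustrative example (hedged; the exact mapping depends on FEATURE_SIMD and the target):
//    a 16-byte struct with no GC pointers, such as System.Numerics.Vector4, would be
//    normalized from TYP_STRUCT to TYP_SIMD16, while a struct that may contain GC pointers
//    (or a ByRef) is left as TYP_STRUCT by this function.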
1460
1461 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1462                                       BYTE*                gcLayout,
1463                                       unsigned*            pNumGCVars,
1464                                       var_types*           pSimdBaseType)
1465 {
1466     assert(structHnd != NO_CLASS_HANDLE);
1467
1468     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1469     var_types   structType  = TYP_STRUCT;
1470
1471     // On CoreCLR the check for GC pointers is a "may contain" check, to account for the special
1472     // ByRef-like structs such as Span<T>. CORINFO_FLG_CONTAINS_STACK_PTR is the flag for that case:
1473     // when it is set the struct contains a ByRef that could be either a GC pointer or a native
1474     // pointer.
1475     const bool mayContainGCPtrs =
1476         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1477
1478 #ifdef FEATURE_SIMD
1479     // Check to see if this is a SIMD type.
1480     if (featureSIMD && !mayContainGCPtrs)
1481     {
1482         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1483
1484         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1485         {
1486             unsigned int sizeBytes;
1487             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1488             if (simdBaseType != TYP_UNKNOWN)
1489             {
1490                 assert(sizeBytes == originalSize);
1491                 structType = getSIMDTypeForSize(sizeBytes);
1492                 if (pSimdBaseType != nullptr)
1493                 {
1494                     *pSimdBaseType = simdBaseType;
1495                 }
1496                 // Also indicate that we use floating point registers.
1497                 compFloatingPointUsed = true;
1498             }
1499         }
1500     }
1501 #endif // FEATURE_SIMD
1502
1503     // Fetch GC layout info if requested
1504     if (gcLayout != nullptr)
1505     {
1506         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1507
1508         // Verify that the quick test up above via the class attributes gave a
1509         // safe view of the type's GCness.
1510         //
1511         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1512         // does not report any gc fields.
1513
1514         assert(mayContainGCPtrs || (numGCVars == 0));
1515
1516         if (pNumGCVars != nullptr)
1517         {
1518             *pNumGCVars = numGCVars;
1519         }
1520     }
1521     else
1522     {
1523         // Can't safely ask for number of GC pointers without also
1524         // asking for layout.
1525         assert(pNumGCVars == nullptr);
1526     }
1527
1528     return structType;
1529 }
1530
1531 //****************************************************************************
1532 //  Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
1533 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1534 //
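//  For example (a sketch of what the code below does): a bare GT_LCL_VAR or GT_FIELD of struct
//  type is wrapped as OBJ(ADDR(node)), and a GT_CALL returning a struct is first spilled to a
//  temp and then wrapped the same way, so callers always see a canonical block-shaped value.
//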
1535 GenTree* Compiler::impNormStructVal(GenTree*             structVal,
1536                                     CORINFO_CLASS_HANDLE structHnd,
1537                                     unsigned             curLevel,
1538                                     bool                 forceNormalization /*=false*/)
1539 {
1540     assert(forceNormalization || varTypeIsStruct(structVal));
1541     assert(structHnd != NO_CLASS_HANDLE);
1542     var_types structType = structVal->TypeGet();
1543     bool      makeTemp   = false;
1544     if (structType == TYP_STRUCT)
1545     {
1546         structType = impNormStructType(structHnd);
1547     }
1548     bool                 alreadyNormalized = false;
1549     GenTreeLclVarCommon* structLcl         = nullptr;
1550
1551     genTreeOps oper = structVal->OperGet();
1552     switch (oper)
1553     {
1554         // GT_RETURN and GT_MKREFANY don't capture the handle.
1555         case GT_RETURN:
1556             break;
1557         case GT_MKREFANY:
1558             alreadyNormalized = true;
1559             break;
1560
1561         case GT_CALL:
1562             structVal->gtCall.gtRetClsHnd = structHnd;
1563             makeTemp                      = true;
1564             break;
1565
1566         case GT_RET_EXPR:
1567             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1568             makeTemp                         = true;
1569             break;
1570
1571         case GT_ARGPLACE:
1572             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1573             break;
1574
1575         case GT_INDEX:
1576             // This will be transformed to an OBJ later.
1577             alreadyNormalized                    = true;
1578             structVal->gtIndex.gtStructElemClass = structHnd;
1579             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1580             break;
1581
1582         case GT_FIELD:
1583             // Wrap it in a GT_OBJ.
1584             structVal->gtType = structType;
1585             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1586             break;
1587
1588         case GT_LCL_VAR:
1589         case GT_LCL_FLD:
1590             structLcl = structVal->AsLclVarCommon();
1591             // Wrap it in a GT_OBJ.
1592             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1593             __fallthrough;
1594
1595         case GT_OBJ:
1596         case GT_BLK:
1597         case GT_DYN_BLK:
1598         case GT_ASG:
1599             // These should already have the appropriate type.
1600             assert(structVal->gtType == structType);
1601             alreadyNormalized = true;
1602             break;
1603
1604         case GT_IND:
1605             assert(structVal->gtType == structType);
1606             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1607             alreadyNormalized = true;
1608             break;
1609
1610 #ifdef FEATURE_SIMD
1611         case GT_SIMD:
1612             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1613             break;
1614 #endif // FEATURE_SIMD
1615 #ifdef FEATURE_HW_INTRINSICS
1616         case GT_HWIntrinsic:
1617             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1618             break;
1619 #endif
1620
1621         case GT_COMMA:
1622         {
1623             // The second operand could be a block node, a GT_FIELD, a GT_SIMD, or a GT_COMMA node.
1624             GenTree* blockNode = structVal->gtOp.gtOp2;
1625             assert(blockNode->gtType == structType);
1626
1627             // Is this GT_COMMA(op1, GT_COMMA())?
1628             GenTree* parent = structVal;
1629             if (blockNode->OperGet() == GT_COMMA)
1630             {
1631                 // Find the last node in the comma chain.
1632                 do
1633                 {
1634                     assert(blockNode->gtType == structType);
1635                     parent    = blockNode;
1636                     blockNode = blockNode->gtOp.gtOp2;
1637                 } while (blockNode->OperGet() == GT_COMMA);
1638             }
1639
1640             if (blockNode->OperGet() == GT_FIELD)
1641             {
1642                 // If we have a GT_FIELD then wrap it in a GT_OBJ.
1643                 blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
1644             }
1645
1646 #ifdef FEATURE_SIMD
1647             if (blockNode->OperGet() == GT_SIMD)
1648             {
1649                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1650                 alreadyNormalized  = true;
1651             }
1652             else
1653 #endif
1654 #ifdef FEATURE_HW_INTRINSICS
1655                 if (blockNode->OperGet() == GT_HWIntrinsic && blockNode->AsHWIntrinsic()->isSIMD())
1656             {
1657                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1658                 alreadyNormalized  = true;
1659             }
1660             else
1661 #endif
1662             {
1663                 noway_assert(blockNode->OperIsBlk());
1664
1665                 // Sink the GT_COMMA below the blockNode addr.
1666                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1667                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1668                 //
1669                 // In the case of a chained GT_COMMA, we sink the last
1670                 // GT_COMMA below the blockNode addr.
1671                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1672                 assert(blockNodeAddr->gtType == TYP_BYREF);
1673                 GenTree* commaNode    = parent;
1674                 commaNode->gtType     = TYP_BYREF;
1675                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1676                 blockNode->gtOp.gtOp1 = commaNode;
1677                 if (parent == structVal)
1678                 {
1679                     structVal = blockNode;
1680                 }
1681                 alreadyNormalized = true;
1682             }
1683         }
1684         break;
1685
1686         default:
1687             noway_assert(!"Unexpected node in impNormStructVal()");
1688             break;
1689     }
1690     structVal->gtType  = structType;
1691     GenTree* structObj = structVal;
1692
1693     if (!alreadyNormalized || forceNormalization)
1694     {
1695         if (makeTemp)
1696         {
1697             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1698
1699             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1700
1701             // The structVal is now the temp itself
1702
1703             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1704             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1705             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1706         }
1707         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1708         {
1709             // Wrap it in a GT_OBJ
1710             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1711         }
1712     }
1713
1714     if (structLcl != nullptr)
1715     {
1716         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1717         // so we don't set GTF_EXCEPT here.
1718         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1719         {
1720             structObj->gtFlags &= ~GTF_GLOB_REF;
1721         }
1722     }
1723     else
1724     {
1725         // In general an OBJ is an indirection and could raise an exception.
1726         structObj->gtFlags |= GTF_EXCEPT;
1727     }
1728     return (structObj);
1729 }
1730
1731 /******************************************************************************/
1732 // Given a type token, generate code that will evaluate to the correct
1733 // handle representation of that token (type handle, field handle, or method handle)
1734 //
1735 // For most cases, the handle is determined at compile-time, and the code
1736 // generated is simply an embedded handle.
1737 //
1738 // Run-time lookup is required if the enclosing method is shared between instantiations
1739 // and the token refers to formal type parameters whose instantiation is not known
1740 // at compile-time.
1741 //
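// As an illustrative (assumed) example: in a shared generic method such as
//     void M<T>() { object o = typeof(T); }
// the type handle for T cannot be embedded as a compile-time constant, so a runtime lookup
// tree is generated; in the non-generic equivalent using typeof(string) the handle is known
// at compile time and is embedded directly.
//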
1742 GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1743                                     BOOL*                   pRuntimeLookup /* = NULL */,
1744                                     BOOL                    mustRestoreHandle /* = FALSE */,
1745                                     BOOL                    importParent /* = FALSE */)
1746 {
1747     assert(!fgGlobalMorph);
1748
1749     CORINFO_GENERICHANDLE_RESULT embedInfo;
1750     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1751
1752     if (pRuntimeLookup)
1753     {
1754         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1755     }
1756
1757     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1758     {
1759         switch (embedInfo.handleType)
1760         {
1761             case CORINFO_HANDLETYPE_CLASS:
1762                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1763                 break;
1764
1765             case CORINFO_HANDLETYPE_METHOD:
1766                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1767                 break;
1768
1769             case CORINFO_HANDLETYPE_FIELD:
1770                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1771                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1772                 break;
1773
1774             default:
1775                 break;
1776         }
1777     }
1778
1779     // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
1780     GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1781                                       embedInfo.compileTimeHandle);
1782
1783     // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
1784     if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
1785     {
1786         result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
1787     }
1788
1789     return result;
1790 }
1791
1792 GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1793                                    CORINFO_LOOKUP*         pLookup,
1794                                    unsigned                handleFlags,
1795                                    void*                   compileTimeHandle)
1796 {
1797     if (!pLookup->lookupKind.needsRuntimeLookup)
1798     {
1799         // No runtime lookup is required.
1800         // Access is a direct reference, or a memory-indirect reference through a fixed address.
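        //
        // For illustration: with IAT_VALUE the handle itself is known and is embedded as an
        // immediate constant; with IAT_PVALUE only the address of a cell holding the handle is
        // known, so the node created below carries that address and resolves it with an
        // indirection at run time (gtNewIconEmbHndNode handles both shapes).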
1801
1802         CORINFO_GENERIC_HANDLE handle       = nullptr;
1803         void*                  pIndirection = nullptr;
1804         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1805
1806         if (pLookup->constLookup.accessType == IAT_VALUE)
1807         {
1808             handle = pLookup->constLookup.handle;
1809         }
1810         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1811         {
1812             pIndirection = pLookup->constLookup.addr;
1813         }
1814         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1815     }
1816     else if (compIsForInlining())
1817     {
1818         // Don't import runtime lookups when inlining
1819         // Inlining has to be aborted in such a case
1820         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1821         return nullptr;
1822     }
1823     else
1824     {
1825         // Need to use dictionary-based access, which depends on the typeContext;
1826         // the typeContext is only available at runtime, not at compile-time.
1827
1828         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1829     }
1830 }
1831
1832 #ifdef FEATURE_READYTORUN_COMPILER
1833 GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1834                                              unsigned              handleFlags,
1835                                              void*                 compileTimeHandle)
1836 {
1837     CORINFO_GENERIC_HANDLE handle       = nullptr;
1838     void*                  pIndirection = nullptr;
1839     assert(pLookup->accessType != IAT_PPVALUE);
1840
1841     if (pLookup->accessType == IAT_VALUE)
1842     {
1843         handle = pLookup->handle;
1844     }
1845     else if (pLookup->accessType == IAT_PVALUE)
1846     {
1847         pIndirection = pLookup->addr;
1848     }
1849     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1850 }
1851
1852 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1853     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1854     CorInfoHelpFunc         helper,
1855     var_types               type,
1856     GenTreeArgList*         args /* =NULL*/,
1857     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1858 {
1859     CORINFO_CONST_LOOKUP lookup;
1860     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1861     {
1862         return nullptr;
1863     }
1864
1865     GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
1866
1867     op1->setEntryPoint(lookup);
1868
1869     return op1;
1870 }
1871 #endif
1872
1873 GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1874 {
1875     GenTree* op1 = nullptr;
1876
1877     switch (pCallInfo->kind)
1878     {
1879         case CORINFO_CALL:
1880             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1881
1882 #ifdef FEATURE_READYTORUN_COMPILER
1883             if (opts.IsReadyToRun())
1884             {
1885                 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1886             }
1887             else
1888             {
1889                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1890             }
1891 #endif
1892             break;
1893
1894         case CORINFO_CALL_CODE_POINTER:
1895             if (compIsForInlining())
1896             {
1897                 // Don't import runtime lookups when inlining
1898                 // Inlining has to be aborted in such a case
1899                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1900                 return nullptr;
1901             }
1902
1903             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1904             break;
1905
1906         default:
1907             noway_assert(!"unknown call kind");
1908             break;
1909     }
1910
1911     return op1;
1912 }
1913
1914 //------------------------------------------------------------------------
1915 // getRuntimeContextTree: find pointer to context for runtime lookup.
1916 //
1917 // Arguments:
1918 //    kind - lookup kind.
1919 //
1920 // Return Value:
1921 //    Return GenTree pointer to generic shared context.
1922 //
1923 // Notes:
1924 //    Records a use of the generic context (see lvaGenericsContextUseCount below).
1925
1926 GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1927 {
1928     GenTree* ctxTree = nullptr;
1929
1930     // Collectible types require that, for shared generic code, if we use the generic context parameter
1931     // we report it. (This is a conservative approach; we could detect some cases, particularly when the
1932     // context parameter is 'this', where we don't need the eager reporting logic.)
1933     lvaGenericsContextUseCount++;
1934
1935     if (kind == CORINFO_LOOKUP_THISOBJ)
1936     {
1937         // this Object
1938         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1939
1940         // Vtable pointer of this object
1941         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1942         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1943         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1944     }
1945     else
1946     {
1947         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1948
1949         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1950     }
1951     return ctxTree;
1952 }
1953
1954 /*****************************************************************************/
1955 /* Import a dictionary lookup to access a handle in code shared between
1956    generic instantiations.
1957    The lookup depends on the typeContext, which is only available at
1958    runtime and not at compile-time.
1959    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1960    The cases are:
1961
1962    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1963       instantiation-specific handle, and the tokens to look up the handle.
1964    2. pLookup->indirections != CORINFO_USEHELPER :
1965       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1966           to get the handle.
1967       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1968           If it is non-NULL, it is the handle required. Else, call a helper
1969           to look up the handle.
1970  */
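
// As a rough sketch (illustrative only), case 2b produces a tree along the lines of:
//
//     handle = IND(slotPtr);
//     result = (handle != null) ? handle : HELPER_CALL(ctx, signatureToken);
//
// spilled to a temp so the result can be used like an ordinary local; the code below builds
// this with a GT_QMARK/GT_COLON pair and non-faulting indirection flags.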
1971
1972 GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1973                                           CORINFO_LOOKUP*         pLookup,
1974                                           void*                   compileTimeHandle)
1975 {
1976
1977     // This method can only be called from the importer instance of the Compiler.
1978     // In other words, it cannot be called by the Compiler instance created for the inlinee.
1979     assert(!compIsForInlining());
1980
1981     GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1982
1983     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1984     // It's available only via the run-time helper function
1985     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1986     {
1987 #ifdef FEATURE_READYTORUN_COMPILER
1988         if (opts.IsReadyToRun())
1989         {
1990             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1991                                              gtNewArgList(ctxTree), &pLookup->lookupKind);
1992         }
1993 #endif
1994         GenTree* argNode =
1995             gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
1996         GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
1997
1998         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
1999     }
2000
2001     // Slot pointer
2002     GenTree* slotPtrTree = ctxTree;
2003
2004     if (pRuntimeLookup->testForNull)
2005     {
2006         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2007                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2008     }
2009
2010     GenTree* indOffTree = nullptr;
2011
2012     // Apply the repeated indirections
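    // Illustrative sketch: for indirections == 2 with offsets {o0, o1} and no indirect offsets
    // or null test, the loop below (plus the final dereference further down) yields roughly
    // *( *(ctx + o0) + o1 ).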
2013     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2014     {
2015         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2016         {
2017             indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2018                                       nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
2019         }
2020
2021         if (i != 0)
2022         {
2023             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2024             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2025             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2026         }
2027
2028         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2029         {
2030             slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
2031         }
2032
2033         if (pRuntimeLookup->offsets[i] != 0)
2034         {
2035             slotPtrTree =
2036                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2037         }
2038     }
2039
2040     // No null test required
2041     if (!pRuntimeLookup->testForNull)
2042     {
2043         if (pRuntimeLookup->indirections == 0)
2044         {
2045             return slotPtrTree;
2046         }
2047
2048         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2049         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2050
2051         if (!pRuntimeLookup->testForFixup)
2052         {
2053             return slotPtrTree;
2054         }
2055
2056         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2057
2058         unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2059         impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2060
2061         GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2062         // downcast the pointer to a TYP_INT on 64-bit targets
2063         slot = impImplicitIorI4Cast(slot, TYP_INT);
2064         // Use a GT_AND to check for the lowest bit and indirect if it is set
2065         GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2066         GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2067         relop->gtFlags |= GTF_RELOP_QMARK;
2068
2069         // slot = GT_IND(slot - 1)
2070         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2071         GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2072         GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2073         indir->gtFlags |= GTF_IND_NONFAULTING;
2074         indir->gtFlags |= GTF_IND_INVARIANT;
2075
2076         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2077         GenTree* asg   = gtNewAssignNode(slot, indir);
2078         GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2079         GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2080         impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2081
2082         return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2083     }
2084
2085     assert(pRuntimeLookup->indirections != 0);
2086
2087     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2088
2089     // Extract the handle
2090     GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2091     handle->gtFlags |= GTF_IND_NONFAULTING;
2092
2093     GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2094                                        nullptr DEBUGARG("impRuntimeLookup typehandle"));
2095
2096     // Call to helper
2097     GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2098
2099     GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2100     GenTree*        helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2101
2102     // Check for null and possibly call helper
2103     GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2104     relop->gtFlags |= GTF_RELOP_QMARK;
2105
2106     GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2107                                                        gtNewNothingNode(), // do nothing if nonnull
2108                                                        helperCall);
2109
2110     GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2111
2112     unsigned tmp;
2113     if (handleCopy->IsLocal())
2114     {
2115         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2116     }
2117     else
2118     {
2119         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2120     }
2121
2122     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2123     return gtNewLclvNode(tmp, TYP_I_IMPL);
2124 }
2125
2126 /******************************************************************************
2127  *  Spills the stack entry at verCurrentState.esStack[level] and replaces it with a temp.
2128  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2129  *     else, grab a new temp.
2130  *  For structs (which can be pushed on the stack using obj, etc),
2131  *  special handling is needed
2132  */
2133
2134 struct RecursiveGuard
2135 {
2136 public:
2137     RecursiveGuard()
2138     {
2139         m_pAddress = nullptr;
2140     }
2141
2142     ~RecursiveGuard()
2143     {
2144         if (m_pAddress)
2145         {
2146             *m_pAddress = false;
2147         }
2148     }
2149
2150     void Init(bool* pAddress, bool bInitialize)
2151     {
2152         assert(pAddress && *pAddress == false && "Recursive guard violation");
2153         m_pAddress = pAddress;
2154
2155         if (bInitialize)
2156         {
2157             *m_pAddress = true;
2158         }
2159     }
2160
2161 protected:
2162     bool* m_pAddress;
2163 };
2164
2165 bool Compiler::impSpillStackEntry(unsigned level,
2166                                   unsigned tnum
2167 #ifdef DEBUG
2168                                   ,
2169                                   bool        bAssertOnRecursion,
2170                                   const char* reason
2171 #endif
2172                                   )
2173 {
2174
2175 #ifdef DEBUG
2176     RecursiveGuard guard;
2177     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2178 #endif
2179
2180     GenTree* tree = verCurrentState.esStack[level].val;
2181
2182     /* Allocate a temp if we haven't been asked to use a particular one */
2183
2184     if (tiVerificationNeeded)
2185     {
2186         // Ignore bad temp requests (they will happen with bad code and will be
2187         // caught when importing the destblock)
2188         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2189         {
2190             return false;
2191         }
2192     }
2193     else
2194     {
2195         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2196         {
2197             return false;
2198         }
2199     }
2200
2201     bool isNewTemp = false;
2202
2203     if (tnum == BAD_VAR_NUM)
2204     {
2205         tnum      = lvaGrabTemp(true DEBUGARG(reason));
2206         isNewTemp = true;
2207     }
2208     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2209     {
2210         // if verification is needed and tnum's type is incompatible with
2211         // the type on the stack, we bail out (return false below) rather than reuse it. This is safe since
2212         // we will throw a verification exception in the dest block.
2213
2214         var_types valTyp = tree->TypeGet();
2215         var_types dstTyp = lvaTable[tnum].TypeGet();
2216
2217         // if the two types are different, we return. This will only happen with bad code and will
2218         // be caught when importing the destblock. We still allow int/byref and float/double differences.
2219         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2220             !(
2221 #ifndef _TARGET_64BIT_
2222                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2223 #endif // !_TARGET_64BIT_
2224                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2225         {
2226             if (verNeedsVerification())
2227             {
2228                 return false;
2229             }
2230         }
2231     }
2232
2233     /* Assign the spilled entry to the temp */
2234     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2235
2236     // If temp is newly introduced and a ref type, grab what type info we can.
2237     if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2238     {
2239         CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2240         lvaSetClass(tnum, tree, stkHnd);
2241     }
2242
2243     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2244     var_types type                     = genActualType(lvaTable[tnum].TypeGet());
2245     GenTree*  temp                     = gtNewLclvNode(tnum, type);
2246     verCurrentState.esStack[level].val = temp;
2247
2248     return true;
2249 }
2250
2251 /*****************************************************************************
2252  *
2253  *  Ensure that the stack has only spilled values
2254  */
2255
2256 void Compiler::impSpillStackEnsure(bool spillLeaves)
2257 {
2258     assert(!spillLeaves || opts.compDbgCode);
2259
2260     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2261     {
2262         GenTree* tree = verCurrentState.esStack[level].val;
2263
2264         if (!spillLeaves && tree->OperIsLeaf())
2265         {
2266             continue;
2267         }
2268
2269         // Temps introduced by the importer itself don't need to be spilled
2270
2271         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2272
2273         if (isTempLcl)
2274         {
2275             continue;
2276         }
2277
2278         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2279     }
2280 }
2281
2282 void Compiler::impSpillEvalStack()
2283 {
2284     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2285     {
2286         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2287     }
2288 }
2289
2290 /*****************************************************************************
2291  *
2292  *  If the stack contains any trees with side effects in them, assign those
2293  *  trees to temps and append the assignments to the statement list.
2294  *  On return the stack is guaranteed to be empty.
2295  */
2296
2297 inline void Compiler::impEvalSideEffects()
2298 {
2299     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2300     verCurrentState.esStackDepth = 0;
2301 }
2302
2303 /*****************************************************************************
2304  *
2305  *  If the stack contains any trees with side effects in them, assign those
2306  *  trees to temps and replace them on the stack with refs to their temps.
2307  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2308  */
2309
2310 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2311 {
2312     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2313
2314     /* Before we make any appends to the tree list we must spill the
2315      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2316
2317     impSpillSpecialSideEff();
2318
2319     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2320     {
2321         chkLevel = verCurrentState.esStackDepth;
2322     }
2323
2324     assert(chkLevel <= verCurrentState.esStackDepth);
2325
2326     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2327
2328     for (unsigned i = 0; i < chkLevel; i++)
2329     {
2330         GenTree* tree = verCurrentState.esStack[i].val;
2331
2332         GenTree* lclVarTree;
2333
2334         if ((tree->gtFlags & spillFlags) != 0 ||
2335             (spillGlobEffects &&                        // Only consider the following when spillGlobEffects == TRUE
2336              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2337              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2338                                            // lvAddrTaken flag.
2339         {
2340             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2341         }
2342     }
2343 }
2344
2345 /*****************************************************************************
2346  *
2347  *  If the stack contains any trees with special side effects in them, assign
2348  *  those trees to temps and replace them on the stack with refs to their temps.
2349  */
2350
2351 inline void Compiler::impSpillSpecialSideEff()
2352 {
2353     // Only exception objects need to be carefully handled
2354
2355     if (!compCurBB->bbCatchTyp)
2356     {
2357         return;
2358     }
2359
2360     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2361     {
2362         GenTree* tree = verCurrentState.esStack[level].val;
2363         // Make sure that if we have an exception object in the subtree, we spill that stack entry.
2364         if (gtHasCatchArg(tree))
2365         {
2366             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2367         }
2368     }
2369 }
2370
2371 /*****************************************************************************
2372  *
2373  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2374  */
2375
2376 void Compiler::impSpillValueClasses()
2377 {
2378     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2379     {
2380         GenTree* tree = verCurrentState.esStack[level].val;
2381
2382         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2383         {
2384             // Tree walk was aborted, which means that we found a
2385             // value class on the stack.  Need to spill that
2386             // stack entry.
2387
2388             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2389         }
2390     }
2391 }
2392
2393 /*****************************************************************************
2394  *
2395  *  Callback that checks if a tree node is TYP_STRUCT
2396  */
2397
2398 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
2399 {
2400     fgWalkResult walkResult = WALK_CONTINUE;
2401
2402     if ((*pTree)->gtType == TYP_STRUCT)
2403     {
2404         // Abort the walk and indicate that we found a value class
2405
2406         walkResult = WALK_ABORT;
2407     }
2408
2409     return walkResult;
2410 }
2411
2412 /*****************************************************************************
2413  *
2414  *  If the stack contains any trees with references to local #lclNum, assign
2415  *  those trees to temps and replace them on the stack with refs to
2416  *  their temps.
2417  */
2418
2419 void Compiler::impSpillLclRefs(ssize_t lclNum)
2420 {
2421     /* Before we make any appends to the tree list we must spill the
2422      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2423
2424     impSpillSpecialSideEff();
2425
2426     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2427     {
2428         GenTree* tree = verCurrentState.esStack[level].val;
2429
2430         /* If the tree may throw an exception, and the block has a handler,
2431            then we need to spill assignments to the local if the local is
2432            live on entry to the handler.
2433            Just spill them all without considering liveness */
2434
2435         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2436
2437         /* Skip the tree if it doesn't have an affected reference,
2438            unless xcptnCaught */
2439
2440         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2441         {
2442             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2443         }
2444     }
2445 }
2446
2447 /*****************************************************************************
2448  *
2449  *  Push catch arg onto the stack.
2450  *  If there are jumps to the beginning of the handler, insert basic block
2451  *  and spill catch arg to a temp. Update the handler block if necessary.
2452  *
2453  *  Returns the basic block of the actual handler.
2454  */
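
// Sketch of the transformation performed below: if the handler entry has multiple predecessors
// (or we force it under stress), a new block is inserted before the handler that stores the
// GT_CATCH_ARG (the incoming exception object, tied to a fixed register) into a temp, and the
// handler then begins by pushing that temp instead of the raw catch-arg node.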
2455
2456 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2457 {
2458     // Do not inject the basic block twice on reimport. This should be
2459     // hit only under JIT stress. See if the block is the one we injected.
2460     // Note that EH canonicalization can inject internal blocks here. We might
2461     // be able to re-use such a block (but we don't, right now).
2462     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2463         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2464     {
2465         GenTree* tree = hndBlk->bbTreeList;
2466
2467         if (tree != nullptr && tree->gtOper == GT_STMT)
2468         {
2469             tree = tree->gtStmt.gtStmtExpr;
2470             assert(tree != nullptr);
2471
2472             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2473                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2474             {
2475                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2476
2477                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2478
2479                 return hndBlk->bbNext;
2480             }
2481         }
2482
2483         // If we get here, it must have been some other kind of internal block. It's possible that
2484         // someone prepended something to our injected block, but that's unlikely.
2485     }
2486
2487     /* Push the exception address value on the stack */
2488     GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2489
2490     /* Mark the node as having a side-effect - i.e. cannot be
2491      * moved around since it is tied to a fixed location (EAX) */
2492     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2493
2494 #if defined(JIT32_GCENCODER)
2495     const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2496 #else
2497     const bool forceInsertNewBlock                                     = compStressCompile(STRESS_CATCH_ARG, 5);
2498 #endif // defined(JIT32_GCENCODER)
2499
2500     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2501     if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2502     {
2503         if (hndBlk->bbRefs == 1)
2504         {
2505             hndBlk->bbRefs++;
2506         }
2507
2508         /* Create extra basic block for the spill */
2509         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2510         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2511         newBlk->setBBWeight(hndBlk->bbWeight);
2512         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2513
2514         /* Account for the new link we are about to create */
2515         hndBlk->bbRefs++;
2516
2517         /* Spill into a temp */
2518         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2519         lvaTable[tempNum].lvType = TYP_REF;
2520         arg                      = gtNewTempAssign(tempNum, arg);
2521
2522         hndBlk->bbStkTempsIn = tempNum;
2523
2524         /* Report the debug info. impImportBlockCode won't treat
2525          * the actual handler as an exception block and thus won't do it for us. */
2526         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2527         {
2528             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2529             arg            = gtNewStmt(arg, impCurStmtOffs);
2530         }
2531
2532         fgInsertStmtAtEnd(newBlk, arg);
2533
2534         arg = gtNewLclvNode(tempNum, TYP_REF);
2535     }
2536
2537     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2538
2539     return hndBlk;
2540 }
2541
2542 /*****************************************************************************
2543  *
2544  *  Given a tree, clone it. *pClone is set to the cloned tree.
2545  *  Returns the original tree if the cloning was easy,
2546  *   else returns the temp to which the tree had to be spilled.
2547  *  If the tree has side-effects, it will be spilled to a temp.
2548  */
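
// Typical usage in this file (see e.g. impRuntimeLookupToTree above):
//
//     slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
//                                nullptr DEBUGARG("impRuntimeLookup slot"));
//
// after which 'slotPtrTree' and 'ctxTree' can each be used once without re-evaluating any side
// effects of the original expression.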
2549
2550 GenTree* Compiler::impCloneExpr(GenTree*             tree,
2551                                 GenTree**            pClone,
2552                                 CORINFO_CLASS_HANDLE structHnd,
2553                                 unsigned             curLevel,
2554                                 GenTree** pAfterStmt DEBUGARG(const char* reason))
2555 {
2556     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2557     {
2558         GenTree* clone = gtClone(tree, true);
2559
2560         if (clone)
2561         {
2562             *pClone = clone;
2563             return tree;
2564         }
2565     }
2566
2567     /* Store the operand in a temp and return the temp */
2568
2569     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2570
2571     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2572     // return a struct type. It also may modify the struct type to a more
2573     // specialized type (e.g. a SIMD type).  So we will get the type from
2574     // the lclVar AFTER calling impAssignTempGen().
2575
2576     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2577     var_types type = genActualType(lvaTable[temp].TypeGet());
2578
2579     *pClone = gtNewLclvNode(temp, type);
2580     return gtNewLclvNode(temp, type);
2581 }
2582
2583 /*****************************************************************************
2584  * Remember the IL offset (including stack-empty info) for the trees we will
2585  * generate now.
2586  */
2587
2588 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2589 {
2590     if (compIsForInlining())
2591     {
2592         GenTree* callStmt = impInlineInfo->iciStmt;
2593         assert(callStmt->gtOper == GT_STMT);
2594         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2595     }
2596     else
2597     {
2598         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2599         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2600         impCurStmtOffs    = offs | stkBit;
2601     }
2602 }
2603
2604 /*****************************************************************************
2605  * Returns current IL offset with stack-empty and call-instruction info incorporated
2606  */
2607 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2608 {
2609     if (compIsForInlining())
2610     {
2611         return BAD_IL_OFFSET;
2612     }
2613     else
2614     {
2615         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2616         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2617         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2618         return offs | stkBit | callInstructionBit;
2619     }
2620 }
2621
2622 //------------------------------------------------------------------------
2623 // impCanSpillNow: check whether it is possible to spill all values from the evaluation stack to local variables.
2624 //
2625 // Arguments:
2626 //    prevOpcode - last importer opcode
2627 //
2628 // Return Value:
2629 //    true if it is legal; false if this could be part of a sequence that we do not want to break up.
2630 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2631 {
2632     // Don't spill after ldtoken, newarr or newobj, because the opcode could be part of the InitializeArray sequence.
2633     // Avoid breaking up that sequence so that impInitializeArrayIntrinsic can still recognize it.
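    // Illustrative (assumed) C# whose IL relies on this sequence:
    //     int[] a = new int[] { 1, 2, 3 };
    // which compiles to roughly: newarr, dup, ldtoken <compiler-generated data field>,
    // call System.Runtime.CompilerServices.RuntimeHelpers.InitializeArray(...).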
2634     return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
2635 }
2636
2637 /*****************************************************************************
2638  *
2639  *  Remember the instr offset for the statements
2640  *
2641  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2642  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2643  *  as some of the trees corresponding to code up to impCurOpcOffs might
2644  *  still be sitting on the stack.
2645  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2646  *  This should be called when an opcode finally/explicitly causes
2647  *  impAppendTree(tree) to be called (as opposed to being called because of
2648  *  a spill caused by the opcode)
2649  */
2650
2651 #ifdef DEBUG
2652
2653 void Compiler::impNoteLastILoffs()
2654 {
2655     if (impLastILoffsStmt == nullptr)
2656     {
2657         // We should have added a statement for the current basic block
2658         // Is this assert correct ?
2659
2660         assert(impTreeLast);
2661         assert(impTreeLast->gtOper == GT_STMT);
2662
2663         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2664     }
2665     else
2666     {
2667         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2668         impLastILoffsStmt                          = nullptr;
2669     }
2670 }
2671
2672 #endif // DEBUG
2673
2674 /*****************************************************************************
2675  * We don't create any GenTree (excluding spills) for a branch.
2676  * For debugging info, we need a placeholder so that we can note
2677  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2678  */
2679
2680 void Compiler::impNoteBranchOffs()
2681 {
2682     if (opts.compDbgCode)
2683     {
2684         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2685     }
2686 }
2687
2688 /*****************************************************************************
2689  * Locate the next stmt boundary for which we need to record info.
2690  * We will have to spill the stack at such boundaries if it is not
2691  * already empty.
2692  * Returns the next stmt boundary (after the start of the block)
2693  */
2694
2695 unsigned Compiler::impInitBlockLineInfo()
2696 {
2697     /* Assume the block does not correspond with any IL offset. This prevents
2698        us from reporting extra offsets. Extra mappings can cause confusing
2699        stepping, especially if the extra mapping is a jump-target, and the
2700        debugger does not ignore extra mappings, but instead rewinds to the
2701        nearest known offset */
2702
2703     impCurStmtOffsSet(BAD_IL_OFFSET);
2704
2705     if (compIsForInlining())
2706     {
2707         return ~0;
2708     }
2709
2710     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2711
2712     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2713     {
2714         impCurStmtOffsSet(blockOffs);
2715     }
2716
2717     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2718     {
2719         impCurStmtOffsSet(blockOffs);
2720     }
2721
2722     /* Always report IL offset 0 or some tests get confused.
2723        Probably a good idea anyway */
2724
2725     if (blockOffs == 0)
2726     {
2727         impCurStmtOffsSet(blockOffs);
2728     }
2729
2730     if (!info.compStmtOffsetsCount)
2731     {
2732         return ~0;
2733     }
2734
2735     /* Find the lowest explicit stmt boundary within the block */
2736
2737     /* Start looking at an entry that is based on our instr offset */
2738
2739     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
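    // For example (illustrative): with 10 recorded boundaries over 200 bytes of IL, a block
    // starting at IL offset 60 begins the search at index (10 * 60) / 200 = 3.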
2740
2741     if (index >= info.compStmtOffsetsCount)
2742     {
2743         index = info.compStmtOffsetsCount - 1;
2744     }
2745
2746     /* If we've guessed too far, back up */
2747
2748     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2749     {
2750         index--;
2751     }
2752
2753     /* If we guessed short, advance ahead */
2754
2755     while (info.compStmtOffsets[index] < blockOffs)
2756     {
2757         index++;
2758
2759         if (index == info.compStmtOffsetsCount)
2760         {
2761             return info.compStmtOffsetsCount;
2762         }
2763     }
2764
2765     assert(index < info.compStmtOffsetsCount);
2766
2767     if (info.compStmtOffsets[index] == blockOffs)
2768     {
2769         /* There is an explicit boundary for the start of this basic block.
2770            So we will start with bbCodeOffs. Else we will wait until we
2771            get to the next explicit boundary */
2772
2773         impCurStmtOffsSet(blockOffs);
2774
2775         index++;
2776     }
2777
2778     return index;
2779 }
2780
2781 /*****************************************************************************/
2782
2783 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2784 {
2785     switch (opcode)
2786     {
2787         case CEE_CALL:
2788         case CEE_CALLI:
2789         case CEE_CALLVIRT:
2790             return true;
2791
2792         default:
2793             return false;
2794     }
2795 }
2796
2797 /*****************************************************************************/
2798
2799 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2800 {
2801     switch (opcode)
2802     {
2803         case CEE_CALL:
2804         case CEE_CALLI:
2805         case CEE_CALLVIRT:
2806         case CEE_JMP:
2807         case CEE_NEWOBJ:
2808         case CEE_NEWARR:
2809             return true;
2810
2811         default:
2812             return false;
2813     }
2814 }
2815
2816 /*****************************************************************************/
2817
2818 // One might think it is worth caching these values, but results indicate
2819 // that it isn't.
2820 // In addition, caching them causes SuperPMI to be unable to completely
2821 // encapsulate an individual method context.
2822 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2823 {
2824     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2825     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2826     return refAnyClass;
2827 }
2828
2829 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2830 {
2831     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2832     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2833     return typeHandleClass;
2834 }
2835
2836 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2837 {
2838     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2839     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2840     return argIteratorClass;
2841 }
2842
2843 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2844 {
2845     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2846     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2847     return stringClass;
2848 }
2849
2850 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2851 {
2852     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2853     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2854     return objectClass;
2855 }
2856
2857 /*****************************************************************************
2858  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2859  *  set its type to TYP_BYREF when we create it. Whether it can be changed
2860  *  to TYP_I_IMPL is only known at the point where we use it
2861  */
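//  A rough illustration: a node produced for "ldloca.s V_0" (the address of a local)
//  is created as TYP_BYREF; if it later ends up where a native int is expected, e.g.
//  in integer arithmetic or a comparison, this routine retypes it to TYP_I_IMPL in
//  place rather than inserting a cast.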
2862
2863 /* static */
2864 void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
2865 {
2866     if (tree1->IsVarAddr())
2867     {
2868         tree1->gtType = TYP_I_IMPL;
2869     }
2870
2871     if (tree2 && tree2->IsVarAddr())
2872     {
2873         tree2->gtType = TYP_I_IMPL;
2874     }
2875 }
2876
2877 /*****************************************************************************
2878  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2879  *  to make that an explicit cast in our trees, so any implicit casts that
2880  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2881  *  turned into explicit casts here.
2882  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2883  */
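//  A rough example on a 64-bit target: an IL constant "ldc.i4 5" consumed where a
//  native int is expected leaves a GT_CNS_INT that is simply retyped to TYP_I_IMPL,
//  while a non-constant int32 value in the same position gets an explicit GT_CAST
//  to TYP_I_IMPL.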
2884
2885 GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
2886 {
2887     var_types currType   = genActualType(tree->gtType);
2888     var_types wantedType = genActualType(dstTyp);
2889
2890     if (wantedType != currType)
2891     {
2892         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2893         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2894         {
2895             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2896             {
2897                 tree->gtType = TYP_I_IMPL;
2898             }
2899         }
2900 #ifdef _TARGET_64BIT_
2901         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2902         {
2903             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2904             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2905         }
2906         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2907         {
2908             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2909             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2910         }
2911 #endif // _TARGET_64BIT_
2912     }
2913
2914     return tree;
2915 }
2916
2917 /*****************************************************************************
2918  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2919  *  but we want to make that an explicit cast in our trees, so any implicit casts
2920  *  that exist in the IL are turned into explicit casts here.
2921  */
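//  A rough example: a TYP_FLOAT value stored into a double-typed slot (the IL has no
//  explicit conv.r8) reaches this routine with dstTyp == TYP_DOUBLE, and an explicit
//  GT_CAST to TYP_DOUBLE is inserted.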
2922
2923 GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
2924 {
2925 #ifndef LEGACY_BACKEND
2926     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2927     {
2928         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2929     }
2930 #endif // !LEGACY_BACKEND
2931
2932     return tree;
2933 }
2934
2935 //------------------------------------------------------------------------
2936 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2937 //    with a GT_COPYBLK node.
2938 //
2939 // Arguments:
2940 //    sig - The InitializeArray signature.
2941 //
2942 // Return Value:
2943 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2944 //    nullptr otherwise.
2945 //
2946 // Notes:
2947 //    The function recognizes the following IL pattern:
2948 //      ldc <length> or a list of ldc <lower bound>/<length>
2949 //      newarr or newobj
2950 //      dup
2951 //      ldtoken <field handle>
2952 //      call InitializeArray
2953 //    The lower bounds need not be constant except when the array rank is 1.
2954 //    The function recognizes all kinds of arrays thus enabling a small runtime
2955 //    such as CoreRT to skip providing an implementation for InitializeArray.
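//
//    For illustration, a C# field initializer such as
//        static readonly int[] Primes = { 2, 3, 5, 7 };
//    typically compiles to exactly this shape: ldc.i4.4; newarr int32; dup;
//    ldtoken <data field in <PrivateImplementationDetails>>; call InitializeArray.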
2956
2957 GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2958 {
2959     assert(sig->numArgs == 2);
2960
2961     GenTree* fieldTokenNode = impStackTop(0).val;
2962     GenTree* arrayLocalNode = impStackTop(1).val;
2963
2964     //
2965     // Verify that the field token is known and valid.  Note that it's also
2966     // possible for the token to come from reflection, in which case we cannot do
2967     // the optimization and must therefore revert to calling the helper.  You can
2968     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2969     //
2970
2971     // Check to see if the ldtoken helper call is what we see here.
2972     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2973         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2974     {
2975         return nullptr;
2976     }
2977
2978     // Strip helper call away
2979     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2980
2981     if (fieldTokenNode->gtOper == GT_IND)
2982     {
2983         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2984     }
2985
2986     // Check for constant
2987     if (fieldTokenNode->gtOper != GT_CNS_INT)
2988     {
2989         return nullptr;
2990     }
2991
2992     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2993     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2994     {
2995         return nullptr;
2996     }
2997
2998     //
2999     // We need to get the number of elements in the array and the size of each element.
3000     // We verify that the newarr statement is exactly what we expect it to be.
3001     // If it's not, then we just return nullptr and don't optimize this call
3002     //
3003
3004     //
3005     // It is possible that we don't have any statements in the block yet
3006     //
3007     if (impTreeLast->gtOper != GT_STMT)
3008     {
3009         assert(impTreeLast->gtOper == GT_BEG_STMTS);
3010         return nullptr;
3011     }
3012
3013     //
3014     // We start by looking at the last statement, making sure it's an assignment, and
3015     // that the target of the assignment is the array passed to InitializeArray.
3016     //
3017     GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
3018     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
3019         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
3020         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
3021     {
3022         return nullptr;
3023     }
3024
3025     //
3026     // Make sure that the object being assigned is a helper call.
3027     //
3028
3029     GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
3030     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
3031     {
3032         return nullptr;
3033     }
3034
3035     //
3036     // Verify that it is one of the new array helpers.
3037     //
3038
3039     bool isMDArray = false;
3040
3041     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
3042         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
3043         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3044         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3045 #ifdef FEATURE_READYTORUN_COMPILER
3046         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3047         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3048 #endif
3049             )
3050     {
3051         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3052         {
3053             return nullptr;
3054         }
3055
3056         isMDArray = true;
3057     }
3058
3059     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3060
3061     //
3062     // Make sure we found a compile time handle to the array
3063     //
3064
3065     if (!arrayClsHnd)
3066     {
3067         return nullptr;
3068     }
3069
3070     unsigned rank = 0;
3071     S_UINT32 numElements;
3072
3073     if (isMDArray)
3074     {
3075         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3076
3077         if (rank == 0)
3078         {
3079             return nullptr;
3080         }
3081
3082         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3083         assert(tokenArg != nullptr);
3084         GenTreeArgList* numArgsArg = tokenArg->Rest();
3085         assert(numArgsArg != nullptr);
3086         GenTreeArgList* argsArg = numArgsArg->Rest();
3087         assert(argsArg != nullptr);
3088
3089         //
3090         // The number of arguments should be a constant between 1 and 64. The rank can't be 0,
3091         // so at least one length must be present, and the rank can't exceed 32, so there can
3092         // be at most 64 arguments: 32 lengths and 32 lower bounds.
3093         //
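        // For illustration: "new int[3, 4]" passes just the two lengths (numArgs == 2 == rank),
        // while a rank-2 array constructed with explicit lower bounds passes four values in
        // lowerBound0, length0, lowerBound1, length1 order (numArgs == 2 * rank).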
3094
3095         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3096             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3097         {
3098             return nullptr;
3099         }
3100
3101         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3102         bool     lowerBoundsSpecified;
3103
3104         if (numArgs == rank * 2)
3105         {
3106             lowerBoundsSpecified = true;
3107         }
3108         else if (numArgs == rank)
3109         {
3110             lowerBoundsSpecified = false;
3111
3112             //
3113             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3114             // an SDArray. Note that even if a lower bound is specified it can be 0, and then
3115             // we get an SDArray as well; see the for loop below.
3116             //
3117
3118             if (rank == 1)
3119             {
3120                 isMDArray = false;
3121             }
3122         }
3123         else
3124         {
3125             return nullptr;
3126         }
3127
3128         //
3129         // The rank is known to be at least 1 so we can start with numElements being 1
3130         // to avoid the need to special case the first dimension.
3131         //
3132
3133         numElements = S_UINT32(1);
3134
3135         struct Match
3136         {
3137             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3138             {
3139                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3140                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3141             }
3142
3143             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3144             {
3145                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3146                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3147                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3148             }
3149
3150             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3151             {
3152                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3153                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3154             }
3155
3156             static bool IsComma(GenTree* tree)
3157             {
3158                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3159             }
3160         };
3161
3162         unsigned argIndex = 0;
3163         GenTree* comma;
3164
3165         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3166         {
3167             if (lowerBoundsSpecified)
3168             {
3169                 //
3170                 // In general lower bounds can be ignored because they're not needed to
3171                 // calculate the total number of elements. But for single dimensional arrays
3172                 // we need to know if the lower bound is 0 because in this case the runtime
3173                 // creates an SDArray, and this affects the way the array data offset is calculated.
3174                 //
3175
3176                 if (rank == 1)
3177                 {
3178                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3179                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3180                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3181
3182                     if (lowerBoundNode->IsIntegralConst(0))
3183                     {
3184                         isMDArray = false;
3185                     }
3186                 }
3187
3188                 comma = comma->gtGetOp2();
3189                 argIndex++;
3190             }
3191
3192             GenTree* lengthNodeAssign = comma->gtGetOp1();
3193             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3194             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3195
3196             if (!lengthNode->IsCnsIntOrI())
3197             {
3198                 return nullptr;
3199             }
3200
3201             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3202             argIndex++;
3203         }
3204
3205         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3206
3207         if (argIndex != numArgs)
3208         {
3209             return nullptr;
3210         }
3211     }
3212     else
3213     {
3214         //
3215         // Make sure there are exactly two arguments:  the array class and
3216         // the number of elements.
3217         //
3218
3219         GenTree* arrayLengthNode;
3220
3221         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3222 #ifdef FEATURE_READYTORUN_COMPILER
3223         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3224         {
3225             // Array length is 1st argument for readytorun helper
3226             arrayLengthNode = args->Current();
3227         }
3228         else
3229 #endif
3230         {
3231             // Array length is 2nd argument for regular helper
3232             arrayLengthNode = args->Rest()->Current();
3233         }
3234
3235         //
3236         // Make sure that the number of elements looks valid.
3237         //
3238         if (arrayLengthNode->gtOper != GT_CNS_INT)
3239         {
3240             return nullptr;
3241         }
3242
3243         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3244
3245         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3246         {
3247             return nullptr;
3248         }
3249     }
3250
3251     CORINFO_CLASS_HANDLE elemClsHnd;
3252     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3253
3254     //
3255     // Note that genTypeSize will return zero for non primitive types, which is exactly
3256     // what we want (size will then be 0, and we will catch this in the conditional below).
3257     // Note that we don't expect this to fail for valid binaries, so we assert in the
3258     // non-verification case (the verification case should not assert but rather correctly
3259     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3260     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3261     // why.
3262     //
3263
3264     S_UINT32 elemSize(genTypeSize(elementType));
3265     S_UINT32 size = elemSize * S_UINT32(numElements);
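    // For illustration: a 4-element int[] initializer has elementType TYP_INT, so
    // elemSize == 4, numElements == 4 and size == 16 bytes of initialization data.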
3266
3267     if (size.IsOverflow())
3268     {
3269         return nullptr;
3270     }
3271
3272     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3273     {
3274         assert(verNeedsVerification());
3275         return nullptr;
3276     }
3277
3278     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3279     if (!initData)
3280     {
3281         return nullptr;
3282     }
3283
3284     //
3285     // At this point we are ready to commit to implementing the InitializeArray
3286     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3287     // return the struct assignment node.
3288     //
3289
3290     impPopStack();
3291     impPopStack();
3292
3293     const unsigned blkSize = size.Value();
3294     unsigned       dataOffset;
3295
3296     if (isMDArray)
3297     {
3298         dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3299     }
3300     else
3301     {
3302         dataOffset = eeGetArrayDataOffset(elementType);
3303     }
3304
3305     GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3306     GenTree* blk = gtNewBlockVal(dst, blkSize);
3307     GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
3308
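    // Conceptually this emits a fixed-size block copy, roughly
    // "memcpy(array + dataOffset, initData, blkSize)", expressed as a copy-block assignment.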
3309     return gtNewBlkOpNode(blk,     // dst
3310                           src,     // src
3311                           blkSize, // size
3312                           false,   // volatile
3313                           true);   // copyBlock
3314 }
3315
3316 //------------------------------------------------------------------------
3317 // impIntrinsic: possibly expand intrinsic call into alternate IR sequence
3318 //
3319 // Arguments:
3320 //    newobjThis - for constructor calls, the tree for the newly allocated object
3321 //    clsHnd - handle for the intrinsic method's class
3322 //    method - handle for the intrinsic method
3323 //    sig    - signature of the intrinsic method
3324 //    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
3325 //    memberRef - the token for the intrinsic method
3326 //    readonlyCall - true if call has a readonly prefix
3327 //    tailCall - true if call is in tail position
3328 //    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
3329 //       if call is not constrained
3330 //    constraintCallThisTransform -- this transform to apply for a constrained call
3331 //    pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
3332 //       for "traditional" jit intrinsics
3333 //    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
3334 //       that is amenable to special downstream optimization opportunities
3335 //
3336 // Returns:
3337 //    IR tree to use in place of the call, or nullptr if the jit should treat
3338 //    the intrinsic call like a normal call.
3339 //
3340 //    pIntrinsicID is set to a non-illegal value if the call is recognized as a
3341 //    traditional jit intrinsic, even if the intrinsic is not expanded.
3342 //
3343 //    isSpecial set true if the expansion is subject to special
3344 //    optimizations later in the jit processing
3345 //
3346 // Notes:
3347 //    On success the IR tree may be a call to a different method or an inline
3348 //    sequence. If it is a call, then the intrinsic processing here is responsible
3349 //    for handling all the special cases, as upon return to impImportCall
3350 //    expanded intrinsics bypass most of the normal call processing.
3351 //
3352 //    Intrinsics are generally not recognized in minopts and debug codegen.
3353 //
3354 //    However, certain traditional intrinsics are identified as "must expand"
3355 //    if there is no fallback implementation to invoke; these must be handled
3356 //    in all codegen modes.
3357 //
3358 //    New style intrinsics (where the fallback implementation is in IL) are
3359 //    identified as "must expand" if they are invoked from within their
3360 //    own method bodies.
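//
//    For illustration, a hardware intrinsic whose managed fallback body ends up calling
//    the same method again (a recursive call) has nothing left to fall back to, so that
//    call site is treated as "must expand" here.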
3361 //
3362
3363 GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
3364                                 CORINFO_CLASS_HANDLE    clsHnd,
3365                                 CORINFO_METHOD_HANDLE   method,
3366                                 CORINFO_SIG_INFO*       sig,
3367                                 unsigned                methodFlags,
3368                                 int                     memberRef,
3369                                 bool                    readonlyCall,
3370                                 bool                    tailCall,
3371                                 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
3372                                 CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
3373                                 CorInfoIntrinsics*      pIntrinsicID,
3374                                 bool*                   isSpecialIntrinsic)
3375 {
3376     assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
3377
3378     bool              mustExpand  = false;
3379     bool              isSpecial   = false;
3380     CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
3381     NamedIntrinsic    ni          = NI_Illegal;
3382
3383     if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
3384     {
3385         intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3386     }
3387
3388     if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
3389     {
3390         // The recursive calls to Jit intrinsics are must-expand by convention.
3391         mustExpand = mustExpand || gtIsRecursiveCall(method);
3392
3393         if (intrinsicID == CORINFO_INTRINSIC_Illegal)
3394         {
3395             ni = lookupNamedIntrinsic(method);
3396
3397 #ifdef FEATURE_HW_INTRINSICS
3398             if (ni > NI_HW_INTRINSIC_START && ni < NI_HW_INTRINSIC_END)
3399             {
3400                 return impHWIntrinsic(ni, method, sig, mustExpand);
3401             }
3402 #endif // FEATURE_HW_INTRINSICS
3403         }
3404     }
3405
3406     *pIntrinsicID = intrinsicID;
3407
3408 #ifndef _TARGET_ARM_
3409     genTreeOps interlockedOperator;
3410 #endif
3411
3412     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3413     {
3414         // must be done regardless of DbgCode and MinOpts
3415         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3416     }
3417 #ifdef _TARGET_64BIT_
3418     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3419     {
3420         // must be done regardless of DbgCode and MinOpts
3421         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3422     }
3423 #else
3424     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3425 #endif
3426
3427     GenTree* retNode = nullptr;
3428
3429     // Under debug and minopts, only expand what is required.
3430     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3431     {
3432         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3433         return retNode;
3434     }
3435
3436     var_types callType = JITtype2varType(sig->retType);
3437
3438     /* First do the intrinsics which are always smaller than a call */
3439
3440     switch (intrinsicID)
3441     {
3442         GenTree* op1;
3443         GenTree* op2;
3444
3445         case CORINFO_INTRINSIC_Sin:
3446         case CORINFO_INTRINSIC_Cbrt:
3447         case CORINFO_INTRINSIC_Sqrt:
3448         case CORINFO_INTRINSIC_Abs:
3449         case CORINFO_INTRINSIC_Cos:
3450         case CORINFO_INTRINSIC_Round:
3451         case CORINFO_INTRINSIC_Cosh:
3452         case CORINFO_INTRINSIC_Sinh:
3453         case CORINFO_INTRINSIC_Tan:
3454         case CORINFO_INTRINSIC_Tanh:
3455         case CORINFO_INTRINSIC_Asin:
3456         case CORINFO_INTRINSIC_Asinh:
3457         case CORINFO_INTRINSIC_Acos:
3458         case CORINFO_INTRINSIC_Acosh:
3459         case CORINFO_INTRINSIC_Atan:
3460         case CORINFO_INTRINSIC_Atan2:
3461         case CORINFO_INTRINSIC_Atanh:
3462         case CORINFO_INTRINSIC_Log10:
3463         case CORINFO_INTRINSIC_Pow:
3464         case CORINFO_INTRINSIC_Exp:
3465         case CORINFO_INTRINSIC_Ceiling:
3466         case CORINFO_INTRINSIC_Floor:
3467             retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
3468             break;
3469
3470 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3471         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3472         case CORINFO_INTRINSIC_InterlockedAdd32:
3473             interlockedOperator = GT_LOCKADD;
3474             goto InterlockedBinOpCommon;
3475         case CORINFO_INTRINSIC_InterlockedXAdd32:
3476             interlockedOperator = GT_XADD;
3477             goto InterlockedBinOpCommon;
3478         case CORINFO_INTRINSIC_InterlockedXchg32:
3479             interlockedOperator = GT_XCHG;
3480             goto InterlockedBinOpCommon;
3481
3482 #ifdef _TARGET_64BIT_
3483         case CORINFO_INTRINSIC_InterlockedAdd64:
3484             interlockedOperator = GT_LOCKADD;
3485             goto InterlockedBinOpCommon;
3486         case CORINFO_INTRINSIC_InterlockedXAdd64:
3487             interlockedOperator = GT_XADD;
3488             goto InterlockedBinOpCommon;
3489         case CORINFO_INTRINSIC_InterlockedXchg64:
3490             interlockedOperator = GT_XCHG;
3491             goto InterlockedBinOpCommon;
3492 #endif // _TARGET_64BIT_
3493
3494         InterlockedBinOpCommon:
3495             assert(callType != TYP_STRUCT);
3496             assert(sig->numArgs == 2);
3497
3498             op2 = impPopStack().val;
3499             op1 = impPopStack().val;
3500
3501             // This creates:
3502             //   val
3503             // XAdd
3504             //   addr
3505             //     field (for example)
3506             //
3507             // In the case where the first argument is the address of a local, we might
3508             // want to make this *not* make the var address-taken -- but atomic instructions
3509             // on a local are probably pretty useless anyway, so we probably don't care.
3510
3511             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3512             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3513             retNode = op1;
3514             break;
3515 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3516
3517         case CORINFO_INTRINSIC_MemoryBarrier:
3518
3519             assert(sig->numArgs == 0);
3520
3521             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3522             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3523             retNode = op1;
3524             break;
3525
3526 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3527         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3528         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3529 #ifdef _TARGET_64BIT_
3530         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3531 #endif
3532         {
3533             assert(callType != TYP_STRUCT);
3534             assert(sig->numArgs == 3);
3535             GenTree* op3;
3536
3537             op3 = impPopStack().val; // comparand
3538             op2 = impPopStack().val; // value
3539             op1 = impPopStack().val; // location
3540
3541             GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3542
3543             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3544             retNode = node;
3545             break;
3546         }
3547 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3548
3549         case CORINFO_INTRINSIC_StringLength:
3550             op1 = impPopStack().val;
3551             if (!opts.MinOpts() && !opts.compDbgCode)
3552             {
3553                 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3554                 op1                   = arrLen;
3555             }
3556             else
3557             {
3558                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3559                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3560                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3561                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3562             }
3563
3564             // Getting the length of a null string should throw
3565             op1->gtFlags |= GTF_EXCEPT;
3566
3567             retNode = op1;
3568             break;
3569
3570         case CORINFO_INTRINSIC_StringGetChar:
3571             op2 = impPopStack().val;
3572             op1 = impPopStack().val;
3573             op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
3574             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3575             retNode = op1;
3576             break;
3577
3578         case CORINFO_INTRINSIC_InitializeArray:
3579             retNode = impInitializeArrayIntrinsic(sig);
3580             break;
3581
3582         case CORINFO_INTRINSIC_Array_Address:
3583         case CORINFO_INTRINSIC_Array_Get:
3584         case CORINFO_INTRINSIC_Array_Set:
3585             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3586             break;
3587
3588         case CORINFO_INTRINSIC_GetTypeFromHandle:
3589             op1 = impStackTop(0).val;
3590             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3591                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3592             {
3593                 op1 = impPopStack().val;
3594                 // Change call to return RuntimeType directly.
3595                 op1->gtType = TYP_REF;
3596                 retNode     = op1;
3597             }
3598             // Call the regular function.
3599             break;
3600
3601         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3602             op1 = impStackTop(0).val;
3603             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3604                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3605             {
3606                 // Old tree
3607                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3608                 //
3609                 // New tree
3610                 // TreeToGetNativeTypeHandle
3611
3612                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3613                 // to that helper.
3614
3615                 op1 = impPopStack().val;
3616
3617                 // Get native TypeHandle argument to old helper
3618                 op1 = op1->gtCall.gtCallArgs;
3619                 assert(op1->OperIsList());
3620                 assert(op1->gtOp.gtOp2 == nullptr);
3621                 op1     = op1->gtOp.gtOp1;
3622                 retNode = op1;
3623             }
3624             // Call the regular function.
3625             break;
3626
3627 #ifndef LEGACY_BACKEND
3628         case CORINFO_INTRINSIC_Object_GetType:
3629         {
3630             JITDUMP("\n impIntrinsic: call to Object.GetType\n");
3631             op1 = impStackTop(0).val;
3632
3633             // If we're calling GetType on a boxed value, just get the type directly.
3634             if (op1->IsBoxedValue())
3635             {
3636                 JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
3637
3638                 // Try and clean up the box. Obtain the handle we
3639                 // were going to pass to the newobj.
3640                 GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
3641
3642                 if (boxTypeHandle != nullptr)
3643                 {
3644                     // Note we don't need to play the TYP_STRUCT games here like
3645                 // we do for LDTOKEN since the return value of this operator is Type,
3646                     // not RuntimeTypeHandle.
3647                     impPopStack();
3648                     GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
3649                     GenTree*        runtimeType =
3650                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3651                     retNode = runtimeType;
3652                 }
3653             }
3654
3655             // If we have a constrained callvirt with a "box this" transform
3656             // we know we have a value class and hence an exact type.
3657             //
3658             // If so, instead of boxing and then extracting the type, just
3659             // construct the type directly.
3660             if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
3661                 (constraintCallThisTransform == CORINFO_BOX_THIS))
3662             {
3663                 // Ensure this is one of the simple box cases (in particular, rule out nullables).
3664                 const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
3665                 const bool            isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
3666
3667                 if (isSafeToOptimize)
3668                 {
3669                     JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
3670                     impPopStack();
3671                     GenTree* typeHandleOp =
3672                         impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
3673                     if (typeHandleOp == nullptr)
3674                     {
3675                         assert(compDonotInline());
3676                         return nullptr;
3677                     }
3678                     GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
3679                     GenTree*        runtimeType =
3680                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3681                     retNode = runtimeType;
3682                 }
3683             }
3684
3685 #ifdef DEBUG
3686             if (retNode != nullptr)
3687             {
3688                 JITDUMP("Optimized result for call to GetType is\n");
3689                 if (verbose)
3690                 {
3691                     gtDispTree(retNode);
3692                 }
3693             }
3694 #endif
3695
3696             // Else expand as an intrinsic, unless the call is constrained,
3697             // in which case we defer expansion to allow impImportCall do the
3698             // special constraint processing.
3699             if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
3700             {
3701                 JITDUMP("Expanding as special intrinsic\n");
3702                 impPopStack();
3703                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3704
3705                 // Set the CALL flag to indicate that the operator is implemented by a call.
3706                 // Set also the EXCEPTION flag because the native implementation of
3707                 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3708                 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3709                 retNode = op1;
3710                 // Might be further optimizable, so arrange to leave a mark behind
3711                 isSpecial = true;
3712             }
3713
3714             if (retNode == nullptr)
3715             {
3716                 JITDUMP("Leaving as normal call\n");
3717                 // Might be further optimizable, so arrange to leave a mark behind
3718                 isSpecial = true;
3719             }
3720
3721             break;
3722         }
3723
3724 #endif
3725         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3726         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3727         // substitution.  The parameter byref will be assigned into the newly allocated object.
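        // For illustration: ByReference<T>(ref T value) just stores 'value' into its single
        // byref field, so instead of emitting the constructor call we emit that field
        // assignment directly and push the newly initialized struct back on the stack.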
3728         case CORINFO_INTRINSIC_ByReference_Ctor:
3729         {
3730             // Remove call to constructor and directly assign the byref passed
3731             // to the call to the first slot of the ByReference struct.
3732             op1                                    = impPopStack().val;
3733             GenTree*             thisptr           = newobjThis;
3734             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3735             GenTree*             field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3736             GenTree*             assign            = gtNewAssignNode(field, op1);
3737             GenTree*             byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3738             assert(byReferenceStruct != nullptr);
3739             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3740             retNode = assign;
3741             break;
3742         }
3743         // Implement ptr value getter for ByReference struct.
3744         case CORINFO_INTRINSIC_ByReference_Value:
3745         {
3746             op1                         = impPopStack().val;
3747             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3748             GenTree*             field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3749             retNode                     = field;
3750             break;
3751         }
3752         case CORINFO_INTRINSIC_Span_GetItem:
3753         case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3754         {
3755             // We have the index and a pointer to the Span<T> ("s") on the stack. Expand to:
3756             //
3757             // For Span<T>
3758             //   Comma
3759             //     BoundsCheck(index, s->_length)
3760             //     s->_pointer + index * sizeof(T)
3761             //
3762             // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
3763             //
3764             // Signature should show one class type parameter, which
3765             // we need to examine.
3766             assert(sig->sigInst.classInstCount == 1);
3767             CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3768             const unsigned       elemSize    = info.compCompHnd->getClassSize(spanElemHnd);
3769             assert(elemSize > 0);
3770
3771             const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3772
3773             JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3774                     info.compCompHnd->getClassName(spanElemHnd), elemSize);
3775
3776             GenTree* index          = impPopStack().val;
3777             GenTree* ptrToSpan      = impPopStack().val;
3778             GenTree* indexClone     = nullptr;
3779             GenTree* ptrToSpanClone = nullptr;
3780
3781 #if defined(DEBUG)
3782             if (verbose)
3783             {
3784                 printf("with ptr-to-span\n");
3785                 gtDispTree(ptrToSpan);
3786                 printf("and index\n");
3787                 gtDispTree(index);
3788             }
3789 #endif // defined(DEBUG)
3790
3791             // We need to use both index and ptr-to-span twice, so clone or spill.
3792             index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3793                                  nullptr DEBUGARG("Span.get_Item index"));
3794             ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3795                                      nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3796
3797             // Bounds check
3798             CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
3799             const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3800             GenTree*             length       = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
3801             GenTree*             boundsCheck  = new (this, GT_ARR_BOUNDS_CHECK)
3802                 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3803
3804             // Element access
3805             GenTree*             indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3806             GenTree*             sizeofNode  = gtNewIconNode(elemSize);
3807             GenTree*             mulNode     = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3808             CORINFO_FIELD_HANDLE ptrHnd      = info.compCompHnd->getFieldInClass(clsHnd, 0);
3809             const unsigned       ptrOffset   = info.compCompHnd->getFieldOffset(ptrHnd);
3810             GenTree*             data        = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
3811             GenTree*             result      = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3812
3813             // Prepare result
3814             var_types resultType = JITtype2varType(sig->retType);
3815             assert(resultType == result->TypeGet());
3816             retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3817
3818             break;
3819         }
3820
3821         case CORINFO_INTRINSIC_GetRawHandle:
3822         {
3823             noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3824             CORINFO_RESOLVED_TOKEN resolvedToken;
3825             resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3826             resolvedToken.tokenScope   = info.compScopeHnd;
3827             resolvedToken.token        = memberRef;
3828             resolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
3829
3830             CORINFO_GENERICHANDLE_RESULT embedInfo;
3831             info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3832
3833             GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3834                                                  embedInfo.compileTimeHandle);
3835             if (rawHandle == nullptr)
3836             {
3837                 return nullptr;
3838             }
3839
3840             noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3841
3842             unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3843             impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3844
3845             GenTree*  lclVar     = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3846             GenTree*  lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3847             var_types resultType = JITtype2varType(sig->retType);
3848             retNode              = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3849
3850             break;
3851         }
3852
3853         case CORINFO_INTRINSIC_TypeEQ:
3854         case CORINFO_INTRINSIC_TypeNEQ:
3855         {
3856             JITDUMP("Importing Type.op_*Equality intrinsic\n");
3857             op1              = impStackTop(1).val;
3858             op2              = impStackTop(0).val;
3859             GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
3860             if (optTree != nullptr)
3861             {
3862                 // Success, clean up the evaluation stack.
3863                 impPopStack();
3864                 impPopStack();
3865
3866                 // See if we can optimize even further, to a handle compare.
3867                 optTree = gtFoldTypeCompare(optTree);
3868
3869                 // See if we can now fold a handle compare to a constant.
3870                 optTree = gtFoldExpr(optTree);
3871
3872                 retNode = optTree;
3873             }
3874             else
3875             {
3876                 // Retry optimizing these later
3877                 isSpecial = true;
3878             }
3879             break;
3880         }
3881
3882         case CORINFO_INTRINSIC_GetCurrentManagedThread:
3883         case CORINFO_INTRINSIC_GetManagedThreadId:
3884         {
3885             // Retry optimizing these during morph
3886             isSpecial = true;
3887             break;
3888         }
3889
3890         default:
3891             /* Unknown intrinsic */
3892             intrinsicID = CORINFO_INTRINSIC_Illegal;
3893             break;
3894     }
3895
3896     // Look for new-style jit intrinsics by name
3897     if (ni != NI_Illegal)
3898     {
3899         assert(retNode == nullptr);
3900         switch (ni)
3901         {
3902             case NI_System_Enum_HasFlag:
3903             {
3904                 GenTree* thisOp  = impStackTop(1).val;
3905                 GenTree* flagOp  = impStackTop(0).val;
3906                 GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
3907
3908                 if (optTree != nullptr)
3909                 {
3910                     // Optimization successful. Pop the stack for real.
3911                     impPopStack();
3912                     impPopStack();
3913                     retNode = optTree;
3914                 }
3915                 else
3916                 {
3917                     // Retry optimizing this during morph.
3918                     isSpecial = true;
3919                 }
3920
3921                 break;
3922             }
3923
3924             case NI_MathF_Round:
3925             case NI_Math_Round:
3926             {
3927                 // Math.Round and MathF.Round used to be traditional JIT intrinsics. In order
3928                 // to simplify the transition, we will just treat them as if they were still the
3929                 // old intrinsic, CORINFO_INTRINSIC_Round. This should end up flowing properly
3930                 // everywhere else.
3931
3932                 retNode = impMathIntrinsic(method, sig, callType, CORINFO_INTRINSIC_Round, tailCall);
3933                 break;
3934             }
3935
3936             case NI_System_Collections_Generic_EqualityComparer_get_Default:
3937             {
3938                 // Flag for later handling during devirtualization.
3939                 isSpecial = true;
3940                 break;
3941             }
3942
3943             default:
3944                 break;
3945         }
3946     }
3947
3948     if (mustExpand)
3949     {
3950         if (retNode == nullptr)
3951         {
3952             NO_WAY("JIT must expand the intrinsic!");
3953         }
3954     }
3955
3956     // Optionally report if this intrinsic is special
3957     // (that is, potentially re-optimizable during morph).
3958     if (isSpecialIntrinsic != nullptr)
3959     {
3960         *isSpecialIntrinsic = isSpecial;
3961     }
3962
3963     return retNode;
3964 }
3965
3966 GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
3967                                     CORINFO_SIG_INFO*     sig,
3968                                     var_types             callType,
3969                                     CorInfoIntrinsics     intrinsicID,
3970                                     bool                  tailCall)
3971 {
3972     GenTree* op1;
3973     GenTree* op2;
3974
3975     assert(callType != TYP_STRUCT);
3976     assert((intrinsicID == CORINFO_INTRINSIC_Sin) || (intrinsicID == CORINFO_INTRINSIC_Cbrt) ||
3977            (intrinsicID == CORINFO_INTRINSIC_Sqrt) || (intrinsicID == CORINFO_INTRINSIC_Abs) ||
3978            (intrinsicID == CORINFO_INTRINSIC_Cos) || (intrinsicID == CORINFO_INTRINSIC_Round) ||
3979            (intrinsicID == CORINFO_INTRINSIC_Cosh) || (intrinsicID == CORINFO_INTRINSIC_Sinh) ||
3980            (intrinsicID == CORINFO_INTRINSIC_Tan) || (intrinsicID == CORINFO_INTRINSIC_Tanh) ||
3981            (intrinsicID == CORINFO_INTRINSIC_Asin) || (intrinsicID == CORINFO_INTRINSIC_Asinh) ||
3982            (intrinsicID == CORINFO_INTRINSIC_Acos) || (intrinsicID == CORINFO_INTRINSIC_Acosh) ||
3983            (intrinsicID == CORINFO_INTRINSIC_Atan) || (intrinsicID == CORINFO_INTRINSIC_Atan2) ||
3984            (intrinsicID == CORINFO_INTRINSIC_Atanh) || (intrinsicID == CORINFO_INTRINSIC_Log10) ||
3985            (intrinsicID == CORINFO_INTRINSIC_Pow) || (intrinsicID == CORINFO_INTRINSIC_Exp) ||
3986            (intrinsicID == CORINFO_INTRINSIC_Ceiling) || (intrinsicID == CORINFO_INTRINSIC_Floor));
3987
3988     op1 = nullptr;
3989
3990 #if defined(LEGACY_BACKEND)
3991     if (IsTargetIntrinsic(intrinsicID))
3992 #elif !defined(_TARGET_X86_)
3993     // Intrinsics that are not implemented directly by target instructions will
3994     // be re-materialized as user calls in rationalizer. For prefixed tail calls,
3995     // don't do this optimization, because
3996     //  a) For back-compatibility reasons on desktop .NET 4.6 / 4.6.1
3997     //  b) It will be a non-trivial task, or too late, to re-materialize a surviving
3998     //     tail prefixed GT_INTRINSIC as tail call in rationalizer.
3999     if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
4000 #else
4001     // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
4002     // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
4003     // code generation for certain EH constructs.
4004     if (!IsIntrinsicImplementedByUserCall(intrinsicID))
4005 #endif
4006     {
4007         switch (sig->numArgs)
4008         {
4009             case 1:
4010                 op1 = impPopStack().val;
4011
4012 #if FEATURE_X87_DOUBLES
4013
4014                 // X87 stack doesn't differentiate between float/double
4015                 // so it doesn't need a cast, but everybody else does
4016                 // Just double-check it is at least an FP type
4017                 noway_assert(varTypeIsFloating(op1));
4018
4019 #else // FEATURE_X87_DOUBLES
4020
4021                 if (op1->TypeGet() != callType)
4022                 {
4023                     op1 = gtNewCastNode(callType, op1, callType);
4024                 }
4025
4026 #endif // FEATURE_X87_DOUBLES
4027
4028                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
4029                 break;
4030
4031             case 2:
4032                 op2 = impPopStack().val;
4033                 op1 = impPopStack().val;
4034
4035 #if FEATURE_X87_DOUBLES
4036
4037                 // X87 stack doesn't differentiate between float/double
4038                 // so it doesn't need a cast, but everybody else does
4039                 // Just double-check it is at least an FP type
4040                 noway_assert(varTypeIsFloating(op2));
4041                 noway_assert(varTypeIsFloating(op1));
4042
4043 #else // FEATURE_X87_DOUBLES
4044
4045                 if (op2->TypeGet() != callType)
4046                 {
4047                     op2 = gtNewCastNode(callType, op2, callType);
4048                 }
4049                 if (op1->TypeGet() != callType)
4050                 {
4051                     op1 = gtNewCastNode(callType, op1, callType);
4052                 }
4053
4054 #endif // FEATURE_X87_DOUBLES
4055
4056                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
4057                 break;
4058
4059             default:
4060                 NO_WAY("Unsupported number of args for Math Intrinsic");
4061         }
4062
4063 #ifndef LEGACY_BACKEND
4064         if (IsIntrinsicImplementedByUserCall(intrinsicID))
4065         {
4066             op1->gtFlags |= GTF_CALL;
4067         }
4068 #endif
4069     }
4070
4071     return op1;
4072 }
4073
4074 //------------------------------------------------------------------------
4075 // lookupNamedIntrinsic: map method to jit named intrinsic value
4076 //
4077 // Arguments:
4078 //    method -- method handle for method
4079 //
4080 // Return Value:
4081 //    Id for the named intrinsic, or Illegal if none.
4082 //
4083 // Notes:
4084 //    method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
4085 //    otherwise it is not a named jit intrinsic.
4086 //
4087
4088 NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
4089 {
4090     NamedIntrinsic result = NI_Illegal;
4091
4092     const char* className     = nullptr;
4093     const char* namespaceName = nullptr;
4094     const char* methodName    = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
4095
4096     if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
4097     {
4098         return result;
4099     }
4100
4101     if (strcmp(namespaceName, "System") == 0)
4102     {
4103         if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
4104         {
4105             result = NI_System_Enum_HasFlag;
4106         }
4107         else if ((strcmp(className, "MathF") == 0) && (strcmp(methodName, "Round") == 0))
4108         {
4109             result = NI_MathF_Round;
4110         }
4111         else if ((strcmp(className, "Math") == 0) && (strcmp(methodName, "Round") == 0))
4112         {
4113             result = NI_Math_Round;
4114         }
4115     }
4116     else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
4117     {
4118         if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
4119         {
4120             result = NI_System_Collections_Generic_EqualityComparer_get_Default;
4121         }
4122     }
4123
4124 #ifdef FEATURE_HW_INTRINSICS
4125 #if defined(_TARGET_XARCH_)
4126     if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.X86") == 0)
4127     {
4128         InstructionSet isa = lookupHWIntrinsicISA(className);
4129         result             = lookupHWIntrinsic(methodName, isa);
4130     }
4131 #elif defined(_TARGET_ARM64_)
4132     if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.Arm.Arm64") == 0)
4133     {
4134         result = lookupHWIntrinsic(className, methodName);
4135     }
4136 #else // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4137 #error Unsupported platform
4138 #endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4139 #endif // FEATURE_HW_INTRINSICS
4140     return result;
4141 }
4142
4143 /*****************************************************************************/
4144
4145 GenTree* Compiler::impArrayAccessIntrinsic(
4146     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
4147 {
4148     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
4149        the following, as it generates fatter code.
4150     */
4151
4152     if (compCodeOpt() == SMALL_CODE)
4153     {
4154         return nullptr;
4155     }
4156
4157     /* These intrinsics generate fatter (but faster) code and are only
4158        done if we don't need SMALL_CODE */
4159
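    // For Array_Set the last signature argument is the value being stored, not an index,
    // so the rank (number of index arguments) is numArgs - 1; for Get/Address it is numArgs.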
4160     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
4161
4162     // The rank 1 case is special because it has to handle two array formats,
4163     // so we simply don't handle that case here.
4164     if (rank > GT_ARR_MAX_RANK || rank <= 1)
4165     {
4166         return nullptr;
4167     }
4168
4169     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
4170     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
4171
4172     // For the ref case, we will only be able to inline if the types match
4173     // (the verifier checks for this; we don't care for the nonverified case)
4174     // and the type is final (so we don't need to do the cast).
4175     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
4176     {
4177         // Get the call site signature
4178         CORINFO_SIG_INFO LocalSig;
4179         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
4180         assert(LocalSig.hasThis());
4181
4182         CORINFO_CLASS_HANDLE actualElemClsHnd;
4183
4184         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4185         {
4186             // Fetch the last argument, the one that indicates the type we are setting.
4187             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
4188             for (unsigned r = 0; r < rank; r++)
4189             {
4190                 argType = info.compCompHnd->getArgNext(argType);
4191             }
4192
4193             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
4194             actualElemClsHnd = argInfo.GetClassHandle();
4195         }
4196         else
4197         {
4198             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
4199
4200             // Fetch the return type
4201             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
4202             assert(retInfo.IsByRef());
4203             actualElemClsHnd = retInfo.GetClassHandle();
4204         }
4205
4206         // if it's not final, we can't do the optimization
4207         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
4208         {
4209             return nullptr;
4210         }
4211     }
4212
4213     unsigned arrayElemSize;
4214     if (elemType == TYP_STRUCT)
4215     {
4216         assert(arrElemClsHnd);
4217
4218         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
4219     }
4220     else
4221     {
4222         arrayElemSize = genTypeSize(elemType);
4223     }
4224
4225     if ((unsigned char)arrayElemSize != arrayElemSize)
4226     {
4227         // arrayElemSize would be truncated as an unsigned char.
4228         // This means the array element is too large. Don't do the optimization.
4229         return nullptr;
4230     }
4231
4232     GenTree* val = nullptr;
4233
4234     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4235     {
4236         // Assignment of a struct is more work, and there are more gets than sets.
4237         if (elemType == TYP_STRUCT)
4238         {
4239             return nullptr;
4240         }
4241
4242         val = impPopStack().val;
4243         assert(genActualType(elemType) == genActualType(val->gtType) ||
4244                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
4245                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
4246                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
4247     }
4248
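    // The rank and element size are stored as unsigned chars in GenTreeArrElem below,
    // so make sure GT_ARR_MAX_RANK fits.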
4249     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
4250
4251     GenTree* inds[GT_ARR_MAX_RANK];
4252     for (unsigned k = rank; k > 0; k--)
4253     {
4254         inds[k - 1] = impPopStack().val;
4255     }
4256
4257     GenTree* arr = impPopStack().val;
4258     assert(arr->gtType == TYP_REF);
4259
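    // Build a GT_ARR_ELEM node that computes the byref address of the element; for
    // Get and Set the address is dereferenced below, for Address it is returned directly.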
4260     GenTree* arrElem =
4261         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
4262                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
4263
4264     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
4265     {
4266         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
4267     }
4268
4269     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4270     {
4271         assert(val != nullptr);
4272         return gtNewAssignNode(arrElem, val);
4273     }
4274     else
4275     {
4276         return arrElem;
4277     }
4278 }
4279
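// Merge the current verification state into the entry state recorded for 'block'.
// Returns FALSE if the two states cannot be merged (e.g. a stack depth mismatch);
// '*changed' is set when the block's recorded entry state is widened by the merge.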
4280 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
4281 {
4282     unsigned i;
4283
4284     // do some basic checks first
4285     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
4286     {
4287         return FALSE;
4288     }
4289
4290     if (verCurrentState.esStackDepth > 0)
4291     {
4292         // merge stack types
4293         StackEntry* parentStack = block->bbStackOnEntry();
4294         StackEntry* childStack  = verCurrentState.esStack;
4295
4296         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
4297         {
4298             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
4299             {
4300                 return FALSE;
4301             }
4302         }
4303     }
4304
4305     // merge initialization status of this ptr
4306
4307     if (verTrackObjCtorInitState)
4308     {
4309         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
4310         assert(verCurrentState.thisInitialized != TIS_Bottom);
4311
4312         // If the successor block's thisInit state is unknown, copy it from the current state.
4313         if (block->bbThisOnEntry() == TIS_Bottom)
4314         {
4315             *changed = true;
4316             verSetThisInit(block, verCurrentState.thisInitialized);
4317         }
4318         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
4319         {
4320             if (block->bbThisOnEntry() != TIS_Top)
4321             {
4322                 *changed = true;
4323                 verSetThisInit(block, TIS_Top);
4324
4325                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
4326                 {
4327                     // The block is bad. Control can flow through the block to any handler that catches the
4328                     // verification exception, but the importer ignores bad blocks and therefore won't model
4329                     // this flow in the normal way. To complete the merge into the bad block, the new state
4330                     // needs to be manually pushed to the handlers that may be reached after the verification
4331                     // exception occurs.
4332                     //
4333                     // Usually, the new state was already propagated to the relevant handlers while processing
4334                     // the predecessors of the bad block. The exception is when the bad block is at the start
4335                     // of a try region, meaning it is protected by additional handlers that do not protect its
4336                     // predecessors.
4337                     //
4338                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
4339                     {
4340                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
4341                         // recursive calls back into this code path (if successors of the current bad block are
4342                         // also bad blocks).
4343                         //
4344                         ThisInitState origTIS           = verCurrentState.thisInitialized;
4345                         verCurrentState.thisInitialized = TIS_Top;
4346                         impVerifyEHBlock(block, true);
4347                         verCurrentState.thisInitialized = origTIS;
4348                     }
4349                 }
4350             }
4351         }
4352     }
4353     else
4354     {
4355         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4356     }
4357
4358     return TRUE;
4359 }
4360
4361 /*****************************************************************************
4362  * 'logMsg' is true if a log message needs to be logged. false if the caller has
4363  *   already logged it (presumably in a more detailed fashion than done here)
4364  * 'bVerificationException' is true for a verification exception, false for a
4365  *   "call unauthorized by host" exception.
4366  */
4367
4368 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4369 {
4370     block->bbJumpKind = BBJ_THROW;
4371     block->bbFlags |= BBF_FAILED_VERIFICATION;
4372
4373     impCurStmtOffsSet(block->bbCodeOffs);
4374
4375 #ifdef DEBUG
4376     // we need this since BeginTreeList asserts otherwise
4377     impTreeList = impTreeLast = nullptr;
4378     block->bbFlags &= ~BBF_IMPORTED;
4379
4380     if (logMsg)
4381     {
4382         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4383                 block->bbCodeOffs, block->bbCodeOffsEnd));
4384         if (verbose)
4385         {
4386             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4387         }
4388     }
4389
4390     if (JitConfig.DebugBreakOnVerificationFailure())
4391     {
4392         DebugBreak();
4393     }
4394 #endif
4395
4396     impBeginTreeList();
4397
4398     // if the stack is non-empty evaluate all the side-effects
4399     if (verCurrentState.esStackDepth > 0)
4400     {
4401         impEvalSideEffects();
4402     }
4403     assert(verCurrentState.esStackDepth == 0);
4404
4405     GenTree* op1 =
4406         gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4407     // verCurrentState.esStackDepth = 0;
4408     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4409
4410     // The inliner is not able to handle methods that require a throw block, so
4411     // make sure this method never gets inlined.
4412     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4413 }
4414
4415 /*****************************************************************************
4416  *
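 *  Handle a verification failure in 'block': on 64-bit IMPORT_ONLY compiles, mark the
 *  method as non-verifiable for the VM, then convert the block to throw a verification
 *  exception.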
4417  */
4418 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4419
4420 {
4421     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4422     // slightly different mechanism in which it calls the JIT to perform IL verification:
4423     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4424     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4425     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4426     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
4427     // up the exception; instead it embeds a throw inside the offending basic block and lets the
4428     // jitted method fail at run time.
4429     //
4430     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4431     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
4432     // just try to find out whether to fail this method before even actually jitting it.  So, in case
4433     // we detect these two conditions, instead of generating a throw statement inside the offending
4434     // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
4435     // to return false and make RyuJIT behave the same way JIT64 does.
4436     //
4437     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4438     // RyuJIT for the time being until we completely replace JIT64.
4439     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4440
4441     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4442     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
4443     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4444     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4445     // be turned off during importation).
4446     CLANG_FORMAT_COMMENT_ANCHOR;
4447
4448 #ifdef _TARGET_64BIT_
4449
4450 #ifdef DEBUG
4451     bool canSkipVerificationResult =
4452         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4453     assert(tiVerificationNeeded || canSkipVerificationResult);
4454 #endif // DEBUG
4455
4456     // Add the non verifiable flag to the compiler
4457     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4458     {
4459         tiIsVerifiableCode = FALSE;
4460     }
4461 #endif //_TARGET_64BIT_
4462     verResetCurrentState(block, &verCurrentState);
4463     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4464
4465 #ifdef DEBUG
4466     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4467 #endif                   // DEBUG
4468 }
4469
4470 /******************************************************************************/
4471 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4472 {
4473     assert(ciType < CORINFO_TYPE_COUNT);
4474
4475     typeInfo tiResult;
4476     switch (ciType)
4477     {
4478         case CORINFO_TYPE_STRING:
4479         case CORINFO_TYPE_CLASS:
4480             tiResult = verMakeTypeInfo(clsHnd);
4481             if (!tiResult.IsType(TI_REF))
4482             { // type must be consistent with element type
4483                 return typeInfo();
4484             }
4485             break;
4486
4487 #ifdef _TARGET_64BIT_
4488         case CORINFO_TYPE_NATIVEINT:
4489         case CORINFO_TYPE_NATIVEUINT:
4490             if (clsHnd)
4491             {
4492                 // If we have more precise information, use it
4493                 return verMakeTypeInfo(clsHnd);
4494             }
4495             else
4496             {
4497                 return typeInfo::nativeInt();
4498             }
4499             break;
4500 #endif // _TARGET_64BIT_
4501
4502         case CORINFO_TYPE_VALUECLASS:
4503         case CORINFO_TYPE_REFANY:
4504             tiResult = verMakeTypeInfo(clsHnd);
4505             // type must be consistent with element type;
4506             if (!tiResult.IsValueClass())
4507             {
4508                 return typeInfo();
4509             }
4510             break;
4511         case CORINFO_TYPE_VAR:
4512             return verMakeTypeInfo(clsHnd);
4513
4514         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4515         case CORINFO_TYPE_VOID:
4516             return typeInfo();
4517             break;
4518
4519         case CORINFO_TYPE_BYREF:
4520         {
4521             CORINFO_CLASS_HANDLE childClassHandle;
4522             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4523             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4524         }
4525         break;
4526
4527         default:
4528             if (clsHnd)
4529             { // If we have more precise information, use it
4530                 return typeInfo(TI_STRUCT, clsHnd);
4531             }
4532             else
4533             {
4534                 return typeInfo(JITtype2tiType(ciType));
4535             }
4536     }
4537     return tiResult;
4538 }
4539
4540 /******************************************************************************/
4541
4542 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4543 {
4544     if (clsHnd == nullptr)
4545     {
4546         return typeInfo();
4547     }
4548
4549     // Byrefs should only occur in method and local signatures, which are accessed
4550     // using ICorClassInfo and ICorClassInfo.getChildType.
4551     // So findClass() and getClassAttribs() should not be called for byrefs
4552
4553     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4554     {
4555         assert(!"Did findClass() return a Byref?");
4556         return typeInfo();
4557     }
4558
4559     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4560
4561     if (attribs & CORINFO_FLG_VALUECLASS)
4562     {
4563         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4564
4565         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4566         // not occur here, so we may want to change this to an assert instead.
4567         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4568         {
4569             return typeInfo();
4570         }
4571
4572 #ifdef _TARGET_64BIT_
4573         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4574         {
4575             return typeInfo::nativeInt();
4576         }
4577 #endif // _TARGET_64BIT_
4578
4579         if (t != CORINFO_TYPE_UNDEF)
4580         {
4581             return (typeInfo(JITtype2tiType(t)));
4582         }
4583         else if (bashStructToRef)
4584         {
4585             return (typeInfo(TI_REF, clsHnd));
4586         }
4587         else
4588         {
4589             return (typeInfo(TI_STRUCT, clsHnd));
4590         }
4591     }
4592     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4593     {
4594         // See comment in _typeInfo.h for why we do it this way.
4595         return (typeInfo(TI_REF, clsHnd, true));
4596     }
4597     else
4598     {
4599         return (typeInfo(TI_REF, clsHnd));
4600     }
4601 }
4602
4603 /******************************************************************************/
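// Returns TRUE if 'ti' is statically known to be a single-dimension (SD) array
// reference; a null object reference is treated as an SD array.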
4604 BOOL Compiler::verIsSDArray(typeInfo ti)
4605 {
4606     if (ti.IsNullObjRef())
4607     { // nulls are SD arrays
4608         return TRUE;
4609     }
4610
4611     if (!ti.IsType(TI_REF))
4612     {
4613         return FALSE;
4614     }
4615
4616     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4617     {
4618         return FALSE;
4619     }
4620     return TRUE;
4621 }
4622
4623 /******************************************************************************/
4624 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4625 /* Returns an error type if anything goes wrong */
4626
4627 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4628 {
4629     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4630
4631     if (!verIsSDArray(arrayObjectType))
4632     {
4633         return typeInfo();
4634     }
4635
4636     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4637     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4638
4639     return verMakeTypeInfo(ciType, childClassHandle);
4640 }
4641
4642 /*****************************************************************************
4643  */
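// Convert the signature element referenced by 'args' into a typeInfo, asking the EE
// for the exact class handle when the element is a GC type.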
4644 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4645 {
4646     CORINFO_CLASS_HANDLE classHandle;
4647     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4648
4649     var_types type = JITtype2varType(ciType);
4650     if (varTypeIsGC(type))
4651     {
4652         // For efficiency, getArgType only returns something in classHandle for
4653         // value types.  For other types that have additional type info, you
4654         // have to call back explicitly.
4655         classHandle = info.compCompHnd->getArgClass(sig, args);
4656         if (!classHandle)
4657         {
4658             NO_WAY("Could not figure out Class specified in argument or local signature");
4659         }
4660     }
4661
4662     return verMakeTypeInfo(ciType, classHandle);
4663 }
4664
4665 /*****************************************************************************/
4666
4667 // This does the expensive check to figure out whether the method
4668 // needs to be verified. It is called only when we fail verification,
4669 // just before throwing the verification exception.
4670
4671 BOOL Compiler::verNeedsVerification()
4672 {
4673     // If we have previously determined that verification is NOT needed
4674     // (for example in Compiler::compCompile), that means verification is really not needed.
4675     // Return the same decision we made before.
4676     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4677
4678     if (!tiVerificationNeeded)
4679     {
4680         return tiVerificationNeeded;
4681     }
4682
4683     assert(tiVerificationNeeded);
4684
4685     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4686     // obtain the answer.
4687     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4688         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4689
4690     // canSkipVerification will return one of the following three values:
4691     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4692     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4693     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4694     //     but need to insert a callout to the VM to ask during runtime
4695     //     whether to skip verification or not.
4696
4697     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4698     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4699     {
4700         tiRuntimeCalloutNeeded = true;
4701     }
4702
4703     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4704     {
4705         // Dev10 706080 - Testers don't like the assert, so just silence it
4706         // by not using the macros that invoke debugAssert.
4707         badCode();
4708     }
4709
4710     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4711     // The following line means we will NOT do jit time verification if canSkipVerification
4712     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4713     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4714     return tiVerificationNeeded;
4715 }
4716
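// Returns TRUE if 'ti' is a byref or a byref-like struct (a struct that may contain
// stack pointers).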
4717 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4718 {
4719     if (ti.IsByRef())
4720     {
4721         return TRUE;
4722     }
4723     if (!ti.IsType(TI_STRUCT))
4724     {
4725         return FALSE;
4726     }
4727     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4728 }
4729
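// A byref may only be returned if it refers to a permanent home (i.e. not a slot on
// the current frame).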
4730 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4731 {
4732     if (ti.IsPermanentHomeByRef())
4733     {
4734         return TRUE;
4735     }
4736     else
4737     {
4738         return FALSE;
4739     }
4740 }
4741
4742 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4743 {
4744     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4745             || ti.IsUnboxedGenericTypeVar() ||
4746             (ti.IsType(TI_STRUCT) &&
4747              // exclude byreflike structs
4748              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4749 }
4750
4751 // Is it a boxed value type?
4752 bool Compiler::verIsBoxedValueType(typeInfo ti)
4753 {
4754     if (ti.GetType() == TI_REF)
4755     {
4756         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4757         return !!eeIsValueClass(clsHnd);
4758     }
4759     else
4760     {
4761         return false;
4762     }
4763 }
4764
4765 /*****************************************************************************
4766  *
4767  *  Check if a TailCall is legal.
4768  */
4769
4770 bool Compiler::verCheckTailCallConstraint(
4771     OPCODE                  opcode,
4772     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4773     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4774     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4775                                                        // return false to the caller.
4776                                                        // If false, it will throw.
4777     )
4778 {
4779     DWORD            mflags;
4780     CORINFO_SIG_INFO sig;
4781     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4782                                    // this counter is used to keep track of how many items have been
4783                                    // virtually popped
4784
4785     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4786     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4787     unsigned              methodClassFlgs = 0;
4788
4789     assert(impOpcodeIsCallOpcode(opcode));
4790
4791     if (compIsForInlining())
4792     {
4793         return false;
4794     }
4795
4796     // for calli, VerifyOrReturn that this is not a virtual method
4797     if (opcode == CEE_CALLI)
4798     {
4799         /* Get the call sig */
4800         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4801
4802         // We don't know the target method, so we have to infer the flags, or
4803         // assume the worst-case.
4804         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4805     }
4806     else
4807     {
4808         methodHnd = pResolvedToken->hMethod;
4809
4810         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4811
4812         // When verifying generic code we pair the method handle with its
4813         // owning class to get the exact method signature.
4814         methodClassHnd = pResolvedToken->hClass;
4815         assert(methodClassHnd);
4816
4817         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4818
4819         // opcode specific check
4820         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4821     }
4822
4823     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4824     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4825
4826     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4827     {
4828         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4829     }
4830
4831     // check compatibility of the arguments
4832     unsigned int argCount;
4833     argCount = sig.numArgs;
4834     CORINFO_ARG_LIST_HANDLE args;
4835     args = sig.args;
4836     while (argCount--)
4837     {
4838         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4839
4840         // check that the argument is not a byref for tailcalls
4841         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4842
4843         // For unsafe code, we might have parameters containing pointer to the stack location.
4844         // Disallow the tailcall for this kind.
4845         CORINFO_CLASS_HANDLE classHandle;
4846         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4847         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4848
4849         args = info.compCompHnd->getArgNext(args);
4850     }
4851
4852     // update popCount
4853     popCount += sig.numArgs;
4854
4855     // check for 'this' which is on non-static methods, not called via NEWOBJ
4856     if (!(mflags & CORINFO_FLG_STATIC))
4857     {
4858         // Always update the popCount.
4859         // This is crucial for the stack calculation to be correct.
4860         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4861         popCount++;
4862
4863         if (opcode == CEE_CALLI)
4864         {
4865             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4866             // on the stack.
4867             if (tiThis.IsValueClass())
4868             {
4869                 tiThis.MakeByRef();
4870             }
4871             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4872         }
4873         else
4874         {
4875             // Check type compatibility of the this argument
4876             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4877             if (tiDeclaredThis.IsValueClass())
4878             {
4879                 tiDeclaredThis.MakeByRef();
4880             }
4881
4882             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4883         }
4884     }
4885
4886     // Tail calls on constrained calls should be illegal too:
4887     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4888     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4889
4890     // Get the exact view of the signature for an array method
4891     if (sig.retType != CORINFO_TYPE_VOID)
4892     {
4893         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4894         {
4895             assert(opcode != CEE_CALLI);
4896             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4897         }
4898     }
4899
4900     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4901     typeInfo tiCallerRetType =
4902         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4903
4904     // void return type gets morphed into the error type, so we have to treat them specially here
4905     if (sig.retType == CORINFO_TYPE_VOID)
4906     {
4907         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4908                                   speculative);
4909     }
4910     else
4911     {
4912         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4913                                                    NormaliseForStack(tiCallerRetType), true),
4914                                   "tailcall return mismatch", speculative);
4915     }
4916
4917     // for tailcall, stack must be empty
4918     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4919
4920     return true; // Yes, tailcall is legal
4921 }
4922
4923 /*****************************************************************************
4924  *
4925  *  Checks the IL verification rules for the call
4926  */
4927
4928 void Compiler::verVerifyCall(OPCODE                  opcode,
4929                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4930                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4931                              bool                    tailCall,
4932                              bool                    readonlyCall,
4933                              const BYTE*             delegateCreateStart,
4934                              const BYTE*             codeAddr,
4935                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4936 {
4937     DWORD             mflags;
4938     CORINFO_SIG_INFO* sig      = nullptr;
4939     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4940                                     // this counter is used to keep track of how many items have been
4941                                     // virtually popped
4942
4943     // for calli, VerifyOrReturn that this is not a virtual method
4944     if (opcode == CEE_CALLI)
4945     {
4946         Verify(false, "Calli not verifiable");
4947         return;
4948     }
4949
4950     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4951     mflags = callInfo->verMethodFlags;
4952
4953     sig = &callInfo->verSig;
4954
4955     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4956     {
4957         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4958     }
4959
4960     // opcode specific check
4961     unsigned methodClassFlgs = callInfo->classFlags;
4962     switch (opcode)
4963     {
4964         case CEE_CALLVIRT:
4965             // cannot do callvirt on valuetypes
4966             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4967             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4968             break;
4969
4970         case CEE_NEWOBJ:
4971         {
4972             assert(!tailCall); // Importer should not allow this
4973             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4974                            "newobj must be on instance");
4975
4976             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4977             {
4978                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4979                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4980                 typeInfo tiDeclaredFtn =
4981                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4982                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4983
4984                 assert(popCount == 0);
4985                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4986                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4987
4988                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4989                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4990                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4991                                "delegate object type mismatch");
4992
4993                 CORINFO_CLASS_HANDLE objTypeHandle =
4994                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4995
4996                 // the method signature must be compatible with the delegate's invoke method
4997
4998                 // check that for virtual functions, the type of the object used to get the
4999                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
5000                 // since this is a bit of work to determine in general, we pattern match stylized
5001                 // code sequences
5002
5003                 // the delegate creation code check, which used to be done later, is now done here
5004                 // so we can read delegateMethodRef directly from
5005                 // the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
5006                 // we then use it in our call to isCompatibleDelegate().
5007
5008                 mdMemberRef delegateMethodRef = mdMemberRefNil;
5009                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
5010                                "must create delegates with certain IL");
5011
5012                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
5013                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
5014                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
5015                 delegateResolvedToken.token        = delegateMethodRef;
5016                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
5017                 info.compCompHnd->resolveToken(&delegateResolvedToken);
5018
5019                 CORINFO_CALL_INFO delegateCallInfo;
5020                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
5021                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
5022
5023                 BOOL isOpenDelegate = FALSE;
5024                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
5025                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
5026                                                                       &isOpenDelegate),
5027                                "function incompatible with delegate");
5028
5029                 // check the constraints on the target method
5030                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
5031                                "delegate target has unsatisfied class constraints");
5032                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
5033                                                                             tiActualFtn.GetMethod()),
5034                                "delegate target has unsatisfied method constraints");
5035
5036                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
5037                 // for additional verification rules for delegates
5038                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
5039                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
5040                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5041                 {
5042
5043                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
5044 #ifdef DEBUG
5045                         && StrictCheckForNonVirtualCallToVirtualMethod()
5046 #endif
5047                             )
5048                     {
5049                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5050                         {
5051                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
5052                                                verIsBoxedValueType(tiActualObj),
5053                                            "The 'this' parameter to the call must be either the calling method's "
5054                                            "'this' parameter or "
5055                                            "a boxed value type.");
5056                         }
5057                     }
5058                 }
5059
5060                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
5061                 {
5062                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
5063
5064                     Verify(targetIsStatic || !isOpenDelegate,
5065                            "Unverifiable creation of an open instance delegate for a protected member.");
5066
5067                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
5068                                                                 ? info.compClassHnd
5069                                                                 : tiActualObj.GetClassHandleForObjRef();
5070
5071                     // In the case of protected methods, it is a requirement that the 'this'
5072                     // pointer be a subclass of the current context.  Perform this check.
5073                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5074                            "Accessing protected method through wrong type.");
5075                 }
5076                 goto DONE_ARGS;
5077             }
5078         }
5079         // fall thru to default checks
5080         default:
5081             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
5082     }
5083     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
5084                    "can only newobj a delegate constructor");
5085
5086     // check compatibility of the arguments
5087     unsigned int argCount;
5088     argCount = sig->numArgs;
5089     CORINFO_ARG_LIST_HANDLE args;
5090     args = sig->args;
5091     while (argCount--)
5092     {
5093         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
5094
5095         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
5096         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
5097
5098         args = info.compCompHnd->getArgNext(args);
5099     }
5100
5101 DONE_ARGS:
5102
5103     // update popCount
5104     popCount += sig->numArgs;
5105
5106     // check for 'this' which is on non-static methods, not called via NEWOBJ
5107     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
5108     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
5109     {
5110         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5111         popCount++;
5112
5113         // If it is null, we assume we can access it (since it will AV shortly)
5114         // If it is anything but a reference class, there is no hierarchy, so
5115         // again, we don't need the precise instance class to compute 'protected' access
5116         if (tiThis.IsType(TI_REF))
5117         {
5118             instanceClassHnd = tiThis.GetClassHandleForObjRef();
5119         }
5120
5121         // Check type compatibility of the this argument
5122         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
5123         if (tiDeclaredThis.IsValueClass())
5124         {
5125             tiDeclaredThis.MakeByRef();
5126         }
5127
5128         // If this is a call to the base class .ctor, set thisPtr Init for
5129         // this block.
5130         if (mflags & CORINFO_FLG_CONSTRUCTOR)
5131         {
5132             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
5133                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
5134             {
5135                 assert(verCurrentState.thisInitialized !=
5136                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
5137                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
5138                                "Call to base class constructor when 'this' is possibly initialized");
5139                 // Otherwise, 'this' is now initialized.
5140                 verCurrentState.thisInitialized = TIS_Init;
5141                 tiThis.SetInitialisedObjRef();
5142             }
5143             else
5144             {
5145                 // We allow direct calls to value type constructors
5146                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
5147                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
5148                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
5149                                "Bad call to a constructor");
5150             }
5151         }
5152
5153         if (pConstrainedResolvedToken != nullptr)
5154         {
5155             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
5156
5157             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
5158
5159             // We just dereference this and test for equality
5160             tiThis.DereferenceByRef();
5161             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
5162                            "this type mismatch with constrained type operand");
5163
5164             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
5165             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
5166         }
5167
5168         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
5169         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
5170         {
5171             tiDeclaredThis.SetIsReadonlyByRef();
5172         }
5173
5174         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
5175
5176         if (tiThis.IsByRef())
5177         {
5178             // Find the actual type where the method exists (as opposed to what is declared
5179             // in the metadata). This is to prevent passing a byref as the "this" argument
5180             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
5181
5182             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
5183             VerifyOrReturn(eeIsValueClass(actualClassHnd),
5184                            "Call to base type of valuetype (which is never a valuetype)");
5185         }
5186
5187         // Rules for non-virtual call to a non-final virtual method:
5188
5189         // Define:
5190         // The "this" pointer is considered to be "possibly written" if
5191         //   1. Its address have been taken (LDARGA 0) anywhere in the method.
5192         //   (or)
5193         //   2. It has been stored to (STARG.0) anywhere in the method.
5194
5195         // A non-virtual call to a non-final virtual method is only allowed if
5196         //   1. The this pointer passed to the callee is an instance of a boxed value type.
5197         //   (or)
5198         //   2. The this pointer passed to the callee is the current method's this pointer.
5199         //      (and) The current method's this pointer is not "possibly written".
5200
5201         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
5202         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
5203         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
5204         // harder and more error prone.
5205
5206         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
5207 #ifdef DEBUG
5208             && StrictCheckForNonVirtualCallToVirtualMethod()
5209 #endif
5210                 )
5211         {
5212             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5213             {
5214                 VerifyOrReturn(
5215                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
5216                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
5217                     "a boxed value type.");
5218             }
5219         }
5220     }
5221
5222     // check any constraints on the callee's class and type parameters
5223     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
5224                    "method has unsatisfied class constraints");
5225     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
5226                    "method has unsatisfied method constraints");
5227
5228     if (mflags & CORINFO_FLG_PROTECTED)
5229     {
5230         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5231                        "Can't access protected method");
5232     }
5233
5234     // Get the exact view of the signature for an array method
5235     if (sig->retType != CORINFO_TYPE_VOID)
5236     {
5237         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
5238     }
5239
5240     // "readonly." prefixed calls only allowed for the Address operation on arrays.
5241     // The methods supported by array types are under the control of the EE
5242     // so we can trust that only the Address operation returns a byref.
5243     if (readonlyCall)
5244     {
5245         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
5246         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
5247                        "unexpected use of readonly prefix");
5248     }
5249
5250     // Verify the tailcall
5251     if (tailCall)
5252     {
5253         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
5254     }
5255 }
5256
5257 /*****************************************************************************
5258  *  Checks that a delegate creation is done using the following pattern:
5259  *     dup
5260  *     ldvirtftn targetMemberRef
5261  *  OR
5262  *     ldftn targetMemberRef
5263  *
5264  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
5265  *  not in this basic block)
5266  *
5267  *  targetMemberRef is read from the code sequence.
5268  *  targetMemberRef is validated iff verificationNeeded.
5269  */
5270
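// Note: ldftn is the two-byte opcode FE 06 and ldvirtftn is FE 07 preceded by a one-byte
// dup (25), which is why the member ref token is read at offset 2 or 3 below.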
5271 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
5272                                         const BYTE*  codeAddr,
5273                                         mdMemberRef& targetMemberRef)
5274 {
5275     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5276     {
5277         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
5278         return TRUE;
5279     }
5280     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
5281     {
5282         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
5283         return TRUE;
5284     }
5285
5286     return FALSE;
5287 }
5288
5289 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
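// Verify an indirect store: the destination must be a writable byref and 'value' must be
// stack-compatible with the type it points to. Returns the pointed-to type.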
5290 {
5291     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
5292     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
5293     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
5294     if (!tiCompatibleWith(value, normPtrVal, true))
5295     {
5296         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
5297         compUnsafeCastUsed = true;
5298     }
5299     return ptrVal;
5300 }
5301
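// Verify an indirect load: 'ptr' must be a byref whose target type is consistent with
// the type implied by the ldind/stind instruction. Returns the dereferenced type.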
5302 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
5303 {
5304     assert(!instrType.IsStruct());
5305
5306     typeInfo ptrVal;
5307     if (ptr.IsByRef())
5308     {
5309         ptrVal = DereferenceByRef(ptr);
5310         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
5311         {
5312             Verify(false, "bad pointer");
5313             compUnsafeCastUsed = true;
5314         }
5315         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
5316         {
5317             Verify(false, "pointer not consistent with instr");
5318             compUnsafeCastUsed = true;
5319         }
5320     }
5321     else
5322     {
5323         Verify(false, "pointer not byref");
5324         compUnsafeCastUsed = true;
5325     }
5326
5327     return ptrVal;
5328 }
5329
5330 // Verify that the field is used properly.  'tiThis' is NULL for statics,
5331 // 'fieldFlags' is the fields attributes, and mutator is TRUE if it is a
5332 // ld*flda or a st*fld.
5333 // 'enclosingClass' is given if we are accessing a field in some specific type.
5334
5335 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
5336                               const CORINFO_FIELD_INFO& fieldInfo,
5337                               const typeInfo*           tiThis,
5338                               BOOL                      mutator,
5339                               BOOL                      allowPlainStructAsThis)
5340 {
5341     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5342     unsigned             fieldFlags     = fieldInfo.fieldFlags;
5343     CORINFO_CLASS_HANDLE instanceClass =
5344         info.compClassHnd; // for statics, we imagine the instance is the current class.
5345
5346     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5347     if (mutator)
5348     {
5349         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
5350         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5351         {
5352             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5353                        info.compIsStatic == isStaticField,
5354                    "bad use of initonly field (set or address taken)");
5355         }
5356     }
5357
5358     if (tiThis == nullptr)
5359     {
5360         Verify(isStaticField, "used static opcode with non-static field");
5361     }
5362     else
5363     {
5364         typeInfo tThis = *tiThis;
5365
5366         if (allowPlainStructAsThis && tThis.IsValueClass())
5367         {
5368             tThis.MakeByRef();
5369         }
5370
5371         // If it is null, we assume we can access it (since it will AV shortly)
5372         // If it is anything but a reference class, there is no hierarchy, so
5373         // again, we don't need the precise instance class to compute 'protected' access
5374         if (tiThis->IsType(TI_REF))
5375         {
5376             instanceClass = tiThis->GetClassHandleForObjRef();
5377         }
5378
5379         // Note that even if the field is static, we require that the this pointer
5380         // satisfy the same constraints as a non-static field.  This happens to
5381         // be simpler and seems reasonable.
5382         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5383         if (tiDeclaredThis.IsValueClass())
5384         {
5385             tiDeclaredThis.MakeByRef();
5386
5387             // we allow read-only tThis, on any field access (even stores!), because if the
5388             // class implementor wants to prohibit stores he should make the field private.
5389             // we do this by setting the read-only bit on the type we compare tThis to.
5390             tiDeclaredThis.SetIsReadonlyByRef();
5391         }
5392         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5393         {
5394             // Any field access is legal on "uninitialized" this pointers.
5395             // The easiest way to implement this is to simply set the
5396             // initialized bit for the duration of the type check on the
5397             // field access only.  It does not change the state of the "this"
5398             // for the function as a whole. Note that the "tThis" is a copy
5399             // of the original "this" type (*tiThis) passed in.
5400             tThis.SetInitialisedObjRef();
5401         }
5402
5403         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5404     }
5405
5406     // Presently the JIT does not check that we don't store or take the address of init-only fields
5407     // since we cannot guarantee their immutability and it is not a security issue.
5408
5409     // check any constraints on the fields's class --- accessing the field might cause a class constructor to run.
5410     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5411                    "field has unsatisfied class constraints");
5412     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5413     {
5414         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5415                "Accessing protected method through wrong type.");
5416     }
5417 }
5418
5419 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5420 {
5421     if (tiOp1.IsNumberType())
5422     {
5423 #ifdef _TARGET_64BIT_
5424         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5425 #else  // !_TARGET_64BIT_
5426         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5427         // this is non-conforming to the ECMA spec: types don't have to be equivalent,
5428         // merely compatible, since we can coalesce native int with int32 (see section III.1.5).
5429         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5430 #endif // !_TARGET_64BIT_
5431     }
5432     else if (tiOp1.IsObjRef())
5433     {
5434         switch (opcode)
5435         {
5436             case CEE_BEQ_S:
5437             case CEE_BEQ:
5438             case CEE_BNE_UN_S:
5439             case CEE_BNE_UN:
5440             case CEE_CEQ:
5441             case CEE_CGT_UN:
5442                 break;
5443             default:
5444                 Verify(FALSE, "Cond not allowed on object types");
5445         }
5446         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5447     }
5448     else if (tiOp1.IsByRef())
5449     {
5450         Verify(tiOp2.IsByRef(), "Cond type mismatch");
5451     }
5452     else
5453     {
5454         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5455     }
5456 }
5457
5458 void Compiler::verVerifyThisPtrInitialised()
5459 {
5460     if (verTrackObjCtorInitState)
5461     {
5462         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5463     }
5464 }
5465
5466 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5467 {
5468     // Either target == context, in which case we are calling an alternate .ctor,
5469     // or target is the immediate parent of context.
5470
5471     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
5472 }
5473
5474 GenTree* Compiler::impImportLdvirtftn(GenTree*                thisPtr,
5475                                       CORINFO_RESOLVED_TOKEN* pResolvedToken,
5476                                       CORINFO_CALL_INFO*      pCallInfo)
5477 {
5478     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5479     {
5480         NO_WAY("Virtual call to a function added via EnC is not supported");
5481     }
5482
5483     // CoreRT generic virtual method
5484     if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5485     {
5486         GenTree* runtimeMethodHandle = nullptr;
5487         if (pCallInfo->exactContextNeedsRuntimeLookup)
5488         {
5489             runtimeMethodHandle =
5490                 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5491         }
5492         else
5493         {
5494             runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5495         }
5496         return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5497                                    gtNewArgList(thisPtr, runtimeMethodHandle));
5498     }
5499
5500 #ifdef FEATURE_READYTORUN_COMPILER
5501     if (opts.IsReadyToRun())
5502     {
5503         if (!pCallInfo->exactContextNeedsRuntimeLookup)
5504         {
5505             GenTreeCall* call =
5506                 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5507
5508             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5509
5510             return call;
5511         }
5512
5513         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5514         if (IsTargetAbi(CORINFO_CORERT_ABI))
5515         {
5516             GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5517
5518             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5519                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5520         }
5521     }
5522 #endif
5523
5524     // Get the exact descriptor for the static callsite
5525     GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5526     if (exactTypeDesc == nullptr)
5527     { // compDonotInline()
5528         return nullptr;
5529     }
5530
5531     GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
5532     if (exactMethodDesc == nullptr)
5533     { // compDonotInline()
5534         return nullptr;
5535     }
5536
5537     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5538
5539     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5540
5541     helpArgs = gtNewListNode(thisPtr, helpArgs);
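    // The helper argument list is now (thisPtr, exact type handle, exact method handle).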
5542
5543     // Call helper function.  This gets the target address of the final destination callsite.
5544
5545     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5546 }
5547
5548 //------------------------------------------------------------------------
5549 // impImportAndPushBox: build and import a value-type box
5550 //
5551 // Arguments:
5552 //   pResolvedToken - resolved token from the box operation
5553 //
5554 // Return Value:
5555 //   None.
5556 //
5557 // Side Effects:
5558 //   The value to be boxed is popped from the stack, and a tree for
5559 //   the boxed value is pushed. This method may create upstream
5560 //   statements, spill side effecting trees, and create new temps.
5561 //
5562 //   If importing an inlinee, we may also discover the inline must
5563 //   fail. If so, there is no new value pushed on the stack. Callers
5564 //   should use compDonotInline() after calling this method to see if
5565 //   ongoing importation should be aborted.
5566 //
5567 // Notes:
5568 //   Boxing of ref classes results in the same value as the value on
5569 //   the top of the stack, so is handled inline in impImportBlockCode
5570 //   for the CEE_BOX case. Only value or primitive type boxes make it
5571 //   here.
5572 //
5573 //   Boxing for nullable types is done via a helper call; boxing
5574 //   of other value types is expanded inline or handled via helper
5575 //   call, depending on the jit's codegen mode.
5576 //
5577 //   When the jit is operating in size and time constrained modes,
5578 //   using a helper call here can save jit time and code size. But it
5579 //   also may inhibit cleanup optimizations that could have had an
5580 //   even greater effect on code size and jit time. An optimal
5581 //   strategy may need to peek ahead and see if it is easy to tell how
5582 //   the box is being used. For now, we defer.
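//
//   As an illustrative sketch (not the literal trees built below), the
//   inline expansion of boxing a value V of class C is roughly:
//
//     boxTemp = ALLOCOBJ(C)
//     *(boxTemp + TARGET_POINTER_SIZE) = V    ; scalar store or struct copy
//     push GT_BOX(boxTemp)
//
//   while the helper expansion is simply a call to the box helper, passing
//   the class handle and the address of V.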
5583
5584 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5585 {
5586     // Spill any special side effects
5587     impSpillSpecialSideEff();
5588
5589     // Get the expression to box from the stack.
5590     GenTree*             op1       = nullptr;
5591     GenTree*             op2       = nullptr;
5592     StackEntry           se        = impPopStack();
5593     CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
5594     GenTree*             exprToBox = se.val;
5595
5596     // Look at what helper we should use.
5597     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5598
5599     // Determine what expansion to prefer.
5600     //
5601     // In size/time/debuggable constrained modes, the helper call
5602     // expansion for box is generally smaller and is preferred, unless
5603     // the value to box is a struct that comes from a call. In that
5604     // case the call can construct its return value directly into the
5605     // box payload, saving possibly some up-front zeroing.
5606     //
5607     // Currently primitive type boxes always get inline expanded. We may
5608     // want to do the same for small structs if they don't come from
5609     // calls and don't have GC pointers, since explicitly copying such
5610     // structs is cheap.
5611     JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
5612     bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
5613     bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
5614     bool expandInline    = canExpandInline && !optForSize;
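    // In short: expand inline unless a special box helper (e.g. for Nullable<T>) is
    // required, or we are in a size-constrained mode and the value is not a call result.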
5615
5616     if (expandInline)
5617     {
5618         JITDUMP(" inline allocate/copy sequence\n");
5619
5620         // we are doing 'normal' boxing.  This means that we can inline the box operation
5621         // Box(expr) gets morphed into
5622         // temp = new(clsHnd)
5623         // cpobj(temp+4, expr, clsHnd)
5624         // push temp
5625         // The code paths differ slightly below for structs and primitives because
5626         // "cpobj" differs in these cases.  In one case you get
5627         //    impAssignStructPtr(temp+4, expr, clsHnd)
5628         // and the other you get
5629         //    *(temp+4) = expr
5630
5631         if (opts.MinOpts() || opts.compDbgCode)
5632         {
5633             // For minopts/debug code, try and minimize the total number
5634             // of box temps by reusing an existing temp when possible.
5635             if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5636             {
5637                 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
5638             }
5639         }
5640         else
5641         {
5642             // When optimizing, use a new temp for each box operation
5643             // since we then know the exact class of the box temp.
5644             impBoxTemp                  = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
5645             lvaTable[impBoxTemp].lvType = TYP_REF;
5646             const bool isExact          = true;
5647             lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
5648         }
5649
5650         // The box temp needs to stay in use until this box expression is appended
5651         // to some other node.  We approximate this by keeping it alive until
5652         // the evaluation stack becomes empty.
5653         impBoxTempInUse = true;
5654
5655 #ifdef FEATURE_READYTORUN_COMPILER
5656         bool usingReadyToRunHelper = false;
5657
5658         if (opts.IsReadyToRun())
5659         {
5660             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5661             usingReadyToRunHelper = (op1 != nullptr);
5662         }
5663
5664         if (!usingReadyToRunHelper)
5665 #endif
5666         {
5667             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5668             // and the newfast call with a single call to a dynamic R2R cell that will:
5669             //      1) Load the context
5670             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5671             //      3) Allocate and return the new object for boxing
5672             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5673
5674             // Ensure that the value class is restored
5675             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5676             if (op2 == nullptr)
5677             {
5678                 // We must be backing out of an inline.
5679                 assert(compDonotInline());
5680                 return;
5681             }
5682
5683             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
5684                                     pResolvedToken->hClass, TYP_REF, op2);
5685         }
5686
5687         /* Remember that this basic block contains 'new' of an object, and so does this method */
5688         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5689         optMethodFlags |= OMF_HAS_NEWOBJ;
5690
5691         GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
5692
5693         GenTree* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5694
5695         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5696         op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
5697         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
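        // op1 now points at the box payload, just past the boxed object's method table pointer.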
5698
5699         if (varTypeIsStruct(exprToBox))
5700         {
5701             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5702             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5703         }
5704         else
5705         {
5706             var_types lclTyp = exprToBox->TypeGet();
5707             if (lclTyp == TYP_BYREF)
5708             {
5709                 lclTyp = TYP_I_IMPL;
5710             }
5711             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5712             if (impIsPrimitive(jitType))
5713             {
5714                 lclTyp = JITtype2varType(jitType);
5715             }
5716             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5717                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5718             var_types srcTyp = exprToBox->TypeGet();
5719             var_types dstTyp = lclTyp;
5720
5721             if (srcTyp != dstTyp)
5722             {
5723                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5724                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5725                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5726             }
5727             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5728         }
5729
5730         // Spill eval stack to flush out any pending side effects.
5731         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
5732
5733         // Set up this copy as a second assignment.
5734         GenTree* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5735
5736         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5737
5738         // Record that this is a "box" node and keep track of the matching parts.
5739         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
5740
5741         // If it is a value class, mark the "box" node.  We can use this information
5742         // to optimise several cases:
5743         //    "box(x) == null" --> false
5744         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5745         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5746
5747         op1->gtFlags |= GTF_BOX_VALUE;
5748         assert(op1->IsBoxedValue());
5749         assert(asg->gtOper == GT_ASG);
5750     }
5751     else
5752     {
5753         // Don't optimize, just call the helper and be done with it.
5754         JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
5755         assert(operCls != nullptr);
5756
5757         // Ensure that the value class is restored
5758         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5759         if (op2 == nullptr)
5760         {
5761             // We must be backing out of an inline.
5762             assert(compDonotInline());
5763             return;
5764         }
5765
5766         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5767         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, args);
5768     }
5769
5770     /* Push the result back on the stack, */
5771     /* even if clsHnd is a value class we want the TI_REF */
5772     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5773     impPushOnStack(op1, tiRetVal);
5774 }
5775
5776 //------------------------------------------------------------------------
5777 // impImportNewObjArray: Build and import `new` of a multi-dimensional array
5778 //
5779 // Arguments:
5780 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5781 //                     by a call to CEEInfo::resolveToken().
5782 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5783 //                by a call to CEEInfo::getCallInfo().
5784 //
5785 // Assumptions:
5786 //    The multi-dimensional array constructor arguments (array dimensions) are
5787 //    pushed on the IL stack on entry to this method.
5788 //
5789 // Notes:
5790 //    Multi-dimensional array constructors are imported as calls to a JIT
5791 //    helper, not as regular calls.
5792
5793 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5794 {
5795     GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
5796     if (classHandle == nullptr)
5797     { // compDonotInline()
5798         return;
5799     }
5800
5801     assert(pCallInfo->sig.numArgs);
5802
5803     GenTree*        node;
5804     GenTreeArgList* args;
5805
5806     //
5807     // There are two different JIT helpers that can be used to allocate
5808     // multi-dimensional arrays:
5809     //
5810     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5811     //      This variant is deprecated. It should eventually be removed.
5812     //
5813     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5814     //      pointer to block of int32s. This variant is more portable.
5815     //
5816     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5817     // unconditionally would require a ReadyToRun version bump.
5818     //
5819     CLANG_FORMAT_COMMENT_ANCHOR;
5820
5821     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5822     {
5823
5824         // Reuse the temp used to pass the array dimensions to avoid bloating
5825         // the stack frame in case there are multiple calls to multi-dim array
5826         // constructors within a single method.
5827         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5828         {
5829             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5830             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5831             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5832         }
5833
5834         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5835         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5836         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5837             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5838
5839         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5840         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5841         // to one allocation at a time.
5842         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5843
5844         //
5845         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5846         //  - Array class handle
5847         //  - Number of dimension arguments
5848     //  - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5849         //
5850
5851         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5852         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5853
5854         // Pop the dimension arguments from the stack one at a time and store them
5855         // into the lvaNewObjArrayArgs temp.
5856         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5857         {
5858             GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5859
5860             GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5861             dest          = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5862             dest          = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5863                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5864             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5865
5866             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5867         }
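        // At this point 'node' is a comma chain that stores each dimension into the
        // temp block and finally yields the address of that block.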
5868
5869         args = gtNewArgList(node);
5870
5871         // pass number of arguments to the helper
5872         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5873
5874         args = gtNewListNode(classHandle, args);
5875
5876         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
5877     }
5878     else
5879     {
5880         //
5881         // The varargs helper needs the type and method handles as the last
5882         // and last-1 params (this is a cdecl call, so args will be
5883         // pushed in reverse order on the CPU stack).
5884         //
5885
5886         args = gtNewArgList(classHandle);
5887
5888         // pass number of arguments to the helper
5889         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5890
5891         unsigned argFlags = 0;
5892         args              = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
5893
5894         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
5895
5896         // varargs, so we pop the arguments
5897         node->gtFlags |= GTF_CALL_POP_ARGS;
5898
5899 #ifdef DEBUG
5900         // At the present time we don't track Caller pop arguments
5901         // that have GC references in them
5902         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5903         {
5904             assert(temp->Current()->gtType != TYP_REF);
5905         }
5906 #endif
5907     }
5908
5909     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5910     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5911
5912     // Remember that this basic block contains 'new' of a md array
5913     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5914
5915     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5916 }
5917
5918 GenTree* Compiler::impTransformThis(GenTree*                thisPtr,
5919                                     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5920                                     CORINFO_THIS_TRANSFORM  transform)
5921 {
5922     switch (transform)
5923     {
5924         case CORINFO_DEREF_THIS:
5925         {
5926             GenTree* obj = thisPtr;
5927
5928             // This does a LDIND on the obj, which should be a byref pointing to a ref.
5929             impBashVarAddrsToI(obj);
5930             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5931             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5932
5933             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5934             // The ldind could point anywhere, for example a boxed class static int.
5935             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5936
5937             return obj;
5938         }
5939
5940         case CORINFO_BOX_THIS:
5941         {
5942             // Constrained calls where there might be no
5943             // unboxed entry point require us to implement the call via a helper.
5944             // These only occur when a possible target of the call
5945             // may have inherited an implementation of an interface
5946             // method from System.Object or System.ValueType.  The EE does not provide us with
5947             // "unboxed" versions of these methods.
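            // For example, a "constrained." callvirt of Object.ToString on a struct that
            // does not override ToString ends up here: the value must be boxed so the
            // inherited System.Object implementation can be invoked.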
5948
5949             GenTree* obj = thisPtr;
5950
5951             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5952             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5953             obj->gtFlags |= GTF_EXCEPT;
5954
5955             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5956             var_types   objType = JITtype2varType(jitTyp);
5957             if (impIsPrimitive(jitTyp))
5958             {
5959                 if (obj->OperIsBlk())
5960                 {
5961                     obj->ChangeOperUnchecked(GT_IND);
5962
5963                     // Obj could point anywhere, for example a boxed class static int.
5964                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5965                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5966                 }
5967
5968                 obj->gtType = JITtype2varType(jitTyp);
5969                 assert(varTypeIsArithmetic(obj->gtType));
5970             }
5971
5972             // This pushes on the dereferenced byref
5973             // This is then used immediately to box.
5974             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5975
5976             // This pops off the byref-to-a-value-type remaining on the stack and
5977             // replaces it with a boxed object.
5978             // This is then used as the object to the virtual call immediately below.
5979             impImportAndPushBox(pConstrainedResolvedToken);
5980             if (compDonotInline())
5981             {
5982                 return nullptr;
5983             }
5984
5985             obj = impPopStack().val;
5986             return obj;
5987         }
5988         case CORINFO_NO_THIS_TRANSFORM:
5989         default:
5990             return thisPtr;
5991     }
5992 }
5993
5994 //------------------------------------------------------------------------
5995 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5996 //
5997 // Return Value:
5998 //    true if PInvoke inlining should be enabled in the current method, false otherwise
5999 //
6000 // Notes:
6001 //    Checks a number of ambient conditions where we could pinvoke but choose not to
6002
6003 bool Compiler::impCanPInvokeInline()
6004 {
6005     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
6006            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
6007         ;
6008 }
6009
6010 //------------------------------------------------------------------------
6011 // impCanPInvokeInlineCallSite: basic legality checks using information
6012 // from a call to see if the call qualifies as an inline pinvoke.
6013 //
6014 // Arguments:
6015 //    block      - block containing the call, or for inlinees, block
6016 //                 containing the call being inlined
6017 //
6018 // Return Value:
6019 //    true if this call can legally qualify as an inline pinvoke, false otherwise
6020 //
6021 // Notes:
6022 //    For runtimes that support exception handling interop there are
6023 //    restrictions on using inline pinvoke in handler regions.
6024 //
6025 //    * We have to disable pinvoke inlining inside of filters because
6026 //    in case the main execution (i.e. in the try block) is inside
6027 //    unmanaged code, we cannot reuse the inlined stub (we still need
6028 //    the original state until we are in the catch handler)
6029 //
6030 //    * We disable pinvoke inlining inside handlers since the GSCookie
6031 //    is in the inlined Frame (see
6032 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
6033 //    this would not protect framelets/return-address of handlers.
6034 //
6035 //    These restrictions are currently also in place for CoreCLR but
6036 //    can be relaxed when coreclr/#8459 is addressed.
6037
6038 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
6039 {
6040     if (block->hasHndIndex())
6041     {
6042         return false;
6043     }
6044
6045     // The remaining limitations do not apply to CoreRT
6046     if (IsTargetAbi(CORINFO_CORERT_ABI))
6047     {
6048         return true;
6049     }
6050
6051 #ifdef _TARGET_AMD64_
6052     // On x64, we disable pinvoke inlining inside of try regions.
6053     // Here is the comment from JIT64 explaining why:
6054     //
6055     //   [VSWhidbey: 611015] - because the jitted code links in the
6056     //   Frame (instead of the stub) we rely on the Frame not being
6057     //   'active' until inside the stub.  This normally happens by the
6058     //   stub setting the return address pointer in the Frame object
6059     //   inside the stub.  On a normal return, the return address
6060     //   pointer is zeroed out so the Frame can be safely re-used, but
6061     //   if an exception occurs, nobody zeros out the return address
6062     //   pointer.  Thus if we re-used the Frame object, it would go
6063     //   'active' as soon as we link it into the Frame chain.
6064     //
6065     //   Technically we only need to disable PInvoke inlining if we're
6066     //   in a handler or if we're in a try body with a catch or
6067     //   filter/except where other non-handler code in this method
6068     //   might run and try to re-use the dirty Frame object.
6069     //
6070     //   A desktop test case where this seems to matter is
6071     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
6072     if (block->hasTryIndex())
6073     {
6074         return false;
6075     }
6076 #endif // _TARGET_AMD64_
6077
6078     return true;
6079 }
6080
6081 //------------------------------------------------------------------------
6082 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
6083 // whether it can be expressed as an inline pinvoke.
6084 //
6085 // Arguments:
6086 //    call       - tree for the call
6087 //    methHnd    - handle for the method being called (may be null)
6088 //    sig        - signature of the method being called
6089 //    mflags     - method flags for the method being called
6090 //    block      - block containing the call, or for inlinees, block
6091 //                 containing the call being inlined
6092 //
6093 // Notes:
6094 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
6095 //
6096 //   Also sets GTF_CALL_UNMANAGED on the call for inline pinvokes if the
6097 //   call passes a combination of legality and profitability checks.
6098 //
6099 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
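//
//   In rough outline, the checks below are: call-site legality
//   (impCanPInvokeInlineCallSite), ambient conditions (impCanPInvokeInline),
//   profitability (skip rarely-run blocks), and finally whether the pinvoke
//   requires marshaling.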
6100
6101 void Compiler::impCheckForPInvokeCall(
6102     GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
6103 {
6104     CorInfoUnmanagedCallConv unmanagedCallConv;
6105
6106     // If VM flagged it as Pinvoke, flag the call node accordingly
6107     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
6108     {
6109         call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
6110     }
6111
6112     if (methHnd)
6113     {
6114         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
6115         {
6116             return;
6117         }
6118
6119         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
6120     }
6121     else
6122     {
6123         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
6124         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
6125         {
6126             // Used by the IL Stubs.
6127             callConv = CORINFO_CALLCONV_C;
6128         }
6129         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
6130         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
6131         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
6132         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
6133
6134         assert(!call->gtCallCookie);
6135     }
6136
6137     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
6138         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
6139     {
6140         return;
6141     }
6142     optNativeCallCount++;
6143
6144     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
6145     {
6146         // PInvoke CALLI in IL stubs must be inlined
6147     }
6148     else
6149     {
6150         // Check legality
6151         if (!impCanPInvokeInlineCallSite(block))
6152         {
6153             return;
6154         }
6155
6156         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
6157         // profitability checks
6158         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
6159         {
6160             if (!impCanPInvokeInline())
6161             {
6162                 return;
6163             }
6164
6165             // Size-speed tradeoff: don't use inline pinvoke at rarely
6166             // executed call sites.  The non-inline version is more
6167             // compact.
6168             if (block->isRunRarely())
6169             {
6170                 return;
6171             }
6172         }
6173
6174         // The expensive check should be last
6175         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
6176         {
6177             return;
6178         }
6179     }
6180
6181     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
6182
6183     call->gtFlags |= GTF_CALL_UNMANAGED;
6184     info.compCallUnmanaged++;
6185
6186     // The AMD64 convention is the same for native and managed
6187     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
6188     {
6189         call->gtFlags |= GTF_CALL_POP_ARGS;
6190     }
6191
6192     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
6193     {
6194         call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
6195     }
6196 }
6197
6198 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
6199 {
6200     var_types callRetTyp = JITtype2varType(sig->retType);
6201
6202     /* The function pointer is on top of the stack - It may be a
6203      * complex expression. As it is evaluated after the args,
6204      * it may cause registered args to be spilled. Simply spill it.
6205      */
6206
6207     // Ignore this trivial case.
6208     if (impStackTop().val->gtOper != GT_LCL_VAR)
6209     {
6210         impSpillStackEntry(verCurrentState.esStackDepth - 1,
6211                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
6212     }
6213
6214     /* Get the function pointer */
6215
6216     GenTree* fptr = impPopStack().val;
6217
6218     // The function pointer is typically sized to match the target pointer size.
6219     // However, stubgen IL optimization can change LDC.I8 to LDC.I4
6220     // See ILCodeStream::LowerOpcode
6221     assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
6222
6223 #ifdef DEBUG
6224     // This temporary must never be converted to a double in stress mode,
6225     // because that can introduce a call to the cast helper after the
6226     // arguments have already been evaluated.
6227
6228     if (fptr->OperGet() == GT_LCL_VAR)
6229     {
6230         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
6231     }
6232 #endif
6233
6234     /* Create the call node */
6235
6236     GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6237
6238     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6239
6240     return call;
6241 }
6242
6243 /*****************************************************************************/
6244
6245 void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
6246 {
6247     assert(call->gtFlags & GTF_CALL_UNMANAGED);
6248
6249     /* Since we push the arguments in reverse order (i.e. right -> left)
6250      * spill any side effects from the stack
6251      *
6252      * OBS: If there is only one side effect we do not need to spill it
6253      *      thus we have to spill all side-effects except last one
6254      */
6255
6256     unsigned lastLevelWithSideEffects = UINT_MAX;
6257
6258     unsigned argsToReverse = sig->numArgs;
6259
6260     // For "thiscall", the first argument goes in a register. Since its
6261     // order does not need to be changed, we do not need to spill it
6262
6263     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6264     {
6265         assert(argsToReverse);
6266         argsToReverse--;
6267     }
6268
6269 #ifndef _TARGET_X86_
6270     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
6271     argsToReverse = 0;
6272 #endif
6273
6274     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
6275     {
6276         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
6277         {
6278             assert(lastLevelWithSideEffects == UINT_MAX);
6279
6280             impSpillStackEntry(level,
6281                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
6282         }
6283         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
6284         {
6285             if (lastLevelWithSideEffects != UINT_MAX)
6286             {
6287                 /* We had a previous side effect - must spill it */
6288                 impSpillStackEntry(lastLevelWithSideEffects,
6289                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
6290
6291                 /* Record the level for the current side effect in case we will spill it */
6292                 lastLevelWithSideEffects = level;
6293             }
6294             else
6295             {
6296                 /* This is the first side effect encountered - record its level */
6297
6298                 lastLevelWithSideEffects = level;
6299             }
6300         }
6301     }
6302
6303     /* The argument list is now "clean" - no out-of-order side effects
6304      * Pop the argument list in reverse order */
6305
6306     GenTree* args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
6307
6308     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6309     {
6310         GenTree* thisPtr = args->Current();
6311         impBashVarAddrsToI(thisPtr);
6312         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
6313     }
6314
6315     if (args)
6316     {
6317         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6318     }
6319 }
6320
6321 //------------------------------------------------------------------------
6322 // impInitClass: Build a node to initialize the class before accessing the
6323 //               field if necessary
6324 //
6325 // Arguments:
6326 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6327 //                     by a call to CEEInfo::resolveToken().
6328 //
6329 // Return Value: If needed, a pointer to the node that will perform the class
6330 //               initialization.  Otherwise, nullptr.
6331 //
6332
6333 GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6334 {
6335     CorInfoInitClassResult initClassResult =
6336         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
6337
6338     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6339     {
6340         return nullptr;
6341     }
6342     BOOL runtimeLookup;
6343
6344     GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6345
6346     if (node == nullptr)
6347     {
6348         assert(compDonotInline());
6349         return nullptr;
6350     }
6351
6352     if (runtimeLookup)
6353     {
6354         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6355     }
6356     else
6357     {
6358         // Call the shared non-gc static helper, as it's the fastest.
6359         node = fgGetSharedCCtor(pResolvedToken->hClass);
6360     }
6361
6362     return node;
6363 }
6364
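//------------------------------------------------------------------------
// impImportStaticReadOnlyField: create a constant node from the current value
//    of a static read-only field.
//
// Arguments:
//    fldAddr - address of the field's value
//    lclTyp  - type of the field
//
// Return Value:
//    An integer, long, or double constant node holding the field's value
//    (nullptr, after asserting, for an unexpected type).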
6365 GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6366 {
6367     GenTree* op1 = nullptr;
6368
6369     switch (lclTyp)
6370     {
6371         int     ival;
6372         __int64 lval;
6373         double  dval;
6374
6375         case TYP_BOOL:
6376             ival = *((bool*)fldAddr);
6377             goto IVAL_COMMON;
6378
6379         case TYP_BYTE:
6380             ival = *((signed char*)fldAddr);
6381             goto IVAL_COMMON;
6382
6383         case TYP_UBYTE:
6384             ival = *((unsigned char*)fldAddr);
6385             goto IVAL_COMMON;
6386
6387         case TYP_SHORT:
6388             ival = *((short*)fldAddr);
6389             goto IVAL_COMMON;
6390
6391         case TYP_USHORT:
6392             ival = *((unsigned short*)fldAddr);
6393             goto IVAL_COMMON;
6394
6395         case TYP_UINT:
6396         case TYP_INT:
6397             ival = *((int*)fldAddr);
6398         IVAL_COMMON:
6399             op1 = gtNewIconNode(ival);
6400             break;
6401
6402         case TYP_LONG:
6403         case TYP_ULONG:
6404             lval = *((__int64*)fldAddr);
6405             op1  = gtNewLconNode(lval);
6406             break;
6407
6408         case TYP_FLOAT:
6409             dval = *((float*)fldAddr);
6410             op1  = gtNewDconNode(dval);
6411 #if !FEATURE_X87_DOUBLES
6412             // The x87 stack doesn't differentiate between float and double
6413             // (R4 is treated as R8), but every other target does.
6414             op1->gtType = TYP_FLOAT;
6415 #endif // FEATURE_X87_DOUBLES
6416             break;
6417
6418         case TYP_DOUBLE:
6419             dval = *((double*)fldAddr);
6420             op1  = gtNewDconNode(dval);
6421             break;
6422
6423         default:
6424             assert(!"Unexpected lclTyp");
6425             break;
6426     }
6427
6428     return op1;
6429 }
6430
6431 GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6432                                               CORINFO_ACCESS_FLAGS    access,
6433                                               CORINFO_FIELD_INFO*     pFieldInfo,
6434                                               var_types               lclTyp)
6435 {
6436     GenTree* op1;
6437
6438     switch (pFieldInfo->fieldAccessor)
6439     {
6440         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6441         {
6442             assert(!compIsForInlining());
6443
6444             // We first call a special helper to get the statics base pointer
6445             op1 = impParentClassTokenToHandle(pResolvedToken);
6446
6447             // compIsForInlining() is false so we should never get NULL here.
6448             assert(op1 != nullptr);
6449
6450             var_types type = TYP_BYREF;
6451
6452             switch (pFieldInfo->helper)
6453             {
6454                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6455                     type = TYP_I_IMPL;
6456                     break;
6457                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6458                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6459                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6460                     break;
6461                 default:
6462                     assert(!"unknown generic statics helper");
6463                     break;
6464             }
6465
6466             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6467
6468             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6469             op1              = gtNewOperNode(GT_ADD, type, op1,
6470                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6471         }
6472         break;
6473
6474         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6475         {
6476 #ifdef FEATURE_READYTORUN_COMPILER
6477             if (opts.IsReadyToRun())
6478             {
6479                 unsigned callFlags = 0;
6480
6481                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6482                 {
6483                     callFlags |= GTF_CALL_HOISTABLE;
6484                 }
6485
6486                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6487                 op1->gtFlags |= callFlags;
6488
6489                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6490             }
6491             else
6492 #endif
6493             {
6494                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6495             }
6496
6497             {
6498                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6499                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6500                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6501             }
6502             break;
6503         }
6504
6505         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6506         {
6507 #ifdef FEATURE_READYTORUN_COMPILER
6508             noway_assert(opts.IsReadyToRun());
6509             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6510             assert(kind.needsRuntimeLookup);
6511
6512             GenTree*        ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6513             GenTreeArgList* args    = gtNewArgList(ctxTree);
6514
6515             unsigned callFlags = 0;
6516
6517             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6518             {
6519                 callFlags |= GTF_CALL_HOISTABLE;
6520             }
6521             var_types type = TYP_BYREF;
6522             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6523             op1->gtFlags |= callFlags;
6524
6525             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6526             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6527             op1              = gtNewOperNode(GT_ADD, type, op1,
6528                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6529 #else
6530             unreached();
6531 #endif // FEATURE_READYTORUN_COMPILER
6532         }
6533         break;
6534
6535         default:
6536         {
6537             if (!(access & CORINFO_ACCESS_ADDRESS))
6538             {
6539                 // In the future, it may be better to just create the right tree here instead of folding it later.
6540                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6541
6542                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6543                 {
6544                     op1->gtFlags |= GTF_FLD_INITCLASS;
6545                 }
6546
6547                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6548                 {
6549                     op1->gtType = TYP_REF; // points at boxed object
6550                     FieldSeqNode* firstElemFldSeq =
6551                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6552                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6553                                         new (this, GT_CNS_INT)
6554                                             GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
6555
6556                     if (varTypeIsStruct(lclTyp))
6557                     {
6558                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
6559                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
6560                     }
6561                     else
6562                     {
6563                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6564                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6565                     }
6566                 }
6567
6568                 return op1;
6569             }
6570             else
6571             {
6572                 void** pFldAddr = nullptr;
6573                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6574
6575                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6576
6577                 /* Create the data member node */
6578                 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
6579                                           fldSeq);
6580
6581                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6582                 {
6583                     op1->gtFlags |= GTF_ICON_INITCLASS;
6584                 }
6585
6586                 if (pFldAddr != nullptr)
6587                 {
6588                     // There are two cases here: either the static is RVA-based,
6589                     // in which case the type of the FIELD node is not a GC type
6590                     // and the handle to the RVA is a TYP_I_IMPL, or the FIELD node is
6591                     // a GC type and the handle to it is a TYP_BYREF in the GC heap
6592                     // because handles to statics now go into the large object heap.
6593
6594                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6595                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6596                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6597                 }
6598             }
6599             break;
6600         }
6601     }
6602
6603     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6604     {
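        // The static is stored in a boxed object on the GC heap: dereference the handle to
        // get the box, then step over its method table pointer to reach the payload.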
6605         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6606
6607         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6608
6609         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6610                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
6611     }
6612
6613     if (!(access & CORINFO_ACCESS_ADDRESS))
6614     {
6615         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6616         op1->gtFlags |= GTF_GLOB_REF;
6617     }
6618
6619     return op1;
6620 }
6621
6622 // In general, try to call this before most of the verification work.  Most people expect the access
6623 // exceptions before the verification exceptions.  If you do this afterwards, that usually doesn't happen.  It turns
6624 // out that if you can't access something, we also think that you're unverifiable for other reasons.
6625 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6626 {
6627     if (result != CORINFO_ACCESS_ALLOWED)
6628     {
6629         impHandleAccessAllowedInternal(result, helperCall);
6630     }
6631 }
6632
6633 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6634 {
6635     switch (result)
6636     {
6637         case CORINFO_ACCESS_ALLOWED:
6638             break;
6639         case CORINFO_ACCESS_ILLEGAL:
6640             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6641             // method is verifiable.  Otherwise, delay the exception to runtime.
6642             if (compIsForImportOnly())
6643             {
6644                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6645             }
6646             else
6647             {
6648                 impInsertHelperCall(helperCall);
6649             }
6650             break;
6651         case CORINFO_ACCESS_RUNTIME_CHECK:
6652             impInsertHelperCall(helperCall);
6653             break;
6654     }
6655 }
6656
6657 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6658 {
6659     // Construct the argument list
6660     GenTreeArgList* args = nullptr;
6661     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6662     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6663     {
6664         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6665         GenTree*                  currentArg = nullptr;
6666         switch (helperArg.argType)
6667         {
6668             case CORINFO_HELPER_ARG_TYPE_Field:
6669                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6670                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6671                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6672                 break;
6673             case CORINFO_HELPER_ARG_TYPE_Method:
6674                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6675                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6676                 break;
6677             case CORINFO_HELPER_ARG_TYPE_Class:
6678                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6679                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6680                 break;
6681             case CORINFO_HELPER_ARG_TYPE_Module:
6682                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6683                 break;
6684             case CORINFO_HELPER_ARG_TYPE_Const:
6685                 currentArg = gtNewIconNode(helperArg.constant);
6686                 break;
6687             default:
6688                 NO_WAY("Illegal helper arg type");
6689         }
6690         args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6691     }
6692
6693     /* TODO-Review:
6694      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6695      * Also, consider sticking this in the first basic block.
6696      */
6697     GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
6698     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6699 }
6700
6701 // Checks whether the return types of the caller and callee are compatible
6702 // so that the callee can be tail called. Note that here we don't check
6703 // compatibility in the IL Verifier sense, but rather whether the return type
6704 // sizes are equal and the values get returned in the same return register.
6705 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6706                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6707                                             var_types            calleeRetType,
6708                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6709 {
6710     // Note that we cannot relax this condition with genActualType() as the
6711     // calling convention dictates that the caller of a function with a small-
6712     // typed return value is responsible for normalizing the return value.
6713     if (callerRetType == calleeRetType)
6714     {
6715         return true;
6716     }
6717
6718     // If the class handles are the same and not null, the return types are compatible.
6719     if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
6720     {
6721         return true;
6722     }
6723
6724 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6725     // Jit64 compat:
6726     if (callerRetType == TYP_VOID)
6727     {
6728         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6729         //     tail.call
6730         //     pop
6731         //     ret
6732         //
6733         // Note that the above IL pattern is not valid as per IL verification rules.
6734         // Therefore, only full trust code can take advantage of this pattern.
6735         return true;
6736     }
6737
6738     // These checks return true if the return value type sizes are the same and
6739     // get returned in the same return register, i.e. the caller doesn't need to normalize
6740     // the return value. Some of the tail calls permitted by the checks below would have
6741     // been rejected by the IL Verifier before we reached here.  Therefore, only full
6742     // trust code can make those tail calls.
6743     unsigned callerRetTypeSize = 0;
6744     unsigned calleeRetTypeSize = 0;
6745     bool     isCallerRetTypMBEnreg =
6746         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6747     bool isCalleeRetTypMBEnreg =
6748         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6749
6750     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6751     {
6752         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6753     }
6754 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6755
6756     return false;
6757 }
6758
6759 // For prefixFlags
6760 enum
6761 {
6762     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6763     PREFIX_TAILCALL_IMPLICIT =
6764         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6765     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6766     PREFIX_VOLATILE    = 0x00000100,
6767     PREFIX_UNALIGNED   = 0x00001000,
6768     PREFIX_CONSTRAINED = 0x00010000,
6769     PREFIX_READONLY    = 0x00100000
6770 };
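// A purely illustrative combination (not taken from any particular method): a call that is
// both ".tail" prefixed and preceded by "constrained." would carry
//     prefixFlags == (PREFIX_TAILCALL_EXPLICIT | PREFIX_CONSTRAINED) == 0x00010001,
// and the (prefixFlags & PREFIX_TAILCALL) test still recognizes it as a tail call.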
6771
6772 /********************************************************************************
6773  *
6774  * Returns true if the current opcode and the opcodes following it correspond
6775  * to a supported tail call IL pattern.
6776  *
6777  */
6778 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6779                                       OPCODE      curOpcode,
6780                                       const BYTE* codeAddrOfNextOpcode,
6781                                       const BYTE* codeEnd,
6782                                       bool        isRecursive,
6783                                       bool*       isCallPopAndRet /* = nullptr */)
6784 {
6785     // Bail out if the current opcode is not a call.
6786     if (!impOpcodeIsCallOpcode(curOpcode))
6787     {
6788         return false;
6789     }
6790
6791 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6792     // If shared ret tail opt is not enabled, we will enable
6793     // it for recursive methods.
6794     if (isRecursive)
6795 #endif
6796     {
6797         // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
6798         // part of the sequence. Make sure we don't go past the end of the IL, however.
6799         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6800     }
6801
6802     // Bail out if there is no next opcode after call
6803     if (codeAddrOfNextOpcode >= codeEnd)
6804     {
6805         return false;
6806     }
6807
6808     // Scan the opcodes to look for the following IL patterns if either
6809     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6810     //  ii) if tail prefixed, IL verification is not needed for the method.
6811     //
6812     // Only in the above two cases can we allow the below tail call patterns,
6813     // which violate the ECMA spec.
6814     //
6815     // Pattern1:
6816     //       call
6817     //       nop*
6818     //       ret
6819     //
6820     // Pattern2:
6821     //       call
6822     //       nop*
6823     //       pop
6824     //       nop*
6825     //       ret
6826     int    cntPop = 0;
6827     OPCODE nextOpcode;
6828
6829 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6830     do
6831     {
6832         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6833         codeAddrOfNextOpcode += sizeof(__int8);
6834     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6835              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6836              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6837                                                                                          // one pop seen so far.
6838 #else
6839     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6840 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6841
6842     if (isCallPopAndRet)
6843     {
6844         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6845         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6846     }
6847
6848 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6849     // Jit64 Compat:
6850     // Tail call IL pattern could be either of the following
6851     // 1) call/callvirt/calli + ret
6852     // 2) call/callvirt/calli + pop + ret in a method returning void.
6853     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6854 #else
6855     return (nextOpcode == CEE_RET) && (cntPop == 0);
6856 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6857 }
6858
6859 /*****************************************************************************
6860  *
6861  * Determine whether the call could be converted to an implicit tail call
6862  *
6863  */
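// Illustrative example (a sketch, not an exhaustive statement of the conditions checked
// below): with opts.compTailCallOpt enabled and optimizations on, a plain "call Foo"
// immediately followed by "ret" in a BBJ_RETURN block, with no "tail." prefix, is a
// candidate for implicit tail calling.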
6864 bool Compiler::impIsImplicitTailCallCandidate(
6865     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6866 {
6867
6868 #if FEATURE_TAILCALL_OPT
6869     if (!opts.compTailCallOpt)
6870     {
6871         return false;
6872     }
6873
6874     if (opts.compDbgCode || opts.MinOpts())
6875     {
6876         return false;
6877     }
6878
6879     // must not be tail prefixed
6880     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6881     {
6882         return false;
6883     }
6884
6885 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6886     // The block containing the call must be marked as BBJ_RETURN.
6887     // We allow shared ret tail call optimization on recursive calls even under
6888     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6889     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6890         return false;
6891 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6892
6893     // must be call+ret or call+pop+ret
6894     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6895     {
6896         return false;
6897     }
6898
6899     return true;
6900 #else
6901     return false;
6902 #endif // FEATURE_TAILCALL_OPT
6903 }
6904
6905 //------------------------------------------------------------------------
6906 // impImportCall: import a call-inspiring opcode
6907 //
6908 // Arguments:
6909 //    opcode                    - opcode that inspires the call
6910 //    pResolvedToken            - resolved token for the call target
6911 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6912 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6913 //    prefixFlags               - IL prefix flags for the call
6914 //    callInfo                  - EE supplied info for the call
6915 //    rawILOffset               - IL offset of the opcode
6916 //
6917 // Returns:
6918 //    Type of the call's return value.
6919 //    If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
6920 //    However we can't assert for this here yet because there are cases we miss. See issue #13272.
6921 //
6922 //
6923 // Notes:
6924 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6925 //
6926 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6927 //    uninitialized object.
6928
6929 #ifdef _PREFAST_
6930 #pragma warning(push)
6931 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6932 #endif
6933
6934 var_types Compiler::impImportCall(OPCODE                  opcode,
6935                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6936                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6937                                   GenTree*                newobjThis,
6938                                   int                     prefixFlags,
6939                                   CORINFO_CALL_INFO*      callInfo,
6940                                   IL_OFFSET               rawILOffset)
6941 {
6942     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6943
6944     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6945     var_types              callRetTyp                     = TYP_COUNT;
6946     CORINFO_SIG_INFO*      sig                            = nullptr;
6947     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6948     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6949     unsigned               clsFlags                       = 0;
6950     unsigned               mflags                         = 0;
6951     unsigned               argFlags                       = 0;
6952     GenTree*               call                           = nullptr;
6953     GenTreeArgList*        args                           = nullptr;
6954     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6955     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6956     bool                   exactContextNeedsRuntimeLookup = false;
6957     bool                   canTailCall                    = true;
6958     const char*            szCanTailCallFailReason        = nullptr;
6959     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6960     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6961
6962     CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
6963
6964     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6965     // do that before tailcalls, but that is probably not the intended
6966     // semantic. So just disallow tailcalls from synchronized methods.
6967     // Also, popping arguments in a varargs function is more work and NYI.
6968     // If we have a security object, we have to keep our frame around for callers
6969     // to see any imperative security.
6970     if (info.compFlags & CORINFO_FLG_SYNCH)
6971     {
6972         canTailCall             = false;
6973         szCanTailCallFailReason = "Caller is synchronized";
6974     }
6975 #if !FEATURE_FIXED_OUT_ARGS
6976     else if (info.compIsVarArgs)
6977     {
6978         canTailCall             = false;
6979         szCanTailCallFailReason = "Caller is varargs";
6980     }
6981 #endif // FEATURE_FIXED_OUT_ARGS
6982     else if (opts.compNeedSecurityCheck)
6983     {
6984         canTailCall             = false;
6985         szCanTailCallFailReason = "Caller requires a security check.";
6986     }
6987
6988     // We only need to cast the return value of pinvoke inlined calls that return small types
6989
6990     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6991     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6992     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6993     // the time being that the callee might be compiled by the other JIT and thus the return
6994     // value will need to be widened by us (or not widened at all...)
6995
6996     // ReadyToRun code sticks with default calling convention that does not widen small return types.
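    // As a sketch of the consequence (not the exact code, which lives further below in this
    // function): when checkForSmallType is set and the call returns a small integral type such
    // as TYP_BYTE or TYP_SHORT, the importer widens the result on the caller side (a GT_CAST
    // on top of the call) instead of trusting the callee to have normalized the upper bits.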
6997
6998     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6999     bool bIntrinsicImported = false;
7000
7001     CORINFO_SIG_INFO calliSig;
7002     GenTreeArgList*  extraArg = nullptr;
7003
7004     /*-------------------------------------------------------------------------
7005      * First create the call node
7006      */
7007
7008     if (opcode == CEE_CALLI)
7009     {
7010         /* Get the call site sig */
7011         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
7012
7013         callRetTyp = JITtype2varType(calliSig.retType);
7014
7015         call = impImportIndirectCall(&calliSig, ilOffset);
7016
7017         // We don't know the target method, so we have to infer the flags, or
7018         // assume the worst-case.
7019         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
7020
7021 #ifdef DEBUG
7022         if (verbose)
7023         {
7024             unsigned structSize =
7025                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
7026             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7027                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7028         }
7029 #endif
7030         // This should be checked in impImportBlockCode.
7031         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
7032
7033         sig = &calliSig;
7034
7035 #ifdef DEBUG
7036         // We cannot lazily obtain the signature of a CALLI call because it has no method
7037         // handle that we can use, so we need to save its full call signature here.
7038         assert(call->gtCall.callSig == nullptr);
7039         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7040         *call->gtCall.callSig = calliSig;
7041 #endif // DEBUG
7042
7043         if (IsTargetAbi(CORINFO_CORERT_ABI))
7044         {
7045             bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
7046                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
7047                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
7048                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
7049             if (managedCall)
7050             {
7051                 addFatPointerCandidate(call->AsCall());
7052             }
7053         }
7054     }
7055     else // (opcode != CEE_CALLI)
7056     {
7057         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
7058
7059         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
7060         // supply the instantiation parameters necessary to make direct calls to underlying
7061         // shared generic code, rather than calling through instantiating stubs.  If the
7062         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
7063         // must indeed pass an instantiation parameter.
7064
7065         methHnd = callInfo->hMethod;
7066
7067         sig        = &(callInfo->sig);
7068         callRetTyp = JITtype2varType(sig->retType);
7069
7070         mflags = callInfo->methodFlags;
7071
7072 #ifdef DEBUG
7073         if (verbose)
7074         {
7075             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
7076             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7077                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7078         }
7079 #endif
7080         if (compIsForInlining())
7081         {
7082             /* Does this call site have security boundary restrictions? */
7083
7084             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
7085             {
7086                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
7087                 return TYP_UNDEF;
7088             }
7089
7090             /* Does the inlinee need a security check token on the frame */
7091
7092             if (mflags & CORINFO_FLG_SECURITYCHECK)
7093             {
7094                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7095                 return TYP_UNDEF;
7096             }
7097
7098             /* Does the inlinee use StackCrawlMark */
7099
7100             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
7101             {
7102                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
7103                 return TYP_UNDEF;
7104             }
7105
7106             /* For now ignore delegate invoke */
7107
7108             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7109             {
7110                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
7111                 return TYP_UNDEF;
7112             }
7113
7114             /* For now ignore varargs */
7115             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7116             {
7117                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
7118                 return TYP_UNDEF;
7119             }
7120
7121             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7122             {
7123                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
7124                 return TYP_UNDEF;
7125             }
7126
7127             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
7128             {
7129                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
7130                 return TYP_UNDEF;
7131             }
7132         }
7133
7134         clsHnd = pResolvedToken->hClass;
7135
7136         clsFlags = callInfo->classFlags;
7137
7138 #ifdef DEBUG
7139         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
7140
7141         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
7142         // These should be in mscorlib.h, and available through a JIT/EE interface call.
7143         const char* modName;
7144         const char* className;
7145         const char* methodName;
7146         if ((className = eeGetClassName(clsHnd)) != nullptr &&
7147             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
7148             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
7149         {
7150             return impImportJitTestLabelMark(sig->numArgs);
7151         }
7152 #endif // DEBUG
7153
7154         // <NICE> Factor this into getCallInfo </NICE>
7155         bool isSpecialIntrinsic = false;
7156         if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
7157         {
7158             const bool isTail = canTailCall && (tailCall != 0);
7159
7160             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, readonlyCall, isTail,
7161                                 pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID, &isSpecialIntrinsic);
7162
7163             if (compDonotInline())
7164             {
7165                 return TYP_UNDEF;
7166             }
7167
7168             if (call != nullptr)
7169             {
7170                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
7171                        (clsFlags & CORINFO_FLG_FINAL));
7172
7173 #ifdef FEATURE_READYTORUN_COMPILER
7174                 if (call->OperGet() == GT_INTRINSIC)
7175                 {
7176                     if (opts.IsReadyToRun())
7177                     {
7178                         noway_assert(callInfo->kind == CORINFO_CALL);
7179                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
7180                     }
7181                     else
7182                     {
7183                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
7184                     }
7185                 }
7186 #endif
7187
7188                 bIntrinsicImported = true;
7189                 goto DONE_CALL;
7190             }
7191         }
7192
7193 #ifdef FEATURE_SIMD
7194         if (featureSIMD)
7195         {
7196             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
7197             if (call != nullptr)
7198             {
7199                 bIntrinsicImported = true;
7200                 goto DONE_CALL;
7201             }
7202         }
7203 #endif // FEATURE_SIMD
7204
7205         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
7206         {
7207             NO_WAY("Virtual call to a function added via EnC is not supported");
7208         }
7209
7210         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
7211             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7212             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
7213         {
7214             BADCODE("Bad calling convention");
7215         }
7216
7217         //-------------------------------------------------------------------------
7218         //  Construct the call node
7219         //
7220         // Work out what sort of call we're making.
7221         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
7222
7223         constraintCallThisTransform    = callInfo->thisTransform;
7224         exactContextHnd                = callInfo->contextHandle;
7225         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
7226
7227         // A recursive call is treated as a loop to the beginning of the method.
7228         if (gtIsRecursiveCall(methHnd))
7229         {
7230 #ifdef DEBUG
7231             if (verbose)
7232             {
7233                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
7234                         fgFirstBB->bbNum, compCurBB->bbNum);
7235             }
7236 #endif
7237             fgMarkBackwardJump(fgFirstBB, compCurBB);
7238         }
7239
7240         switch (callInfo->kind)
7241         {
7242
7243             case CORINFO_VIRTUALCALL_STUB:
7244             {
7245                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7246                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7247                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
7248                 {
7249
7250                     if (compIsForInlining())
7251                     {
7252                         // Don't import runtime lookups when inlining
7253                         // Inlining has to be aborted in such a case
7254                         /* XXX Fri 3/20/2009
7255                          * By the way, this would never succeed.  If the handle lookup is into the generic
7256                          * dictionary for a candidate, you'll generate different dictionary offsets and the
7257                          * inlined code will crash.
7258                          *
7259                          * To anyone code reviewing this, when could this ever succeed in the future?  It'll
7260                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
7261                          * failing here.
7262                          */
7263                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
7264                         return TYP_UNDEF;
7265                     }
7266
7267                     GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
7268                     assert(!compDonotInline());
7269
7270                     // This is the rough code to set up an indirect stub call
7271                     assert(stubAddr != nullptr);
7272
7273                     // The stubAddr may be a
7274                     // complex expression. As it is evaluated after the args,
7275                     // it may cause registered args to be spilled. Simply spill it.
7276
7277                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
7278                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
7279                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7280
7281                     // Create the actual call node
7282
7283                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7284                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7285
7286                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
7287
7288                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
7289                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7290
7291 #ifdef _TARGET_X86_
7292                     // No tailcalls allowed for these yet...
7293                     canTailCall             = false;
7294                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
7295 #endif
7296                 }
7297                 else
7298                 {
7299                     // OK, the stub is available at compile time.
7300
7301                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7302                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
7303                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7304                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
7305                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
7306                     {
7307                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
7308                     }
7309                 }
7310
7311 #ifdef FEATURE_READYTORUN_COMPILER
7312                 if (opts.IsReadyToRun())
7313                 {
7314                     // Null check is sometimes needed for ready to run to handle
7315                     // non-virtual <-> virtual changes between versions
7316                     if (callInfo->nullInstanceCheck)
7317                     {
7318                         call->gtFlags |= GTF_CALL_NULLCHECK;
7319                     }
7320                 }
7321 #endif
7322
7323                 break;
7324             }
7325
7326             case CORINFO_VIRTUALCALL_VTABLE:
7327             {
7328                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7329                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7330                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7331                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
7332                 break;
7333             }
7334
7335             case CORINFO_VIRTUALCALL_LDVIRTFTN:
7336             {
7337                 if (compIsForInlining())
7338                 {
7339                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
7340                     return TYP_UNDEF;
7341                 }
7342
7343                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7344                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7345                 // OK, we've been told to call via LDVIRTFTN, so just
7346                 // take the call now.
7347
7348                 args = impPopList(sig->numArgs, sig);
7349
7350                 GenTree* thisPtr = impPopStack().val;
7351                 thisPtr          = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7352                 assert(thisPtr != nullptr);
7353
7354                 // Clone the (possibly transformed) "this" pointer
7355                 GenTree* thisPtrCopy;
7356                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7357                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
7358
7359                 GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7360                 assert(fptr != nullptr);
7361
7362                 thisPtr = nullptr; // can't reuse it
7363
7364                 // Now make an indirect call through the function pointer
7365
7366                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7367                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7368                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7369
7370                 // Create the actual call node
7371
7372                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7373                 call->gtCall.gtCallObjp = thisPtrCopy;
7374                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7375
7376                 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7377                 {
7378                     // CoreRT generic virtual method: need to handle potential fat function pointers
7379                     addFatPointerCandidate(call->AsCall());
7380                 }
7381 #ifdef FEATURE_READYTORUN_COMPILER
7382                 if (opts.IsReadyToRun())
7383                 {
7384                     // Null check is needed for ready to run to handle
7385                     // non-virtual <-> virtual changes between versions
7386                     call->gtFlags |= GTF_CALL_NULLCHECK;
7387                 }
7388 #endif
7389
7390                 // Since we are jumping over some code, check that it's OK to skip that code.
7391                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7392                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7393                 goto DONE;
7394             }
7395
7396             case CORINFO_CALL:
7397             {
7398                 // This is for a non-virtual, non-interface etc. call
7399                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7400
7401                 // We remove the nullcheck for the GetType call intrinsic.
7402                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7403                 // and intrinsics.
7404                 if (callInfo->nullInstanceCheck &&
7405                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7406                 {
7407                     call->gtFlags |= GTF_CALL_NULLCHECK;
7408                 }
7409
7410 #ifdef FEATURE_READYTORUN_COMPILER
7411                 if (opts.IsReadyToRun())
7412                 {
7413                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7414                 }
7415 #endif
7416                 break;
7417             }
7418
7419             case CORINFO_CALL_CODE_POINTER:
7420             {
7421                 // The EE has asked us to call by computing a code pointer and then doing an
7422                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
7423
7424                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7425                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7426
7427                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7428                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7429
7430                 GenTree* fptr =
7431                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7432
7433                 if (compDonotInline())
7434                 {
7435                     return TYP_UNDEF;
7436                 }
7437
7438                 // Now make an indirect call through the function pointer
7439
7440                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7441                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7442                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7443
7444                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7445                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7446                 if (callInfo->nullInstanceCheck)
7447                 {
7448                     call->gtFlags |= GTF_CALL_NULLCHECK;
7449                 }
7450
7451                 break;
7452             }
7453
7454             default:
7455                 assert(!"unknown call kind");
7456                 break;
7457         }
7458
7459         //-------------------------------------------------------------------------
7460         // Set more flags
7461
7462         PREFIX_ASSUME(call != nullptr);
7463
7464         if (mflags & CORINFO_FLG_NOGCCHECK)
7465         {
7466             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7467         }
7468
7469         // Mark the call if it's one of the ones we may later treat as an intrinsic
7470         if (isSpecialIntrinsic)
7471         {
7472             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7473         }
7474     }
7475     assert(sig);
7476     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7477
7478     /* Some sanity checks */
7479
7480     // CALL_VIRT and NEWOBJ must have a THIS pointer
7481     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7482     // static bit and hasThis are negations of one another
7483     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7484     assert(call != nullptr);
7485
7486     /*-------------------------------------------------------------------------
7487      * Check special-cases etc
7488      */
7489
7490     /* Special case - Check if it is a call to Delegate.Invoke(). */
7491
7492     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7493     {
7494         assert(!compIsForInlining());
7495         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7496         assert(mflags & CORINFO_FLG_FINAL);
7497
7498         /* Set the delegate flag */
7499         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7500
7501         if (callInfo->secureDelegateInvoke)
7502         {
7503             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7504         }
7505
7506         if (opcode == CEE_CALLVIRT)
7507         {
7508             assert(mflags & CORINFO_FLG_FINAL);
7509
7510             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7511             assert(call->gtFlags & GTF_CALL_NULLCHECK);
7512             call->gtFlags &= ~GTF_CALL_NULLCHECK;
7513         }
7514     }
7515
7516     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7517     actualMethodRetTypeSigClass = sig->retTypeSigClass;
7518     if (varTypeIsStruct(callRetTyp))
7519     {
7520         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
7521         call->gtType = callRetTyp;
7522     }
7523
7524 #if !FEATURE_VARARG
7525     /* Check for varargs */
7526     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7527         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7528     {
7529         BADCODE("Varargs not supported.");
7530     }
7531 #endif // !FEATURE_VARARG
7532
7533 #ifdef UNIX_X86_ABI
7534     if (call->gtCall.callSig == nullptr)
7535     {
7536         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7537         *call->gtCall.callSig = *sig;
7538     }
7539 #endif // UNIX_X86_ABI
7540
7541     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7542         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7543     {
7544         assert(!compIsForInlining());
7545
7546         /* Set the right flags */
7547
7548         call->gtFlags |= GTF_CALL_POP_ARGS;
7549         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7550
7551         /* Can't allow tailcall for varargs as it is caller-pop. The caller
7552            will be expecting to pop a certain number of arguments, but if we
7553            tailcall to a function with a different number of arguments, we
7554            are hosed. There are ways around this (caller remembers esp value,
7555            varargs is not caller-pop, etc), but not worth it. */
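        // An illustrative failure mode (assumed argument counts): suppose our caller pushed 3
        // arguments for this varargs method and so will pop 3 on return; if we tail called into
        // a method that takes 5 arguments, the original caller would still pop only 3 and the
        // stack would be corrupted.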
7556         CLANG_FORMAT_COMMENT_ANCHOR;
7557
7558 #ifdef _TARGET_X86_
7559         if (canTailCall)
7560         {
7561             canTailCall             = false;
7562             szCanTailCallFailReason = "Callee is varargs";
7563         }
7564 #endif
7565
7566         /* Get the total number of arguments - this is already correct
7567          * for CALLI - for methods we have to get it from the call site */
7568
7569         if (opcode != CEE_CALLI)
7570         {
7571 #ifdef DEBUG
7572             unsigned numArgsDef = sig->numArgs;
7573 #endif
7574             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7575
7576 #ifdef DEBUG
7577             // We cannot lazily obtain the signature of a vararg call because using its method
7578             // handle will give us only the declared argument list, not the full argument list.
7579             assert(call->gtCall.callSig == nullptr);
7580             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7581             *call->gtCall.callSig = *sig;
7582 #endif
7583
7584             // For vararg calls we must be sure to load the return type of the
7585             // method actually being called, as well as the return type
7586             // specified in the vararg signature. With type equivalency, these types
7587             // may not be the same.
7588             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7589             {
7590                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7591                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7592                     sig->retType != CORINFO_TYPE_VAR)
7593                 {
7594                     // Make sure that all valuetypes (including enums) that we push are loaded.
7595                     // This is to guarantee that if a GC is triggered from the prestub of this method,
7596                     // all valuetypes in the method signature are already loaded.
7597                     // We need to be able to find the size of the valuetypes, but we cannot
7598                     // do a class-load from within GC.
7599                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7600                 }
7601             }
7602
7603             assert(numArgsDef <= sig->numArgs);
7604         }
7605
7606         /* We will have "cookie" as the last argument but we cannot push
7607          * it on the operand stack because we may overflow, so we append it
7608          * to the arg list after we pop the other arguments */
7609     }
7610
7611     if (mflags & CORINFO_FLG_SECURITYCHECK)
7612     {
7613         assert(!compIsForInlining());
7614
7615         // Need security prolog/epilog callouts when there is
7616         // imperative security in the method. This is to give security a
7617         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7618
7619         if (compIsForInlining())
7620         {
7621             // Cannot handle this if the method being imported is itself an inlinee,
7622             // because an inlinee method does not have its own frame.
7623
7624             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7625             return TYP_UNDEF;
7626         }
7627         else
7628         {
7629             tiSecurityCalloutNeeded = true;
7630
7631             // If the current method calls a method which needs a security check,
7632             // (i.e. the method being compiled has imperative security)
7633             // we need to reserve a slot for the security object in
7634             // the current method's stack frame
7635             opts.compNeedSecurityCheck = true;
7636         }
7637     }
7638
7639     //--------------------------- Inline NDirect ------------------------------
7640
7641     // For inline cases we technically should look at both the current
7642     // block and the call site block (or just the latter if we've
7643     // fused the EH trees). However the block-related checks pertain to
7644     // EH and we currently won't inline a method with EH. So for
7645     // inlinees, just checking the call site block is sufficient.
7646     {
7647         // New lexical block here to avoid compilation errors because of GOTOs.
7648         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7649         impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
7650     }
7651
7652     if (call->gtFlags & GTF_CALL_UNMANAGED)
7653     {
7654         // We set up the unmanaged call by linking the frame, disabling GC, etc.
7655         // This needs to be cleaned up on return
7656         if (canTailCall)
7657         {
7658             canTailCall             = false;
7659             szCanTailCallFailReason = "Callee is native";
7660         }
7661
7662         checkForSmallType = true;
7663
7664         impPopArgsForUnmanagedCall(call, sig);
7665
7666         goto DONE;
7667     }
7668     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7669                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7670                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7671                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7672     {
7673         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7674         {
7675             // Normally this only happens with inlining.
7676             // However, a generic method (or type) being NGENd into another module
7677             // can run into this issue as well.  There's not an easy fall-back for NGEN
7678             // so instead we fall back to JIT.
7679             if (compIsForInlining())
7680             {
7681                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7682             }
7683             else
7684             {
7685                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7686             }
7687
7688             return TYP_UNDEF;
7689         }
7690
7691         GenTree* cookie = eeGetPInvokeCookie(sig);
7692
7693         // This cookie is required to be either a simple GT_CNS_INT or
7694         // an indirection of a GT_CNS_INT
7695         //
7696         GenTree* cookieConst = cookie;
7697         if (cookie->gtOper == GT_IND)
7698         {
7699             cookieConst = cookie->gtOp.gtOp1;
7700         }
7701         assert(cookieConst->gtOper == GT_CNS_INT);
7702
7703         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7704         // we won't allow this tree to participate in any CSE logic
7705         //
7706         cookie->gtFlags |= GTF_DONT_CSE;
7707         cookieConst->gtFlags |= GTF_DONT_CSE;
7708
7709         call->gtCall.gtCallCookie = cookie;
7710
7711         if (canTailCall)
7712         {
7713             canTailCall             = false;
7714             szCanTailCallFailReason = "PInvoke calli";
7715         }
7716     }
7717
7718     /*-------------------------------------------------------------------------
7719      * Create the argument list
7720      */
7721
7722     //-------------------------------------------------------------------------
7723     // Special case - for varargs we have an implicit last argument
7724
7725     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7726     {
7727         assert(!compIsForInlining());
7728
7729         void *varCookie, *pVarCookie;
7730         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7731         {
7732             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7733             return TYP_UNDEF;
7734         }
7735
7736         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7737         assert((!varCookie) != (!pVarCookie));
7738         GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
7739
7740         assert(extraArg == nullptr);
7741         extraArg = gtNewArgList(cookie);
7742     }
7743
7744     //-------------------------------------------------------------------------
7745     // Extra arg for shared generic code and array methods
7746     //
7747     // Extra argument containing instantiation information is passed in the
7748     // following circumstances:
7749     // (a) To the "Address" method on array classes; the extra parameter is
7750     //     the array's type handle (a TypeDesc)
7751     // (b) To shared-code instance methods in generic structs; the extra parameter
7752     //     is the struct's type handle (a vtable ptr)
7753     // (c) To shared-code per-instantiation non-generic static methods in generic
7754     //     classes and structs; the extra parameter is the type handle
7755     // (d) To shared-code generic methods; the extra parameter is an
7756     //     exact-instantiation MethodDesc
7757     //
7758     // We also set the exact type context associated with the call so we can
7759     // inline the call correctly later on.
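    // Purely illustrative instances of the above (hypothetical types, for intuition only):
    // per (b), an instance method on a shared generic struct such as ArraySegment<__Canon>
    // receives the exact struct type handle (e.g. for ArraySegment<string>); per (d), a call
    // site that invokes a shared generic method M<__Canon> as M<string> passes the exact
    // MethodDesc for M<string>.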
7760
7761     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7762     {
7763         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7764         if (clsHnd == nullptr)
7765         {
7766             NO_WAY("CALLI on parameterized type");
7767         }
7768
7769         assert(opcode != CEE_CALLI);
7770
7771         GenTree* instParam;
7772         BOOL     runtimeLookup;
7773
7774         // Instantiated generic method
7775         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7776         {
7777             CORINFO_METHOD_HANDLE exactMethodHandle =
7778                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7779
7780             if (!exactContextNeedsRuntimeLookup)
7781             {
7782 #ifdef FEATURE_READYTORUN_COMPILER
7783                 if (opts.IsReadyToRun())
7784                 {
7785                     instParam =
7786                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7787                     if (instParam == nullptr)
7788                     {
7789                         assert(compDonotInline());
7790                         return TYP_UNDEF;
7791                     }
7792                 }
7793                 else
7794 #endif
7795                 {
7796                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7797                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7798                 }
7799             }
7800             else
7801             {
7802                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7803                 if (instParam == nullptr)
7804                 {
7805                     assert(compDonotInline());
7806                     return TYP_UNDEF;
7807                 }
7808             }
7809         }
7810
7811         // otherwise must be an instance method in a generic struct,
7812         // a static method in a generic type, or a runtime-generated array method
7813         else
7814         {
7815             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7816             CORINFO_CLASS_HANDLE exactClassHandle =
7817                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7818
7819             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7820             {
7821                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7822                 return TYP_UNDEF;
7823             }
7824
7825             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7826             {
7827                 // We indicate "readonly" to the Address operation by using a null
7828                 // instParam.
7829                 instParam = gtNewIconNode(0, TYP_REF);
7830             }
7831             else if (!exactContextNeedsRuntimeLookup)
7832             {
7833 #ifdef FEATURE_READYTORUN_COMPILER
7834                 if (opts.IsReadyToRun())
7835                 {
7836                     instParam =
7837                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7838                     if (instParam == nullptr)
7839                     {
7840                         assert(compDonotInline());
7841                         return TYP_UNDEF;
7842                     }
7843                 }
7844                 else
7845 #endif
7846                 {
7847                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7848                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7849                 }
7850             }
7851             else
7852             {
7853                 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
7854                 // with which the call was constrained. We embed pConstrainedResolvedToken as the extra argument
7855                 // because pResolvedToken is an interface method and interface types make a poor generic context.
7856                 if (pConstrainedResolvedToken)
7857                 {
7858                     instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
7859                                                  FALSE /* importParent */);
7860                 }
7861                 else
7862                 {
7863                     instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7864                 }
7865
7866                 if (instParam == nullptr)
7867                 {
7868                     assert(compDonotInline());
7869                     return TYP_UNDEF;
7870                 }
7871             }
7872         }
7873
7874         assert(extraArg == nullptr);
7875         extraArg = gtNewArgList(instParam);
7876     }
7877
7878     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7879     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7880     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7881     // exactContextHnd is not currently required when inlining shared generic code into shared
7882     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7883     // (e.g. anything marked needsRuntimeLookup)
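    // A sketch of why the exact context matters: inlining the shared body of a method on
    // Foo<__Canon> that reads a static field of Foo<T> needs exactContextHnd to tell which
    // instantiation's statics (say Foo<string>'s) are being accessed.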
7884     if (exactContextNeedsRuntimeLookup)
7885     {
7886         exactContextHnd = nullptr;
7887     }
7888
7889     if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
7890     {
7891         // Only verifiable cases are supported.
7892         // dup; ldvirtftn; newobj; or ldftn; newobj.
7893         // The IL could contain an unverifiable sequence; in that case the optimization should not be done.
7894         if (impStackHeight() > 0)
7895         {
7896             typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
7897             if (delegateTypeInfo.IsToken())
7898             {
7899                 ldftnToken = delegateTypeInfo.GetToken();
7900             }
7901         }
7902     }
7903
7904     //-------------------------------------------------------------------------
7905     // The main group of arguments
7906
7907     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
7908
7909     if (args)
7910     {
7911         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7912     }
7913
7914     //-------------------------------------------------------------------------
7915     // The "this" pointer
7916
7917     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7918     {
7919         GenTree* obj;
7920
7921         if (opcode == CEE_NEWOBJ)
7922         {
7923             obj = newobjThis;
7924         }
7925         else
7926         {
7927             obj = impPopStack().val;
7928             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7929             if (compDonotInline())
7930             {
7931                 return TYP_UNDEF;
7932             }
7933         }
7934
7935         // Store the "this" value in the call
7936         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7937         call->gtCall.gtCallObjp = obj;
7938
7939         // Is this a virtual or interface call?
7940         if (call->gtCall.IsVirtual())
7941         {
7942             // only true object pointers can be virtual
7943             assert(obj->gtType == TYP_REF);
7944
7945             // See if we can devirtualize.
7946             impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
7947                                 &exactContextHnd);
7948         }
7949
7950         if (impIsThis(obj))
7951         {
7952             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7953         }
7954     }
7955
7956     //-------------------------------------------------------------------------
7957     // The "this" pointer for "newobj"
7958
7959     if (opcode == CEE_NEWOBJ)
7960     {
7961         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7962         {
7963             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7964             // This is a 'new' of a variable sized object, where
7965             // the constructor is to return the object.  In this case
7966             // the constructor claims to return VOID but we know it
7967             // actually returns the new object.
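            // (As an aside, a typical example of such a class is System.String, whose
            // constructors are implemented to return the newly allocated string.)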
7968             assert(callRetTyp == TYP_VOID);
7969             callRetTyp   = TYP_REF;
7970             call->gtType = TYP_REF;
7971             impSpillSpecialSideEff();
7972
7973             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7974         }
7975         else
7976         {
7977             if (clsFlags & CORINFO_FLG_DELEGATE)
7978             {
7979                 // The new inliner morphs it in impImportCall.
7980                 // This will allow us to inline the call to the delegate constructor.
7981                 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
7982             }
7983
7984             if (!bIntrinsicImported)
7985             {
7986
7987 #if defined(DEBUG) || defined(INLINE_DATA)
7988
7989                 // Keep track of the raw IL offset of the call
7990                 call->gtCall.gtRawILOffset = rawILOffset;
7991
7992 #endif // defined(DEBUG) || defined(INLINE_DATA)
7993
7994                 // Is it an inline candidate?
7995                 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7996             }
7997
7998             // append the call node.
7999             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8000
8001             // Now push the value of the 'new' onto the stack
8002
8003             // This is a 'new' of a non-variable sized object.
8004             // Append the new node (op1) to the statement list,
8005             // and then push the local holding the value of this
8006             // new instruction on the stack.
8007
8008             if (clsFlags & CORINFO_FLG_VALUECLASS)
8009             {
8010                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
8011
8012                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
8013                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
8014             }
8015             else
8016             {
8017                 if (newobjThis->gtOper == GT_COMMA)
8018                 {
8019                     // In coreclr the callout can be inserted even if verification is disabled
8020                     // so we cannot rely on tiVerificationNeeded alone
8021
8022                     // We must have inserted the callout. Get the real newobj.
8023                     newobjThis = newobjThis->gtOp.gtOp2;
8024                 }
8025
8026                 assert(newobjThis->gtOper == GT_LCL_VAR);
8027                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
8028             }
8029         }
8030         return callRetTyp;
8031     }
8032
8033 DONE:
8034
8035     if (tailCall)
8036     {
8037         // This check cannot be performed for implicit tail calls for the reason
8038         // that impIsImplicitTailCallCandidate() is not checking whether return
8039         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
8040         // As a result it is possible that in the following case, we find that
8041         // the type stack is non-empty if Callee() is considered for implicit
8042         // tail calling.
8043         //      int Caller(..) { ...; Callee(); /* Callee returns void */ ...; return val; }
8044         //
8045         // Note that we cannot check return type compatibility before impImportCall():
8046         // we don't have the required info there, and doing so would mean duplicating
8047         // some of impImportCall()'s logic.
8048         //
8049         // For implicit tail calls, we perform this check after return types are
8050         // known to be compatible.
8051         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
8052         {
8053             BADCODE("Stack should be empty after tailcall");
8054         }
8055
8056         // Note that we cannot relax this condition with genActualType(), as
8057         // the calling convention dictates that the caller of a function with
8058         // a small-typed return value is responsible for normalizing the return value.
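        // For instance (illustrative only): if the current method returns 'int' while the
        // tail-called callee returns 'short', the current method's caller performs no
        // normalization (it expects a full int), so the callee's un-normalized small-typed
        // result could escape; hence the exact compatibility check below.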
8059
8060         if (canTailCall &&
8061             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
8062                                           callInfo->sig.retTypeClass))
8063         {
8064             canTailCall             = false;
8065             szCanTailCallFailReason = "Return types are not tail call compatible";
8066         }
8067
8068         // Stack empty check for implicit tail calls.
8069         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
8070         {
8071 #ifdef _TARGET_AMD64_
8072             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
8073             // in JIT64, not an InvalidProgramException.
8074             Verify(false, "Stack should be empty after tailcall");
8075 #else  // !_TARGET_AMD64_
8076             BADCODE("Stack should be empty after tailcall");
8077 #endif // _TARGET_AMD64_
8078         }
8079
8080         // assert(compCurBB is not a catch, finally or filter block);
8081         // assert(compCurBB is not a try block protected by a finally block);
8082
8083         // Check for permission to tailcall
8084         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
8085
8086         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
8087
8088         if (canTailCall)
8089         {
8090             // True virtual or indirect calls shouldn't pass in a callee handle.
8091             CORINFO_METHOD_HANDLE exactCalleeHnd =
8092                 ((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
8093             GenTree* thisArg = call->gtCall.gtCallObjp;
8094
8095             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
8096             {
8097                 canTailCall = true;
8098                 if (explicitTailCall)
8099                 {
8100                     // In the case of an explicit tail call, mark it so that it is not considered
8101                     // for inlining.
8102                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
8103 #ifdef DEBUG
8104                     if (verbose)
8105                     {
8106                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
8107                         printTreeID(call);
8108                         printf("\n");
8109                     }
8110 #endif
8111                 }
8112                 else
8113                 {
8114 #if FEATURE_TAILCALL_OPT
8115                     // Must be an implicit tail call.
8116                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
8117
8118                     // It is possible that a call node is both an inline candidate and marked
8119                     // for opportunistic tail calling.  Inlining happens before morphing of
8120                     // trees.  If inlining of an inline candidate gets aborted for whatever
8121                     // reason, it will survive to the morphing stage, at which point it will be
8122                     // transformed into a tail call after performing additional checks.
8123
8124                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
8125 #ifdef DEBUG
8126                     if (verbose)
8127                     {
8128                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
8129                         printTreeID(call);
8130                         printf("\n");
8131                     }
8132 #endif
8133
8134 #else //! FEATURE_TAILCALL_OPT
8135                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
8136
8137 #endif // FEATURE_TAILCALL_OPT
8138                 }
8139
8140                 // we can't report success just yet...
8141             }
8142             else
8143             {
8144                 canTailCall = false;
8145 // canTailCall reported its reasons already
8146 #ifdef DEBUG
8147                 if (verbose)
8148                 {
8149                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
8150                     printTreeID(call);
8151                     printf("\n");
8152                 }
8153 #endif
8154             }
8155         }
8156         else
8157         {
8158             // If this assert fires it means that canTailCall was set to false without setting a reason!
8159             assert(szCanTailCallFailReason != nullptr);
8160
8161 #ifdef DEBUG
8162             if (verbose)
8163             {
8164                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
8165                 printTreeID(call);
8166                 printf(": %s\n", szCanTailCallFailReason);
8167             }
8168 #endif
8169             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
8170                                                      szCanTailCallFailReason);
8171         }
8172     }
8173
8174     // Note: we assume that small return types are already normalized by the managed callee
8175     // or by the pinvoke stub for calls to unmanaged code.
8176
8177     if (!bIntrinsicImported)
8178     {
8179         //
8180         // Things needed to be checked when bIntrinsicImported is false.
8181         //
8182
8183         assert(call->gtOper == GT_CALL);
8184         assert(sig != nullptr);
8185
8186         // Tail calls require us to save the call site's sig info so we can obtain an argument
8187         // copying thunk from the EE later on.
8188         if (call->gtCall.callSig == nullptr)
8189         {
8190             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8191             *call->gtCall.callSig = *sig;
8192         }
8193
8194         if (compIsForInlining() && opcode == CEE_CALLVIRT)
8195         {
8196             GenTree* callObj = call->gtCall.gtCallObjp;
8197             assert(callObj != nullptr);
8198
8199             if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
8200                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
8201                                                                    impInlineInfo->inlArgInfo))
8202             {
8203                 impInlineInfo->thisDereferencedFirst = true;
8204             }
8205         }
8206
8207 #if defined(DEBUG) || defined(INLINE_DATA)
8208
8209         // Keep track of the raw IL offset of the call
8210         call->gtCall.gtRawILOffset = rawILOffset;
8211
8212 #endif // defined(DEBUG) || defined(INLINE_DATA)
8213
8214         // Is it an inline candidate?
8215         impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8216     }
8217
8218 DONE_CALL:
8219     // Push or append the result of the call
8220     if (callRetTyp == TYP_VOID)
8221     {
8222         if (opcode == CEE_NEWOBJ)
8223         {
8224             // we actually did push something, so don't spill the thing we just pushed.
8225             assert(verCurrentState.esStackDepth > 0);
8226             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
8227         }
8228         else
8229         {
8230             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8231         }
8232     }
8233     else
8234     {
8235         impSpillSpecialSideEff();
8236
8237         if (clsFlags & CORINFO_FLG_ARRAY)
8238         {
8239             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8240         }
8241
8242         // Find the return type used for verification by interpreting the method signature.
8243         // NB: we are clobbering the already established sig.
8244         if (tiVerificationNeeded)
8245         {
8246             // Actually, we never get the sig for the original method.
8247             sig = &(callInfo->verSig);
8248         }
8249
8250         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
8251         tiRetVal.NormaliseForStack();
8252
8253         // The CEE_READONLY prefix modifies the verification semantics of an Address
8254         // operation on an array type.
8255         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
8256         {
8257             tiRetVal.SetIsReadonlyByRef();
8258         }
8259
8260         if (tiVerificationNeeded)
8261         {
8262             // We assume all calls return permanent home byrefs. If they
8263             // didn't, they wouldn't be verifiable. This also covers
8264             // the Address() helper for multidimensional arrays.
8265             if (tiRetVal.IsByRef())
8266             {
8267                 tiRetVal.SetIsPermanentHomeByRef();
8268             }
8269         }
8270
8271         if (call->IsCall())
8272         {
8273             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
8274
8275             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
8276             if (varTypeIsStruct(callRetTyp))
8277             {
8278                 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
8279             }
8280
8281             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
8282             {
8283                 assert(opts.OptEnabled(CLFLG_INLINING));
8284                 assert(!fatPointerCandidate); // We should not try to inline calli.
8285
8286                 // Make the call its own tree (spill the stack if needed).
8287                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8288
8289                 // TODO: Still using the widened type.
8290                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
8291             }
8292             else
8293             {
8294                 if (fatPointerCandidate)
8295                 {
8296                     // fatPointer candidates should be in statements of the form call() or var = call().
8297                     // Such a form lets us find statements with fat calls without walking whole trees,
8298                     // and avoids problems with cutting trees.
8299                     assert(!bIntrinsicImported);
8300                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
8301                     if (call->OperGet() != GT_LCL_VAR) // may already have been converted by impFixupCallStructReturn.
8302                     {
8303                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
8304                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
8305                         varDsc->lvVerTypeInfo = tiRetVal;
8306                         impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
8307                         // impAssignTempGen can change src arg list and return type for call that returns struct.
8308                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
8309                         call           = gtNewLclvNode(calliSlot, type);
8310                     }
8311                 }
8312
8313                 // For non-candidates we must also spill, since we
8314                 // might have locals live on the eval stack that this
8315                 // call can modify.
8316                 //
8317                 // Suppress this for certain well-known call targets
8318                 // that we know won't modify locals, e.g. calls that are
8319                 // recognized in gtCanOptimizeTypeEquality. Otherwise
8320                 // we may break key fragile pattern matches later on.
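                // (Sketch of one such fragile pattern: 'typeof(X) == typeof(Y)' imports as
                // type-handle-to-RuntimeType helper calls that later phases pattern-match and
                // fold; spilling one of them into a temp here would defeat that match.)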
8321                 bool spillStack = true;
8322                 if (call->IsCall())
8323                 {
8324                     GenTreeCall* callNode = call->AsCall();
8325                     if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
8326                     {
8327                         spillStack = false;
8328                     }
8329                     else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8330                     {
8331                         spillStack = false;
8332                     }
8333                 }
8334
8335                 if (spillStack)
8336                 {
8337                     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8338                 }
8339             }
8340         }
8341
8342         if (!bIntrinsicImported)
8343         {
8344             //-------------------------------------------------------------------------
8345             //
8346             /* If the call is of a small type and the callee is managed, the callee will normalize the result
8347                 before returning.
8348                 However, we need to normalize small type values returned by unmanaged
8349                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8350                 if we use the shorter inlined pinvoke stub. */
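            // Illustrative example (hypothetical declaration, not from these sources): with an
            // inlined pinvoke stub for
            //     [DllImport("native")] static extern byte GetFlag();
            // the upper bits of the returned register may hold garbage, so the cast inserted
            // below re-normalizes the small-typed result.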
8351
8352             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8353             {
8354                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
8355             }
8356         }
8357
8358         impPushOnStack(call, tiRetVal);
8359     }
8360
8361     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
8362     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8363     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8364     //  callInfoCache.uncacheCallInfo();
8365
8366     return callRetTyp;
8367 }
8368 #ifdef _PREFAST_
8369 #pragma warning(pop)
8370 #endif
8371
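//-----------------------------------------------------------------------------------
//  impMethodInfo_hasRetBuffArg: Query whether a method returns its struct value via a
//  hidden return buffer argument.
//
//  Arguments:
//    methInfo   -  method info of the callee
//
//  Return Value:
//    Returns true when the struct is returned by reference (SPK_ByReference), i.e. via
//    a retbuf arg; false otherwise.
//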
8372 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8373 {
8374     CorInfoType corType = methInfo->args.retType;
8375
8376     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8377     {
8378         // We have some kind of STRUCT being returned
8379
8380         structPassingKind howToReturnStruct = SPK_Unknown;
8381
8382         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8383
8384         if (howToReturnStruct == SPK_ByReference)
8385         {
8386             return true;
8387         }
8388     }
8389
8390     return false;
8391 }
8392
8393 #ifdef DEBUG
8394 //
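// impImportJitTestLabelMark: Import a call to the JIT test-label marker (DEBUG only).
// Pops the test label (and, for the three-argument form, an accompanying number) off the
// stack, records the resulting TestLabelAndNum annotation on the tree left on top of the
// stack, and pushes that tree back so evaluation is otherwise unaffected.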
8395 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8396 {
8397     TestLabelAndNum tlAndN;
8398     if (numArgs == 2)
8399     {
8400         tlAndN.m_num  = 0;
8401         StackEntry se = impPopStack();
8402         assert(se.seTypeInfo.GetType() == TI_INT);
8403         GenTree* val = se.val;
8404         assert(val->IsCnsIntOrI());
8405         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8406     }
8407     else if (numArgs == 3)
8408     {
8409         StackEntry se = impPopStack();
8410         assert(se.seTypeInfo.GetType() == TI_INT);
8411         GenTree* val = se.val;
8412         assert(val->IsCnsIntOrI());
8413         tlAndN.m_num = val->AsIntConCommon()->IconValue();
8414         se           = impPopStack();
8415         assert(se.seTypeInfo.GetType() == TI_INT);
8416         val = se.val;
8417         assert(val->IsCnsIntOrI());
8418         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8419     }
8420     else
8421     {
8422         assert(false);
8423     }
8424
8425     StackEntry expSe = impPopStack();
8426     GenTree*   node  = expSe.val;
8427
8428     // There are a small number of special cases, where we actually put the annotation on a subnode.
8429     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8430     {
8431         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8432         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8433         // offset within the static field block whose address is returned by the helper call.
8434         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
8435         GenTree* helperCall = nullptr;
8436         assert(node->OperGet() == GT_IND);
8437         tlAndN.m_num -= 100;
8438         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8439         GetNodeTestData()->Remove(node);
8440     }
8441     else
8442     {
8443         GetNodeTestData()->Set(node, tlAndN);
8444     }
8445
8446     impPushOnStack(node, expSe.seTypeInfo);
8447     return node->TypeGet();
8448 }
8449 #endif // DEBUG
8450
8451 //-----------------------------------------------------------------------------------
8452 //  impFixupCallStructReturn: For a call node that returns a struct type either
8453 //  adjust the return type to an enregisterable type, or set the flag to indicate
8454 //  struct return via retbuf arg.
8455 //
8456 //  Arguments:
8457 //    call       -  GT_CALL GenTree node
8458 //    retClsHnd  -  Class handle of return type of the call
8459 //
8460 //  Return Value:
8461 //    Returns new GenTree node after fixing struct return of call node
8462 //
8463 GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8464 {
8465     if (!varTypeIsStruct(call))
8466     {
8467         return call;
8468     }
8469
8470     call->gtRetClsHnd = retClsHnd;
8471
8472 #if FEATURE_MULTIREG_RET
8473     // Initialize Return type descriptor of call node
8474     ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8475     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8476 #endif // FEATURE_MULTIREG_RET
8477
8478 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8479
8480     // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSes.
8481     assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8482
8483     // The return type will remain as the incoming struct type unless normalized to a
8484     // single eightbyte return type below.
8485     call->gtReturnType = call->gtType;
8486
8487     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8488     if (retRegCount != 0)
8489     {
8490         if (retRegCount == 1)
8491         {
8492             // struct returned in a single register
8493             call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8494         }
8495         else
8496         {
8497             // must be a struct returned in two registers
8498             assert(retRegCount == 2);
8499
8500             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8501             {
8502                 // Force a call returning multi-reg struct to be always of the IR form
8503                 //   tmp = call
8504                 //
8505                 // No need to assign a multi-reg struct to a local var if:
8506                 //  - It is a tail call or
8507                 //  - The call is marked for in-lining later
8508                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8509             }
8510         }
8511     }
8512     else
8513     {
8514         // Struct not returned in registers, i.e. returned via the hidden retbuf arg.
8515         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8516     }
8517
8518 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8519
8520     // Check for TYP_STRUCT type that wraps a primitive type
8521     // Such structs are returned using a single register
8522     // and we change the return type on those calls here.
8523     //
8524     structPassingKind howToReturnStruct;
8525     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8526
8527     if (howToReturnStruct == SPK_ByReference)
8528     {
8529         assert(returnType == TYP_UNKNOWN);
8530         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8531     }
8532     else
8533     {
8534         assert(returnType != TYP_UNKNOWN);
8535         call->gtReturnType = returnType;
8536
8537         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
8538         if ((returnType == TYP_LONG) && (compLongUsed == false))
8539         {
8540             compLongUsed = true;
8541         }
8542         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
8543         {
8544             compFloatingPointUsed = true;
8545         }
8546
8547 #if FEATURE_MULTIREG_RET
8548         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8549         assert(retRegCount != 0);
8550
8551         if (retRegCount >= 2)
8552         {
8553             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8554             {
8555                 // Force a call returning multi-reg struct to be always of the IR form
8556                 //   tmp = call
8557                 //
8558                 // No need to assign a multi-reg struct to a local var if:
8559                 //  - It is a tail call or
8560                 //  - The call is marked for in-lining later
8561                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8562             }
8563         }
8564 #endif // FEATURE_MULTIREG_RET
8565     }
8566
8567 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8568
8569     return call;
8570 }
8571
8572 /*****************************************************************************
8573    For struct return values, re-type the operand in the case where the ABI
8574    does not use a struct return buffer.
8575    Note that this method is only called for !_TARGET_X86_.
8576  */
8577
8578 GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
8579 {
8580     assert(varTypeIsStruct(info.compRetType));
8581     assert(info.compRetBuffArg == BAD_VAR_NUM);
8582
8583 #if defined(_TARGET_XARCH_)
8584
8585 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8586     // No VarArgs for CoreCLR on x64 Unix
8587     assert(!info.compIsVarArgs);
8588
8589     // Is method returning a multi-reg struct?
8590     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8591     {
8592         // In case of multi-reg struct return, we force IR to be one of the following:
8593         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
8594         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
8595
8596         if (op->gtOper == GT_LCL_VAR)
8597         {
8598             // Make sure that this struct stays in memory and doesn't get promoted.
8599             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
8600             lvaTable[lclNum].lvIsMultiRegRet = true;
8601
8602             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8603             op->gtFlags |= GTF_DONT_CSE;
8604
8605             return op;
8606         }
8607
8608         if (op->gtOper == GT_CALL)
8609         {
8610             return op;
8611         }
8612
8613         return impAssignMultiRegTypeToVar(op, retClsHnd);
8614     }
8615 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8616     assert(info.compRetNativeType != TYP_STRUCT);
8617 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8618
8619 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8620
8621     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8622     {
8623         if (op->gtOper == GT_LCL_VAR)
8624         {
8625             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8626             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8627             // Make sure this struct type stays as struct so that we can return it as an HFA
8628             lvaTable[lclNum].lvIsMultiRegRet = true;
8629
8630             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8631             op->gtFlags |= GTF_DONT_CSE;
8632
8633             return op;
8634         }
8635
8636         if (op->gtOper == GT_CALL)
8637         {
8638             if (op->gtCall.IsVarargs())
8639             {
8640                 // We cannot tail call because control needs to return to fixup the calling
8641                 // convention for result return.
8642                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8643                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8644             }
8645             else
8646             {
8647                 return op;
8648             }
8649         }
8650         return impAssignMultiRegTypeToVar(op, retClsHnd);
8651     }
8652
8653 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8654
8655     // Is method returning a multi-reg struct?
8656     if (IsMultiRegReturnedType(retClsHnd))
8657     {
8658         if (op->gtOper == GT_LCL_VAR)
8659         {
8660             // This LCL_VAR stays as a TYP_STRUCT
8661             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8662
8663             // Make sure this struct type is not struct promoted
8664             lvaTable[lclNum].lvIsMultiRegRet = true;
8665
8666             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8667             op->gtFlags |= GTF_DONT_CSE;
8668
8669             return op;
8670         }
8671
8672         if (op->gtOper == GT_CALL)
8673         {
8674             if (op->gtCall.IsVarargs())
8675             {
8676                 // We cannot tail call because control needs to return to fixup the calling
8677                 // convention for result return.
8678                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8679                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8680             }
8681             else
8682             {
8683                 return op;
8684             }
8685         }
8686         return impAssignMultiRegTypeToVar(op, retClsHnd);
8687     }
8688
8689 #endif // _TARGET_XARCH_ || (FEATURE_MULTIREG_RET && (_TARGET_ARM_ || _TARGET_ARM64_))
8690
8691 REDO_RETURN_NODE:
8692     // Adjust the type away from struct to integral,
8693     // without normalizing.
8694     if (op->gtOper == GT_LCL_VAR)
8695     {
8696         op->ChangeOper(GT_LCL_FLD);
8697     }
8698     else if (op->gtOper == GT_OBJ)
8699     {
8700         GenTree* op1 = op->AsObj()->Addr();
8701
8702         // We will fold away OBJ/ADDR
8703         // except for OBJ/ADDR/INDEX
8704         //     as the array type influences the array element's offset
8705         //     Later in this method we change op->gtType to info.compRetNativeType
8706         //     This is not correct when op is a GT_INDEX as the starting offset
8707         //     for the array elements 'elemOffs' is different for an array of
8708         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8709         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8710         //
8711         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8712         {
8713             // Change '*(&X)' to 'X' and see if we can do better
8714             op = op1->gtOp.gtOp1;
8715             goto REDO_RETURN_NODE;
8716         }
8717         op->gtObj.gtClass = NO_CLASS_HANDLE;
8718         op->ChangeOperUnchecked(GT_IND);
8719         op->gtFlags |= GTF_IND_TGTANYWHERE;
8720     }
8721     else if (op->gtOper == GT_CALL)
8722     {
8723         if (op->AsCall()->TreatAsHasRetBufArg(this))
8724         {
8725             // This must be one of those 'special' helpers that don't
8726             // really have a return buffer, but instead use it as a way
8727             // to keep the trees cleaner with fewer address-taken temps.
8728             //
8729             // Well, now we have to materialize the return buffer as
8730             // an address-taken temp. Then we can return the temp.
8731             //
8732             // NOTE: this code assumes that since the call directly
8733             // feeds the return, then the call must be returning the
8734             // same structure/class/type.
8735             //
8736             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8737
8738             // No need to spill anything as we're about to return.
8739             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8740
8741             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8742             // jump directly to a GT_LCL_FLD.
8743             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8744             op->ChangeOper(GT_LCL_FLD);
8745         }
8746         else
8747         {
8748             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8749
8750             // Don't change the gtType of the node just yet, it will get changed later.
8751             return op;
8752         }
8753     }
8754 #if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_ARM64_)
8755     else if ((op->gtOper == GT_HWIntrinsic) && varTypeIsSIMD(op->gtType))
8756     {
8757         // TODO-ARM64-FIXME Implement ARM64 ABI for Short Vectors properly
8758         // assert(op->gtType == info.compRetNativeType)
8759         if (op->gtType != info.compRetNativeType)
8760         {
8761             // Insert a register move to keep target type of SIMD intrinsic intact
8762             op = gtNewScalarHWIntrinsicNode(info.compRetNativeType, op, NI_ARM64_NONE_MOV);
8763         }
8764     }
8765 #endif
8766     else if (op->gtOper == GT_COMMA)
8767     {
8768         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8769     }
8770
8771     op->gtType = info.compRetNativeType;
8772
8773     return op;
8774 }
8775
8776 /*****************************************************************************
8777    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8778    finally-protected try. We find the finally blocks protecting the current
8779    offset (in order) by walking over the complete exception table and
8780    finding enclosing clauses. This assumes that the table is sorted.
8781    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8782
8783    If we are leaving a catch handler, we need to attach the
8784    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8785
8786    After this function, the BBJ_LEAVE block has been converted to a different type.
8787  */
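/* Example (sketch; details differ between the funclet and non-funclet implementations
   below): a LEAVE that exits two nested finally-protected 'try' regions ends up as
       BBJ_CALLFINALLY (inner finally) -> step BBJ_ALWAYS
           -> BBJ_CALLFINALLY (outer finally) -> step BBJ_ALWAYS
               -> final BBJ_ALWAYS -> original leave target
 */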
8788
8789 #if !FEATURE_EH_FUNCLETS
8790
8791 void Compiler::impImportLeave(BasicBlock* block)
8792 {
8793 #ifdef DEBUG
8794     if (verbose)
8795     {
8796         printf("\nBefore import CEE_LEAVE:\n");
8797         fgDispBasicBlocks();
8798         fgDispHandlerTab();
8799     }
8800 #endif // DEBUG
8801
8802     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8803     unsigned    blkAddr         = block->bbCodeOffs;
8804     BasicBlock* leaveTarget     = block->bbJumpDest;
8805     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8806
8807     // LEAVE clears the stack: spill side effects and set the stack depth to 0.
8808
8809     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8810     verCurrentState.esStackDepth = 0;
8811
8812     assert(block->bbJumpKind == BBJ_LEAVE);
8813     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8814
8815     BasicBlock* step         = DUMMY_INIT(NULL);
8816     unsigned    encFinallies = 0; // Number of enclosing finallies.
8817     GenTree*    endCatches   = NULL;
8818     GenTree*    endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8819
8820     unsigned  XTnum;
8821     EHblkDsc* HBtab;
8822
8823     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8824     {
8825         // Grab the handler offsets
8826
8827         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8828         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8829         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8830         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8831
8832         /* Is this a catch-handler we are CEE_LEAVEing out of?
8833          * If so, we need to call CORINFO_HELP_ENDCATCH.
8834          */
8835
8836         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8837         {
8838             // Can't CEE_LEAVE out of a finally/fault handler
8839             if (HBtab->HasFinallyOrFaultHandler())
8840                 BADCODE("leave out of fault/finally block");
8841
8842             // Create the call to CORINFO_HELP_ENDCATCH
8843             GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8844
8845             // Make a list of all the currently pending endCatches
8846             if (endCatches)
8847                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8848             else
8849                 endCatches = endCatch;
8850
8851 #ifdef DEBUG
8852             if (verbose)
8853             {
8854                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8855                        "CORINFO_HELP_ENDCATCH\n",
8856                        block->bbNum, XTnum);
8857             }
8858 #endif
8859         }
8860         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8861                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8862         {
8863             /* This is a finally-protected try we are jumping out of */
8864
8865             /* If there are any pending endCatches, and we have already
8866                jumped out of a finally-protected try, then the endCatches
8867                have to be put in a block in an outer try for async
8868                exceptions to work correctly.
8869                Else, just append to the original block. */
8870
8871             BasicBlock* callBlock;
8872
8873             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8874
8875             if (encFinallies == 0)
8876             {
8877                 assert(step == DUMMY_INIT(NULL));
8878                 callBlock             = block;
8879                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8880
8881                 if (endCatches)
8882                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8883
8884 #ifdef DEBUG
8885                 if (verbose)
8886                 {
8887                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8888                            "block %s\n",
8889                            callBlock->dspToString());
8890                 }
8891 #endif
8892             }
8893             else
8894             {
8895                 assert(step != DUMMY_INIT(NULL));
8896
8897                 /* Calling the finally block */
8898                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8899                 assert(step->bbJumpKind == BBJ_ALWAYS);
8900                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8901                                               // finally in the chain)
8902                 step->bbJumpDest->bbRefs++;
8903
8904                 /* The new block will inherit this block's weight */
8905                 callBlock->setBBWeight(block->bbWeight);
8906                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8907
8908 #ifdef DEBUG
8909                 if (verbose)
8910                 {
8911                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
8912                            callBlock->dspToString());
8913                 }
8914 #endif
8915
8916                 GenTree* lastStmt;
8917
8918                 if (endCatches)
8919                 {
8920                     lastStmt         = gtNewStmt(endCatches);
8921                     endLFin->gtNext  = lastStmt;
8922                     lastStmt->gtPrev = endLFin;
8923                 }
8924                 else
8925                 {
8926                     lastStmt = endLFin;
8927                 }
8928
8929                 // note that this sets BBF_IMPORTED on the block
8930                 impEndTreeList(callBlock, endLFin, lastStmt);
8931             }
8932
8933             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8934             /* The new block will inherit this block's weight */
8935             step->setBBWeight(block->bbWeight);
8936             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8937
8938 #ifdef DEBUG
8939             if (verbose)
8940             {
8941                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
8942                        step->dspToString());
8943             }
8944 #endif
8945
8946             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8947             assert(finallyNesting <= compHndBBtabCount);
8948
8949             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8950             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8951             endLFin               = gtNewStmt(endLFin);
8952             endCatches            = NULL;
8953
8954             encFinallies++;
8955
8956             invalidatePreds = true;
8957         }
8958     }
8959
8960     /* Append any remaining endCatches, if any */
8961
8962     assert(!encFinallies == !endLFin);
8963
8964     if (encFinallies == 0)
8965     {
8966         assert(step == DUMMY_INIT(NULL));
8967         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8968
8969         if (endCatches)
8970             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8971
8972 #ifdef DEBUG
8973         if (verbose)
8974         {
8975             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8976                    "block %s\n",
8977                    block->dspToString());
8978         }
8979 #endif
8980     }
8981     else
8982     {
8983         // If leaveTarget is the start of another try block, we want to make sure that
8984         // we do not insert finalStep into that try block. Hence, we find the enclosing
8985         // try block.
8986         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8987
8988         // Insert a new BB either in the try region indicated by tryIndex or
8989         // the handler region indicated by leaveTarget->bbHndIndex,
8990         // depending on which is the inner region.
8991         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8992         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8993         step->bbJumpDest = finalStep;
8994
8995         /* The new block will inherit this block's weight */
8996         finalStep->setBBWeight(block->bbWeight);
8997         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8998
8999 #ifdef DEBUG
9000         if (verbose)
9001         {
9002             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
9003                    finalStep->dspToString());
9004         }
9005 #endif
9006
9007         GenTree* lastStmt;
9008
9009         if (endCatches)
9010         {
9011             lastStmt         = gtNewStmt(endCatches);
9012             endLFin->gtNext  = lastStmt;
9013             lastStmt->gtPrev = endLFin;
9014         }
9015         else
9016         {
9017             lastStmt = endLFin;
9018         }
9019
9020         impEndTreeList(finalStep, endLFin, lastStmt);
9021
9022         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9023
9024         // Queue up the jump target for importing
9025
9026         impImportBlockPending(leaveTarget);
9027
9028         invalidatePreds = true;
9029     }
9030
9031     if (invalidatePreds && fgComputePredsDone)
9032     {
9033         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9034         fgRemovePreds();
9035     }
9036
9037 #ifdef DEBUG
9038     fgVerifyHandlerTab();
9039
9040     if (verbose)
9041     {
9042         printf("\nAfter import CEE_LEAVE:\n");
9043         fgDispBasicBlocks();
9044         fgDispHandlerTab();
9045     }
9046 #endif // DEBUG
9047 }
9048
9049 #else // FEATURE_EH_FUNCLETS
9050
9051 void Compiler::impImportLeave(BasicBlock* block)
9052 {
9053 #ifdef DEBUG
9054     if (verbose)
9055     {
9056         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
9057         fgDispBasicBlocks();
9058         fgDispHandlerTab();
9059     }
9060 #endif // DEBUG
9061
9062     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9063     unsigned    blkAddr         = block->bbCodeOffs;
9064     BasicBlock* leaveTarget     = block->bbJumpDest;
9065     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
9066
9067     // LEAVE clears the stack: spill side effects and set the stack depth to 0.
9068
9069     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9070     verCurrentState.esStackDepth = 0;
9071
9072     assert(block->bbJumpKind == BBJ_LEAVE);
9073     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
9074
9075     BasicBlock* step = nullptr;
9076
9077     enum StepType
9078     {
9079         // No step type; step == NULL.
9080         ST_None,
9081
9082         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
9083         // That is, is step->bbJumpDest where a finally will return to?
9084         ST_FinallyReturn,
9085
9086         // The step block is a catch return.
9087         ST_Catch,
9088
9089         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
9090         ST_Try
9091     };
9092     StepType stepType = ST_None;
9093
9094     unsigned  XTnum;
9095     EHblkDsc* HBtab;
9096
9097     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9098     {
9099         // Grab the handler offsets
9100
9101         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9102         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9103         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9104         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9105
9106         /* Is this a catch-handler we are CEE_LEAVEing out of?
9107          */
9108
9109         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9110         {
9111             // Can't CEE_LEAVE out of a finally/fault handler
9112             if (HBtab->HasFinallyOrFaultHandler())
9113             {
9114                 BADCODE("leave out of fault/finally block");
9115             }
9116
9117             /* We are jumping out of a catch */
9118
9119             if (step == nullptr)
9120             {
9121                 step             = block;
9122                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
9123                 stepType         = ST_Catch;
9124
9125 #ifdef DEBUG
9126                 if (verbose)
9127                 {
9128                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
9129                            "block\n",
9130                            XTnum, step->bbNum);
9131                 }
9132 #endif
9133             }
9134             else
9135             {
9136                 BasicBlock* exitBlock;
9137
9138                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
9139                  * scope */
9140                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
9141
9142                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9143                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
9144                                               // exit) returns to this block
9145                 step->bbJumpDest->bbRefs++;
9146
9147 #if defined(_TARGET_ARM_)
9148                 if (stepType == ST_FinallyReturn)
9149                 {
9150                     assert(step->bbJumpKind == BBJ_ALWAYS);
9151                     // Mark the target of a finally return
9152                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9153                 }
9154 #endif // defined(_TARGET_ARM_)
9155
9156                 /* The new block will inherit this block's weight */
9157                 exitBlock->setBBWeight(block->bbWeight);
9158                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9159
9160                 /* This exit block is the new step */
9161                 step     = exitBlock;
9162                 stepType = ST_Catch;
9163
9164                 invalidatePreds = true;
9165
9166 #ifdef DEBUG
9167                 if (verbose)
9168                 {
9169                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
9170                            exitBlock->bbNum);
9171                 }
9172 #endif
9173             }
9174         }
9175         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9176                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9177         {
9178             /* We are jumping out of a finally-protected try */
9179
9180             BasicBlock* callBlock;
9181
9182             if (step == nullptr)
9183             {
9184 #if FEATURE_EH_CALLFINALLY_THUNKS
9185
9186                 // Put the call to the finally in the enclosing region.
9187                 unsigned callFinallyTryIndex =
9188                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9189                 unsigned callFinallyHndIndex =
9190                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9191                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
9192
9193                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
9194                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
9195                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
9196                 // next block, and flow optimizations will remove it.
9197                 block->bbJumpKind = BBJ_ALWAYS;
9198                 block->bbJumpDest = callBlock;
9199                 block->bbJumpDest->bbRefs++;
9200
9201                 /* The new block will inherit this block's weight */
9202                 callBlock->setBBWeight(block->bbWeight);
9203                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9204
9205 #ifdef DEBUG
9206                 if (verbose)
9207                 {
9208                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9209                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
9210                            XTnum, block->bbNum, callBlock->bbNum);
9211                 }
9212 #endif
9213
9214 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9215
9216                 callBlock             = block;
9217                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9218
9219 #ifdef DEBUG
9220                 if (verbose)
9221                 {
9222                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9223                            "BBJ_CALLFINALLY block\n",
9224                            XTnum, callBlock->bbNum);
9225                 }
9226 #endif
9227
9228 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9229             }
9230             else
9231             {
9232                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
9233                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
9234                 // a 'finally'), or the step block is the return from a catch.
9235                 //
9236                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
9237                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
9238                 // automatically re-raise the exception, using the return address of the catch (that is, the target
9239                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
9240                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
9241                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
9242                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
9243                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
9244                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
9245                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
9246                 // stack walks.)
9247
9248                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9249
9250 #if FEATURE_EH_CALLFINALLY_THUNKS
9251                 if (step->bbJumpKind == BBJ_EHCATCHRET)
9252                 {
9253                     // Need to create another step block in the 'try' region that will actually branch to the
9254                     // call-to-finally thunk.
9255                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9256                     step->bbJumpDest  = step2;
9257                     step->bbJumpDest->bbRefs++;
9258                     step2->setBBWeight(block->bbWeight);
9259                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9260
9261 #ifdef DEBUG
9262                     if (verbose)
9263                     {
9264                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
9265                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
9266                                XTnum, step->bbNum, step2->bbNum);
9267                     }
9268 #endif
9269
9270                     step = step2;
9271                     assert(stepType == ST_Catch); // Leave it as catch type for now.
9272                 }
9273 #endif // FEATURE_EH_CALLFINALLY_THUNKS
9274
9275 #if FEATURE_EH_CALLFINALLY_THUNKS
9276                 unsigned callFinallyTryIndex =
9277                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9278                 unsigned callFinallyHndIndex =
9279                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9280 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
9281                 unsigned callFinallyTryIndex = XTnum + 1;
9282                 unsigned callFinallyHndIndex = 0; // don't care
9283 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9284
9285                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
9286                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9287                                               // finally in the chain)
9288                 step->bbJumpDest->bbRefs++;
9289
9290 #if defined(_TARGET_ARM_)
9291                 if (stepType == ST_FinallyReturn)
9292                 {
9293                     assert(step->bbJumpKind == BBJ_ALWAYS);
9294                     // Mark the target of a finally return
9295                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9296                 }
9297 #endif // defined(_TARGET_ARM_)
9298
9299                 /* The new block will inherit this block's weight */
9300                 callBlock->setBBWeight(block->bbWeight);
9301                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9302
9303 #ifdef DEBUG
9304                 if (verbose)
9305                 {
9306                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
9307                            "BB%02u\n",
9308                            XTnum, callBlock->bbNum);
9309                 }
9310 #endif
9311             }
9312
9313             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9314             stepType = ST_FinallyReturn;
9315
9316             /* The new block will inherit this block's weight */
9317             step->setBBWeight(block->bbWeight);
9318             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9319
9320 #ifdef DEBUG
9321             if (verbose)
9322             {
9323                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9324                        "block BB%02u\n",
9325                        XTnum, step->bbNum);
9326             }
9327 #endif
9328
9329             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9330
9331             invalidatePreds = true;
9332         }
9333         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9334                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9335         {
9336             // We are jumping out of a catch-protected try.
9337             //
9338             // If we are returning from a call to a finally, then we must have a step block within a try
9339             // that is protected by a catch. This is so that when unwinding from that finally (e.g., if code within the
9340             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9341             // and invoke the appropriate catch.
9342             //
9343             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9344             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9345             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9346             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9347             // address of the catch return as the new exception address. That is, the re-raised exception appears to
9348             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9349             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, even though it should.
9350             // For example:
9351             //
9352             // try {
9353             //    try {
9354             //       // something here raises ThreadAbortException
9355             //       LEAVE LABEL_1; // no need to stop at LABEL_2
9356             //    } catch (Exception) {
9357             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9358             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9359             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9360             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9361             //       // need to do this transformation if the current EH block is a try/catch that catches
9362             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
9363             //       // information, so currently we do it for all catch types.
9364             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
9365             //    }
9366             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9367             // } catch (ThreadAbortException) {
9368             // }
9369             // LABEL_1:
9370             //
9371             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
9372             // compiler.
9373
9374             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9375             {
9376                 BasicBlock* catchStep;
9377
9378                 assert(step);
9379
9380                 if (stepType == ST_FinallyReturn)
9381                 {
9382                     assert(step->bbJumpKind == BBJ_ALWAYS);
9383                 }
9384                 else
9385                 {
9386                     assert(stepType == ST_Catch);
9387                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
9388                 }
9389
9390                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9391                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9392                 step->bbJumpDest = catchStep;
9393                 step->bbJumpDest->bbRefs++;
9394
9395 #if defined(_TARGET_ARM_)
9396                 if (stepType == ST_FinallyReturn)
9397                 {
9398                     // Mark the target of a finally return
9399                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9400                 }
9401 #endif // defined(_TARGET_ARM_)
9402
9403                 /* The new block will inherit this block's weight */
9404                 catchStep->setBBWeight(block->bbWeight);
9405                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9406
9407 #ifdef DEBUG
9408                 if (verbose)
9409                 {
9410                     if (stepType == ST_FinallyReturn)
9411                     {
9412                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9413                                "BBJ_ALWAYS block BB%02u\n",
9414                                XTnum, catchStep->bbNum);
9415                     }
9416                     else
9417                     {
9418                         assert(stepType == ST_Catch);
9419                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9420                                "BBJ_ALWAYS block BB%02u\n",
9421                                XTnum, catchStep->bbNum);
9422                     }
9423                 }
9424 #endif // DEBUG
9425
9426                 /* This block is the new step */
9427                 step     = catchStep;
9428                 stepType = ST_Try;
9429
9430                 invalidatePreds = true;
9431             }
9432         }
9433     }
9434
9435     if (step == nullptr)
9436     {
9437         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9438
9439 #ifdef DEBUG
9440         if (verbose)
9441         {
9442             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9443                    "block BB%02u to BBJ_ALWAYS\n",
9444                    block->bbNum);
9445         }
9446 #endif
9447     }
9448     else
9449     {
9450         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9451
9452 #if defined(_TARGET_ARM_)
9453         if (stepType == ST_FinallyReturn)
9454         {
9455             assert(step->bbJumpKind == BBJ_ALWAYS);
9456             // Mark the target of a finally return
9457             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9458         }
9459 #endif // defined(_TARGET_ARM_)
9460
9461 #ifdef DEBUG
9462         if (verbose)
9463         {
9464             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
9465         }
9466 #endif
9467
9468         // Queue up the jump target for importing
9469
9470         impImportBlockPending(leaveTarget);
9471     }
9472
9473     if (invalidatePreds && fgComputePredsDone)
9474     {
9475         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9476         fgRemovePreds();
9477     }
9478
9479 #ifdef DEBUG
9480     fgVerifyHandlerTab();
9481
9482     if (verbose)
9483     {
9484         printf("\nAfter import CEE_LEAVE:\n");
9485         fgDispBasicBlocks();
9486         fgDispHandlerTab();
9487     }
9488 #endif // DEBUG
9489 }
9490
9491 #endif // FEATURE_EH_FUNCLETS
9492
9493 /*****************************************************************************/
9494 // This is called when reimporting a leave block. It resets the JumpKind,
9495 // JumpDest, and bbNext to the original values
9496
9497 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9498 {
9499 #if FEATURE_EH_FUNCLETS
9500     // With EH Funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
9501     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY.   If for some reason we reimport B0,
9502     // it is reset (in this routine) by marking it as ending with BBJ_LEAVE, and further down, when B0 is reimported, we
9503     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
9504     // only predecessor are also considered orphans and become candidates for deletion.
9505     //
9506     //  try  {
9507     //     ....
9508     //     try
9509     //     {
9510     //         ....
9511     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
9512     //     } finally { }
9513     //  } finally { }
9514     //  OUTSIDE:
9515     //
9516     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
9517     // that a finally would branch to (and such a block is marked as a finally target).  Block B1 branches to the step
9518     // block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be
9519     // removed.  To work around this we duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY
9520     // and only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
9521     // will be treated as a pair and handled correctly.
9522     if (block->bbJumpKind == BBJ_CALLFINALLY)
9523     {
9524         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
9525         dupBlock->bbFlags    = block->bbFlags;
9526         dupBlock->bbJumpDest = block->bbJumpDest;
9527         dupBlock->copyEHRegion(block);
9528         dupBlock->bbCatchTyp = block->bbCatchTyp;
9529
9530         // Mark this block as
9531         //  a) not referenced by any other block to make sure that it gets deleted
9532         //  b) weight zero
9533         //  c) prevented from being imported
9534         //  d) as internal
9535         //  e) as rarely run
9536         dupBlock->bbRefs   = 0;
9537         dupBlock->bbWeight = 0;
9538         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
9539
9540         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
9541         // will be next to each other.
9542         fgInsertBBafter(block, dupBlock);
9543
9544 #ifdef DEBUG
9545         if (verbose)
9546         {
9547             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
9548         }
9549 #endif
9550     }
9551 #endif // FEATURE_EH_FUNCLETS
9552
9553     block->bbJumpKind = BBJ_LEAVE;
9554     fgInitBBLookup();
9555     block->bbJumpDest = fgLookupBB(jmpAddr);
9556
9557     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
9558     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
9559     // reason we don't want to remove the block at this point is that if we call
9560     // fgInitBBLookup() again we will do it wrong, as the BBJ_ALWAYS block won't be
9561     // added and the linked list length will differ from fgBBcount.
9562 }
9563
9564 /*****************************************************************************/
9565 // Get the first non-prefix opcode. Used for verification of valid combinations
9566 // of prefixes and actual opcodes.
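// For example (per the loop below): given the IL sequence "unaligned. 1 volatile. ldind.i4", both
// prefixes (and the unaligned. operand) are skipped and CEE_LDIND_I4 is returned; if the stream ends
// before a non-prefix opcode is found, CEE_ILLEGAL is returned.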
9567
9568 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9569 {
9570     while (codeAddr < codeEndp)
9571     {
9572         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9573         codeAddr += sizeof(__int8);
9574
9575         if (opcode == CEE_PREFIX1)
9576         {
9577             if (codeAddr >= codeEndp)
9578             {
9579                 break;
9580             }
9581             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9582             codeAddr += sizeof(__int8);
9583         }
9584
9585         switch (opcode)
9586         {
9587             case CEE_UNALIGNED:
9588             case CEE_VOLATILE:
9589             case CEE_TAILCALL:
9590             case CEE_CONSTRAINED:
9591             case CEE_READONLY:
9592                 break;
9593             default:
9594                 return opcode;
9595         }
9596
9597         codeAddr += opcodeSizes[opcode];
9598     }
9599
9600     return CEE_ILLEGAL;
9601 }
9602
9603 /*****************************************************************************/
9604 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
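// For example (per the opcode set checked below): "volatile. ldsfld" is accepted, since volatile. is
// explicitly allowed on static field accesses, while "unaligned. ldsfld" or "volatile. add" is
// rejected with BADCODE.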
9605
9606 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9607 {
9608     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9609
9610     if (!(
9611             // The opcodes of all ldind and stind instructions happen to be contiguous, except for stind.i.
9612             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9613             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9614             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9615             // volatile. prefix is allowed with the ldsfld and stsfld
9616             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9617     {
9618         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9619     }
9620 }
9621
9622 /*****************************************************************************/
9623
9624 #ifdef DEBUG
9625
9626 #undef RETURN // undef contracts RETURN macro
9627
9628 enum controlFlow_t
9629 {
9630     NEXT,
9631     CALL,
9632     RETURN,
9633     THROW,
9634     BRANCH,
9635     COND_BRANCH,
9636     BREAK,
9637     PHI,
9638     META,
9639 };
9640
9641 const static controlFlow_t controlFlow[] = {
9642 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9643 #include "opcode.def"
9644 #undef OPDEF
9645 };
9646
9647 #endif // DEBUG
9648
9649 /*****************************************************************************
9650  *  Determine the result type of an arithmetic operation
9651  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
9652  */
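// For illustration (mirroring the cases handled below):
//    byref - byref          => native int (TYP_I_IMPL)
//    [native] int - byref   => native int
//    byref +/- [native] int => byref      (for +, in either operand order)
// On 64-bit targets a 32-bit int operand mixed with a native int or byref is first upcast
// to TYP_I_IMPL via an explicit cast node.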
9653 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
9654 {
9655     var_types type = TYP_UNDEF;
9656     GenTree*  op1  = *pOp1;
9657     GenTree*  op2  = *pOp2;
9658
9659     // Arithmetic operations are generally only allowed with
9660     // primitive types, but certain operations are allowed
9661     // with byrefs
9662
9663     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9664     {
9665         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9666         {
9667             // byref1-byref2 => gives a native int
9668             type = TYP_I_IMPL;
9669         }
9670         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9671         {
9672             // [native] int - byref => gives a native int
9673
9674             //
9675             // The reason is that it is possible, in managed C++,
9676             // to have a tree like this:
9677             //
9678             //              -
9679             //             / \
9680             //            /   \
9681             //           /     \
9682             //          /       \
9683             // const(h) int     addr byref
9684             //
9685             // <BUGNUM> VSW 318822 </BUGNUM>
9686             //
9687             // So here we decide to make the resulting type to be a native int.
9688             CLANG_FORMAT_COMMENT_ANCHOR;
9689
9690 #ifdef _TARGET_64BIT_
9691             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9692             {
9693                 // insert an explicit upcast
9694                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9695             }
9696 #endif // _TARGET_64BIT_
9697
9698             type = TYP_I_IMPL;
9699         }
9700         else
9701         {
9702             // byref - [native] int => gives a byref
9703             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9704
9705 #ifdef _TARGET_64BIT_
9706             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9707             {
9708                 // insert an explicit upcast
9709                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9710             }
9711 #endif // _TARGET_64BIT_
9712
9713             type = TYP_BYREF;
9714         }
9715     }
9716     else if ((oper == GT_ADD) &&
9717              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9718     {
9719         // byref + [native] int => gives a byref
9720         // (or)
9721         // [native] int + byref => gives a byref
9722
9723         // only one can be a byref : byref op byref not allowed
9724         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9725         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9726
9727 #ifdef _TARGET_64BIT_
9728         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9729         {
9730             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9731             {
9732                 // insert an explicit upcast
9733                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9734             }
9735         }
9736         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9737         {
9738             // insert an explicit upcast
9739             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9740         }
9741 #endif // _TARGET_64BIT_
9742
9743         type = TYP_BYREF;
9744     }
9745 #ifdef _TARGET_64BIT_
9746     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9747     {
9748         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9749
9750         // int + long => gives long
9751         // long + int => gives long
9752         // we get this because in the IL the long isn't Int64, it's just IntPtr
9753
9754         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9755         {
9756             // insert an explicit upcast
9757             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9758         }
9759         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9760         {
9761             // insert an explicit upcast
9762             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9763         }
9764
9765         type = TYP_I_IMPL;
9766     }
9767 #else  // 32-bit TARGET
9768     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9769     {
9770         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9771
9772         // int + long => gives long
9773         // long + int => gives long
9774
9775         type = TYP_LONG;
9776     }
9777 #endif // _TARGET_64BIT_
9778     else
9779     {
9780         // int + int => gives an int
9781         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9782
9783         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9784                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9785
9786         type = genActualType(op1->gtType);
9787
9788 #if FEATURE_X87_DOUBLES
9789
9790         // For x87, since we only have 1 size of registers, prefer double
9791         // For everybody else, be more precise
9792         if (type == TYP_FLOAT)
9793             type = TYP_DOUBLE;
9794
9795 #else // !FEATURE_X87_DOUBLES
9796
9797         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9798         // Otherwise, turn floats into doubles
9799         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9800         {
9801             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9802             type = TYP_DOUBLE;
9803         }
9804
9805 #endif // FEATURE_X87_DOUBLES
9806     }
9807
9808 #if FEATURE_X87_DOUBLES
9809     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9810 #else  // FEATURE_X87_DOUBLES
9811     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9812 #endif // FEATURE_X87_DOUBLES
9813
9814     return type;
9815 }
9816
9817 //------------------------------------------------------------------------
9818 // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
9819 //
9820 // Arguments:
9821 //   op1 - value to cast
9822 //   pResolvedToken - resolved token for type to cast to
9823 //   isCastClass - true if this is a castclass, false if isinst
9824 //
9825 // Return Value:
9826 //   tree representing optimized cast, or null if no optimization possible
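//
// For example (per the handling below): if the runtime reports that the cast must succeed, the
// result is simply op1; if it must fail, the source type is known exactly (e.g. it is a final class),
// and this is an isinst rather than a castclass, the result is a null constant (and a box feeding
// op1 can be removed as well).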
9827
9828 GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
9829 {
9830     assert(op1->TypeGet() == TYP_REF);
9831
9832     // Don't optimize for minopts or debug codegen.
9833     if (opts.compDbgCode || opts.MinOpts())
9834     {
9835         return nullptr;
9836     }
9837
9838     // See what we know about the type of the object being cast.
9839     bool                 isExact   = false;
9840     bool                 isNonNull = false;
9841     CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
9842     GenTree*             optResult = nullptr;
9843
9844     if (fromClass != nullptr)
9845     {
9846         CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
9847         JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
9848                 isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
9849                 info.compCompHnd->getClassName(toClass));
9850
9851         // Perhaps we know if the cast will succeed or fail.
9852         TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
9853
9854         if (castResult == TypeCompareState::Must)
9855         {
9856             // Cast will succeed, result is simply op1.
9857             JITDUMP("Cast will succeed, optimizing to simply return input\n");
9858             return op1;
9859         }
9860         else if (castResult == TypeCompareState::MustNot)
9861         {
9862             // See if we can sharpen exactness by looking for final classes
9863             if (!isExact)
9864             {
9865                 DWORD flags     = info.compCompHnd->getClassAttribs(fromClass);
9866                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL |
9867                                   CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
9868                 isExact = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9869             }
9870
9871             // Cast to exact type will fail. Handle case where we have
9872             // an exact type (that is, fromClass is not a subtype)
9873             // and we're not going to throw on failure.
9874             if (isExact && !isCastClass)
9875             {
9876                 JITDUMP("Cast will fail, optimizing to return null\n");
9877                 GenTree* result = gtNewIconNode(0, TYP_REF);
9878
9879                 // If the cast was fed by a box, we can remove that too.
9880                 if (op1->IsBoxedValue())
9881                 {
9882                     JITDUMP("Also removing upstream box\n");
9883                     gtTryRemoveBoxUpstreamEffects(op1);
9884                 }
9885
9886                 return result;
9887             }
9888             else if (isExact)
9889             {
9890                 JITDUMP("Not optimizing failing castclass (yet)\n");
9891             }
9892             else
9893             {
9894                 JITDUMP("Can't optimize since fromClass is inexact\n");
9895             }
9896         }
9897         else
9898         {
9899             JITDUMP("Result of cast unknown, must generate runtime test\n");
9900         }
9901     }
9902     else
9903     {
9904         JITDUMP("\nCan't optimize since fromClass is unknown\n");
9905     }
9906
9907     return nullptr;
9908 }
9909
9910 //------------------------------------------------------------------------
9911 // impCastClassOrIsInstToTree: build and import castclass/isinst
9912 //
9913 // Arguments:
9914 //   op1 - value to cast
9915 //   op2 - type handle for type to cast to
9916 //   pResolvedToken - resolved token from the cast operation
9917 //   isCastClass - true if this is castclass, false means isinst
9918 //
9919 // Return Value:
9920 //   Tree representing the cast
9921 //
9922 // Notes:
9923 //   May expand into a series of runtime checks or a helper call.
9924
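//   Roughly, the inline expansion built below is equivalent to:
//
//     (op1 == null) ? op1
//                   : ((methodTableOf(op1) == op2) ? op1
//                                                  : (isCastClass ? helper(op2, op1) : null))
//
//   where the castclass path uses CORINFO_HELP_CHKCASTCLASS_SPECIAL, the helper that skips the
//   checks already performed inline.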
9925 GenTree* Compiler::impCastClassOrIsInstToTree(GenTree*                op1,
9926                                               GenTree*                op2,
9927                                               CORINFO_RESOLVED_TOKEN* pResolvedToken,
9928                                               bool                    isCastClass)
9929 {
9930     assert(op1->TypeGet() == TYP_REF);
9931
9932     // Optimistically assume the jit should expand this as an inline test
9933     bool shouldExpandInline = true;
9934
9935     // Profitability check.
9936     //
9937     // Don't bother with inline expansion when jit is trying to
9938     // generate code quickly, or the cast is in code that won't run very
9939     // often, or the method already is pretty big.
9940     if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
9941     {
9942         // not worth the code expansion if jitting fast or in a rarely run block
9943         shouldExpandInline = false;
9944     }
9945     else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9946     {
9947         // not worth creating an untracked local variable
9948         shouldExpandInline = false;
9949     }
9950
9951     // Pessimistically assume the jit cannot expand this as an inline test
9952     bool                  canExpandInline = false;
9953     const CorInfoHelpFunc helper          = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9954
9955     // Legality check.
9956     //
9957     // Not all castclass/isinst operations can be inline expanded.
9958     // Check legality only if an inline expansion is desirable.
9959     if (shouldExpandInline)
9960     {
9961         if (isCastClass)
9962         {
9963             // Jit can only inline expand the normal CHKCASTCLASS helper.
9964             canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9965         }
9966         else
9967         {
9968             if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9969             {
9970                 // Check the class attributes.
9971                 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9972
9973                 // If the class is final and is not marshal byref or
9974                 // contextful, the jit can expand the IsInst check inline.
9975                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
9976                 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9977             }
9978         }
9979     }
9980
9981     const bool expandInline = canExpandInline && shouldExpandInline;
9982
9983     if (!expandInline)
9984     {
9985         JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
9986                 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
9987
9988         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9989         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9990         //
9991         op2->gtFlags |= GTF_DONT_CSE;
9992
9993         return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
9994     }
9995
9996     JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
9997
9998     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9999
10000     GenTree* temp;
10001     GenTree* condMT;
10002     //
10003     // expand the methodtable match:
10004     //
10005     //  condMT ==>   GT_NE
10006     //               /    \
10007     //           GT_IND   op2 (typically CNS_INT)
10008     //              |
10009     //           op1Copy
10010     //
10011
10012     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
10013     //
10014     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
10015     //
10016     // op1 is now known to be a non-complex tree
10017     // thus we can use gtClone(op1) from now on
10018     //
10019
10020     GenTree* op2Var = op2;
10021     if (isCastClass)
10022     {
10023         op2Var                                                  = fgInsertCommaFormTemp(&op2);
10024         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
10025     }
10026     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
10027     temp->gtFlags |= GTF_EXCEPT;
10028     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
10029
10030     GenTree* condNull;
10031     //
10032     // expand the null check:
10033     //
10034     //  condNull ==>   GT_EQ
10035     //                 /    \
10036     //             op1Copy CNS_INT
10037     //                      null
10038     //
10039     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
10040
10041     //
10042     // expand the true and false trees for the condMT
10043     //
10044     GenTree* condFalse = gtClone(op1);
10045     GenTree* condTrue;
10046     if (isCastClass)
10047     {
10048         //
10049         // use the special helper that skips the cases checked by our inlined cast
10050         //
10051         const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
10052
10053         condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
10054     }
10055     else
10056     {
10057         condTrue = gtNewIconNode(0, TYP_REF);
10058     }
10059
10060 #define USE_QMARK_TREES
10061
10062 #ifdef USE_QMARK_TREES
10063     GenTree* qmarkMT;
10064     //
10065     // Generate first QMARK - COLON tree
10066     //
10067     //  qmarkMT ==>   GT_QMARK
10068     //                 /     \
10069     //            condMT   GT_COLON
10070     //                      /     \
10071     //                condFalse  condTrue
10072     //
10073     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
10074     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
10075     condMT->gtFlags |= GTF_RELOP_QMARK;
10076
10077     GenTree* qmarkNull;
10078     //
10079     // Generate second QMARK - COLON tree
10080     //
10081     //  qmarkNull ==>  GT_QMARK
10082     //                 /     \
10083     //           condNull  GT_COLON
10084     //                      /     \
10085     //                qmarkMT   op1Copy
10086     //
10087     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
10088     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
10089     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
10090     condNull->gtFlags |= GTF_RELOP_QMARK;
10091
10092     // Make QMark node a top level node by spilling it.
10093     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
10094     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
10095
10096     // TODO: Is it possible op1 has a better type?
10097     lvaSetClass(tmp, pResolvedToken->hClass);
10098     return gtNewLclvNode(tmp, TYP_REF);
10099 #endif
10100 }
10101
10102 #ifndef DEBUG
10103 #define assertImp(cond) ((void)0)
10104 #else
10105 #define assertImp(cond)                                                                                                \
10106     do                                                                                                                 \
10107     {                                                                                                                  \
10108         if (!(cond))                                                                                                   \
10109         {                                                                                                              \
10110             const int cchAssertImpBuf = 600;                                                                           \
10111             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
10112             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
10113                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
10114                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
10115                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
10116             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
10117         }                                                                                                              \
10118     } while (0)
10119 #endif // DEBUG
10120
10121 #ifdef _PREFAST_
10122 #pragma warning(push)
10123 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
10124 #endif
10125 /*****************************************************************************
10126  *  Import the instructions for the given basic block
10127  */
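// Roughly: walk the IL from block->bbCodeOffs to block->bbCodeOffsEnd one opcode at a time,
// maintaining the virtual evaluation stack (impPushOnStack / impPopStack) and appending the
// resulting trees to the block's statement list (impAppendTree).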
10128 void Compiler::impImportBlockCode(BasicBlock* block)
10129 {
10130 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
10131
10132 #ifdef DEBUG
10133
10134     if (verbose)
10135     {
10136         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
10137     }
10138 #endif
10139
10140     unsigned  nxtStmtIndex = impInitBlockLineInfo();
10141     IL_OFFSET nxtStmtOffs;
10142
10143     GenTree*                     arrayNodeFrom;
10144     GenTree*                     arrayNodeTo;
10145     GenTree*                     arrayNodeToIndex;
10146     CorInfoHelpFunc              helper;
10147     CorInfoIsAccessAllowedResult accessAllowedResult;
10148     CORINFO_HELPER_DESC          calloutHelper;
10149     const BYTE*                  lastLoadToken = nullptr;
10150
10151     // reject cyclic constraints
10152     if (tiVerificationNeeded)
10153     {
10154         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
10155         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
10156     }
10157
10158     /* Get the tree list started */
10159
10160     impBeginTreeList();
10161
10162     /* Walk the opcodes that comprise the basic block */
10163
10164     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
10165     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
10166
10167     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
10168     IL_OFFSET lastSpillOffs = opcodeOffs;
10169
10170     signed jmpDist;
10171
10172     /* remember the start of the delegate creation sequence (used for verification) */
10173     const BYTE* delegateCreateStart = nullptr;
10174
10175     int  prefixFlags = 0;
10176     bool explicitTailCall, constraintCall, readonlyCall;
10177
10178     typeInfo tiRetVal;
10179
10180     unsigned numArgs = info.compArgsCount;
10181
10182     /* Now process all the opcodes in the block */
10183
10184     var_types callTyp    = TYP_COUNT;
10185     OPCODE    prevOpcode = CEE_ILLEGAL;
10186
10187     if (block->bbCatchTyp)
10188     {
10189         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
10190         {
10191             impCurStmtOffsSet(block->bbCodeOffs);
10192         }
10193
10194         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
10195         // to a temp. This is a trade off for code simplicity
10196         impSpillSpecialSideEff();
10197     }
10198
10199     while (codeAddr < codeEndp)
10200     {
10201         bool                   usingReadyToRunHelper = false;
10202         CORINFO_RESOLVED_TOKEN resolvedToken;
10203         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
10204         CORINFO_CALL_INFO      callInfo;
10205         CORINFO_FIELD_INFO     fieldInfo;
10206
10207         tiRetVal = typeInfo(); // Default type info
10208
10209         //---------------------------------------------------------------------
10210
10211         /* We need to restrict the max tree depth as many of the Compiler
10212            functions are recursive. We do this by spilling the stack */
10213
10214         if (verCurrentState.esStackDepth)
10215         {
10216             /* Has it been a while since we last saw a non-empty stack (which
10217                guarantees that the tree depth isn't accumulating)? */
10218
10219             if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
10220             {
10221                 impSpillStackEnsure();
10222                 lastSpillOffs = opcodeOffs;
10223             }
10224         }
10225         else
10226         {
10227             lastSpillOffs   = opcodeOffs;
10228             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
10229         }
10230
10231         /* Compute the current instr offset */
10232
10233         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10234
10235 #ifndef DEBUG
10236         if (opts.compDbgInfo)
10237 #endif
10238         {
10239             if (!compIsForInlining())
10240             {
10241                 nxtStmtOffs =
10242                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
10243
10244                 /* Have we reached the next stmt boundary ? */
10245
10246                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
10247                 {
10248                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
10249
10250                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
10251                     {
10252                         /* We need to provide accurate IP-mapping at this point.
10253                            So spill anything on the stack so that it will form
10254                            gtStmts with the correct stmt offset noted */
10255
10256                         impSpillStackEnsure(true);
10257                     }
10258
10259                     // Has impCurStmtOffs been reported in any tree?
10260
10261                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
10262                     {
10263                         GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
10264                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10265
10266                         assert(impCurStmtOffs == BAD_IL_OFFSET);
10267                     }
10268
10269                     if (impCurStmtOffs == BAD_IL_OFFSET)
10270                     {
10271                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
10272                            If opcodeOffs has gone past nxtStmtIndex, catch up */
10273
10274                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
10275                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
10276                         {
10277                             nxtStmtIndex++;
10278                         }
10279
10280                         /* Go to the new stmt */
10281
10282                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
10283
10284                         /* Update the stmt boundary index */
10285
10286                         nxtStmtIndex++;
10287                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
10288
10289                         /* Are there any more line# entries after this one? */
10290
10291                         if (nxtStmtIndex < info.compStmtOffsetsCount)
10292                         {
10293                             /* Remember where the next line# starts */
10294
10295                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
10296                         }
10297                         else
10298                         {
10299                             /* No more line# entries */
10300
10301                             nxtStmtOffs = BAD_IL_OFFSET;
10302                         }
10303                     }
10304                 }
10305                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
10306                          (verCurrentState.esStackDepth == 0))
10307                 {
10308                     /* At stack-empty locations, we have already added the tree to
10309                        the stmt list with the last offset. We just need to update
10310                        impCurStmtOffs
10311                      */
10312
10313                     impCurStmtOffsSet(opcodeOffs);
10314                 }
10315                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
10316                          impOpcodeIsCallSiteBoundary(prevOpcode))
10317                 {
10318                     /* Make sure we have a type cached */
10319                     assert(callTyp != TYP_COUNT);
10320
10321                     if (callTyp == TYP_VOID)
10322                     {
10323                         impCurStmtOffsSet(opcodeOffs);
10324                     }
10325                     else if (opts.compDbgCode)
10326                     {
10327                         impSpillStackEnsure(true);
10328                         impCurStmtOffsSet(opcodeOffs);
10329                     }
10330                 }
10331                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
10332                 {
10333                     if (opts.compDbgCode)
10334                     {
10335                         impSpillStackEnsure(true);
10336                     }
10337
10338                     impCurStmtOffsSet(opcodeOffs);
10339                 }
10340
10341                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
10342                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
10343             }
10344         }
10345
10346         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
10347         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
10348         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
10349
10350         var_types       lclTyp, ovflType = TYP_UNKNOWN;
10351         GenTree*        op1           = DUMMY_INIT(NULL);
10352         GenTree*        op2           = DUMMY_INIT(NULL);
10353         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
10354         GenTree*        newObjThisPtr = DUMMY_INIT(NULL);
10355         bool            uns           = DUMMY_INIT(false);
10356         bool            isLocal       = false;
10357
10358         /* Get the next opcode and the size of its parameters */
10359
10360         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10361         codeAddr += sizeof(__int8);
10362
10363 #ifdef DEBUG
10364         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10365         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
10366 #endif
10367
10368     DECODE_OPCODE:
10369
10370         // Return if any previous code has caused inline to fail.
10371         if (compDonotInline())
10372         {
10373             return;
10374         }
10375
10376         /* Get the size of additional parameters */
10377
10378         signed int sz = opcodeSizes[opcode];
10379
10380 #ifdef DEBUG
10381         clsHnd  = NO_CLASS_HANDLE;
10382         lclTyp  = TYP_COUNT;
10383         callTyp = TYP_COUNT;
10384
10385         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10386         impCurOpcName = opcodeNames[opcode];
10387
10388         if (verbose && (opcode != CEE_PREFIX1))
10389         {
10390             printf("%s", impCurOpcName);
10391         }
10392
10393         /* Use assertImp() to display the opcode */
10394
10395         op1 = op2 = nullptr;
10396 #endif
10397
10398         /* See what kind of an opcode we have, then */
10399
10400         unsigned mflags   = 0;
10401         unsigned clsFlags = 0;
10402
10403         switch (opcode)
10404         {
10405             unsigned  lclNum;
10406             var_types type;
10407
10408             GenTree*   op3;
10409             genTreeOps oper;
10410             unsigned   size;
10411
10412             int val;
10413
10414             CORINFO_SIG_INFO     sig;
10415             IL_OFFSET            jmpAddr;
10416             bool                 ovfl, unordered, callNode;
10417             bool                 ldstruct;
10418             CORINFO_CLASS_HANDLE tokenType;
10419
10420             union {
10421                 int     intVal;
10422                 float   fltVal;
10423                 __int64 lngVal;
10424                 double  dblVal;
10425             } cval;
10426
10427             case CEE_PREFIX1:
10428                 opcode     = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10429                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10430                 codeAddr += sizeof(__int8);
10431                 goto DECODE_OPCODE;
10432
10433             SPILL_APPEND:
10434
10435                 // We need to call impSpillLclRefs() for a struct type lclVar.
10436                 // This is done for non-block assignments in the handling of stloc.
10437                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10438                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10439                 {
10440                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10441                 }
10442
10443                 /* Append 'op1' to the list of statements */
10444                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10445                 goto DONE_APPEND;
10446
10447             APPEND:
10448
10449                 /* Append 'op1' to the list of statements */
10450
10451                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10452                 goto DONE_APPEND;
10453
10454             DONE_APPEND:
10455
10456 #ifdef DEBUG
10457                 // Remember at which BC offset the tree was finished
10458                 impNoteLastILoffs();
10459 #endif
10460                 break;
10461
10462             case CEE_LDNULL:
10463                 impPushNullObjRefOnStack();
10464                 break;
10465
10466             case CEE_LDC_I4_M1:
10467             case CEE_LDC_I4_0:
10468             case CEE_LDC_I4_1:
10469             case CEE_LDC_I4_2:
10470             case CEE_LDC_I4_3:
10471             case CEE_LDC_I4_4:
10472             case CEE_LDC_I4_5:
10473             case CEE_LDC_I4_6:
10474             case CEE_LDC_I4_7:
10475             case CEE_LDC_I4_8:
10476                 cval.intVal = (opcode - CEE_LDC_I4_0);
10477                 assert(-1 <= cval.intVal && cval.intVal <= 8);
10478                 goto PUSH_I4CON;
10479
10480             case CEE_LDC_I4_S:
10481                 cval.intVal = getI1LittleEndian(codeAddr);
10482                 goto PUSH_I4CON;
10483             case CEE_LDC_I4:
10484                 cval.intVal = getI4LittleEndian(codeAddr);
10485                 goto PUSH_I4CON;
10486             PUSH_I4CON:
10487                 JITDUMP(" %d", cval.intVal);
10488                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10489                 break;
10490
10491             case CEE_LDC_I8:
10492                 cval.lngVal = getI8LittleEndian(codeAddr);
10493                 JITDUMP(" 0x%016llx", cval.lngVal);
10494                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10495                 break;
10496
10497             case CEE_LDC_R8:
10498                 cval.dblVal = getR8LittleEndian(codeAddr);
10499                 JITDUMP(" %#.17g", cval.dblVal);
10500                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10501                 break;
10502
10503             case CEE_LDC_R4:
10504                 cval.dblVal = getR4LittleEndian(codeAddr);
10505                 JITDUMP(" %#.17g", cval.dblVal);
10506                 {
10507                     GenTree* cnsOp = gtNewDconNode(cval.dblVal);
10508 #if !FEATURE_X87_DOUBLES
10509                     // X87 stack doesn't differentiate between float/double
10510                     // so R4 is treated as R8, but everybody else does
10511                     cnsOp->gtType = TYP_FLOAT;
10512 #endif // FEATURE_X87_DOUBLES
10513                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10514                 }
10515                 break;
10516
10517             case CEE_LDSTR:
10518
10519                 if (compIsForInlining())
10520                 {
10521                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10522                     {
10523                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
10524                         return;
10525                     }
10526                 }
10527
10528                 val = getU4LittleEndian(codeAddr);
10529                 JITDUMP(" %08X", val);
10530                 if (tiVerificationNeeded)
10531                 {
10532                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
10533                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
10534                 }
10535                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
10536
10537                 break;
10538
10539             case CEE_LDARG:
10540                 lclNum = getU2LittleEndian(codeAddr);
10541                 JITDUMP(" %u", lclNum);
10542                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10543                 break;
10544
10545             case CEE_LDARG_S:
10546                 lclNum = getU1LittleEndian(codeAddr);
10547                 JITDUMP(" %u", lclNum);
10548                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10549                 break;
10550
10551             case CEE_LDARG_0:
10552             case CEE_LDARG_1:
10553             case CEE_LDARG_2:
10554             case CEE_LDARG_3:
10555                 lclNum = (opcode - CEE_LDARG_0);
10556                 assert(lclNum >= 0 && lclNum < 4);
10557                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10558                 break;
10559
10560             case CEE_LDLOC:
10561                 lclNum = getU2LittleEndian(codeAddr);
10562                 JITDUMP(" %u", lclNum);
10563                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10564                 break;
10565
10566             case CEE_LDLOC_S:
10567                 lclNum = getU1LittleEndian(codeAddr);
10568                 JITDUMP(" %u", lclNum);
10569                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10570                 break;
10571
10572             case CEE_LDLOC_0:
10573             case CEE_LDLOC_1:
10574             case CEE_LDLOC_2:
10575             case CEE_LDLOC_3:
10576                 lclNum = (opcode - CEE_LDLOC_0);
10577                 assert(lclNum >= 0 && lclNum < 4);
10578                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10579                 break;
10580
10581             case CEE_STARG:
10582                 lclNum = getU2LittleEndian(codeAddr);
10583                 goto STARG;
10584
10585             case CEE_STARG_S:
10586                 lclNum = getU1LittleEndian(codeAddr);
10587             STARG:
10588                 JITDUMP(" %u", lclNum);
10589
10590                 if (tiVerificationNeeded)
10591                 {
10592                     Verify(lclNum < info.compILargsCount, "bad arg num");
10593                 }
10594
10595                 if (compIsForInlining())
10596                 {
10597                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10598                     noway_assert(op1->gtOper == GT_LCL_VAR);
10599                     lclNum = op1->AsLclVar()->gtLclNum;
10600
10601                     goto VAR_ST_VALID;
10602                 }
10603
10604                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10605                 assertImp(lclNum < numArgs);
10606
10607                 if (lclNum == info.compThisArg)
10608                 {
10609                     lclNum = lvaArg0Var;
10610                 }
10611
10612                 // We should have seen this arg write in the prescan
10613                 assert(lvaTable[lclNum].lvHasILStoreOp);
10614
10615                 if (tiVerificationNeeded)
10616                 {
10617                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
10618                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
10619                            "type mismatch");
10620
10621                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10622                     {
10623                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
10624                     }
10625                 }
10626
10627                 goto VAR_ST;
10628
10629             case CEE_STLOC:
10630                 lclNum  = getU2LittleEndian(codeAddr);
10631                 isLocal = true;
10632                 JITDUMP(" %u", lclNum);
10633                 goto LOC_ST;
10634
10635             case CEE_STLOC_S:
10636                 lclNum  = getU1LittleEndian(codeAddr);
10637                 isLocal = true;
10638                 JITDUMP(" %u", lclNum);
10639                 goto LOC_ST;
10640
10641             case CEE_STLOC_0:
10642             case CEE_STLOC_1:
10643             case CEE_STLOC_2:
10644             case CEE_STLOC_3:
10645                 isLocal = true;
10646                 lclNum  = (opcode - CEE_STLOC_0);
10647                 assert(lclNum >= 0 && lclNum < 4);
10648
10649             LOC_ST:
10650                 if (tiVerificationNeeded)
10651                 {
10652                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10653                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
10654                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
10655                            "type mismatch");
10656                 }
10657
10658                 if (compIsForInlining())
10659                 {
10660                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10661
10662                     /* Have we allocated a temp for this local? */
10663
10664                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
10665
10666                     goto _PopValue;
10667                 }
10668
10669                 lclNum += numArgs;
10670
10671             VAR_ST:
10672
10673                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10674                 {
10675                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10676                     BADCODE("Bad IL");
10677                 }
10678
10679             VAR_ST_VALID:
10680
10681                 /* if it is a struct assignment, make certain we don't overflow the buffer */
10682                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10683
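                      // Normalize-on-load locals are kept in their small (signed/unsigned) type and are
                      // widened on every load, so the store below uses the real (small) type; for other
                      // locals we store using the widened stack ("actual") type.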
10684                 if (lvaTable[lclNum].lvNormalizeOnLoad())
10685                 {
10686                     lclTyp = lvaGetRealType(lclNum);
10687                 }
10688                 else
10689                 {
10690                     lclTyp = lvaGetActualType(lclNum);
10691                 }
10692
10693             _PopValue:
10694                 /* Pop the value being assigned */
10695
10696                 {
10697                     StackEntry se = impPopStack();
10698                     clsHnd        = se.seTypeInfo.GetClassHandle();
10699                     op1           = se.val;
10700                     tiRetVal      = se.seTypeInfo;
10701                 }
10702
10703 #ifdef FEATURE_SIMD
10704                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10705                 {
10706                     assert(op1->TypeGet() == TYP_STRUCT);
10707                     op1->gtType = lclTyp;
10708                 }
10709 #endif // FEATURE_SIMD
10710
10711                 op1 = impImplicitIorI4Cast(op1, lclTyp);
10712
10713 #ifdef _TARGET_64BIT_
10714                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10715                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10716                 {
10717                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10718                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
10719                 }
10720 #endif // _TARGET_64BIT_
10721
10722                 // We had better assign it a value of the correct type
10723                 assertImp(
10724                     genActualType(lclTyp) == genActualType(op1->gtType) ||
10725                     (genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr()) ||
10726                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10727                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10728                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10729                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10730
10731                 /* If op1 is "&var" then its type is the transient "*" and it can
10732                    be used either as TYP_BYREF or TYP_I_IMPL */
10733
10734                 if (op1->IsVarAddr())
10735                 {
10736                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10737
10738                     /* When "&var" is created, we assume it is a byref. If it is
10739                        being assigned to a TYP_I_IMPL var, change the type to
10740                        prevent unnecessary GC info */
10741
10742                     if (genActualType(lclTyp) == TYP_I_IMPL)
10743                     {
10744                         op1->gtType = TYP_I_IMPL;
10745                     }
10746                 }
10747
10748                 // If this is a local and the local is a ref type, see
10749                 // if we can improve type information based on the
10750                 // value being assigned.
10751                 if (isLocal && (lclTyp == TYP_REF))
10752                 {
10753                     // We should have seen a stloc in our IL prescan.
10754                     assert(lvaTable[lclNum].lvHasILStoreOp);
10755
10756                     const bool isSingleILStoreLocal =
10757                         !lvaTable[lclNum].lvHasMultipleILStoreOp && !lvaTable[lclNum].lvHasLdAddrOp;
10758
10759                     // Conservative check that there is just one
10760                     // definition that reaches this store.
10761                     const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
10762
10763                     if (isSingleILStoreLocal && hasSingleReachingDef)
10764                     {
10765                         lvaUpdateClass(lclNum, op1, clsHnd);
10766                     }
10767                 }
10768
10769                 /* Filter out simple assignments to itself */
10770
10771                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10772                 {
10773                     if (opts.compDbgCode)
10774                     {
10775                         op1 = gtNewNothingNode();
10776                         goto SPILL_APPEND;
10777                     }
10778                     else
10779                     {
10780                         break;
10781                     }
10782                 }
10783
10784                 /* Create the assignment node */
10785
10786                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10787
10788                 /* If the local is aliased or pinned, we need to spill calls and
10789                    indirections from the stack. */
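                      // For example, the evaluation stack may still hold an indirection through this
                      // local's address (ldloca V; ldind.i4; ...; stloc V); that pending read must be
                      // spilled to a temp so it does not observe the store we are about to append.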
10790
10791                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
10792                     (verCurrentState.esStackDepth > 0))
10793                 {
10794                     impSpillSideEffects(false,
10795                                         (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
10796                 }
10797
10798                 /* Spill any refs to the local from the stack */
10799
10800                 impSpillLclRefs(lclNum);
10801
10802 #if !FEATURE_X87_DOUBLES
10803                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10804                 // We insert a cast to the dest 'op2' type
10805                 //
10806                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10807                     varTypeIsFloating(op2->gtType))
10808                 {
10809                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10810                 }
10811 #endif // !FEATURE_X87_DOUBLES
10812
10813                 if (varTypeIsStruct(lclTyp))
10814                 {
10815                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10816                 }
10817                 else
10818                 {
10819                     // The code generator generates GC tracking information
10820                 // based on the RHS of the assignment.  Later the LHS (which
10821                     // is a BYREF) gets used and the emitter checks that that variable
10822                     // is being tracked.  It is not (since the RHS was an int and did
10823                     // not need tracking).  To keep this assert happy, we change the RHS
10824                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10825                     {
10826                         op1->gtType = TYP_BYREF;
10827                     }
10828                     op1 = gtNewAssignNode(op2, op1);
10829                 }
10830
10831                 goto SPILL_APPEND;
10832
10833             case CEE_LDLOCA:
10834                 lclNum = getU2LittleEndian(codeAddr);
10835                 goto LDLOCA;
10836
10837             case CEE_LDLOCA_S:
10838                 lclNum = getU1LittleEndian(codeAddr);
10839             LDLOCA:
10840                 JITDUMP(" %u", lclNum);
10841                 if (tiVerificationNeeded)
10842                 {
10843                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10844                     Verify(info.compInitMem, "initLocals not set");
10845                 }
10846
10847                 if (compIsForInlining())
10848                 {
10849                     // Get the local type
10850                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10851
10852                     /* Have we allocated a temp for this local? */
10853
10854                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10855
10856                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10857
10858                     goto _PUSH_ADRVAR;
10859                 }
10860
10861                 lclNum += numArgs;
10862                 assertImp(lclNum < info.compLocalsCount);
10863                 goto ADRVAR;
10864
10865             case CEE_LDARGA:
10866                 lclNum = getU2LittleEndian(codeAddr);
10867                 goto LDARGA;
10868
10869             case CEE_LDARGA_S:
10870                 lclNum = getU1LittleEndian(codeAddr);
10871             LDARGA:
10872                 JITDUMP(" %u", lclNum);
10873                 Verify(lclNum < info.compILargsCount, "bad arg num");
10874
10875                 if (compIsForInlining())
10876                 {
10877                     // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10878                     // followed by a ldfld to load the field.
10879
10880                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10881                     if (op1->gtOper != GT_LCL_VAR)
10882                     {
10883                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10884                         return;
10885                     }
10886
10887                     assert(op1->gtOper == GT_LCL_VAR);
10888
10889                     goto _PUSH_ADRVAR;
10890                 }
10891
10892                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10893                 assertImp(lclNum < numArgs);
10894
10895                 if (lclNum == info.compThisArg)
10896                 {
10897                     lclNum = lvaArg0Var;
10898                 }
10899
10900                 goto ADRVAR;
10901
10902             ADRVAR:
10903
10904                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10905
10906             _PUSH_ADRVAR:
10907                 assert(op1->gtOper == GT_LCL_VAR);
10908
10909                 /* Note that this is supposed to create the transient type "*"
10910                    which may be used as a TYP_I_IMPL. However we catch places
10911                    where it is used as a TYP_I_IMPL and change the node if needed.
10912                    Thus we are pessimistic and may report byrefs in the GC info
10913                    where they were not absolutely needed, but it is safer this way.
10914                  */
10915                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10916
10917                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10918                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10919
10920                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10921                 if (tiVerificationNeeded)
10922                 {
10923                     // Don't allow taking address of uninit this ptr.
10924                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10925                     {
10926                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10927                     }
10928
10929                     if (!tiRetVal.IsByRef())
10930                     {
10931                         tiRetVal.MakeByRef();
10932                     }
10933                     else
10934                     {
10935                         Verify(false, "byref to byref");
10936                     }
10937                 }
10938
10939                 impPushOnStack(op1, tiRetVal);
10940                 break;
10941
10942             case CEE_ARGLIST:
10943
10944                 if (!info.compIsVarArgs)
10945                 {
10946                     BADCODE("arglist in non-vararg method");
10947                 }
10948
10949                 if (tiVerificationNeeded)
10950                 {
10951                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10952                 }
10953                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10954
10955                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10956                    adjusted the arg count because this is like fetching the last param */
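                      // The value pushed is the address (a byref) of that hidden varargs cookie
                      // argument; managed code typically hands it to System.ArgIterator to walk
                      // the variable arguments.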
10957                 assertImp(0 < numArgs);
10958                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10959                 lclNum = lvaVarargsHandleArg;
10960                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10961                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10962                 impPushOnStack(op1, tiRetVal);
10963                 break;
10964
10965             case CEE_ENDFINALLY:
10966
10967                 if (compIsForInlining())
10968                 {
10969                     assert(!"Shouldn't have exception handlers in the inliner!");
10970                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10971                     return;
10972                 }
10973
10974                 if (verCurrentState.esStackDepth > 0)
10975                 {
10976                     impEvalSideEffects();
10977                 }
10978
10979                 if (info.compXcptnsCount == 0)
10980                 {
10981                     BADCODE("endfinally outside finally");
10982                 }
10983
10984                 assert(verCurrentState.esStackDepth == 0);
10985
10986                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10987                 goto APPEND;
10988
10989             case CEE_ENDFILTER:
10990
10991                 if (compIsForInlining())
10992                 {
10993                     assert(!"Shouldn't have exception handlers in the inliner!");
10994                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10995                     return;
10996                 }
10997
10998                 block->bbSetRunRarely(); // filters are rare
10999
11000                 if (info.compXcptnsCount == 0)
11001                 {
11002                     BADCODE("endfilter outside filter");
11003                 }
11004
11005                 if (tiVerificationNeeded)
11006                 {
11007                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
11008                 }
11009
11010                 op1 = impPopStack().val;
11011                 assertImp(op1->gtType == TYP_INT);
11012                 if (!bbInFilterILRange(block))
11013                 {
11014                     BADCODE("EndFilter outside a filter handler");
11015                 }
11016
11017                 /* Mark current bb as end of filter */
11018
11019                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
11020                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
11021
11022                 /* Mark catch handler as successor */
11023
11024                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
11025                 if (verCurrentState.esStackDepth != 0)
11026                 {
11027                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
11028                                                 DEBUGARG(__LINE__));
11029                 }
11030                 goto APPEND;
11031
11032             case CEE_RET:
11033                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
11034             RET:
11035                 if (!impReturnInstruction(block, prefixFlags, opcode))
11036                 {
11037                     return; // abort
11038                 }
11039                 else
11040                 {
11041                     break;
11042                 }
11043
11044             case CEE_JMP:
11045
11046                 assert(!compIsForInlining());
11047
11048                 if (tiVerificationNeeded)
11049                 {
11050                     Verify(false, "Invalid opcode: CEE_JMP");
11051                 }
11052
11053                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
11054                 {
11055                     /* CEE_JMP does not make sense in some "protected" regions. */
11056
11057                     BADCODE("Jmp not allowed in protected region");
11058                 }
11059
11060                 if (verCurrentState.esStackDepth != 0)
11061                 {
11062                     BADCODE("Stack must be empty after CEE_JMPs");
11063                 }
11064
11065                 _impResolveToken(CORINFO_TOKENKIND_Method);
11066
11067                 JITDUMP(" %08X", resolvedToken.token);
11068
11069                 /* The signature of the target has to be identical to ours.
11070                    At least check that argCnt and returnType match */
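                      // (CEE_JMP transfers control to the target using the caller's own incoming
                      // arguments, which is why the evaluation stack must be empty and the
                      // signatures must match.)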
11071
11072                 eeGetMethodSig(resolvedToken.hMethod, &sig);
11073                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
11074                     sig.retType != info.compMethodInfo->args.retType ||
11075                     sig.callConv != info.compMethodInfo->args.callConv)
11076                 {
11077                     BADCODE("Incompatible target for CEE_JMPs");
11078                 }
11079
11080                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
11081
11082                 /* Mark the basic block as being a JUMP instead of RETURN */
11083
11084                 block->bbFlags |= BBF_HAS_JMP;
11085
11086                 /* Set this flag to make sure register arguments have a location assigned
11087                  * even if we don't use them inside the method */
11088
11089                 compJmpOpUsed = true;
11090
11091                 fgNoStructPromotion = true;
11092
11093                 goto APPEND;
11094
11095             case CEE_LDELEMA:
11096                 assertImp(sz == sizeof(unsigned));
11097
11098                 _impResolveToken(CORINFO_TOKENKIND_Class);
11099
11100                 JITDUMP(" %08X", resolvedToken.token);
11101
11102                 ldelemClsHnd = resolvedToken.hClass;
11103
11104                 if (tiVerificationNeeded)
11105                 {
11106                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11107                     typeInfo tiIndex = impStackTop().seTypeInfo;
11108
11109                     // As per ECMA, the specified 'index' can be either int32 or native int.
11110                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11111
11112                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
11113                     Verify(tiArray.IsNullObjRef() ||
11114                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
11115                            "bad array");
11116
11117                     tiRetVal = arrayElemType;
11118                     tiRetVal.MakeByRef();
11119                     if (prefixFlags & PREFIX_READONLY)
11120                     {
11121                         tiRetVal.SetIsReadonlyByRef();
11122                     }
11123
11124                     // an array interior pointer is always in the heap
11125                     tiRetVal.SetIsPermanentHomeByRef();
11126                 }
11127
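                      // Three cases follow: value-class element arrays and 'readonly.'-prefixed accesses
                      // can simply compute the element address, while a writable access to an array of a
                      // reference type needs the CORINFO_HELP_LDELEMA_REF helper so the array's actual
                      // element type can be checked at run time (arrays are covariant).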
11128                 // If it's a value class array we just do a simple address-of
11129                 if (eeIsValueClass(ldelemClsHnd))
11130                 {
11131                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
11132                     if (cit == CORINFO_TYPE_UNDEF)
11133                     {
11134                         lclTyp = TYP_STRUCT;
11135                     }
11136                     else
11137                     {
11138                         lclTyp = JITtype2varType(cit);
11139                     }
11140                     goto ARR_LD_POST_VERIFY;
11141                 }
11142
11143                 // Similarly, if it's a readonly access, we can do a simple address-of
11144                 // without doing a runtime type-check
11145                 if (prefixFlags & PREFIX_READONLY)
11146                 {
11147                     lclTyp = TYP_REF;
11148                     goto ARR_LD_POST_VERIFY;
11149                 }
11150
11151                 // Otherwise we need the full helper function with run-time type check
11152                 op1 = impTokenToHandle(&resolvedToken);
11153                 if (op1 == nullptr)
11154                 { // compDonotInline()
11155                     return;
11156                 }
11157
11158                 args = gtNewArgList(op1);                      // Type
11159                 args = gtNewListNode(impPopStack().val, args); // index
11160                 args = gtNewListNode(impPopStack().val, args); // array
11161                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
11162
11163                 impPushOnStack(op1, tiRetVal);
11164                 break;
11165
11166             // ldelem for reference and value types
11167             case CEE_LDELEM:
11168                 assertImp(sz == sizeof(unsigned));
11169
11170                 _impResolveToken(CORINFO_TOKENKIND_Class);
11171
11172                 JITDUMP(" %08X", resolvedToken.token);
11173
11174                 ldelemClsHnd = resolvedToken.hClass;
11175
11176                 if (tiVerificationNeeded)
11177                 {
11178                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11179                     typeInfo tiIndex = impStackTop().seTypeInfo;
11180
11181                     // As per ECMA, the specified 'index' can be either int32 or native int.
11182                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11183                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
11184
11185                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
11186                            "type of array incompatible with type operand");
11187                     tiRetVal.NormaliseForStack();
11188                 }
11189
11190                 // If it's a reference type or generic variable type
11191                 // then just generate code as though it's a ldelem.ref instruction
11192                 if (!eeIsValueClass(ldelemClsHnd))
11193                 {
11194                     lclTyp = TYP_REF;
11195                     opcode = CEE_LDELEM_REF;
11196                 }
11197                 else
11198                 {
11199                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
11200                     lclTyp             = JITtype2varType(jitTyp);
11201                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
11202                     tiRetVal.NormaliseForStack();
11203                 }
11204                 goto ARR_LD_POST_VERIFY;
11205
11206             case CEE_LDELEM_I1:
11207                 lclTyp = TYP_BYTE;
11208                 goto ARR_LD;
11209             case CEE_LDELEM_I2:
11210                 lclTyp = TYP_SHORT;
11211                 goto ARR_LD;
11212             case CEE_LDELEM_I:
11213                 lclTyp = TYP_I_IMPL;
11214                 goto ARR_LD;
11215
11216             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
11217             // and treating it as TYP_INT avoids other asserts.
11218             case CEE_LDELEM_U4:
11219                 lclTyp = TYP_INT;
11220                 goto ARR_LD;
11221
11222             case CEE_LDELEM_I4:
11223                 lclTyp = TYP_INT;
11224                 goto ARR_LD;
11225             case CEE_LDELEM_I8:
11226                 lclTyp = TYP_LONG;
11227                 goto ARR_LD;
11228             case CEE_LDELEM_REF:
11229                 lclTyp = TYP_REF;
11230                 goto ARR_LD;
11231             case CEE_LDELEM_R4:
11232                 lclTyp = TYP_FLOAT;
11233                 goto ARR_LD;
11234             case CEE_LDELEM_R8:
11235                 lclTyp = TYP_DOUBLE;
11236                 goto ARR_LD;
11237             case CEE_LDELEM_U1:
11238                 lclTyp = TYP_UBYTE;
11239                 goto ARR_LD;
11240             case CEE_LDELEM_U2:
11241                 lclTyp = TYP_USHORT;
11242                 goto ARR_LD;
11243
11244             ARR_LD:
11245
11246                 if (tiVerificationNeeded)
11247                 {
11248                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11249                     typeInfo tiIndex = impStackTop().seTypeInfo;
11250
11251                     // As per ECMA, the specified 'index' can be either int32 or native int.
11252                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11253                     if (tiArray.IsNullObjRef())
11254                     {
11255                         if (lclTyp == TYP_REF)
11256                         { // we will say a deref of a null array yields a null ref
11257                             tiRetVal = typeInfo(TI_NULL);
11258                         }
11259                         else
11260                         {
11261                             tiRetVal = typeInfo(lclTyp);
11262                         }
11263                     }
11264                     else
11265                     {
11266                         tiRetVal             = verGetArrayElemType(tiArray);
11267                         typeInfo arrayElemTi = typeInfo(lclTyp);
11268 #ifdef _TARGET_64BIT_
11269                         if (opcode == CEE_LDELEM_I)
11270                         {
11271                             arrayElemTi = typeInfo::nativeInt();
11272                         }
11273
11274                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
11275                         {
11276                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
11277                         }
11278                         else
11279 #endif // _TARGET_64BIT_
11280                         {
11281                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
11282                         }
11283                     }
11284                     tiRetVal.NormaliseForStack();
11285                 }
11286             ARR_LD_POST_VERIFY:
11287
11288                 /* Pull the index value and array address */
11289                 op2 = impPopStack().val;
11290                 op1 = impPopStack().val;
11291                 assertImp(op1->gtType == TYP_REF);
11292
11293                 /* Check for null pointer - in the inliner case we simply abort */
11294
11295                 if (compIsForInlining())
11296                 {
11297                     if (op1->gtOper == GT_CNS_INT)
11298                     {
11299                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
11300                         return;
11301                     }
11302                 }
11303
11304                 op1 = impCheckForNullPointer(op1);
11305
11306                 /* Mark the block as containing an index expression */
11307
11308                 if (op1->gtOper == GT_LCL_VAR)
11309                 {
11310                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
11311                     {
11312                         block->bbFlags |= BBF_HAS_IDX_LEN;
11313                         optMethodFlags |= OMF_HAS_ARRAYREF;
11314                     }
11315                 }
11316
11317                 /* Create the index node and push it on the stack */
11318
11319                 op1 = gtNewIndexRef(lclTyp, op1, op2);
11320
11321                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
11322
11323                 if ((opcode == CEE_LDELEMA) || ldstruct ||
11324                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
11325                 {
11326                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
11327
11328                     // remember the element size
11329                     if (lclTyp == TYP_REF)
11330                     {
11331                         op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
11332                     }
11333                     else
11334                     {
11335                         // If ldElemClass is precisely a primitive type, use that; otherwise, preserve the struct type.
11336                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
11337                         {
11338                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
11339                         }
11340                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
11341                         if (lclTyp == TYP_STRUCT)
11342                         {
11343                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
11344                             op1->gtIndex.gtIndElemSize = size;
11345                             op1->gtType                = lclTyp;
11346                         }
11347                     }
11348
11349                     if ((opcode == CEE_LDELEMA) || ldstruct)
11350                     {
11351                         // wrap it in a &
11352                         lclTyp = TYP_BYREF;
11353
11354                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
11355                     }
11356                     else
11357                     {
11358                         assert(lclTyp != TYP_STRUCT);
11359                     }
11360                 }
11361
11362                 if (ldstruct)
11363                 {
11364                     // Create an OBJ for the result
11365                     op1 = gtNewObjNode(ldelemClsHnd, op1);
11366                     op1->gtFlags |= GTF_EXCEPT;
11367                 }
11368                 impPushOnStack(op1, tiRetVal);
11369                 break;
11370
11371             // stelem for reference and value types
11372             case CEE_STELEM:
11373
11374                 assertImp(sz == sizeof(unsigned));
11375
11376                 _impResolveToken(CORINFO_TOKENKIND_Class);
11377
11378                 JITDUMP(" %08X", resolvedToken.token);
11379
11380                 stelemClsHnd = resolvedToken.hClass;
11381
11382                 if (tiVerificationNeeded)
11383                 {
11384                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11385                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11386                     typeInfo tiValue = impStackTop().seTypeInfo;
11387
11388                     // As per ECMA, the specified 'index' can be either int32 or native int.
11389                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11390                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11391
11392                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11393                            "type operand incompatible with array element type");
11394                     arrayElem.NormaliseForStack();
11395                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11396                 }
11397
11398                 // If it's a reference type just behave as though it's a stelem.ref instruction
11399                 if (!eeIsValueClass(stelemClsHnd))
11400                 {
11401                     goto STELEM_REF_POST_VERIFY;
11402                 }
11403
11404                 // Otherwise extract the type
11405                 {
11406                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11407                     lclTyp             = JITtype2varType(jitTyp);
11408                     goto ARR_ST_POST_VERIFY;
11409                 }
11410
11411             case CEE_STELEM_REF:
11412
11413                 if (tiVerificationNeeded)
11414                 {
11415                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11416                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11417                     typeInfo tiValue = impStackTop().seTypeInfo;
11418
11419                     // As per ECMA, the specified 'index' can be either int32 or native int.
11420                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11421                     Verify(tiValue.IsObjRef(), "bad value");
11422
11423                     // We only check that it is an object reference; the helper does additional checks
11424                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11425                 }
11426
11427             STELEM_REF_POST_VERIFY:
11428
11429                 arrayNodeTo      = impStackTop(2).val;
11430                 arrayNodeToIndex = impStackTop(1).val;
11431                 arrayNodeFrom    = impStackTop().val;
11432
11433                 //
11434                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11435                 // lot of cases because of covariance, e.g. foo[] can be cast to object[].
11436                 //
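                      // For example (C# view):
                      //     object[] arr = new string[1];
                      //     arr[0] = new object();   // must throw ArrayTypeMismatchException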
11437
11438                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
11439                 // This does not need CORINFO_HELP_ARRADDR_ST
11440                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11441                     arrayNodeTo->gtOper == GT_LCL_VAR &&
11442                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11443                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11444                 {
11445                     JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
11446                     lclTyp = TYP_REF;
11447                     goto ARR_ST_POST_VERIFY;
11448                 }
11449
11450                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11451                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11452                 {
11453                     JITDUMP("\nstelem of null: skipping covariant store check\n");
11454                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11455                     lclTyp = TYP_REF;
11456                     goto ARR_ST_POST_VERIFY;
11457                 }
11458
11459                 /* Call a helper function to do the assignment */
11460                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11461
11462                 goto SPILL_APPEND;
11463
11464             case CEE_STELEM_I1:
11465                 lclTyp = TYP_BYTE;
11466                 goto ARR_ST;
11467             case CEE_STELEM_I2:
11468                 lclTyp = TYP_SHORT;
11469                 goto ARR_ST;
11470             case CEE_STELEM_I:
11471                 lclTyp = TYP_I_IMPL;
11472                 goto ARR_ST;
11473             case CEE_STELEM_I4:
11474                 lclTyp = TYP_INT;
11475                 goto ARR_ST;
11476             case CEE_STELEM_I8:
11477                 lclTyp = TYP_LONG;
11478                 goto ARR_ST;
11479             case CEE_STELEM_R4:
11480                 lclTyp = TYP_FLOAT;
11481                 goto ARR_ST;
11482             case CEE_STELEM_R8:
11483                 lclTyp = TYP_DOUBLE;
11484                 goto ARR_ST;
11485
11486             ARR_ST:
11487
11488                 if (tiVerificationNeeded)
11489                 {
11490                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11491                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11492                     typeInfo tiValue = impStackTop().seTypeInfo;
11493
11494                     // As per ECMA, the specified 'index' can be either int32 or native int.
11495                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11496                     typeInfo arrayElem = typeInfo(lclTyp);
11497 #ifdef _TARGET_64BIT_
11498                     if (opcode == CEE_STELEM_I)
11499                     {
11500                         arrayElem = typeInfo::nativeInt();
11501                     }
11502 #endif // _TARGET_64BIT_
11503                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11504                            "bad array");
11505
11506                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11507                            "bad value");
11508                 }
11509
11510             ARR_ST_POST_VERIFY:
11511                 /* The strict order of evaluation is LHS-operands, RHS-operands,
11512                    range-check, and then assignment. However, codegen currently
11513                    does the range-check before evaluating the RHS-operands. So to
11514                    maintain strict ordering, we spill the stack. */
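                      // For example, for "a[i] = F()" where 'i' is out of range and F() throws, F() must
                      // be evaluated (and throw) before the range check can fail; spilling any pending
                      // side effects here preserves that order.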
11515
11516                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11517                 {
11518                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11519                                                    "Strict ordering of exceptions for Array store"));
11520                 }
11521
11522                 /* Pull the new value from the stack */
11523                 op2 = impPopStack().val;
11524
11525                 /* Pull the index value */
11526                 op1 = impPopStack().val;
11527
11528                 /* Pull the array address */
11529                 op3 = impPopStack().val;
11530
11531                 assertImp(op3->gtType == TYP_REF);
11532                 if (op2->IsVarAddr())
11533                 {
11534                     op2->gtType = TYP_I_IMPL;
11535                 }
11536
11537                 op3 = impCheckForNullPointer(op3);
11538
11539                 // Mark the block as containing an index expression
11540
11541                 if (op3->gtOper == GT_LCL_VAR)
11542                 {
11543                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
11544                     {
11545                         block->bbFlags |= BBF_HAS_IDX_LEN;
11546                         optMethodFlags |= OMF_HAS_ARRAYREF;
11547                     }
11548                 }
11549
11550                 /* Create the index node */
11551
11552                 op1 = gtNewIndexRef(lclTyp, op3, op1);
11553
11554                 /* Create the assignment node and append it */
11555
11556                 if (lclTyp == TYP_STRUCT)
11557                 {
11558                     assert(stelemClsHnd != DUMMY_INIT(NULL));
11559
11560                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
11561                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
11562                 }
11563                 if (varTypeIsStruct(op1))
11564                 {
11565                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
11566                 }
11567                 else
11568                 {
11569                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
11570                     op1 = gtNewAssignNode(op1, op2);
11571                 }
11572
11573                 /* Mark the expression as containing an assignment */
11574
11575                 op1->gtFlags |= GTF_ASG;
11576
11577                 goto SPILL_APPEND;
11578
11579             case CEE_ADD:
11580                 oper = GT_ADD;
11581                 goto MATH_OP2;
11582
11583             case CEE_ADD_OVF:
11584                 uns = false;
11585                 goto ADD_OVF;
11586             case CEE_ADD_OVF_UN:
11587                 uns = true;
11588                 goto ADD_OVF;
11589
11590             ADD_OVF:
11591                 ovfl     = true;
11592                 callNode = false;
11593                 oper     = GT_ADD;
11594                 goto MATH_OP2_FLAGS;
11595
11596             case CEE_SUB:
11597                 oper = GT_SUB;
11598                 goto MATH_OP2;
11599
11600             case CEE_SUB_OVF:
11601                 uns = false;
11602                 goto SUB_OVF;
11603             case CEE_SUB_OVF_UN:
11604                 uns = true;
11605                 goto SUB_OVF;
11606
11607             SUB_OVF:
11608                 ovfl     = true;
11609                 callNode = false;
11610                 oper     = GT_SUB;
11611                 goto MATH_OP2_FLAGS;
11612
11613             case CEE_MUL:
11614                 oper = GT_MUL;
11615                 goto MATH_MAYBE_CALL_NO_OVF;
11616
11617             case CEE_MUL_OVF:
11618                 uns = false;
11619                 goto MUL_OVF;
11620             case CEE_MUL_OVF_UN:
11621                 uns = true;
11622                 goto MUL_OVF;
11623
11624             MUL_OVF:
11625                 ovfl = true;
11626                 oper = GT_MUL;
11627                 goto MATH_MAYBE_CALL_OVF;
11628
11629             // Other binary math operations
11630
11631             case CEE_DIV:
11632                 oper = GT_DIV;
11633                 goto MATH_MAYBE_CALL_NO_OVF;
11634
11635             case CEE_DIV_UN:
11636                 oper = GT_UDIV;
11637                 goto MATH_MAYBE_CALL_NO_OVF;
11638
11639             case CEE_REM:
11640                 oper = GT_MOD;
11641                 goto MATH_MAYBE_CALL_NO_OVF;
11642
11643             case CEE_REM_UN:
11644                 oper = GT_UMOD;
11645                 goto MATH_MAYBE_CALL_NO_OVF;
11646
11647             MATH_MAYBE_CALL_NO_OVF:
11648                 ovfl = false;
11649             MATH_MAYBE_CALL_OVF:
11650                 // Morpher has some complex logic about when to turn different
11651                 // typed nodes on different platforms into helper calls. We
11652                 // need to either duplicate that logic here, or just
11653                 // pessimistically make all the nodes large enough to become
11654                 // call nodes.  Since call nodes aren't that much larger and
11655                 // these opcodes are infrequent enough I chose the latter.
11656                 callNode = true;
11657                 goto MATH_OP2_FLAGS;
11658
11659             case CEE_AND:
11660                 oper = GT_AND;
11661                 goto MATH_OP2;
11662             case CEE_OR:
11663                 oper = GT_OR;
11664                 goto MATH_OP2;
11665             case CEE_XOR:
11666                 oper = GT_XOR;
11667                 goto MATH_OP2;
11668
11669             MATH_OP2: // For default values of 'ovfl' and 'callNode'
11670
11671                 ovfl     = false;
11672                 callNode = false;
11673
11674             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11675
11676                 /* Pull two values and push back the result */
11677
11678                 if (tiVerificationNeeded)
11679                 {
11680                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11681                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11682
11683                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11684                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11685                     {
11686                         Verify(tiOp1.IsNumberType(), "not number");
11687                     }
11688                     else
11689                     {
11690                         Verify(tiOp1.IsIntegerType(), "not integer");
11691                     }
11692
11693                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11694
11695                     tiRetVal = tiOp1;
11696
11697 #ifdef _TARGET_64BIT_
11698                     if (tiOp2.IsNativeIntType())
11699                     {
11700                         tiRetVal = tiOp2;
11701                     }
11702 #endif // _TARGET_64BIT_
11703                 }
11704
11705                 op2 = impPopStack().val;
11706                 op1 = impPopStack().val;
11707
11708 #if !CPU_HAS_FP_SUPPORT
11709                 if (varTypeIsFloating(op1->gtType))
11710                 {
11711                     callNode = true;
11712                 }
11713 #endif
11714                 /* Can't do arithmetic with references */
11715                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11716
11717                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11718                 // if it is in the stack)
11719                 impBashVarAddrsToI(op1, op2);
11720
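                      // impGetByRefResultType picks the result type when byrefs are involved: per
                      // ECMA-335, byref +/- int yields a byref, while byref - byref yields a native int.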
11721                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11722
11723                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11724
11725                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11726
11727                 if (op2->gtOper == GT_CNS_INT)
11728                 {
11729                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11730                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11731
11732                     {
11733                         impPushOnStack(op1, tiRetVal);
11734                         break;
11735                     }
11736                 }
11737
11738 #if !FEATURE_X87_DOUBLES
11739                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11740                 //
11741                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11742                 {
11743                     if (op1->TypeGet() != type)
11744                     {
11745                         // We insert a cast of op1 to 'type'
11746                         op1 = gtNewCastNode(type, op1, type);
11747                     }
11748                     if (op2->TypeGet() != type)
11749                     {
11750                         // We insert a cast of op2 to 'type'
11751                         op2 = gtNewCastNode(type, op2, type);
11752                     }
11753                 }
11754 #endif // !FEATURE_X87_DOUBLES
11755
11756 #if SMALL_TREE_NODES
11757                 if (callNode)
11758                 {
11759                     /* These operators can later be transformed into 'GT_CALL' */
11760
11761                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11762 #ifndef _TARGET_ARM_
11763                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11764                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11765                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11766                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11767 #endif
11768                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11769                     // that we'll need to transform into a general large node, but rather specifically
11770                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11771                     // and a CALL is no longer the largest.
11772                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11773                     // than an "if".
11774                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11775                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11776                 }
11777                 else
11778 #endif // SMALL_TREE_NODES
11779                 {
11780                     op1 = gtNewOperNode(oper, type, op1, op2);
11781                 }
11782
11783                 /* Special case: integer/long division may throw an exception */
11784
11785                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
11786                 {
11787                     op1->gtFlags |= GTF_EXCEPT;
11788                 }
11789
11790                 if (ovfl)
11791                 {
11792                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11793                     if (ovflType != TYP_UNKNOWN)
11794                     {
11795                         op1->gtType = ovflType;
11796                     }
11797                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11798                     if (uns)
11799                     {
11800                         op1->gtFlags |= GTF_UNSIGNED;
11801                     }
11802                 }
11803
11804                 impPushOnStack(op1, tiRetVal);
11805                 break;
11806
11807             case CEE_SHL:
11808                 oper = GT_LSH;
11809                 goto CEE_SH_OP2;
11810
11811             case CEE_SHR:
11812                 oper = GT_RSH;
11813                 goto CEE_SH_OP2;
11814             case CEE_SHR_UN:
11815                 oper = GT_RSZ;
11816                 goto CEE_SH_OP2;
11817
11818             CEE_SH_OP2:
11819                 if (tiVerificationNeeded)
11820                 {
11821                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11822                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11823                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11824                     tiRetVal = tiVal;
11825                 }
11826                 op2 = impPopStack().val;
11827                 op1 = impPopStack().val; // operand to be shifted
11828                 impBashVarAddrsToI(op1, op2);
11829
11830                 type = genActualType(op1->TypeGet());
11831                 op1  = gtNewOperNode(oper, type, op1, op2);
11832
11833                 impPushOnStack(op1, tiRetVal);
11834                 break;
11835
11836             case CEE_NOT:
11837                 if (tiVerificationNeeded)
11838                 {
11839                     tiRetVal = impStackTop().seTypeInfo;
11840                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11841                 }
11842
11843                 op1 = impPopStack().val;
11844                 impBashVarAddrsToI(op1, nullptr);
11845                 type = genActualType(op1->TypeGet());
11846                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11847                 break;
11848
11849             case CEE_CKFINITE:
11850                 if (tiVerificationNeeded)
11851                 {
11852                     tiRetVal = impStackTop().seTypeInfo;
11853                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11854                 }
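                      // Per ECMA, ckfinite throws an ArithmeticException at run time if the value is a
                      // NaN or an infinity and otherwise leaves it unchanged, so GT_CKFINITE is marked
                      // as potentially throwing (GTF_EXCEPT) below.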
11855                 op1  = impPopStack().val;
11856                 type = op1->TypeGet();
11857                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11858                 op1->gtFlags |= GTF_EXCEPT;
11859
11860                 impPushOnStack(op1, tiRetVal);
11861                 break;
11862
11863             case CEE_LEAVE:
11864
11865                 val     = getI4LittleEndian(codeAddr); // jump distance
11866                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11867                 goto LEAVE;
11868
11869             case CEE_LEAVE_S:
11870                 val     = getI1LittleEndian(codeAddr); // jump distance
11871                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11872
11873             LEAVE:
11874
11875                 if (compIsForInlining())
11876                 {
11877                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11878                     return;
11879                 }
11880
11881                 JITDUMP(" %04X", jmpAddr);
11882                 if (block->bbJumpKind != BBJ_LEAVE)
11883                 {
11884                     impResetLeaveBlock(block, jmpAddr);
11885                 }
11886
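                      // A leave exits one or more protected regions; impImportLeave inserts the
                      // transfers to any enclosing finally handlers so they run before control
                      // reaches the leave target.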
11887                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11888                 impImportLeave(block);
11889                 impNoteBranchOffs();
11890
11891                 break;
11892
11893             case CEE_BR:
11894             case CEE_BR_S:
11895                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11896
11897                 if (compIsForInlining() && jmpDist == 0)
11898                 {
11899                     break; /* NOP */
11900                 }
11901
11902                 impNoteBranchOffs();
11903                 break;
11904
11905             case CEE_BRTRUE:
11906             case CEE_BRTRUE_S:
11907             case CEE_BRFALSE:
11908             case CEE_BRFALSE_S:
11909
11910                 /* Pop the comparand (now there's a neat term) from the stack */
11911                 if (tiVerificationNeeded)
11912                 {
11913                     typeInfo& tiVal = impStackTop().seTypeInfo;
11914                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11915                            "bad value");
11916                 }
11917
11918                 op1  = impPopStack().val;
11919                 type = op1->TypeGet();
11920
11921                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11922                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11923                 {
11924                     block->bbJumpKind = BBJ_NONE;
11925
11926                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11927                     {
11928                         op1 = gtUnusedValNode(op1);
11929                         goto SPILL_APPEND;
11930                     }
11931                     else
11932                     {
11933                         break;
11934                     }
11935                 }
11936
11937                 if (op1->OperIsCompare())
11938                 {
11939                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11940                     {
11941                         // Flip the sense of the compare
11942
11943                         op1 = gtReverseCond(op1);
11944                     }
11945                 }
11946                 else
11947                 {
11948                     /* We'll compare against an equally-sized integer 0 */
11949                     /* For small types, we always compare against int   */
11950                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11951
11952                     /* Create the comparison operator and try to fold it */
11953
11954                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11955                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11956                 }
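                      // Either way op1 is now a relop, e.g. "brtrue <L>" on a plain int x becomes
                      // JTRUE(NE(x, 0)) below, while brfalse on an existing relop just reverses its
                      // condition.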
11957
11958             // fall through
11959
11960             COND_JUMP:
11961
11962                 /* Fold comparison if we can */
11963
11964                 op1 = gtFoldExpr(op1);
11965
11966                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq' */
11967                 /* Don't make any blocks unreachable in import only mode */
11968
11969                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11970                 {
11971                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11972                        unreachable under compDbgCode */
11973                     assert(!opts.compDbgCode);
11974
11975                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11976                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11977                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11978                                                                          // block for the second time
11979
11980                     block->bbJumpKind = foldedJumpKind;
11981 #ifdef DEBUG
11982                     if (verbose)
11983                     {
11984                         if (op1->gtIntCon.gtIconVal)
11985                         {
11986                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11987                                    block->bbJumpDest->bbNum);
11988                         }
11989                         else
11990                         {
11991                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11992                         }
11993                     }
11994 #endif
11995                     break;
11996                 }
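                // E.g. an IL sequence like "ldc.i4.1; brtrue L" folds to a constant above,
                // so the block becomes BBJ_ALWAYS and no GT_JTRUE is appended; otherwise we
                // fall through here and emit the conditional jump tree.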
11997
11998                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11999
12000                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
12001                    in impImportBlock(block). For correct line numbers, spill stack. */
12002
12003                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
12004                 {
12005                     impSpillStackEnsure(true);
12006                 }
12007
12008                 goto SPILL_APPEND;
12009
12010             case CEE_CEQ:
12011                 oper = GT_EQ;
12012                 uns  = false;
12013                 goto CMP_2_OPs;
12014             case CEE_CGT_UN:
12015                 oper = GT_GT;
12016                 uns  = true;
12017                 goto CMP_2_OPs;
12018             case CEE_CGT:
12019                 oper = GT_GT;
12020                 uns  = false;
12021                 goto CMP_2_OPs;
12022             case CEE_CLT_UN:
12023                 oper = GT_LT;
12024                 uns  = true;
12025                 goto CMP_2_OPs;
12026             case CEE_CLT:
12027                 oper = GT_LT;
12028                 uns  = false;
12029                 goto CMP_2_OPs;
12030
12031             CMP_2_OPs:
12032                 if (tiVerificationNeeded)
12033                 {
12034                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12035                     tiRetVal = typeInfo(TI_INT);
12036                 }
12037
12038                 op2 = impPopStack().val;
12039                 op1 = impPopStack().val;
12040
12041 #ifdef _TARGET_64BIT_
12042                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
12043                 {
12044                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12045                 }
12046                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
12047                 {
12048                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12049                 }
12050 #endif // _TARGET_64BIT_
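                // For example, comparing a native int against an int32 on a 64-bit target
                // widens the 32-bit operand (signed or unsigned per 'uns') so that both
                // sides of the relop have the same actual type.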
12051
12052                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12053                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12054                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12055
12056                 /* Create the comparison node */
12057
12058                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12059
12060                 /* TODO: setting both flags when only one is appropriate */
12061                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
12062                 {
12063                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
12064                 }
12065
12066                 // Fold result, if possible.
12067                 op1 = gtFoldExpr(op1);
12068
12069                 impPushOnStack(op1, tiRetVal);
12070                 break;
12071
12072             case CEE_BEQ_S:
12073             case CEE_BEQ:
12074                 oper = GT_EQ;
12075                 goto CMP_2_OPs_AND_BR;
12076
12077             case CEE_BGE_S:
12078             case CEE_BGE:
12079                 oper = GT_GE;
12080                 goto CMP_2_OPs_AND_BR;
12081
12082             case CEE_BGE_UN_S:
12083             case CEE_BGE_UN:
12084                 oper = GT_GE;
12085                 goto CMP_2_OPs_AND_BR_UN;
12086
12087             case CEE_BGT_S:
12088             case CEE_BGT:
12089                 oper = GT_GT;
12090                 goto CMP_2_OPs_AND_BR;
12091
12092             case CEE_BGT_UN_S:
12093             case CEE_BGT_UN:
12094                 oper = GT_GT;
12095                 goto CMP_2_OPs_AND_BR_UN;
12096
12097             case CEE_BLE_S:
12098             case CEE_BLE:
12099                 oper = GT_LE;
12100                 goto CMP_2_OPs_AND_BR;
12101
12102             case CEE_BLE_UN_S:
12103             case CEE_BLE_UN:
12104                 oper = GT_LE;
12105                 goto CMP_2_OPs_AND_BR_UN;
12106
12107             case CEE_BLT_S:
12108             case CEE_BLT:
12109                 oper = GT_LT;
12110                 goto CMP_2_OPs_AND_BR;
12111
12112             case CEE_BLT_UN_S:
12113             case CEE_BLT_UN:
12114                 oper = GT_LT;
12115                 goto CMP_2_OPs_AND_BR_UN;
12116
12117             case CEE_BNE_UN_S:
12118             case CEE_BNE_UN:
12119                 oper = GT_NE;
12120                 goto CMP_2_OPs_AND_BR_UN;
12121
12122             CMP_2_OPs_AND_BR_UN:
12123                 uns       = true;
12124                 unordered = true;
12125                 goto CMP_2_OPs_AND_BR_ALL;
12126             CMP_2_OPs_AND_BR:
12127                 uns       = false;
12128                 unordered = false;
12129                 goto CMP_2_OPs_AND_BR_ALL;
12130             CMP_2_OPs_AND_BR_ALL:
12131
12132                 if (tiVerificationNeeded)
12133                 {
12134                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12135                 }
12136
12137                 /* Pull two values */
12138                 op2 = impPopStack().val;
12139                 op1 = impPopStack().val;
12140
12141 #ifdef _TARGET_64BIT_
12142                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
12143                 {
12144                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12145                 }
12146                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
12147                 {
12148                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12149                 }
12150 #endif // _TARGET_64BIT_
12151
12152                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12153                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12154                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12155
12156                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12157                 {
12158                     block->bbJumpKind = BBJ_NONE;
12159
12160                     if (op1->gtFlags & GTF_GLOB_EFFECT)
12161                     {
12162                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12163                                                        "Branch to next Optimization, op1 side effect"));
12164                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12165                     }
12166                     if (op2->gtFlags & GTF_GLOB_EFFECT)
12167                     {
12168                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12169                                                        "Branch to next Optimization, op2 side effect"));
12170                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12171                     }
12172
12173 #ifdef DEBUG
12174                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
12175                     {
12176                         impNoteLastILoffs();
12177                     }
12178 #endif
12179                     break;
12180                 }
12181 #if !FEATURE_X87_DOUBLES
12182                 // We can generate a compare of differently sized floating point op1 and op2,
12183                 // so we insert a cast to make the operand types match.
12184                 //
12185                 if (varTypeIsFloating(op1->TypeGet()))
12186                 {
12187                     if (op1->TypeGet() != op2->TypeGet())
12188                     {
12189                         assert(varTypeIsFloating(op2->TypeGet()));
12190
12191                         // Say op1 = double, op2 = float. To avoid loss of precision
12192                         // while comparing, op2 is converted to double and a double
12193                         // comparison is done.
12194                         if (op1->TypeGet() == TYP_DOUBLE)
12195                         {
12196                             // We insert a cast of op2 to TYP_DOUBLE
12197                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
12198                         }
12199                         else if (op2->TypeGet() == TYP_DOUBLE)
12200                         {
12201                             // We insert a cast of op1 to TYP_DOUBLE
12202                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
12203                         }
12204                     }
12205                 }
12206 #endif // !FEATURE_X87_DOUBLES
12207
12208                 /* Create and append the operator */
12209
12210                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12211
12212                 if (uns)
12213                 {
12214                     op1->gtFlags |= GTF_UNSIGNED;
12215                 }
12216
12217                 if (unordered)
12218                 {
12219                     op1->gtFlags |= GTF_RELOP_NAN_UN;
12220                 }
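                // E.g. "blt.un" compares integers as unsigned, and on floating-point
                // operands it is an unordered comparison: if either operand is NaN the
                // branch is taken.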
12221
12222                 goto COND_JUMP;
12223
12224             case CEE_SWITCH:
12225                 assert(!compIsForInlining());
12226
12227                 if (tiVerificationNeeded)
12228                 {
12229                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
12230                 }
12231                 /* Pop the switch value off the stack */
12232                 op1 = impPopStack().val;
12233                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
12234
12235                 /* We can create a switch node */
12236
12237                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
12238
12239                 val = (int)getU4LittleEndian(codeAddr);
12240                 codeAddr += 4 + val * 4; // skip over the switch-table
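                // In the IL stream the switch operand is a 4-byte case count followed by
                // that many 4-byte relative jump offsets, hence the "4 + val * 4" skip.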
12241
12242                 goto SPILL_APPEND;
12243
12244             /************************** Casting OPCODES ***************************/
12245
12246             case CEE_CONV_OVF_I1:
12247                 lclTyp = TYP_BYTE;
12248                 goto CONV_OVF;
12249             case CEE_CONV_OVF_I2:
12250                 lclTyp = TYP_SHORT;
12251                 goto CONV_OVF;
12252             case CEE_CONV_OVF_I:
12253                 lclTyp = TYP_I_IMPL;
12254                 goto CONV_OVF;
12255             case CEE_CONV_OVF_I4:
12256                 lclTyp = TYP_INT;
12257                 goto CONV_OVF;
12258             case CEE_CONV_OVF_I8:
12259                 lclTyp = TYP_LONG;
12260                 goto CONV_OVF;
12261
12262             case CEE_CONV_OVF_U1:
12263                 lclTyp = TYP_UBYTE;
12264                 goto CONV_OVF;
12265             case CEE_CONV_OVF_U2:
12266                 lclTyp = TYP_USHORT;
12267                 goto CONV_OVF;
12268             case CEE_CONV_OVF_U:
12269                 lclTyp = TYP_U_IMPL;
12270                 goto CONV_OVF;
12271             case CEE_CONV_OVF_U4:
12272                 lclTyp = TYP_UINT;
12273                 goto CONV_OVF;
12274             case CEE_CONV_OVF_U8:
12275                 lclTyp = TYP_ULONG;
12276                 goto CONV_OVF;
12277
12278             case CEE_CONV_OVF_I1_UN:
12279                 lclTyp = TYP_BYTE;
12280                 goto CONV_OVF_UN;
12281             case CEE_CONV_OVF_I2_UN:
12282                 lclTyp = TYP_SHORT;
12283                 goto CONV_OVF_UN;
12284             case CEE_CONV_OVF_I_UN:
12285                 lclTyp = TYP_I_IMPL;
12286                 goto CONV_OVF_UN;
12287             case CEE_CONV_OVF_I4_UN:
12288                 lclTyp = TYP_INT;
12289                 goto CONV_OVF_UN;
12290             case CEE_CONV_OVF_I8_UN:
12291                 lclTyp = TYP_LONG;
12292                 goto CONV_OVF_UN;
12293
12294             case CEE_CONV_OVF_U1_UN:
12295                 lclTyp = TYP_UBYTE;
12296                 goto CONV_OVF_UN;
12297             case CEE_CONV_OVF_U2_UN:
12298                 lclTyp = TYP_USHORT;
12299                 goto CONV_OVF_UN;
12300             case CEE_CONV_OVF_U_UN:
12301                 lclTyp = TYP_U_IMPL;
12302                 goto CONV_OVF_UN;
12303             case CEE_CONV_OVF_U4_UN:
12304                 lclTyp = TYP_UINT;
12305                 goto CONV_OVF_UN;
12306             case CEE_CONV_OVF_U8_UN:
12307                 lclTyp = TYP_ULONG;
12308                 goto CONV_OVF_UN;
12309
12310             CONV_OVF_UN:
12311                 uns = true;
12312                 goto CONV_OVF_COMMON;
12313             CONV_OVF:
12314                 uns = false;
12315                 goto CONV_OVF_COMMON;
12316
12317             CONV_OVF_COMMON:
12318                 ovfl = true;
12319                 goto _CONV;
12320
12321             case CEE_CONV_I1:
12322                 lclTyp = TYP_BYTE;
12323                 goto CONV;
12324             case CEE_CONV_I2:
12325                 lclTyp = TYP_SHORT;
12326                 goto CONV;
12327             case CEE_CONV_I:
12328                 lclTyp = TYP_I_IMPL;
12329                 goto CONV;
12330             case CEE_CONV_I4:
12331                 lclTyp = TYP_INT;
12332                 goto CONV;
12333             case CEE_CONV_I8:
12334                 lclTyp = TYP_LONG;
12335                 goto CONV;
12336
12337             case CEE_CONV_U1:
12338                 lclTyp = TYP_UBYTE;
12339                 goto CONV;
12340             case CEE_CONV_U2:
12341                 lclTyp = TYP_USHORT;
12342                 goto CONV;
12343 #if (REGSIZE_BYTES == 8)
12344             case CEE_CONV_U:
12345                 lclTyp = TYP_U_IMPL;
12346                 goto CONV_UN;
12347 #else
12348             case CEE_CONV_U:
12349                 lclTyp = TYP_U_IMPL;
12350                 goto CONV;
12351 #endif
12352             case CEE_CONV_U4:
12353                 lclTyp = TYP_UINT;
12354                 goto CONV;
12355             case CEE_CONV_U8:
12356                 lclTyp = TYP_ULONG;
12357                 goto CONV_UN;
12358
12359             case CEE_CONV_R4:
12360                 lclTyp = TYP_FLOAT;
12361                 goto CONV;
12362             case CEE_CONV_R8:
12363                 lclTyp = TYP_DOUBLE;
12364                 goto CONV;
12365
12366             case CEE_CONV_R_UN:
12367                 lclTyp = TYP_DOUBLE;
12368                 goto CONV_UN;
12369
12370             CONV_UN:
12371                 uns  = true;
12372                 ovfl = false;
12373                 goto _CONV;
12374
12375             CONV:
12376                 uns  = false;
12377                 ovfl = false;
12378                 goto _CONV;
12379
12380             _CONV:
12381                 // just check that we have a number on the stack
12382                 if (tiVerificationNeeded)
12383                 {
12384                     const typeInfo& tiVal = impStackTop().seTypeInfo;
12385                     Verify(tiVal.IsNumberType(), "bad arg");
12386
12387 #ifdef _TARGET_64BIT_
12388                     bool isNative = false;
12389
12390                     switch (opcode)
12391                     {
12392                         case CEE_CONV_OVF_I:
12393                         case CEE_CONV_OVF_I_UN:
12394                         case CEE_CONV_I:
12395                         case CEE_CONV_OVF_U:
12396                         case CEE_CONV_OVF_U_UN:
12397                         case CEE_CONV_U:
12398                             isNative = true;
12399                         default:
12400                             // leave 'isNative' = false;
12401                             break;
12402                     }
12403                     if (isNative)
12404                     {
12405                         tiRetVal = typeInfo::nativeInt();
12406                     }
12407                     else
12408 #endif // _TARGET_64BIT_
12409                     {
12410                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12411                     }
12412                 }
12413
12414                 // Only conversions from FLOAT or DOUBLE to an integer type, and
12415                 // conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls.
12416
12417                 if (varTypeIsFloating(lclTyp))
12418                 {
12419                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12420 #ifdef _TARGET_64BIT_
12421                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12422                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
12423                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12424                                // and generate SSE2 code instead of going through helper calls.
12425                                || (impStackTop().val->TypeGet() == TYP_BYREF)
12426 #endif
12427                         ;
12428                 }
12429                 else
12430                 {
12431                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12432                 }
12433
12434                 // At this point uns, ovfl, and callNode are all set
12435
12436                 op1 = impPopStack().val;
12437                 impBashVarAddrsToI(op1);
12438
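                // Illustrative case: for "... ldc.i4 0x7F; and; conv.i1" the masked value
                // already fits in a signed byte, so the check below drops the redundant cast
                // (and for "and 0xFF; conv.i1" it can drop the now-useless masking instead).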
12439                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12440                 {
12441                     op2 = op1->gtOp.gtOp2;
12442
12443                     if (op2->gtOper == GT_CNS_INT)
12444                     {
12445                         ssize_t ival = op2->gtIntCon.gtIconVal;
12446                         ssize_t mask, umask;
12447
12448                         switch (lclTyp)
12449                         {
12450                             case TYP_BYTE:
12451                             case TYP_UBYTE:
12452                                 mask  = 0x00FF;
12453                                 umask = 0x007F;
12454                                 break;
12455                             case TYP_USHORT:
12456                             case TYP_SHORT:
12457                                 mask  = 0xFFFF;
12458                                 umask = 0x7FFF;
12459                                 break;
12460
12461                             default:
12462                                 assert(!"unexpected type");
12463                                 return;
12464                         }
12465
12466                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12467                         {
12468                             /* Toss the cast, it's a waste of time */
12469
12470                             impPushOnStack(op1, tiRetVal);
12471                             break;
12472                         }
12473                         else if (ival == mask)
12474                         {
12475                             /* Toss the masking, it's a waste of time, since
12476                                we sign-extend from the small value anyway */
12477
12478                             op1 = op1->gtOp.gtOp1;
12479                         }
12480                     }
12481                 }
12482
12483                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
12484                     since the result of a cast to one of the 'small' integer
12485                     types is an integer.
12486                  */
12487
12488                 type = genActualType(lclTyp);
12489
12490 #if SMALL_TREE_NODES
12491                 if (callNode)
12492                 {
12493                     op1 = gtNewCastNodeL(type, op1, lclTyp);
12494                 }
12495                 else
12496 #endif // SMALL_TREE_NODES
12497                 {
12498                     op1 = gtNewCastNode(type, op1, lclTyp);
12499                 }
12500
12501                 if (ovfl)
12502                 {
12503                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12504                 }
12505                 if (uns)
12506                 {
12507                     op1->gtFlags |= GTF_UNSIGNED;
12508                 }
12509                 impPushOnStack(op1, tiRetVal);
12510                 break;
12511
12512             case CEE_NEG:
12513                 if (tiVerificationNeeded)
12514                 {
12515                     tiRetVal = impStackTop().seTypeInfo;
12516                     Verify(tiRetVal.IsNumberType(), "Bad arg");
12517                 }
12518
12519                 op1 = impPopStack().val;
12520                 impBashVarAddrsToI(op1, nullptr);
12521                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12522                 break;
12523
12524             case CEE_POP:
12525             {
12526                 /* Pull the top value from the stack */
12527
12528                 StackEntry se = impPopStack();
12529                 clsHnd        = se.seTypeInfo.GetClassHandle();
12530                 op1           = se.val;
12531
12532                 /* Get hold of the type of the value being duplicated */
12533
12534                 lclTyp = genActualType(op1->gtType);
12535
12536                 /* Does the value have any side effects? */
12537
12538                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
12539                 {
12540                     // Since we are throwing away the value, just normalize
12541                     // it to its address.  This is more efficient.
12542
12543                     if (varTypeIsStruct(op1))
12544                     {
12545 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
12546                         // Non-calls, such as obj or ret_expr, have to go through this.
12547                         // Calls with large struct return value have to go through this.
12548                         // Helper calls with small struct return value also have to go
12549                         // through this since they do not follow Unix calling convention.
12550                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
12551                             op1->AsCall()->gtCallType == CT_HELPER)
12552 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
12553                         {
12554                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
12555                         }
12556                     }
12557
12558                     // If op1 is a non-overflow cast, throw it away since it is useless.
12559                     // Another reason for throwing away the useless cast is in the context of
12560                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
12561                     // The cast gets added as part of importing GT_CALL, which gets in the way
12562                     // of fgMorphCall() on the forms of tail call nodes that we assert.
12563                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
12564                     {
12565                         op1 = op1->gtOp.gtOp1;
12566                     }
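                    // For instance, if the popped value is GT_CAST(GT_CALL(..)) from an
                    // implicit tail call candidate, the cast was stripped above so that the
                    // bare call is what gets appended to the statement list below.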
12567
12568                     // If 'op1' is an expression, create an assignment node.
12569                     // This helps analyses (like CSE) work correctly.
12570
12571                     if (op1->gtOper != GT_CALL)
12572                     {
12573                         op1 = gtUnusedValNode(op1);
12574                     }
12575
12576                     /* Append the value to the tree list */
12577                     goto SPILL_APPEND;
12578                 }
12579
12580                 /* No side effects - just throw the <BEEP> thing away */
12581             }
12582             break;
12583
12584             case CEE_DUP:
12585             {
12586                 if (tiVerificationNeeded)
12587                 {
12588                     // Dup could start the beginning of a delegate creation sequence; remember that
12589                     delegateCreateStart = codeAddr - 1;
12590                     impStackTop(0);
12591                 }
12592
12593                 // If the expression to dup is simple, just clone it.
12594                 // Otherwise spill it to a temp, and reload the temp
12595                 // twice.
12596                 StackEntry se   = impPopStack();
12597                 GenTree*   tree = se.val;
12598                 tiRetVal        = se.seTypeInfo;
12599                 op1             = tree;
12600
12601                 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
12602                 {
12603                     const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
12604                     impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
12605                     var_types type = genActualType(lvaTable[tmpNum].TypeGet());
12606                     op1            = gtNewLclvNode(tmpNum, type);
12607
12608                     // Propagate type info to the temp from the stack and the original tree
12609                     if (type == TYP_REF)
12610                     {
12611                         lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
12612                     }
12613                 }
12614
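                // E.g. "ldloc.0; dup" just clones the local load below, whereas a more
                // complex value was spilled to the temp above and is reloaded twice.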
12615                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12616                                    nullptr DEBUGARG("DUP instruction"));
12617
12618                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12619                 impPushOnStack(op1, tiRetVal);
12620                 impPushOnStack(op2, tiRetVal);
12621             }
12622             break;
12623
12624             case CEE_STIND_I1:
12625                 lclTyp = TYP_BYTE;
12626                 goto STIND;
12627             case CEE_STIND_I2:
12628                 lclTyp = TYP_SHORT;
12629                 goto STIND;
12630             case CEE_STIND_I4:
12631                 lclTyp = TYP_INT;
12632                 goto STIND;
12633             case CEE_STIND_I8:
12634                 lclTyp = TYP_LONG;
12635                 goto STIND;
12636             case CEE_STIND_I:
12637                 lclTyp = TYP_I_IMPL;
12638                 goto STIND;
12639             case CEE_STIND_REF:
12640                 lclTyp = TYP_REF;
12641                 goto STIND;
12642             case CEE_STIND_R4:
12643                 lclTyp = TYP_FLOAT;
12644                 goto STIND;
12645             case CEE_STIND_R8:
12646                 lclTyp = TYP_DOUBLE;
12647                 goto STIND;
12648             STIND:
12649
12650                 if (tiVerificationNeeded)
12651                 {
12652                     typeInfo instrType(lclTyp);
12653 #ifdef _TARGET_64BIT_
12654                     if (opcode == CEE_STIND_I)
12655                     {
12656                         instrType = typeInfo::nativeInt();
12657                     }
12658 #endif // _TARGET_64BIT_
12659                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12660                 }
12661                 else
12662                 {
12663                     compUnsafeCastUsed = true; // Have to go conservative
12664                 }
12665
12666             STIND_POST_VERIFY:
12667
12668                 op2 = impPopStack().val; // value to store
12669                 op1 = impPopStack().val; // address to store to
12670
12671                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12672                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12673
12674                 impBashVarAddrsToI(op1, op2);
12675
12676                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12677
12678 #ifdef _TARGET_64BIT_
12679                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12680                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12681                 {
12682                     op2->gtType = TYP_I_IMPL;
12683                 }
12684                 else
12685                 {
12686                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12687                     //
12688                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12689                     {
12690                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12691                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12692                     }
12693                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12694                     //
12695                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12696                     {
12697                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12698                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12699                     }
12700                 }
12701 #endif // _TARGET_64BIT_
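                // For example, "stind.i" with an int32 value on a 64-bit target gets the
                // widening cast inserted above (for x86 JIT compatibility) rather than
                // rejecting the IL.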
12702
12703                 if (opcode == CEE_STIND_REF)
12704                 {
12705                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12706                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12707                     lclTyp = genActualType(op2->TypeGet());
12708                 }
12709
12710 // Check target type.
12711 #ifdef DEBUG
12712                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12713                 {
12714                     if (op2->gtType == TYP_BYREF)
12715                     {
12716                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12717                     }
12718                     else if (lclTyp == TYP_BYREF)
12719                     {
12720                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12721                     }
12722                 }
12723                 else
12724                 {
12725                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12726                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12727                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12728                 }
12729 #endif
12730
12731                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12732
12733                 // stind could point anywhere, for example a boxed class static int
12734                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12735
12736                 if (prefixFlags & PREFIX_VOLATILE)
12737                 {
12738                     assert(op1->OperGet() == GT_IND);
12739                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12740                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12741                     op1->gtFlags |= GTF_IND_VOLATILE;
12742                 }
12743
12744                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12745                 {
12746                     assert(op1->OperGet() == GT_IND);
12747                     op1->gtFlags |= GTF_IND_UNALIGNED;
12748                 }
12749
12750                 op1 = gtNewAssignNode(op1, op2);
12751                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12752
12753                 // Spill side-effects AND global-data-accesses
12754                 if (verCurrentState.esStackDepth > 0)
12755                 {
12756                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12757                 }
12758
12759                 goto APPEND;
12760
12761             case CEE_LDIND_I1:
12762                 lclTyp = TYP_BYTE;
12763                 goto LDIND;
12764             case CEE_LDIND_I2:
12765                 lclTyp = TYP_SHORT;
12766                 goto LDIND;
12767             case CEE_LDIND_U4:
12768             case CEE_LDIND_I4:
12769                 lclTyp = TYP_INT;
12770                 goto LDIND;
12771             case CEE_LDIND_I8:
12772                 lclTyp = TYP_LONG;
12773                 goto LDIND;
12774             case CEE_LDIND_REF:
12775                 lclTyp = TYP_REF;
12776                 goto LDIND;
12777             case CEE_LDIND_I:
12778                 lclTyp = TYP_I_IMPL;
12779                 goto LDIND;
12780             case CEE_LDIND_R4:
12781                 lclTyp = TYP_FLOAT;
12782                 goto LDIND;
12783             case CEE_LDIND_R8:
12784                 lclTyp = TYP_DOUBLE;
12785                 goto LDIND;
12786             case CEE_LDIND_U1:
12787                 lclTyp = TYP_UBYTE;
12788                 goto LDIND;
12789             case CEE_LDIND_U2:
12790                 lclTyp = TYP_USHORT;
12791                 goto LDIND;
12792             LDIND:
12793
12794                 if (tiVerificationNeeded)
12795                 {
12796                     typeInfo lclTiType(lclTyp);
12797 #ifdef _TARGET_64BIT_
12798                     if (opcode == CEE_LDIND_I)
12799                     {
12800                         lclTiType = typeInfo::nativeInt();
12801                     }
12802 #endif // _TARGET_64BIT_
12803                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12804                     tiRetVal.NormaliseForStack();
12805                 }
12806                 else
12807                 {
12808                     compUnsafeCastUsed = true; // Have to go conservative
12809                 }
12810
12811             LDIND_POST_VERIFY:
12812
12813                 op1 = impPopStack().val; // address to load from
12814                 impBashVarAddrsToI(op1);
12815
12816 #ifdef _TARGET_64BIT_
12817                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12818                 //
12819                 if (genActualType(op1->gtType) == TYP_INT)
12820                 {
12821                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12822                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12823                 }
12824 #endif
12825
12826                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12827
12828                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12829
12830                 // ldind could point anywhere, for example a boxed class static int
12831                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12832
12833                 if (prefixFlags & PREFIX_VOLATILE)
12834                 {
12835                     assert(op1->OperGet() == GT_IND);
12836                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12837                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12838                     op1->gtFlags |= GTF_IND_VOLATILE;
12839                 }
12840
12841                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12842                 {
12843                     assert(op1->OperGet() == GT_IND);
12844                     op1->gtFlags |= GTF_IND_UNALIGNED;
12845                 }
12846
12847                 impPushOnStack(op1, tiRetVal);
12848
12849                 break;
12850
12851             case CEE_UNALIGNED:
12852
12853                 assert(sz == 1);
12854                 val = getU1LittleEndian(codeAddr);
12855                 ++codeAddr;
12856                 JITDUMP(" %u", val);
12857                 if ((val != 1) && (val != 2) && (val != 4))
12858                 {
12859                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12860                 }
12861
12862                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12863                 prefixFlags |= PREFIX_UNALIGNED;
12864
12865                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12866
12867             PREFIX:
12868                 opcode     = (OPCODE)getU1LittleEndian(codeAddr);
12869                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12870                 codeAddr += sizeof(__int8);
12871                 goto DECODE_OPCODE;
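            // The prefix opcodes below (volatile., constrained., readonly., tail.) record
            // their state in prefixFlags and then reuse this PREFIX path to decode the
            // instruction they modify.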
12872
12873             case CEE_VOLATILE:
12874
12875                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12876                 prefixFlags |= PREFIX_VOLATILE;
12877
12878                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12879
12880                 assert(sz == 0);
12881                 goto PREFIX;
12882
12883             case CEE_LDFTN:
12884             {
12885                 // Need to do a lookup here so that we perform an access check
12886                 // and do a NOWAY if protections are violated
12887                 _impResolveToken(CORINFO_TOKENKIND_Method);
12888
12889                 JITDUMP(" %08X", resolvedToken.token);
12890
12891                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12892                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12893                               &callInfo);
12894
12895                 // This check really only applies to intrinsic Array.Address methods
12896                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12897                 {
12898                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12899                 }
12900
12901                 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12902                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12903
12904                 if (tiVerificationNeeded)
12905                 {
12906                     // LDFTN could start the beginning of a delegate creation sequence; remember that
12907                     delegateCreateStart = codeAddr - 2;
12908
12909                     // check any constraints on the callee's class and type parameters
12910                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12911                                    "method has unsatisfied class constraints");
12912                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12913                                                                                 resolvedToken.hMethod),
12914                                    "method has unsatisfied method constraints");
12915
12916                     mflags = callInfo.verMethodFlags;
12917                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12918                 }
12919
12920             DO_LDFTN:
12921                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12922                 if (compDonotInline())
12923                 {
12924                     return;
12925                 }
12926
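                // The resolved token is attached to the pushed method pointer so that later
                // consumers (e.g. a delegate-construction sequence) can recover the target
                // method.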
12927                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12928                 impPushOnStack(op1, typeInfo(heapToken));
12929
12930                 break;
12931             }
12932
12933             case CEE_LDVIRTFTN:
12934             {
12935                 /* Get the method token */
12936
12937                 _impResolveToken(CORINFO_TOKENKIND_Method);
12938
12939                 JITDUMP(" %08X", resolvedToken.token);
12940
12941                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12942                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12943                                                     CORINFO_CALLINFO_CALLVIRT)),
12944                               &callInfo);
12945
12946                 // This check really only applies to intrinsic Array.Address methods
12947                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12948                 {
12949                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12950                 }
12951
12952                 mflags = callInfo.methodFlags;
12953
12954                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12955
12956                 if (compIsForInlining())
12957                 {
12958                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12959                     {
12960                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12961                         return;
12962                     }
12963                 }
12964
12965                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12966
12967                 if (tiVerificationNeeded)
12968                 {
12969
12970                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12971                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12972
12973                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12974                     typeInfo declType =
12975                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12976
12977                     typeInfo arg = impStackTop().seTypeInfo;
12978                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12979                            "bad ldvirtftn");
12980
12981                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12982                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12983                     {
12984                         instanceClassHnd = arg.GetClassHandleForObjRef();
12985                     }
12986
12987                     // check any constraints on the method's class and type parameters
12988                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12989                                    "method has unsatisfied class constraints");
12990                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12991                                                                                 resolvedToken.hMethod),
12992                                    "method has unsatisfied method constraints");
12993
12994                     if (mflags & CORINFO_FLG_PROTECTED)
12995                     {
12996                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12997                                "Accessing protected method through wrong type.");
12998                     }
12999                 }
13000
13001                 /* Get the object-ref */
13002                 op1 = impPopStack().val;
13003                 assertImp(op1->gtType == TYP_REF);
13004
13005                 if (opts.IsReadyToRun())
13006                 {
13007                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
13008                     {
13009                         if (op1->gtFlags & GTF_SIDE_EFFECT)
13010                         {
13011                             op1 = gtUnusedValNode(op1);
13012                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13013                         }
13014                         goto DO_LDFTN;
13015                     }
13016                 }
13017                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13018                 {
13019                     if (op1->gtFlags & GTF_SIDE_EFFECT)
13020                     {
13021                         op1 = gtUnusedValNode(op1);
13022                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13023                     }
13024                     goto DO_LDFTN;
13025                 }
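                // That is, when the target cannot be overridden (final, static, or simply
                // non-virtual), ldvirtftn degenerates into ldftn: the object reference is
                // kept only for its side effects and a direct method pointer is loaded.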
13026
13027                 GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
13028                 if (compDonotInline())
13029                 {
13030                     return;
13031                 }
13032
13033                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13034                 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
13035                 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
13036                 impPushOnStack(fptr, typeInfo(heapToken));
13037
13038                 break;
13039             }
13040
13041             case CEE_CONSTRAINED:
13042
13043                 assertImp(sz == sizeof(unsigned));
13044                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
13045                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
13046                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
13047
13048                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
13049                 prefixFlags |= PREFIX_CONSTRAINED;
13050
13051                 {
13052                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13053                     if (actualOpcode != CEE_CALLVIRT)
13054                     {
13055                         BADCODE("constrained. has to be followed by callvirt");
13056                     }
13057                 }
13058
13059                 goto PREFIX;
13060
13061             case CEE_READONLY:
13062                 JITDUMP(" readonly.");
13063
13064                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
13065                 prefixFlags |= PREFIX_READONLY;
13066
13067                 {
13068                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13069                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
13070                     {
13071                         BADCODE("readonly. has to be followed by ldelema or call");
13072                     }
13073                 }
13074
13075                 assert(sz == 0);
13076                 goto PREFIX;
13077
13078             case CEE_TAILCALL:
13079                 JITDUMP(" tail.");
13080
13081                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
13082                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13083
13084                 {
13085                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13086                     if (!impOpcodeIsCallOpcode(actualOpcode))
13087                     {
13088                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
13089                     }
13090                 }
13091                 assert(sz == 0);
13092                 goto PREFIX;
13093
13094             case CEE_NEWOBJ:
13095
13096                 /* Since we will implicitly insert newObjThisPtr at the start of the
13097                    argument list, spill any GTF_ORDER_SIDEEFF */
13098                 impSpillSpecialSideEff();
13099
13100                 /* NEWOBJ does not respond to TAIL */
13101                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
13102
13103                 /* NEWOBJ does not respond to CONSTRAINED */
13104                 prefixFlags &= ~PREFIX_CONSTRAINED;
13105
13106                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
13107
13108                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13109                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
13110                               &callInfo);
13111
13112                 if (compIsForInlining())
13113                 {
13114                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13115                     {
13116                         // Check to see if this call violates the boundary.
13117                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
13118                         return;
13119                     }
13120                 }
13121
13122                 mflags = callInfo.methodFlags;
13123
13124                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
13125                 {
13126                     BADCODE("newobj on static or abstract method");
13127                 }
13128
13129                 // Insert the security callout before any actual code is generated
13130                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13131
13132                 // There are three different cases for newobj:
13133                 //      1) Object is an array (arrays are treated specially by the EE)
13134                 //      2) Object is some other variable-sized object (e.g. String);
13135                 //         in cases 1 and 2 the object size depends on the arguments
13136                 //      3) Class size can be determined beforehand (normal case)
13137                 // In the first case, we need to call a NEWOBJ helper (multinewarray);
13138                 // in the second case, we call the constructor with a '0' this pointer;
13139                 // in the third case, we alloc the memory and then call the constructor.
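                // For example, "newobj string::.ctor(char[])" falls under case 2 (String is
                // currently the only variable-sized non-array case), while a value-class
                // constructor falls under case 3 and gets a stack temp below.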
13140
13141                 clsFlags = callInfo.classFlags;
13142                 if (clsFlags & CORINFO_FLG_ARRAY)
13143                 {
13144                     if (tiVerificationNeeded)
13145                     {
13146                         CORINFO_CLASS_HANDLE elemTypeHnd;
13147                         INDEBUG(CorInfoType corType =)
13148                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13149                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
13150                         Verify(elemTypeHnd == nullptr ||
13151                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13152                                "newarr of byref-like objects");
13153                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
13154                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
13155                                       &callInfo DEBUGARG(info.compFullName));
13156                     }
13157                     // Arrays need to call the NEWOBJ helper.
13158                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
13159
13160                     impImportNewObjArray(&resolvedToken, &callInfo);
13161                     if (compDonotInline())
13162                     {
13163                         return;
13164                     }
13165
13166                     callTyp = TYP_REF;
13167                     break;
13168                 }
13169                 // At present this can only be String
13170                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
13171                 {
13172                     if (IsTargetAbi(CORINFO_CORERT_ABI))
13173                     {
13174                         // The dummy argument does not exist in CoreRT
13175                         newObjThisPtr = nullptr;
13176                     }
13177                     else
13178                     {
13179                         // This is the case for variable-sized objects that are not
13180                         // arrays.  In this case, call the constructor with a null 'this'
13181                         // pointer
13182                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
13183                     }
13184
13185                     /* Remember that this basic block contains 'new' of an object */
13186                     block->bbFlags |= BBF_HAS_NEWOBJ;
13187                     optMethodFlags |= OMF_HAS_NEWOBJ;
13188                 }
13189                 else
13190                 {
13191                     // This is the normal case where the size of the object is
13192                     // fixed.  Allocate the memory and call the constructor.
13193
13194                     // Note: We cannot add a peep to avoid use of a temp here
13195                     // because we don't have enough interference info to detect when
13196                     // sources and destination interfere, for example: s = new S(ref);
13197
13198                     // TODO: We should find the correct place to introduce a general
13199                     // reverse copy prop for struct return values from newobj or
13200                     // any function returning structs.
13201
13202                     /* get a temporary for the new object */
13203                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
13204                     if (compDonotInline())
13205                     {
13206                         // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
13207                         assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
13208                         return;
13209                     }
13210
13211                     // In the value class case we only need clsHnd for size calcs.
13212                     //
13213                     // The lookup of the code pointer will be handled by CALL in this case
13214                     if (clsFlags & CORINFO_FLG_VALUECLASS)
13215                     {
13216                         if (compIsForInlining())
13217                         {
13218                             // If value class has GC fields, inform the inliner. It may choose to
13219                             // bail out on the inline.
13220                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13221                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
13222                             {
13223                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
13224                                 if (compInlineResult->IsFailure())
13225                                 {
13226                                     return;
13227                                 }
13228
13229                                 // Do further notification in the case where the call site is rare;
13230                                 // some policies do not track the relative hotness of call sites for
13231                                 // "always" inline cases.
13232                                 if (impInlineInfo->iciBlock->isRunRarely())
13233                                 {
13234                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
13235                                     if (compInlineResult->IsFailure())
13236                                     {
13237                                         return;
13238                                     }
13239                                 }
13240                             }
13241                         }
13242
13243                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
13244                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
13245
13246                         if (impIsPrimitive(jitTyp))
13247                         {
13248                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
13249                         }
13250                         else
13251                         {
13252                             // The local variable itself is the allocated space.
13253                             // Here we need unsafe value cls check, since the address of struct is taken for further use
13254                             // and potentially exploitable.
13255                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
13256                         }
13257                         if (compIsForInlining() || fgStructTempNeedsExplicitZeroInit(lvaTable + lclNum, block))
13258                         {
13259                             // Append a tree to zero-out the temp
13260                             newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
13261
13262                             newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
13263                                                            gtNewIconNode(0), // Value
13264                                                            size,             // Size
13265                                                            false,            // isVolatile
13266                                                            false);           // not copyBlock
13267                             impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
13268                         }
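                            // Rough shape of the zero-init statement appended above (illustrative
                            // only; the exact node shape depends on gtNewBlkOpNode):
                            //     ASG(BLK<size>(tmpN), CNS_INT 0)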
13269
13270                         // Obtain the address of the temp
13271                         newObjThisPtr =
13272                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
13273                     }
13274                     else
13275                     {
13276 #ifdef FEATURE_READYTORUN_COMPILER
13277                         if (opts.IsReadyToRun())
13278                         {
13279                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
13280                             usingReadyToRunHelper = (op1 != nullptr);
13281                         }
13282
13283                         if (!usingReadyToRunHelper)
13284 #endif
13285                         {
13286                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
13287                             if (op1 == nullptr)
13288                             { // compDonotInline()
13289                                 return;
13290                             }
13291
13292                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13293                             // and the newfast call with a single call to a dynamic R2R cell that will:
13294                             //      1) Load the context
13295                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
13296                             //      stub
13297                             //      3) Allocate and return the new object
13298                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13299
13300                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
13301                                                     resolvedToken.hClass, TYP_REF, op1);
13302                         }
13303
13304                         // Remember that this basic block contains 'new' of an object
13305                         block->bbFlags |= BBF_HAS_NEWOBJ;
13306                         optMethodFlags |= OMF_HAS_NEWOBJ;
13307
13308                         // Append the assignment to the temp/local. We don't need to spill
13309                         // at all as we are just calling an EE-Jit helper which can only
13310                         // cause an (async) OutOfMemoryException.
13311
13312                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
13313                         // to a temp. Note that the pattern "temp = allocObj" is required
13314                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
13315                         // without exhaustive walk over all expressions.
13316
13317                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
13318                         lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
13319
13320                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
13321                     }
13322                 }
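                // Summary (illustrative): at this point newObjThisPtr holds the 'this' argument for
                // the upcoming constructor call:
                //   - a TYP_BYREF address of a zero-initialized struct temp for value classes,
                //   - a TYP_REF local holding the GT_ALLOCOBJ result for reference classes,
                //   - null (or unset) for the variable-sized and array cases handled above.
                // The constructor itself is imported by the shared CALL path below.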
13323                 goto CALL;
13324
13325             case CEE_CALLI:
13326
13327                 /* CALLI does not respond to CONSTRAINED */
13328                 prefixFlags &= ~PREFIX_CONSTRAINED;
13329
13330                 if (compIsForInlining())
13331                 {
13332                     // CALLI doesn't have a method handle, so assume the worst.
13333                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13334                     {
13335                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
13336                         return;
13337                     }
13338                 }
13339
13340             // fall through
13341
13342             case CEE_CALLVIRT:
13343             case CEE_CALL:
13344
13345                 // We can't call getCallInfo on the token from a CALLI, but we need it in
13346                 // many other places.  We unfortunately embed that knowledge here.
13347                 if (opcode != CEE_CALLI)
13348                 {
13349                     _impResolveToken(CORINFO_TOKENKIND_Method);
13350
13351                     eeGetCallInfo(&resolvedToken,
13352                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
13353                                   // this is how impImportCall invokes getCallInfo
13354                                   addVerifyFlag(
13355                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
13356                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
13357                                                                        : CORINFO_CALLINFO_NONE)),
13358                                   &callInfo);
13359                 }
13360                 else
13361                 {
13362                     // Suppress uninitialized use warning.
13363                     memset(&resolvedToken, 0, sizeof(resolvedToken));
13364                     memset(&callInfo, 0, sizeof(callInfo));
13365
13366                     resolvedToken.token = getU4LittleEndian(codeAddr);
13367                 }
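                // Note: for CALLI the token is a stand-alone signature and the callee is a function
                // pointer taken from the evaluation stack (per ECMA-335), so there is no method
                // handle to pass to eeGetCallInfo; only the raw token is recorded here.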
13368
13369             CALL: // memberRef should be set.
13370                 // newObjThisPtr should be set for CEE_NEWOBJ
13371
13372                 JITDUMP(" %08X", resolvedToken.token);
13373                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13374
13375                 bool newBBcreatedForTailcallStress;
13376
13377                 newBBcreatedForTailcallStress = false;
13378
13379                 if (compIsForInlining())
13380                 {
13381                     if (compDonotInline())
13382                     {
13383                         return;
13384                     }
13385                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13386                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13387                 }
13388                 else
13389                 {
13390                     if (compTailCallStress())
13391                     {
13392                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13393                         // Tail call stress only recognizes call+ret patterns and forces them to be
13394                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
13395                         // doesn't import the 'ret' opcode following the call into the basic block containing
13396                         // the call; instead, it imports it into a new basic block.  Note that fgMakeBasicBlocks()
13397                         // is already checking that there is an opcode following call and hence it is
13398                         // safe here to read next opcode without bounds check.
13399                         newBBcreatedForTailcallStress =
13400                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
13401                                                              // make it jump to RET.
13402                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
13403
13404                         if (newBBcreatedForTailcallStress &&
13405                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
13406                             verCheckTailCallConstraint(opcode, &resolvedToken,
13407                                                        constraintCall ? &constrainedResolvedToken : nullptr,
13408                                                        true) // Is it legal to do tailcall?
13409                             )
13410                         {
13411                             // Stress the tailcall.
13412                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13413                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13414                         }
13415                     }
13416                 }
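                // Illustrative (hypothetical) example: under tail call stress an IL sequence such as
                //     call   SomeMethod
                //     ret
                // is treated as if it had been written "tail. call SomeMethod; ret", provided
                // verCheckTailCallConstraint accepts it as a legal tail call.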
13417
13418                 // This is split up to avoid goto flow warnings.
13419                 bool isRecursive;
13420                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13421
13422                 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
13423                 // hence will not be considered for implicit tail calling.
13424                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13425                 {
13426                     if (compIsForInlining())
13427                     {
13428 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13429                         // Are we inlining at an implicit tail call site? If so, then we can flag
13430                         // implicit tail call sites in the inline body. These call sites
13431                         // often end up in non BBJ_RETURN blocks, so only flag them when
13432                         // we're able to handle shared returns.
13433                         if (impInlineInfo->iciCall->IsImplicitTailCall())
13434                         {
13435                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13436                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13437                         }
13438 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13439                     }
13440                     else
13441                     {
13442                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13443                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13444                     }
13445                 }
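                // Roughly speaking, an "implicit" tail call is a call that lacks the "tail." prefix
                // but is immediately followed by a ret in the IL; PREFIX_TAILCALL_IMPLICIT lets later
                // phases opt into tail calling it where FEATURE_TAILCALL_OPT permits.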
13446
13447                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13448                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13449                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
13450
13451                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13452                 {
13453                     // All calls and delegates need a security callout.
13454                     // For delegates, this is the call to the delegate constructor, not the access check on the
13455                     // LD(virt)FTN.
13456                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13457
13458 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
13459
13460                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
13461                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
13462                 // ldtoken <field token>, and we now check accessibility
13463                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
13464                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
13465                 {
13466                     if (prevOpcode != CEE_LDTOKEN)
13467                     {
13468                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
13469                     }
13470                     else
13471                     {
13472                         assert(lastLoadToken != NULL);
13473                         // Now that we know we have a token, verify that it is accessible for loading
13474                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
13475                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
13476                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
13477                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13478                     }
13479                 }
13480
13481 #endif // DevDiv 410397
13482                 }
13483
13484                 if (tiVerificationNeeded)
13485                 {
13486                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13487                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13488                                   &callInfo DEBUGARG(info.compFullName));
13489                 }
13490
13491                 // Insert delegate callout here.
13492                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
13493                 {
13494 #ifdef DEBUG
13495                     // We should do this only if verification is enabled
13496                     // If verification is disabled, delegateCreateStart will not be initialized correctly
13497                     if (tiVerificationNeeded)
13498                     {
13499                         mdMemberRef delegateMethodRef = mdMemberRefNil;
13500                         // We should get here only for well formed delegate creation.
13501                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
13502                     }
13503 #endif
13504                 }
13505
13506                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13507                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13508                 if (compDonotInline())
13509                 {
13510                     // We do not check for failures after lvaGrabTemp; that is covered by the CoreCLR_13272 issue.
13511                     assert((callTyp == TYP_UNDEF) ||
13512                            (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
13513                     return;
13514                 }
13515
13516                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
13517                                                                        // have created a new BB after the "call"
13518                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
13519                 {
13520                     assert(!compIsForInlining());
13521                     goto RET;
13522                 }
13523
13524                 break;
13525
13526             case CEE_LDFLD:
13527             case CEE_LDSFLD:
13528             case CEE_LDFLDA:
13529             case CEE_LDSFLDA:
13530             {
13531
13532                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
13533                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
13534
13535                 /* Get the CP_Fieldref index */
13536                 assertImp(sz == sizeof(unsigned));
13537
13538                 _impResolveToken(CORINFO_TOKENKIND_Field);
13539
13540                 JITDUMP(" %08X", resolvedToken.token);
13541
13542                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
13543
13544                 GenTree*             obj     = nullptr;
13545                 typeInfo*            tiObj   = nullptr;
13546                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
13547
13548                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
13549                 {
13550                     tiObj         = &impStackTop().seTypeInfo;
13551                     StackEntry se = impPopStack();
13552                     objType       = se.seTypeInfo.GetClassHandle();
13553                     obj           = se.val;
13554
13555                     if (impIsThis(obj))
13556                     {
13557                         aflags |= CORINFO_ACCESS_THIS;
13558
13559                         // An optimization for Contextful classes:
13560                         // we unwrap the proxy when we have a 'this reference'
13561
13562                         if (info.compUnwrapContextful)
13563                         {
13564                             aflags |= CORINFO_ACCESS_UNWRAP;
13565                         }
13566                     }
13567                 }
13568
13569                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13570
13571                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13572                 // handle
13573                 CorInfoType ciType = fieldInfo.fieldType;
13574                 clsHnd             = fieldInfo.structType;
13575
13576                 lclTyp = JITtype2varType(ciType);
13577
13578 #ifdef _TARGET_AMD64_
13579                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
13580 #endif // _TARGET_AMD64_
13581
13582                 if (compIsForInlining())
13583                 {
13584                     switch (fieldInfo.fieldAccessor)
13585                     {
13586                         case CORINFO_FIELD_INSTANCE_HELPER:
13587                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13588                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13589                         case CORINFO_FIELD_STATIC_TLS:
13590
13591                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
13592                             return;
13593
13594                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13595                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13596                             /* We may be able to inline the field accessors in specific instantiations of generic
13597                              * methods */
13598                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
13599                             return;
13600
13601                         default:
13602                             break;
13603                     }
13604
13605                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
13606                         clsHnd)
13607                     {
13608                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
13609                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
13610                         {
13611                             // Loading a static valuetype field usually will cause a JitHelper to be called
13612                             // for the static base. This will bloat the code.
13613                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
13614
13615                             if (compInlineResult->IsFailure())
13616                             {
13617                                 return;
13618                             }
13619                         }
13620                     }
13621                 }
13622
13623                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
13624                 if (isLoadAddress)
13625                 {
13626                     tiRetVal.MakeByRef();
13627                 }
13628                 else
13629                 {
13630                     tiRetVal.NormaliseForStack();
13631                 }
13632
13633                 // Perform this check always to ensure that we get field access exceptions even with
13634                 // SkipVerification.
13635                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13636
13637                 if (tiVerificationNeeded)
13638                 {
13639                     // You can also pass the unboxed struct to LDFLD
13640                     BOOL bAllowPlainValueTypeAsThis = FALSE;
13641                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13642                     {
13643                         bAllowPlainValueTypeAsThis = TRUE;
13644                     }
13645
13646                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13647
13648                     // If we're doing this on a heap object or from a 'safe' byref
13649                     // then the result is a safe byref too
13650                     if (isLoadAddress) // load address
13651                     {
13652                         if (fieldInfo.fieldFlags &
13653                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
13654                         {
13655                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13656                             {
13657                                 tiRetVal.SetIsPermanentHomeByRef();
13658                             }
13659                         }
13660                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13661                         {
13662                             // ldflda of a byref is safe if done on a GC object or on a
13663                             // safe byref
13664                             tiRetVal.SetIsPermanentHomeByRef();
13665                         }
13666                     }
13667                 }
13668                 else
13669                 {
13670                     // tiVerificationNeeded is false.
13671                     // Raise InvalidProgramException if static load accesses non-static field
13672                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13673                     {
13674                         BADCODE("static access on an instance field");
13675                     }
13676                 }
13677
13678                 // We are using ldfld/a on a static field. We allow it, but need to evaluate any side effects of obj.
13679                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13680                 {
13681                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13682                     {
13683                         obj = gtUnusedValNode(obj);
13684                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13685                     }
13686                     obj = nullptr;
13687                 }
13688
13689                 /* Preserve 'small' int types */
13690                 if (!varTypeIsSmall(lclTyp))
13691                 {
13692                     lclTyp = genActualType(lclTyp);
13693                 }
13694
13695                 bool usesHelper = false;
13696
13697                 switch (fieldInfo.fieldAccessor)
13698                 {
13699                     case CORINFO_FIELD_INSTANCE:
13700 #ifdef FEATURE_READYTORUN_COMPILER
13701                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13702 #endif
13703                     {
13704                         bool nullcheckNeeded = false;
13705
13706                         obj = impCheckForNullPointer(obj);
13707
13708                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13709                         {
13710                             nullcheckNeeded = true;
13711                         }
13712
13713                         // If the object is a struct, what we really want is
13714                         // for the field to operate on the address of the struct.
13715                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13716                         {
13717                             assert(opcode == CEE_LDFLD && objType != nullptr);
13718
13719                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13720                         }
13721
13722                         /* Create the data member node */
13723                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13724
13725 #ifdef FEATURE_READYTORUN_COMPILER
13726                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13727                         {
13728                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13729                         }
13730 #endif
13731
13732                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13733
13734                         if (fgAddrCouldBeNull(obj))
13735                         {
13736                             op1->gtFlags |= GTF_EXCEPT;
13737                         }
13738
13739                         // If gtFldObj is a BYREF then our target is a value class and
13740                         // it could point anywhere, for example a boxed class static int
13741                         if (obj->gtType == TYP_BYREF)
13742                         {
13743                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13744                         }
13745
13746                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13747                         if (StructHasOverlappingFields(typeFlags))
13748                         {
13749                             op1->gtField.gtFldMayOverlap = true;
13750                         }
13751
13752                         // wrap it in an address-of operator if necessary
13753                         if (isLoadAddress)
13754                         {
13755                             op1 = gtNewOperNode(GT_ADDR,
13756                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13757                         }
13758                         else
13759                         {
13760                             if (compIsForInlining() &&
13761                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13762                                                                                    impInlineInfo->inlArgInfo))
13763                             {
13764                                 impInlineInfo->thisDereferencedFirst = true;
13765                             }
13766                         }
13767                     }
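                        // Rough shape of the trees built above (illustrative only):
                        //     ldfld  T C::f  ->  FIELD(obj, f)            (typed as lclTyp)
                        //     ldflda T C::f  ->  ADDR(FIELD(obj, f))      (TYP_BYREF when obj is a GC type)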
13768                     break;
13769
13770                     case CORINFO_FIELD_STATIC_TLS:
13771 #ifdef _TARGET_X86_
13772                         // Legacy TLS access is implemented as intrinsic on x86 only
13773
13774                         /* Create the data member node */
13775                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13776                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13777
13778                         if (isLoadAddress)
13779                         {
13780                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13781                         }
13782                         break;
13783 #else
13784                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13785
13786                         __fallthrough;
13787 #endif
13788
13789                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13790                     case CORINFO_FIELD_INSTANCE_HELPER:
13791                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13792                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13793                                                clsHnd, nullptr);
13794                         usesHelper = true;
13795                         break;
13796
13797                     case CORINFO_FIELD_STATIC_ADDRESS:
13798                         // Replace static read-only fields with constant if possible
13799                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13800                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13801                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13802                         {
13803                             CorInfoInitClassResult initClassResult =
13804                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13805                                                             impTokenLookupContextHandle);
13806
13807                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13808                             {
13809                                 void** pFldAddr = nullptr;
13810                                 void*  fldAddr =
13811                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13812
13813                                 // We should always be able to access this static's address directly
13814                                 assert(pFldAddr == nullptr);
13815
13816                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13817                                 goto FIELD_DONE;
13818                             }
13819                         }
13820
13821                         __fallthrough;
13822
13823                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13824                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13825                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13826                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13827                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13828                                                          lclTyp);
13829                         break;
13830
13831                     case CORINFO_FIELD_INTRINSIC_ZERO:
13832                     {
13833                         assert(aflags & CORINFO_ACCESS_GET);
13834                         op1 = gtNewIconNode(0, lclTyp);
13835                         goto FIELD_DONE;
13836                     }
13837                     break;
13838
13839                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13840                     {
13841                         assert(aflags & CORINFO_ACCESS_GET);
13842
13843                         LPVOID         pValue;
13844                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13845                         op1                = gtNewStringLiteralNode(iat, pValue);
13846                         goto FIELD_DONE;
13847                     }
13848                     break;
13849
13850                     case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
13851                     {
13852                         assert(aflags & CORINFO_ACCESS_GET);
13853 #if BIGENDIAN
13854                         op1 = gtNewIconNode(0, lclTyp);
13855 #else
13856                         op1                     = gtNewIconNode(1, lclTyp);
13857 #endif
13858                         goto FIELD_DONE;
13859                     }
13860                     break;
13861
13862                     default:
13863                         assert(!"Unexpected fieldAccessor");
13864                 }
13865
13866                 if (!isLoadAddress)
13867                 {
13868
13869                     if (prefixFlags & PREFIX_VOLATILE)
13870                     {
13871                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13872                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13873
13874                         if (!usesHelper)
13875                         {
13876                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13877                                    (op1->OperGet() == GT_OBJ));
13878                             op1->gtFlags |= GTF_IND_VOLATILE;
13879                         }
13880                     }
13881
13882                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13883                     {
13884                         if (!usesHelper)
13885                         {
13886                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13887                                    (op1->OperGet() == GT_OBJ));
13888                             op1->gtFlags |= GTF_IND_UNALIGNED;
13889                         }
13890                     }
13891                 }
13892
13893                 /* Check if the class needs explicit initialization */
13894
13895                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13896                 {
13897                     GenTree* helperNode = impInitClass(&resolvedToken);
13898                     if (compDonotInline())
13899                     {
13900                         return;
13901                     }
13902                     if (helperNode != nullptr)
13903                     {
13904                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13905                     }
13906                 }
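                // Rough shape when a class-init check was required (illustrative only):
                //     COMMA(CALL init-class-helper, <field load or address>)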
13907
13908             FIELD_DONE:
13909                 impPushOnStack(op1, tiRetVal);
13910             }
13911             break;
13912
13913             case CEE_STFLD:
13914             case CEE_STSFLD:
13915             {
13916
13917                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13918
13919                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13920
13921                 /* Get the CP_Fieldref index */
13922
13923                 assertImp(sz == sizeof(unsigned));
13924
13925                 _impResolveToken(CORINFO_TOKENKIND_Field);
13926
13927                 JITDUMP(" %08X", resolvedToken.token);
13928
13929                 int       aflags = CORINFO_ACCESS_SET;
13930                 GenTree*  obj    = nullptr;
13931                 typeInfo* tiObj  = nullptr;
13932                 typeInfo  tiVal;
13933
13934                 /* Pull the value from the stack */
13935                 StackEntry se = impPopStack();
13936                 op2           = se.val;
13937                 tiVal         = se.seTypeInfo;
13938                 clsHnd        = tiVal.GetClassHandle();
13939
13940                 if (opcode == CEE_STFLD)
13941                 {
13942                     tiObj = &impStackTop().seTypeInfo;
13943                     obj   = impPopStack().val;
13944
13945                     if (impIsThis(obj))
13946                     {
13947                         aflags |= CORINFO_ACCESS_THIS;
13948
13949                         // An optimization for Contextful classes:
13950                         // we unwrap the proxy when we have a 'this reference'
13951
13952                         if (info.compUnwrapContextful)
13953                         {
13954                             aflags |= CORINFO_ACCESS_UNWRAP;
13955                         }
13956                     }
13957                 }
13958
13959                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13960
13961                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13962                 // handle
13963                 CorInfoType ciType = fieldInfo.fieldType;
13964                 fieldClsHnd        = fieldInfo.structType;
13965
13966                 lclTyp = JITtype2varType(ciType);
13967
13968                 if (compIsForInlining())
13969                 {
13970                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or
13971                      * a per-inst static? */
13972
13973                     switch (fieldInfo.fieldAccessor)
13974                     {
13975                         case CORINFO_FIELD_INSTANCE_HELPER:
13976                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13977                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13978                         case CORINFO_FIELD_STATIC_TLS:
13979
13980                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13981                             return;
13982
13983                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13984                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13985                             /* We may be able to inline the field accessors in specific instantiations of generic
13986                              * methods */
13987                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13988                             return;
13989
13990                         default:
13991                             break;
13992                     }
13993                 }
13994
13995                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13996
13997                 if (tiVerificationNeeded)
13998                 {
13999                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
14000                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
14001                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
14002                 }
14003                 else
14004                 {
14005                     // tiVerificationNeeded is false.
14006                     // Raise InvalidProgramException if static store accesses non-static field
14007                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14008                     {
14009                         BADCODE("static access on an instance field");
14010                     }
14011                 }
14012
14013                 // We are using stfld on a static field.
14014                 // We allow it, but need to eval any side-effects for obj
14015                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14016                 {
14017                     if (obj->gtFlags & GTF_SIDE_EFFECT)
14018                     {
14019                         obj = gtUnusedValNode(obj);
14020                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14021                     }
14022                     obj = nullptr;
14023                 }
14024
14025                 /* Preserve 'small' int types */
14026                 if (!varTypeIsSmall(lclTyp))
14027                 {
14028                     lclTyp = genActualType(lclTyp);
14029                 }
14030
14031                 switch (fieldInfo.fieldAccessor)
14032                 {
14033                     case CORINFO_FIELD_INSTANCE:
14034 #ifdef FEATURE_READYTORUN_COMPILER
14035                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
14036 #endif
14037                     {
14038                         obj = impCheckForNullPointer(obj);
14039
14040                         /* Create the data member node */
14041                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14042                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14043                         if (StructHasOverlappingFields(typeFlags))
14044                         {
14045                             op1->gtField.gtFldMayOverlap = true;
14046                         }
14047
14048 #ifdef FEATURE_READYTORUN_COMPILER
14049                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14050                         {
14051                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14052                         }
14053 #endif
14054
14055                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14056
14057                         if (fgAddrCouldBeNull(obj))
14058                         {
14059                             op1->gtFlags |= GTF_EXCEPT;
14060                         }
14061
14062                         // If gtFldObj is a BYREF then our target is a value class and
14063                         // it could point anywhere, for example a boxed class static int
14064                         if (obj->gtType == TYP_BYREF)
14065                         {
14066                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
14067                         }
14068
14069                         if (compIsForInlining() &&
14070                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
14071                         {
14072                             impInlineInfo->thisDereferencedFirst = true;
14073                         }
14074                     }
14075                     break;
14076
14077                     case CORINFO_FIELD_STATIC_TLS:
14078 #ifdef _TARGET_X86_
14079                         // Legacy TLS access is implemented as intrinsic on x86 only
14080
14081                         /* Create the data member node */
14082                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14083                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14084
14085                         break;
14086 #else
14087                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14088
14089                         __fallthrough;
14090 #endif
14091
14092                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
14093                     case CORINFO_FIELD_INSTANCE_HELPER:
14094                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14095                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14096                                                clsHnd, op2);
14097                         goto SPILL_APPEND;
14098
14099                     case CORINFO_FIELD_STATIC_ADDRESS:
14100                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14101                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14102                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14103                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14104                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14105                                                          lclTyp);
14106                         break;
14107
14108                     default:
14109                         assert(!"Unexpected fieldAccessor");
14110                 }
14111
14112                 // Create the member assignment, unless we have a struct.
14113                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
14114                 bool deferStructAssign = varTypeIsStruct(lclTyp);
14115
14116                 if (!deferStructAssign)
14117                 {
14118                     if (prefixFlags & PREFIX_VOLATILE)
14119                     {
14120                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14121                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
14122                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14123                         op1->gtFlags |= GTF_IND_VOLATILE;
14124                     }
14125                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14126                     {
14127                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14128                         op1->gtFlags |= GTF_IND_UNALIGNED;
14129                     }
14130
14131                     /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
14132                        trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during
14133                        importation and reads from the union as if it were a long during code generation. Though this
14134                        can potentially read garbage, one can get lucky and have it work correctly.
14135
14136                        This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
14137                        /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
14138                        dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
14139                        it works correctly always.
14140
14141                        Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
14142                        for V4.0.
14143                     */
14144                     CLANG_FORMAT_COMMENT_ANCHOR;
14145
14146 #ifndef _TARGET_64BIT_
14147                     // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
14148                     // generated for ARM as well as x86, so the following IR will be accepted:
14149                     //     *  STMT      void
14150                     //         |  /--*  CNS_INT   int    2
14151                     //         \--*  ASG       long
14152                     //            \--*  CLS_VAR   long
14153
14154                     if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
14155                         varTypeIsLong(op1->TypeGet()))
14156                     {
14157                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
14158                     }
14159 #endif
14160
14161 #ifdef _TARGET_64BIT_
14162                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
14163                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
14164                     {
14165                         op2->gtType = TYP_I_IMPL;
14166                     }
14167                     else
14168                     {
14169                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
14170                         //
14171                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
14172                         {
14173                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
14174                         }
14175                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
14176                         //
14177                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
14178                         {
14179                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
14180                         }
14181                     }
14182 #endif
14183
14184 #if !FEATURE_X87_DOUBLES
14185                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
14186                     // We insert a cast to the dest 'op1' type
14187                     //
14188                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
14189                         varTypeIsFloating(op2->gtType))
14190                     {
14191                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
14192                     }
14193 #endif // !FEATURE_X87_DOUBLES
14194
14195                     op1 = gtNewAssignNode(op1, op2);
14196
14197                     /* Mark the expression as containing an assignment */
14198
14199                     op1->gtFlags |= GTF_ASG;
14200                 }
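                // Rough shape of the non-struct store built above (illustrative only):
                //     ASG(FIELD(obj, f), value)   -- or ASG(<static access>, value)
                // Struct-typed stores are deferred and handled by impAssignStruct below.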
14201
14202                 /* Check if the class needs explicit initialization */
14203
14204                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14205                 {
14206                     GenTree* helperNode = impInitClass(&resolvedToken);
14207                     if (compDonotInline())
14208                     {
14209                         return;
14210                     }
14211                     if (helperNode != nullptr)
14212                     {
14213                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14214                     }
14215                 }
14216
14217                 /* stfld can interfere with value classes (consider the sequence
14218                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
14219                    spill all value class references from the stack. */
14220
14221                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
14222                 {
14223                     assert(tiObj);
14224
14225                     if (impIsValueType(tiObj))
14226                     {
14227                         impSpillEvalStack();
14228                     }
14229                     else
14230                     {
14231                         impSpillValueClasses();
14232                     }
14233                 }
14234
14235                 /* Spill any refs to the same member from the stack */
14236
14237                 impSpillLclRefs((ssize_t)resolvedToken.hField);
14238
14239                 /* stsfld also interferes with indirect accesses (for aliased
14240                    statics) and calls. But we don't need to spill other statics
14241                    as we have explicitly spilled this particular static field. */
14242
14243                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
14244
14245                 if (deferStructAssign)
14246                 {
14247                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
14248                 }
14249             }
14250                 goto APPEND;
14251
14252             case CEE_NEWARR:
14253             {
14254
14255                 /* Get the class type index operand */
14256
14257                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
14258
14259                 JITDUMP(" %08X", resolvedToken.token);
14260
14261                 if (!opts.IsReadyToRun())
14262                 {
14263                     // Need to restore array classes before creating array objects on the heap
14264                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14265                     if (op1 == nullptr)
14266                     { // compDonotInline()
14267                         return;
14268                     }
14269                 }
14270
14271                 if (tiVerificationNeeded)
14272                 {
14273                     // As per ECMA, the 'numElems' operand can be either an int32 or a native int.
14274                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
14275
14276                     CORINFO_CLASS_HANDLE elemTypeHnd;
14277                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
14278                     Verify(elemTypeHnd == nullptr ||
14279                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
14280                            "array of byref-like type");
14281                 }
14282
14283                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14284
14285                 accessAllowedResult =
14286                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14287                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14288
14289                 /* Form the arglist: array class handle, size */
14290                 op2 = impPopStack().val;
14291                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14292
14293 #ifdef FEATURE_READYTORUN_COMPILER
14294                 if (opts.IsReadyToRun())
14295                 {
14296                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
14297                                                     gtNewArgList(op2));
14298                     usingReadyToRunHelper = (op1 != nullptr);
14299
14300                     if (!usingReadyToRunHelper)
14301                     {
14302                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14303                         // and the newarr call with a single call to a dynamic R2R cell that will:
14304                         //      1) Load the context
14305                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14306                         //      3) Allocate the new array
14307                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14308
14309                         // Need to restore array classes before creating array objects on the heap
14310                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14311                         if (op1 == nullptr)
14312                         { // compDonotInline()
14313                             return;
14314                         }
14315                     }
14316                 }
14317
14318                 if (!usingReadyToRunHelper)
14319 #endif
14320                 {
14321                     args = gtNewArgList(op1, op2);
14322
14323                     /* Create a call to 'new' */
14324
14325                     // Note that this only works for shared generic code because the same helper is used for all
14326                     // reference array types
14327                     op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
14328                 }
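                // Illustrative shape of the non-R2R allocation (the specific helper comes from
                // getNewArrHelper for this array class):
                //     CALL new-array-helper(arrayClsHnd, numElems)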
14329
14330                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
14331
14332                 /* Remember that this basic block contains 'new' of a single-dimensional (SD) array */
14333
14334                 block->bbFlags |= BBF_HAS_NEWARRAY;
14335                 optMethodFlags |= OMF_HAS_NEWARRAY;
14336
14337                 /* Push the result of the call on the stack */
14338
14339                 impPushOnStack(op1, tiRetVal);
14340
14341                 callTyp = TYP_REF;
14342             }
14343             break;
14344
14345             case CEE_LOCALLOC:
14346                 if (tiVerificationNeeded)
14347                 {
14348                     Verify(false, "bad opcode");
14349                 }
14350
14351                 // We don't allow locallocs inside handlers
14352                 if (block->hasHndIndex())
14353                 {
14354                     BADCODE("Localloc can't be inside handler");
14355                 }
14356
14357                 setNeedsGSSecurityCookie();
14358
14359                 // Get the size to allocate
14360
14361                 op2 = impPopStack().val;
14362                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14363
14364                 if (verCurrentState.esStackDepth != 0)
14365                 {
14366                     BADCODE("Localloc can only be used when the stack is empty");
14367                 }
14368
14369                 // If the localloc is not in a loop and its size is a small constant,
14370                 // create a new local var of TYP_BLK and return its address.
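                // For illustration (hypothetical C# source): a constant-sized allocation such as
                //     byte* p = stackalloc byte[32];
                // in a block with no backward jump is rewritten below into the address of a
                // 32-byte TYP_BLK local instead of a GT_LCLHEAP node.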
14371                 {
14372                     bool convertedToLocal = false;
14373
14374                     // Need to aggressively fold here, as even fixed-size locallocs
14375                     // will have casts in the way.
14376                     op2 = gtFoldExpr(op2);
14377
14378                     if (op2->IsIntegralConst())
14379                     {
14380                         const ssize_t allocSize = op2->AsIntCon()->IconValue();
14381
14382                         if (allocSize == 0)
14383                         {
14384                             // Result is nullptr
14385                             JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
14386                             op1              = gtNewIconNode(0, TYP_I_IMPL);
14387                             convertedToLocal = true;
14388                         }
14389                         else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
14390                         {
14391                             // Get the size threshold for local conversion
14392                             ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
14393
14394 #ifdef DEBUG
14395                             // Optionally allow this to be modified
14396                             maxSize = JitConfig.JitStackAllocToLocalSize();
14397 #endif // DEBUG
14398
14399                             if (allocSize <= maxSize)
14400                             {
14401                                 const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
14402                                 JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
14403                                         stackallocAsLocal);
14404                                 lvaTable[stackallocAsLocal].lvType           = TYP_BLK;
14405                                 lvaTable[stackallocAsLocal].lvExactSize      = (unsigned)allocSize;
14406                                 lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
14407                                 op1                      = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
14408                                 op1                      = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
14409                                 convertedToLocal         = true;
14410                                 compGSReorderStackLayout = true;
14411                             }
14412                         }
14413                     }
14414
14415                     if (!convertedToLocal)
14416                     {
14417                         // Bail out if inlining and the localloc was not converted.
14418                         //
14419                         // Note we might consider allowing the inline, if the call
14420                         // site is not in a loop.
14421                         if (compIsForInlining())
14422                         {
14423                             InlineObservation obs = op2->IsIntegralConst()
14424                                                         ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
14425                                                         : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
14426                             compInlineResult->NoteFatal(obs);
14427                             return;
14428                         }
14429
14430                         op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14431                         // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14432                         op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14433
14434                         /* The FP register may not be back to the original value at the end
14435                            of the method, even if the frame size is 0, as localloc may
14436                            have modified it. So we will HAVE to reset it */
14437                         compLocallocUsed = true;
14438                     }
14439                     else
14440                     {
14441                         compLocallocOptimized = true;
14442                     }
14443                 }
14444
14445                 impPushOnStack(op1, tiRetVal);
14446                 break;
14447
14448             case CEE_ISINST:
14449             {
14450                 /* Get the type token */
14451                 assertImp(sz == sizeof(unsigned));
14452
14453                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14454
14455                 JITDUMP(" %08X", resolvedToken.token);
14456
14457                 if (!opts.IsReadyToRun())
14458                 {
14459                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14460                     if (op2 == nullptr)
14461                     { // compDonotInline()
14462                         return;
14463                     }
14464                 }
14465
14466                 if (tiVerificationNeeded)
14467                 {
14468                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14469                     // Even if this is a value class, we know it is boxed.
14470                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14471                 }
14472                 accessAllowedResult =
14473                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14474                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14475
14476                 op1 = impPopStack().val;
14477
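                // Try to resolve the isinst test at jit time (for example, when the operand's type
                // is known exactly); only if that fails do we expand to a runtime check below.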
14478                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
14479
14480                 if (optTree != nullptr)
14481                 {
14482                     impPushOnStack(optTree, tiRetVal);
14483                 }
14484                 else
14485                 {
14486
14487 #ifdef FEATURE_READYTORUN_COMPILER
14488                     if (opts.IsReadyToRun())
14489                     {
14490                         GenTreeCall* opLookup =
14491                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14492                                                       gtNewArgList(op1));
14493                         usingReadyToRunHelper = (opLookup != nullptr);
14494                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
14495
14496                         if (!usingReadyToRunHelper)
14497                         {
14498                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14499                             // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14500                             //      1) Load the context
14501                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
14502                             //      stub
14503                             //      3) Perform the 'is instance' check on the input object
14504                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14505
14506                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14507                             if (op2 == nullptr)
14508                             { // compDonotInline()
14509                                 return;
14510                             }
14511                         }
14512                     }
14513
14514                     if (!usingReadyToRunHelper)
14515 #endif
14516                     {
14517                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
14518                     }
14519                     if (compDonotInline())
14520                     {
14521                         return;
14522                     }
14523
14524                     impPushOnStack(op1, tiRetVal);
14525                 }
14526                 break;
14527             }
14528
14529             case CEE_REFANYVAL:
14530
14531                 // get the class handle and make a ICON node out of it
14532
14533                 _impResolveToken(CORINFO_TOKENKIND_Class);
14534
14535                 JITDUMP(" %08X", resolvedToken.token);
14536
14537                 op2 = impTokenToHandle(&resolvedToken);
14538                 if (op2 == nullptr)
14539                 { // compDonotInline()
14540                     return;
14541                 }
14542
14543                 if (tiVerificationNeeded)
14544                 {
14545                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14546                            "need refany");
14547                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
14548                 }
14549
14550                 op1 = impPopStack().val;
14551                 // make certain it is normalized;
14552                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14553
14554                 // Call helper GETREFANY(classHandle, op1);
14555                 args = gtNewArgList(op2, op1);
14556                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
14557
14558                 impPushOnStack(op1, tiRetVal);
14559                 break;
14560
14561             case CEE_REFANYTYPE:
14562
14563                 if (tiVerificationNeeded)
14564                 {
14565                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14566                            "need refany");
14567                 }
14568
14569                 op1 = impPopStack().val;
14570
14571                 // make certain it is normalized;
14572                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14573
14574                 if (op1->gtOper == GT_OBJ)
14575                 {
14576                     // Get the address of the refany
14577                     op1 = op1->gtOp.gtOp1;
14578
14579                     // Fetch the type handle from the 'type' field of the refany
14580                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14581                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
14582                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
14583                 }
14584                 else
14585                 {
14586                     assertImp(op1->gtOper == GT_MKREFANY);
14587
14588                     // The pointer may have side-effects
14589                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
14590                     {
14591                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14592 #ifdef DEBUG
14593                         impNoteLastILoffs();
14594 #endif
14595                     }
14596
14597                     // We already have the class handle
14598                     op1 = op1->gtOp.gtOp2;
14599                 }
14600
14601                 // convert native TypeHandle to RuntimeTypeHandle
14602                 {
14603                     GenTreeArgList* helperArgs = gtNewArgList(op1);
14604
14605                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, helperArgs);
14606
14607                     // The handle struct is returned in a register
14608                     op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14609
14610                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
14611                 }
14612
14613                 impPushOnStack(op1, tiRetVal);
14614                 break;
14615
14616             case CEE_LDTOKEN:
14617             {
14618                 /* Get the Class index */
14619                 assertImp(sz == sizeof(unsigned));
14620                 lastLoadToken = codeAddr;
14621                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
14622
14623                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
14624
14625                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14626                 if (op1 == nullptr)
14627                 { // compDonotInline()
14628                     return;
14629                 }
14630
14631                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
14632                 assert(resolvedToken.hClass != nullptr);
14633
14634                 if (resolvedToken.hMethod != nullptr)
14635                 {
14636                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
14637                 }
14638                 else if (resolvedToken.hField != nullptr)
14639                 {
14640                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
14641                 }
14642
14643                 GenTreeArgList* helperArgs = gtNewArgList(op1);
14644
14645                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
14646
14647                 // The handle struct is returned in a register
14648                 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14649
14650                 tiRetVal = verMakeTypeInfo(tokenType);
14651                 impPushOnStack(op1, tiRetVal);
14652             }
14653             break;
14654
14655             case CEE_UNBOX:
14656             case CEE_UNBOX_ANY:
14657             {
14658                 /* Get the Class index */
14659                 assertImp(sz == sizeof(unsigned));
14660
14661                 _impResolveToken(CORINFO_TOKENKIND_Class);
14662
14663                 JITDUMP(" %08X", resolvedToken.token);
14664
14665                 BOOL runtimeLookup;
14666                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
14667                 if (op2 == nullptr)
14668                 {
14669                     assert(compDonotInline());
14670                     return;
14671                 }
14672
14673                 // Run this always so we can get access exceptions even with SkipVerification.
14674                 accessAllowedResult =
14675                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14676                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14677
14678                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
14679                 {
14680                     if (tiVerificationNeeded)
14681                     {
14682                         typeInfo tiUnbox = impStackTop().seTypeInfo;
14683                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
14684                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14685                         tiRetVal.NormaliseForStack();
14686                     }
14687                     JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
14688                     op1 = impPopStack().val;
14689                     goto CASTCLASS;
14690                 }
14691
14692                 /* Pop the object and create the unbox helper call */
14693                 /* You might think that for UNBOX_ANY we need to push a different */
14694                 /* (non-byref) type, but here we're making the tiRetVal that is used */
14695                 /* for the intermediate pointer which we then transfer onto the OBJ */
14696                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
14697                 if (tiVerificationNeeded)
14698                 {
14699                     typeInfo tiUnbox = impStackTop().seTypeInfo;
14700                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
14701
14702                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14703                     Verify(tiRetVal.IsValueClass(), "not value class");
14704                     tiRetVal.MakeByRef();
14705
14706                     // We always come from an objref, so this is a safe byref
14707                     tiRetVal.SetIsPermanentHomeByRef();
14708                     tiRetVal.SetIsReadonlyByRef();
14709                 }
14710
14711                 op1 = impPopStack().val;
14712                 assertImp(op1->gtType == TYP_REF);
14713
14714                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
14715                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
14716
14717                 // Check legality and profitability of inline expansion for unboxing.
14718                 const bool canExpandInline    = (helper == CORINFO_HELP_UNBOX);
14719                 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
14720
14721                 if (canExpandInline && shouldExpandInline)
14722                 {
14723                     JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
14724                     // we are doing normal unboxing
14725                     // inline the common case of the unbox helper
14726                     // UNBOX(exp) morphs into
14727                     // clone = pop(exp);
14728                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14729                     // push(clone + TARGET_POINTER_SIZE)
14730                     //
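                    // Sketch of the trees built below (not the exact dump format):
                    //     QMARK(EQ(IND(clone), typeToken), COLON(NOP, CALL unbox(typeToken, clone)))
                    // is appended as a side-effecting statement, and ADD(clone, TARGET_POINTER_SIZE)
                    // is what gets pushed as the byref to the boxed payload.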
14731                     GenTree* cloneOperand;
14732                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14733                                        nullptr DEBUGARG("inline UNBOX clone1"));
14734                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14735
14736                     GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14737
14738                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14739                                        nullptr DEBUGARG("inline UNBOX clone2"));
14740                     op2 = impTokenToHandle(&resolvedToken);
14741                     if (op2 == nullptr)
14742                     { // compDonotInline()
14743                         return;
14744                     }
14745                     args = gtNewArgList(op2, op1);
14746                     op1  = gtNewHelperCallNode(helper, TYP_VOID, args);
14747
14748                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14749                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14750                     condBox->gtFlags |= GTF_RELOP_QMARK;
14751
14752                     // QMARK nodes cannot reside on the evaluation stack. Because there
14753                     // may be other trees on the evaluation stack that side-effect the
14754                     // sources of the UNBOX operation we must spill the stack.
14755
14756                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14757
14758                     // Create the address-expression to reference past the object header
14759                     // to the beginning of the value-type. Today this means adjusting
14760                     // past the base of the object's vtable field, which is pointer sized.
14761
14762                     op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
14763                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14764                 }
14765                 else
14766                 {
14767                     JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
14768                             canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
14769
14770                     // Don't optimize, just call the helper and be done with it
14771                     args = gtNewArgList(op2, op1);
14772                     op1 =
14773                         gtNewHelperCallNode(helper,
14774                                             (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
14775                 }
14776
14777                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
14778                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
14779                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
14780                        );
14781
14782                 /*
14783                   ----------------------------------------------------------------------
14784                   | \ helper  |                         |                              |
14785                   |   \       |                         |                              |
14786                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14787                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14788                   | opcode  \ |                         |                              |
14789                   |---------------------------------------------------------------------
14790                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14791                   |           |                         | push the BYREF to this local |
14792                   |---------------------------------------------------------------------
14793                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14794                   |           | the BYREF               | For Linux when the           |
14795                   |           |                         |  struct is returned in two   |
14796                   |           |                         |  registers create a temp     |
14797                   |           |                         |  which address is passed to  |
14798                   |           |                         |  the unbox_nullable helper.  |
14799                   |---------------------------------------------------------------------
14800                 */
14801
14802                 if (opcode == CEE_UNBOX)
14803                 {
14804                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14805                     {
14806                         // Unbox nullable helper returns a struct type.
14807                         // We need to spill it to a temp so that we can take the address of it.
14808                         // Here we need an unsafe value cls check, since the address of the struct is taken to be
14809                         // used further along and could potentially be exploitable.
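                        // Sketch of the resulting tree shape:
                        //     COMMA(ASG(tmp, call CORINFO_HELP_UNBOX_NULLABLE), ADDR(tmp))
                        // so a TYP_BYREF to the spilled value is what remains on the stack.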
14810
14811                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14812                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14813
14814                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14815                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14816                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14817
14818                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14819                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14820                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14821                     }
14822
14823                     assert(op1->gtType == TYP_BYREF);
14824                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14825                 }
14826                 else
14827                 {
14828                     assert(opcode == CEE_UNBOX_ANY);
14829
14830                     if (helper == CORINFO_HELP_UNBOX)
14831                     {
14832                         // Normal unbox helper returns a TYP_BYREF.
14833                         impPushOnStack(op1, tiRetVal);
14834                         oper = GT_OBJ;
14835                         goto OBJ;
14836                     }
14837
14838                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14839
14840 #if FEATURE_MULTIREG_RET
14841
14842                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14843                     {
14844                         // Unbox nullable helper returns a TYP_STRUCT.
14845                         // For the multi-reg case we need to spill it to a temp so that
14846                         // we can pass the address to the unbox_nullable jit helper.
14847
14848                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14849                         lvaTable[tmp].lvIsMultiRegArg = true;
14850                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14851
14852                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14853                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14854                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14855
14856                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14857                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14858                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14859
14860                         // In this case the return value of the unbox helper is TYP_BYREF.
14861                         // Make sure the right type is placed on the operand type stack.
14862                         impPushOnStack(op1, tiRetVal);
14863
14864                         // Load the struct.
14865                         oper = GT_OBJ;
14866
14867                         assert(op1->gtType == TYP_BYREF);
14868                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14869
14870                         goto OBJ;
14871                     }
14872                     else
14873
14874 #endif // FEATURE_MULTIREG_RET
14875
14876                     {
14877                         // If the struct is not register-returnable, it has been materialized in the RetBuf.
14878                         assert(op1->gtType == TYP_STRUCT);
14879                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14880                         assert(tiRetVal.IsValueClass());
14881                     }
14882                 }
14883
14884                 impPushOnStack(op1, tiRetVal);
14885             }
14886             break;
14887
14888             case CEE_BOX:
14889             {
14890                 /* Get the Class index */
14891                 assertImp(sz == sizeof(unsigned));
14892
14893                 _impResolveToken(CORINFO_TOKENKIND_Box);
14894
14895                 JITDUMP(" %08X", resolvedToken.token);
14896
14897                 if (tiVerificationNeeded)
14898                 {
14899                     typeInfo tiActual = impStackTop().seTypeInfo;
14900                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14901
14902                     Verify(verIsBoxable(tiBox), "boxable type expected");
14903
14904                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14905                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14906                            "boxed type has unsatisfied class constraints");
14907
14908                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14909
14910                     // Observation: the following code introduces a boxed value class on the stack, but,
14911                     // according to the ECMA spec, one would simply expect: tiRetVal =
14912                     // typeInfo(TI_REF,impGetObjectClass());
14913
14914                     // Push the result back on the stack;
14915                     // even if clsHnd is a value class we want the TI_REF.
14916                     // We call back to the EE to find out what type we should push (for nullable<T> we push T).
14917                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14918                 }
14919
14920                 accessAllowedResult =
14921                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14922                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14923
14924                 // Note BOX can be used on things that are not value classes, in which
14925                 // case we get a NOP.  However the verifier's view of the type on the
14926                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14927                 if (!eeIsValueClass(resolvedToken.hClass))
14928                 {
14929                     JITDUMP("\n Importing BOX(refClass) as NOP\n");
14930                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14931                     break;
14932                 }
14933
14934                 // Look ahead for unbox.any
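                // For illustration (hypothetical IL): a pair such as
                //     box       !!T
                //     unbox.any !!T
                // leaves the evaluation stack unchanged when the two tokens are provably the same
                // type, so both instructions can be skipped.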
14935                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14936                 {
14937                     CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14938
14939                     impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14940
14941                     // See if the resolved tokens describe types that are equal.
14942                     const TypeCompareState compare =
14943                         info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
14944
14945                     // If so, box/unbox.any is a nop.
14946                     if (compare == TypeCompareState::Must)
14947                     {
14948                         JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
14949                         // Skip the next unbox.any instruction
14950                         sz += sizeof(mdToken) + 1;
14951                         break;
14952                     }
14953                 }
14954
14955                 impImportAndPushBox(&resolvedToken);
14956                 if (compDonotInline())
14957                 {
14958                     return;
14959                 }
14960             }
14961             break;
14962
14963             case CEE_SIZEOF:
14964
14965                 /* Get the Class index */
14966                 assertImp(sz == sizeof(unsigned));
14967
14968                 _impResolveToken(CORINFO_TOKENKIND_Class);
14969
14970                 JITDUMP(" %08X", resolvedToken.token);
14971
14972                 if (tiVerificationNeeded)
14973                 {
14974                     tiRetVal = typeInfo(TI_INT);
14975                 }
14976
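                // sizeof is a jit-time constant: ask the EE for the class size and push it as an
                // integer constant node.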
14977                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14978                 impPushOnStack(op1, tiRetVal);
14979                 break;
14980
14981             case CEE_CASTCLASS:
14982
14983                 /* Get the Class index */
14984
14985                 assertImp(sz == sizeof(unsigned));
14986
14987                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14988
14989                 JITDUMP(" %08X", resolvedToken.token);
14990
14991                 if (!opts.IsReadyToRun())
14992                 {
14993                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14994                     if (op2 == nullptr)
14995                     { // compDonotInline()
14996                         return;
14997                     }
14998                 }
14999
15000                 if (tiVerificationNeeded)
15001                 {
15002                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
15003                     // box it
15004                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
15005                 }
15006
15007                 accessAllowedResult =
15008                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15009                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15010
15011                 op1 = impPopStack().val;
15012
15013             /* Pop the object and create the 'checked cast' helper call */
15014
15015             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
15016             // and op2 to contain code that creates the type handle corresponding to typeRef
15017             CASTCLASS:
15018             {
15019                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
15020
15021                 if (optTree != nullptr)
15022                 {
15023                     impPushOnStack(optTree, tiRetVal);
15024                 }
15025                 else
15026                 {
15027
15028 #ifdef FEATURE_READYTORUN_COMPILER
15029                     if (opts.IsReadyToRun())
15030                     {
15031                         GenTreeCall* opLookup =
15032                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
15033                                                       gtNewArgList(op1));
15034                         usingReadyToRunHelper = (opLookup != nullptr);
15035                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
15036
15037                         if (!usingReadyToRunHelper)
15038                         {
15039                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
15040                             // and the chkcastany call with a single call to a dynamic R2R cell that will:
15041                             //      1) Load the context
15042                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
15043                             //      stub
15044                             //      3) Check the object on the stack for the type-cast
15045                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15046
15047                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15048                             if (op2 == nullptr)
15049                             { // compDonotInline()
15050                                 return;
15051                             }
15052                         }
15053                     }
15054
15055                     if (!usingReadyToRunHelper)
15056 #endif
15057                     {
15058                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
15059                     }
15060                     if (compDonotInline())
15061                     {
15062                         return;
15063                     }
15064
15065                     /* Push the result back on the stack */
15066                     impPushOnStack(op1, tiRetVal);
15067                 }
15068             }
15069             break;
15070
15071             case CEE_THROW:
15072
15073                 if (compIsForInlining())
15074                 {
15075                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15076                     // TODO: Will this be too strict, given that we will inline many basic blocks?
15077                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15078
15079                     /* Do we have just the exception on the stack? */
15080
15081                     if (verCurrentState.esStackDepth != 1)
15082                     {
15083                         /* if not, just don't inline the method */
15084
15085                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
15086                         return;
15087                     }
15088                 }
15089
15090                 if (tiVerificationNeeded)
15091                 {
15092                     tiRetVal = impStackTop().seTypeInfo;
15093                     Verify(tiRetVal.IsObjRef(), "object ref expected");
15094                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15095                     {
15096                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
15097                     }
15098                 }
15099
15100                 block->bbSetRunRarely(); // any block with a throw is rare
15101                 /* Pop the exception object and create the 'throw' helper call */
15102
15103                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
15104
15105             EVAL_APPEND:
15106                 if (verCurrentState.esStackDepth > 0)
15107                 {
15108                     impEvalSideEffects();
15109                 }
15110
15111                 assert(verCurrentState.esStackDepth == 0);
15112
15113                 goto APPEND;
15114
15115             case CEE_RETHROW:
15116
15117                 assert(!compIsForInlining());
15118
15119                 if (info.compXcptnsCount == 0)
15120                 {
15121                     BADCODE("rethrow outside catch");
15122                 }
15123
15124                 if (tiVerificationNeeded)
15125                 {
15126                     Verify(block->hasHndIndex(), "rethrow outside catch");
15127                     if (block->hasHndIndex())
15128                     {
15129                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
15130                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
15131                         if (HBtab->HasFilter())
15132                         {
15133                             // we better be in the handler clause part, not the filter part
15134                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
15135                                    "rethrow in filter");
15136                         }
15137                     }
15138                 }
15139
15140                 /* Create the 'rethrow' helper call */
15141
15142                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
15143
15144                 goto EVAL_APPEND;
15145
15146             case CEE_INITOBJ:
15147
15148                 assertImp(sz == sizeof(unsigned));
15149
15150                 _impResolveToken(CORINFO_TOKENKIND_Class);
15151
15152                 JITDUMP(" %08X", resolvedToken.token);
15153
15154                 if (tiVerificationNeeded)
15155                 {
15156                     typeInfo tiTo    = impStackTop().seTypeInfo;
15157                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15158
15159                     Verify(tiTo.IsByRef(), "byref expected");
15160                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15161
15162                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15163                            "type operand incompatible with type of address");
15164                 }
15165
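                // initobj zero-fills the value at the destination address: build a block store of
                // constant 0 over getClassSize(hClass) bytes.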
15166                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
15167                 op2  = gtNewIconNode(0);                                     // Value
15168                 op1  = impPopStack().val;                                    // Dest
15169                 op1  = gtNewBlockVal(op1, size);
15170                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15171                 goto SPILL_APPEND;
15172
15173             case CEE_INITBLK:
15174
15175                 if (tiVerificationNeeded)
15176                 {
15177                     Verify(false, "bad opcode");
15178                 }
15179
15180                 op3 = impPopStack().val; // Size
15181                 op2 = impPopStack().val; // Value
15182                 op1 = impPopStack().val; // Dest
15183
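                // A constant size yields a fixed-size GT_BLK store; otherwise build a GT_DYN_BLK
                // whose size operand is evaluated at run time (and pass size == 0 to gtNewBlkOpNode).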
15184                 if (op3->IsCnsIntOrI())
15185                 {
15186                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15187                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15188                 }
15189                 else
15190                 {
15191                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15192                     size = 0;
15193                 }
15194                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15195
15196                 goto SPILL_APPEND;
15197
15198             case CEE_CPBLK:
15199
15200                 if (tiVerificationNeeded)
15201                 {
15202                     Verify(false, "bad opcode");
15203                 }
15204                 op3 = impPopStack().val; // Size
15205                 op2 = impPopStack().val; // Src
15206                 op1 = impPopStack().val; // Dest
15207
15208                 if (op3->IsCnsIntOrI())
15209                 {
15210                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15211                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15212                 }
15213                 else
15214                 {
15215                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15216                     size = 0;
15217                 }
15218                 if (op2->OperGet() == GT_ADDR)
15219                 {
15220                     op2 = op2->gtOp.gtOp1;
15221                 }
15222                 else
15223                 {
15224                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
15225                 }
15226
15227                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
15228                 goto SPILL_APPEND;
15229
15230             case CEE_CPOBJ:
15231
15232                 assertImp(sz == sizeof(unsigned));
15233
15234                 _impResolveToken(CORINFO_TOKENKIND_Class);
15235
15236                 JITDUMP(" %08X", resolvedToken.token);
15237
15238                 if (tiVerificationNeeded)
15239                 {
15240                     typeInfo tiFrom  = impStackTop().seTypeInfo;
15241                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
15242                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15243
15244                     Verify(tiFrom.IsByRef(), "expected byref source");
15245                     Verify(tiTo.IsByRef(), "expected byref destination");
15246
15247                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
15248                            "type of source address incompatible with type operand");
15249                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15250                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15251                            "type operand incompatible with type of destination address");
15252                 }
15253
15254                 if (!eeIsValueClass(resolvedToken.hClass))
15255                 {
15256                     op1 = impPopStack().val; // address to load from
15257
15258                     impBashVarAddrsToI(op1);
15259
15260                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
15261
15262                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
15263                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
15264
15265                     impPushOnStack(op1, typeInfo());
15266                     opcode = CEE_STIND_REF;
15267                     lclTyp = TYP_REF;
15268                     goto STIND_POST_VERIFY;
15269                 }
15270
15271                 op2 = impPopStack().val; // Src
15272                 op1 = impPopStack().val; // Dest
15273                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
15274                 goto SPILL_APPEND;
15275
15276             case CEE_STOBJ:
15277             {
15278                 assertImp(sz == sizeof(unsigned));
15279
15280                 _impResolveToken(CORINFO_TOKENKIND_Class);
15281
15282                 JITDUMP(" %08X", resolvedToken.token);
15283
15284                 if (eeIsValueClass(resolvedToken.hClass))
15285                 {
15286                     lclTyp = TYP_STRUCT;
15287                 }
15288                 else
15289                 {
15290                     lclTyp = TYP_REF;
15291                 }
15292
15293                 if (tiVerificationNeeded)
15294                 {
15295
15296                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
15297
15298                     // Make sure we have a good looking byref
15299                     Verify(tiPtr.IsByRef(), "pointer not byref");
15300                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
15301                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
15302                     {
15303                         compUnsafeCastUsed = true;
15304                     }
15305
15306                     typeInfo ptrVal = DereferenceByRef(tiPtr);
15307                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
15308
15309                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
15310                     {
15311                         Verify(false, "type of value incompatible with type operand");
15312                         compUnsafeCastUsed = true;
15313                     }
15314
15315                     if (!tiCompatibleWith(argVal, ptrVal, false))
15316                     {
15317                         Verify(false, "type operand incompatible with type of address");
15318                         compUnsafeCastUsed = true;
15319                     }
15320                 }
15321                 else
15322                 {
15323                     compUnsafeCastUsed = true;
15324                 }
15325
15326                 if (lclTyp == TYP_REF)
15327                 {
15328                     opcode = CEE_STIND_REF;
15329                     goto STIND_POST_VERIFY;
15330                 }
15331
15332                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15333                 if (impIsPrimitive(jitTyp))
15334                 {
15335                     lclTyp = JITtype2varType(jitTyp);
15336                     goto STIND_POST_VERIFY;
15337                 }
15338
15339                 op2 = impPopStack().val; // Value
15340                 op1 = impPopStack().val; // Ptr
15341
15342                 assertImp(varTypeIsStruct(op2));
15343
15344                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15345
15346                 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
15347                 {
15348                     op1->gtFlags |= GTF_BLK_UNALIGNED;
15349                 }
15350                 goto SPILL_APPEND;
15351             }
15352
15353             case CEE_MKREFANY:
15354
15355                 assert(!compIsForInlining());
15356
15357                 // Being lazy here. Refanys are tricky in terms of gc tracking.
15358                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
15359
15360                 JITDUMP("disabling struct promotion because of mkrefany\n");
15361                 fgNoStructPromotion = true;
15362
15363                 oper = GT_MKREFANY;
15364                 assertImp(sz == sizeof(unsigned));
15365
15366                 _impResolveToken(CORINFO_TOKENKIND_Class);
15367
15368                 JITDUMP(" %08X", resolvedToken.token);
15369
15370                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15371                 if (op2 == nullptr)
15372                 { // compDonotInline()
15373                     return;
15374                 }
15375
15376                 if (tiVerificationNeeded)
15377                 {
15378                     typeInfo tiPtr   = impStackTop().seTypeInfo;
15379                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15380
15381                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
15382                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
15383                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
15384                 }
15385
15386                 accessAllowedResult =
15387                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15388                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15389
15390                 op1 = impPopStack().val;
15391
15392                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
15393                 // But JIT32 allowed it, so we continue to allow it.
15394                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
15395
15396                 // MKREFANY returns a struct.  op2 is the class token.
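                // The TypedReference it produces carries both the data address (op1) and the type
                // handle created from op2.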
15397                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
15398
15399                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
15400                 break;
15401
15402             case CEE_LDOBJ:
15403             {
15404                 oper = GT_OBJ;
15405                 assertImp(sz == sizeof(unsigned));
15406
15407                 _impResolveToken(CORINFO_TOKENKIND_Class);
15408
15409                 JITDUMP(" %08X", resolvedToken.token);
15410
15411             OBJ:
15412
15413                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15414
15415                 if (tiVerificationNeeded)
15416                 {
15417                     typeInfo tiPtr = impStackTop().seTypeInfo;
15418
15419                     // Make sure we have a byref
15420                     if (!tiPtr.IsByRef())
15421                     {
15422                         Verify(false, "pointer not byref");
15423                         compUnsafeCastUsed = true;
15424                     }
15425                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
15426
15427                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
15428                     {
15429                         Verify(false, "type of address incompatible with type operand");
15430                         compUnsafeCastUsed = true;
15431                     }
15432                     tiRetVal.NormaliseForStack();
15433                 }
15434                 else
15435                 {
15436                     compUnsafeCastUsed = true;
15437                 }
15438
15439                 if (eeIsValueClass(resolvedToken.hClass))
15440                 {
15441                     lclTyp = TYP_STRUCT;
15442                 }
15443                 else
15444                 {
15445                     lclTyp = TYP_REF;
15446                     opcode = CEE_LDIND_REF;
15447                     goto LDIND_POST_VERIFY;
15448                 }
15449
15450                 op1 = impPopStack().val;
15451
15452                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
15453
15454                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15455                 if (impIsPrimitive(jitTyp))
15456                 {
15457                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
15458
15459                     // Could point anywhere, for example a boxed class static int
15460                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15461                     assertImp(varTypeIsArithmetic(op1->gtType));
15462                 }
15463                 else
15464                 {
15465                     // OBJ returns a struct
15466                     // and takes an inline argument which is the class token of the loaded obj
15467                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
15468                 }
15469                 op1->gtFlags |= GTF_EXCEPT;
15470
15471                 if (prefixFlags & PREFIX_UNALIGNED)
15472                 {
15473                     op1->gtFlags |= GTF_IND_UNALIGNED;
15474                 }
15475
15476                 impPushOnStack(op1, tiRetVal);
15477                 break;
15478             }
15479
15480             case CEE_LDLEN:
15481                 if (tiVerificationNeeded)
15482                 {
15483                     typeInfo tiArray = impStackTop().seTypeInfo;
15484                     Verify(verIsSDArray(tiArray), "bad array");
15485                     tiRetVal = typeInfo(TI_INT);
15486                 }
15487
15488                 op1 = impPopStack().val;
15489                 if (!opts.MinOpts() && !opts.compDbgCode)
15490                 {
15491                     /* Use the GT_ARR_LENGTH operator so range-check opts see this */
15492                     GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
15493
15494                     /* Mark the block as containing a length expression */
15495
15496                     if (op1->gtOper == GT_LCL_VAR)
15497                     {
15498                         block->bbFlags |= BBF_HAS_IDX_LEN;
15499                     }
15500
15501                     op1 = arrLen;
15502                 }
15503                 else
15504                 {
15505                     /* Create the expression "*(array_addr + ArrLenOffs)" */
15506                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15507                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
15508                     op1 = gtNewIndir(TYP_INT, op1);
15509                     op1->gtFlags |= GTF_IND_ARR_LEN;
15510                 }
15511
15512                 /* Push the result back on the stack */
15513                 impPushOnStack(op1, tiRetVal);
15514                 break;
15515
15516             case CEE_BREAK:
15517                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
15518                 goto SPILL_APPEND;
15519
15520             case CEE_NOP:
15521                 if (opts.compDbgCode)
15522                 {
15523                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
15524                     goto SPILL_APPEND;
15525                 }
15526                 break;
15527
15528             /******************************** NYI *******************************/
15529
15530             case 0xCC:
15531                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
15532
15533             case CEE_ILLEGAL:
15534             case CEE_MACRO_END:
15535
15536             default:
15537                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
15538         }
15539
15540         codeAddr += sz;
15541         prevOpcode = opcode;
15542
15543         prefixFlags = 0;
15544     }
15545
15546     return;
15547 #undef _impResolveToken
15548 }
15549 #ifdef _PREFAST_
15550 #pragma warning(pop)
15551 #endif
15552
15553 // Push a local/argument tree on the operand stack
15554 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
15555 {
15556     tiRetVal.NormaliseForStack();
15557
15558     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
15559     {
15560         tiRetVal.SetUninitialisedObjRef();
15561     }
15562
15563     impPushOnStack(op, tiRetVal);
15564 }
15565
15566 // Load a local/argument on the operand stack
15567 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
15568 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
15569 {
15570     var_types lclTyp;
15571
15572     if (lvaTable[lclNum].lvNormalizeOnLoad())
15573     {
15574         lclTyp = lvaGetRealType(lclNum);
15575     }
15576     else
15577     {
15578         lclTyp = lvaGetActualType(lclNum);
15579     }
15580
15581     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
15582 }
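
// For illustration (a sketch, not part of the original source): a small-typed local that is
// lvNormalizeOnLoad() is pushed by impLoadVar above with its real small type (e.g. TYP_BYTE),
// so the load itself normalizes (widens) the value, while other locals are pushed at their
// actual stack type (e.g. TYP_INT).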
15583
15584 // Load an argument on the operand stack
15585 // Shared by the various CEE_LDARG opcodes
15586 // ilArgNum is the argument index as specified in IL.
15587 // It will be mapped to the correct lvaTable index
15588 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
15589 {
15590     Verify(ilArgNum < info.compILargsCount, "bad arg num");
15591
15592     if (compIsForInlining())
15593     {
15594         if (ilArgNum >= info.compArgsCount)
15595         {
15596             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
15597             return;
15598         }
15599
15600         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
15601                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
15602     }
15603     else
15604     {
15605         if (ilArgNum >= info.compArgsCount)
15606         {
15607             BADCODE("Bad IL");
15608         }
15609
15610         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
15611
15612         if (lclNum == info.compThisArg)
15613         {
15614             lclNum = lvaArg0Var;
15615         }
15616
15617         impLoadVar(lclNum, offset);
15618     }
15619 }
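
// For illustration (a sketch, not part of the original source): in an instance method that
// returns a struct through a hidden return buffer, lvaTable typically orders 'this', then the
// retbuf, then the user args, so compMapILargNum above may map IL arg 1 to lvaTable index 2.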
15620
15621 // Load a local on the operand stack
15622 // Shared by the various CEE_LDLOC opcodes
15623 // ilLclNum is the local index as specified in IL.
15624 // It will be mapped to the correct lvaTable index
15625 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
15626 {
15627     if (tiVerificationNeeded)
15628     {
15629         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
15630         Verify(info.compInitMem, "initLocals not set");
15631     }
15632
15633     if (compIsForInlining())
15634     {
15635         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15636         {
15637             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
15638             return;
15639         }
15640
15641         // Get the local type
15642         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
15643
15644         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
15645
15646         /* Have we allocated a temp for this local? */
15647
15648         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
15649
15650         // All vars of inlined methods should be !lvNormalizeOnLoad()
15651
15652         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
15653         lclTyp = genActualType(lclTyp);
15654
15655         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
15656     }
15657     else
15658     {
15659         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15660         {
15661             BADCODE("Bad IL");
15662         }
15663
15664         unsigned lclNum = info.compArgsCount + ilLclNum;
15665
15666         impLoadVar(lclNum, offset);
15667     }
15668 }
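
// For illustration (a sketch, not part of the original source): when not inlining, IL locals
// simply follow the arguments in lvaTable, so in a method with two arguments ldloc.1 loads
// lvaTable index info.compArgsCount + 1 == 3.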
15669
15670 #ifdef _TARGET_ARM_
15671 /**************************************************************************************
15672  *
15673  *  When assigning a vararg call src to an HFA lcl dest, mark that we cannot promote the
15674  *  dst struct, because struct promotion would turn it into a float/double variable while
15675  *  the rhs will be an int/long variable. We don't generate code for assigning an int into
15676  *  a float, but nothing prevents such a tree from being formed. The tree would
15677  *  look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
15678  *
15679  *  tmpNum - the lcl dst variable num that is a struct.
15680  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
15681  *  hClass - the type handle for the struct variable.
15682  *
15683  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
15684  *        however, we could do a codegen of transferring from int to float registers
15685  *        (transfer, not a cast.)
15686  *
15687  */
15688 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
15689 {
15690     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15691     {
15692         int       hfaSlots = GetHfaCount(hClass);
15693         var_types hfaType  = GetHfaType(hClass);
15694
15695         // If we have varargs, we morph the method's return type to "int" at the importer,
15696         // irrespective of its original struct/float type, because the ABI calls for the return in
15697         // integer registers. We don't want struct promotion to turn an expression like this:
15698         //   lclFld_int = callvar_int()   into   lclFld_float = callvar_int();
15699         // because that would assign an int to a float without a cast. Prevent the promotion.
15700         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15701             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15702         {
15703             // Make sure this struct type stays as struct so we can receive the call in a struct.
15704             lvaTable[tmpNum].lvIsMultiRegRet = true;
15705         }
15706     }
15707 }
15708 #endif // _TARGET_ARM_
15709
15710 #if FEATURE_MULTIREG_RET
15711 GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
15712 {
15713     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
15714     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
15715     GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
15716
15717     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15718     ret->gtFlags |= GTF_DONT_CSE;
15719
15720     assert(IsMultiRegReturnedType(hClass));
15721
15722     // Mark the var so that fields are not promoted and stay together.
15723     lvaTable[tmpNum].lvIsMultiRegRet = true;
15724
15725     return ret;
15726 }
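
// A usage sketch (not from the original source): given a GT_CALL 'call' that returns a struct
// in multiple registers, a caller might do
//     GenTree* ret = impAssignMultiRegTypeToVar(call, retClsHnd);
// which spills the call into a fresh temp, marks that temp lvIsMultiRegRet so promotion does
// not split its fields apart, and returns a GTF_DONT_CSE local var node for the temp.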
15727 #endif // FEATURE_MULTIREG_RET
15728
15729 // do import for a return
15730 // returns false if inlining was aborted
15731 // opcode can be ret or call in the case of a tail.call
15732 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15733 {
15734     if (tiVerificationNeeded)
15735     {
15736         verVerifyThisPtrInitialised();
15737
15738         unsigned expectedStack = 0;
15739         if (info.compRetType != TYP_VOID)
15740         {
15741             typeInfo tiVal = impStackTop().seTypeInfo;
15742             typeInfo tiDeclared =
15743                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15744
15745             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15746
15747             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15748             expectedStack = 1;
15749         }
15750         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15751     }
15752
15753 #ifdef DEBUG
15754     // If we are importing an inlinee and have GC ref locals we always
15755     // need to have a spill temp for the return value.  This temp
15756     // should have been set up in advance, over in fgFindBasicBlocks.
15757     if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
15758     {
15759         assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
15760     }
15761 #endif // DEBUG
15762
15763     GenTree*             op2       = nullptr;
15764     GenTree*             op1       = nullptr;
15765     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15766
15767     if (info.compRetType != TYP_VOID)
15768     {
15769         StackEntry se = impPopStack();
15770         retClsHnd     = se.seTypeInfo.GetClassHandle();
15771         op2           = se.val;
15772
15773         if (!compIsForInlining())
15774         {
15775             impBashVarAddrsToI(op2);
15776             op2 = impImplicitIorI4Cast(op2, info.compRetType);
15777             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15778             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15779                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15780                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15781                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15782                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15783
15784 #ifdef DEBUG
15785             if (opts.compGcChecks && info.compRetType == TYP_REF)
15786             {
15787                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
15788                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15789                 // one-return BB.
15790
15791                 assert(op2->gtType == TYP_REF);
15792
15793                 // confirm that the argument is a GC pointer (for debugging (GC stress))
15794                 GenTreeArgList* args = gtNewArgList(op2);
15795                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
15796
15797                 if (verbose)
15798                 {
15799                     printf("\ncompGcChecks tree:\n");
15800                     gtDispTree(op2);
15801                 }
15802             }
15803 #endif
15804         }
15805         else
15806         {
15807             // inlinee's stack should be empty now.
15808             assert(verCurrentState.esStackDepth == 0);
15809
15810 #ifdef DEBUG
15811             if (verbose)
15812             {
15813                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15814                 gtDispTree(op2);
15815             }
15816 #endif
15817
15818             // Make sure the type matches the original call.
15819
15820             var_types returnType       = genActualType(op2->gtType);
15821             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15822             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15823             {
15824                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15825             }
15826
15827             if (returnType != originalCallType)
15828             {
15829                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15830                 return false;
15831             }
15832
15833             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15834             // expression. At this point, retExpr could already be set if there are multiple
15835             // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
15836             // the other blocks already set it. If there is only a single return block,
15837             // retExpr shouldn't be set. However, this is not true if we reimport a block
15838             // with a return. In that case, retExpr will be set, then the block will be
15839             // reimported, but retExpr won't get cleared as part of setting the block to
15840             // be reimported. The reimported retExpr value should be the same, so even if
15841             // we don't unconditionally overwrite it, it shouldn't matter.
15842             if (info.compRetNativeType != TYP_STRUCT)
15843             {
15844                 // compRetNativeType is not TYP_STRUCT.
15845                 // This implies it could be either a scalar type or SIMD vector type or
15846                 // a struct type that can be normalized to a scalar type.
15847
15848                 if (varTypeIsStruct(info.compRetType))
15849                 {
15850                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15851                     // adjust the type away from struct to integral
15852                     // and no normalizing
15853                     op2 = impFixupStructReturnType(op2, retClsHnd);
15854                 }
15855                 else
15856                 {
15857                     // Do we have to normalize?
15858                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15859                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15860                         fgCastNeeded(op2, fncRealRetType))
15861                     {
15862                         // Small-typed return values are normalized by the callee
15863                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15864                     }
15865                 }
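                // For example (a sketch, not in the original source): an inlinee declared to
                // return 'short' whose op2 is a full TYP_INT expression gets a cast to
                // TYP_SHORT just above, so the caller observes a value already normalized by
                // the callee.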
15866
15867                 if (fgNeedReturnSpillTemp())
15868                 {
15869                     assert(info.compRetNativeType != TYP_VOID &&
15870                            (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
15871
15872                     // If this method returns a ref type, track the actual types seen
15873                     // in the returns.
15874                     if (info.compRetType == TYP_REF)
15875                     {
15876                         bool                 isExact      = false;
15877                         bool                 isNonNull    = false;
15878                         CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
15879
15880                         if (impInlineInfo->retExpr == nullptr)
15881                         {
15882                             // This is the first return, so best known type is the type
15883                             // of this return value.
15884                             impInlineInfo->retExprClassHnd        = returnClsHnd;
15885                             impInlineInfo->retExprClassHndIsExact = isExact;
15886                         }
15887                         else if (impInlineInfo->retExprClassHnd != returnClsHnd)
15888                         {
15889                             // This return site type differs from earlier seen sites,
15890                             // so reset the info and we'll fall back to using the method's
15891                             // declared return type for the return spill temp.
15892                             impInlineInfo->retExprClassHnd        = nullptr;
15893                             impInlineInfo->retExprClassHndIsExact = false;
15894                         }
15895                     }
15896
15897                     // This is a bit of a workaround...
15898                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15899                     // not a struct (for example, the struct is composed of exactly one int, and the native
15900                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15901                     // fgNeedReturnSpillTemp() == true, and is the index of a local var that is set
15902                     // fgNeedReturnSpillTemp() == true, and lvaInlineeReturnSpillTemp is the index of a local var
15903                     // that is set
15903                     // a call, then we have a problem. The situation is like this (from a failed test case):
15904                     //
15905                     // inliner:
15906                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15907                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15908                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15909                     //
15910                     // inlinee:
15911                     //      ...
15912                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15913                     //      ret
15914                     //      ...
15915                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15916                     //      object&, class System.Func`1<!!0>)
15917                     //      ret
15918                     //
15919                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15920                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15921                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15922                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15923                     //
15924                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15925                     // native return type, which is what it will be set to eventually. We generate the
15926                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15927                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15928
15929                     bool restoreType = false;
15930                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15931                     {
15932                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15933                         op2->gtType = info.compRetNativeType;
15934                         restoreType = true;
15935                     }
15936
15937                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15938                                      (unsigned)CHECK_SPILL_ALL);
15939
15940                     GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15941
15942                     if (restoreType)
15943                     {
15944                         op2->gtType = TYP_STRUCT; // restore it to what it was
15945                     }
15946
15947                     op2 = tmpOp2;
15948
15949 #ifdef DEBUG
15950                     if (impInlineInfo->retExpr)
15951                     {
15952                         // Some other block(s) have seen the CEE_RET first.
15953                         // Better they spilled to the same temp.
15954                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15955                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15956                     }
15957 #endif
15958                 }
15959
15960 #ifdef DEBUG
15961                 if (verbose)
15962                 {
15963                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15964                     gtDispTree(op2);
15965                 }
15966 #endif
15967
15968                 // Report the return expression
15969                 impInlineInfo->retExpr = op2;
15970             }
15971             else
15972             {
15973                 // compRetNativeType is TYP_STRUCT.
15974                 // This implies that the struct is returned via a RetBuf arg or a multi-reg struct return.
15975
15976                 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
15977
15978                 // Assign the inlinee return into a spill temp.
15979                 // spill temp only exists if there are multiple return points
15980                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15981                 {
15982                     // in this case we have to insert multiple struct copies to the temp
15983                     // and the retexpr is just the temp.
15984                     assert(info.compRetNativeType != TYP_VOID);
15985                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
15986
15987                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15988                                      (unsigned)CHECK_SPILL_ALL);
15989                 }
15990
15991 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15992 #if defined(_TARGET_ARM_)
15993                 // TODO-ARM64-NYI: HFA
15994                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented the
15995                 // next ifdefs could be refactored into a single method with the ifdef inside.
15996                 if (IsHfa(retClsHnd))
15997                 {
15998 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15999 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16000                 ReturnTypeDesc retTypeDesc;
16001                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16002                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16003
16004                 if (retRegCount != 0)
16005                 {
16006                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
16007                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
16008                     // max allowed.)
16009                     assert(retRegCount == MAX_RET_REG_COUNT);
16010                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
16011                     CLANG_FORMAT_COMMENT_ANCHOR;
16012 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16013
16014                     if (fgNeedReturnSpillTemp())
16015                     {
16016                         if (!impInlineInfo->retExpr)
16017                         {
16018 #if defined(_TARGET_ARM_)
16019                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
16020 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16021                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16022                             impInlineInfo->retExpr =
16023                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16024 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16025                         }
16026                     }
16027                     else
16028                     {
16029                         impInlineInfo->retExpr = op2;
16030                     }
16031                 }
16032                 else
16033 #elif defined(_TARGET_ARM64_)
16034                 ReturnTypeDesc retTypeDesc;
16035                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16036                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16037
16038                 if (retRegCount != 0)
16039                 {
16040                     assert(!iciCall->HasRetBufArg());
16041                     assert(retRegCount >= 2);
16042                     if (fgNeedReturnSpillTemp())
16043                     {
16044                         if (!impInlineInfo->retExpr)
16045                         {
16046                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16047                             impInlineInfo->retExpr =
16048                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16049                         }
16050                     }
16051                     else
16052                     {
16053                         impInlineInfo->retExpr = op2;
16054                     }
16055                 }
16056                 else
16057 #endif // defined(_TARGET_ARM64_)
16058                 {
16059                     assert(iciCall->HasRetBufArg());
16060                     GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
16061                     // spill temp only exists if there are multiple return points
16062                     if (fgNeedReturnSpillTemp())
16063                     {
16064                         // if this is the first return we have seen, set the retExpr
16065                         if (!impInlineInfo->retExpr)
16066                         {
16067                             impInlineInfo->retExpr =
16068                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
16069                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
16070                         }
16071                     }
16072                     else
16073                     {
16074                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16075                     }
16076                 }
16077             }
16078         }
16079     }
16080
16081     if (compIsForInlining())
16082     {
16083         return true;
16084     }
16085
16086     if (info.compRetType == TYP_VOID)
16087     {
16088         // return void
16089         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16090     }
16091     else if (info.compRetBuffArg != BAD_VAR_NUM)
16092     {
16093         // Assign value to return buff (first param)
16094         GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
16095
16096         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16097         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16098
16099         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
16100         CLANG_FORMAT_COMMENT_ANCHOR;
16101
16102 #if defined(_TARGET_AMD64_)
16103
16104         // The x64 (System V and Win64) calling convention requires the implicit
16105         // return buffer to be returned explicitly (in RAX).
16106         // Change the return type to be BYREF.
16107         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16108 #else  // !defined(_TARGET_AMD64_)
16109         // On non-AMD64 targets the profiler hook requires the implicit RetBuf to be returned explicitly (in RAX).
16110         // In that case the return value of the function is changed to BYREF.
16111         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
16112         if (compIsProfilerHookNeeded())
16113         {
16114             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16115         }
16116         else
16117         {
16118             // return void
16119             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16120         }
16121 #endif // !defined(_TARGET_AMD64_)
16122     }
16123     else if (varTypeIsStruct(info.compRetType))
16124     {
16125 #if !FEATURE_MULTIREG_RET
16126         // For both ARM architectures the HFA native types are maintained as structs.
16127         // On System V AMD64, multireg struct returns are also left as structs.
16128         noway_assert(info.compRetNativeType != TYP_STRUCT);
16129 #endif
16130         op2 = impFixupStructReturnType(op2, retClsHnd);
16131         // return op2
16132         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
16133     }
16134     else
16135     {
16136         // return op2
16137         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
16138     }
16139
16140     // We must have imported a tailcall and jumped to RET
16141     if (prefixFlags & PREFIX_TAILCALL)
16142     {
16143 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
16144         // Jit64 compat:
16145         // This cannot be asserted on Amd64 since we permit the following IL pattern:
16146         //      tail.call
16147         //      pop
16148         //      ret
16149         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
16150 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
16151
16152         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
16153
16154         // impImportCall() would have already appended TYP_VOID calls
16155         if (info.compRetType == TYP_VOID)
16156         {
16157             return true;
16158         }
16159     }
16160
16161     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16162 #ifdef DEBUG
16163     // Remember at which BC offset the tree was finished
16164     impNoteLastILoffs();
16165 #endif
16166     return true;
16167 }
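
// For illustration (a sketch, not part of the original source), the return shapes built above are:
//     void return                 -> GT_RETURN(TYP_VOID)
//     scalar return               -> GT_RETURN(<actual ret type>, op2)
//     struct return (no retbuf)   -> GT_RETURN over the fixed-up struct value
//     hidden return buffer        -> assignment through the retbuf address, then GT_RETURN(TYP_VOID)
//                                    (or GT_RETURN(TYP_BYREF, <retbuf addr>) where the ABI or the
//                                    profiler hook requires the buffer address to be returned)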
16168
16169 /*****************************************************************************
16170  *  Mark the block as unimported.
16171  *  Note that the caller is responsible for calling impImportBlockPending(),
16172  *  with the appropriate stack-state
16173  */
16174
16175 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
16176 {
16177 #ifdef DEBUG
16178     if (verbose && (block->bbFlags & BBF_IMPORTED))
16179     {
16180         printf("\nBB%02u will be reimported\n", block->bbNum);
16181     }
16182 #endif
16183
16184     block->bbFlags &= ~BBF_IMPORTED;
16185 }
16186
16187 /*****************************************************************************
16188  *  Mark the successors of the given block as unimported.
16189  *  Note that the caller is responsible for calling impImportBlockPending()
16190  *  for all the successors, with the appropriate stack-state.
16191  */
16192
16193 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
16194 {
16195     const unsigned numSuccs = block->NumSucc();
16196     for (unsigned i = 0; i < numSuccs; i++)
16197     {
16198         impReimportMarkBlock(block->GetSucc(i));
16199     }
16200 }
16201
16202 /*****************************************************************************
16203  *
16204  *  Filter wrapper that handles only the verification exception code
16205  *  (everything else continues the search).
16206  */
16207
16208 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
16209 {
16210     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
16211     {
16212         return EXCEPTION_EXECUTE_HANDLER;
16213     }
16214
16215     return EXCEPTION_CONTINUE_SEARCH;
16216 }
16217
16218 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
16219 {
16220     assert(block->hasTryIndex());
16221     assert(!compIsForInlining());
16222
16223     unsigned  tryIndex = block->getTryIndex();
16224     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
16225
16226     if (isTryStart)
16227     {
16228         assert(block->bbFlags & BBF_TRY_BEG);
16229
16230         // The Stack must be empty
16231         //
16232         if (block->bbStkDepth != 0)
16233         {
16234             BADCODE("Evaluation stack must be empty on entry into a try block");
16235         }
16236     }
16237
16238     // Save the stack contents, we'll need to restore it later
16239     //
16240     SavedStack blockState;
16241     impSaveStackState(&blockState, false);
16242
16243     while (HBtab != nullptr)
16244     {
16245         if (isTryStart)
16246         {
16247             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
16248             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
16249             //
16250             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
16251             {
16252                 // We trigger an invalid program exception here unless we have a try/fault region.
16253                 //
16254                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
16255                 {
16256                     BADCODE(
16257                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
16258                 }
16259                 else
16260                 {
16261                     // Allow a try/fault region to proceed.
16262                     assert(HBtab->HasFaultHandler());
16263                 }
16264             }
16265
16266             /* Recursively process the handler block */
16267             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
16268
16269             //  Construct the proper verification stack state
16270             //   either empty or one that contains just
16271             //   the Exception Object that we are dealing with
16272             //
16273             verCurrentState.esStackDepth = 0;
16274
16275             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
16276             {
16277                 CORINFO_CLASS_HANDLE clsHnd;
16278
16279                 if (HBtab->HasFilter())
16280                 {
16281                     clsHnd = impGetObjectClass();
16282                 }
16283                 else
16284                 {
16285                     CORINFO_RESOLVED_TOKEN resolvedToken;
16286
16287                     resolvedToken.tokenContext = impTokenLookupContextHandle;
16288                     resolvedToken.tokenScope   = info.compScopeHnd;
16289                     resolvedToken.token        = HBtab->ebdTyp;
16290                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
16291                     info.compCompHnd->resolveToken(&resolvedToken);
16292
16293                     clsHnd = resolvedToken.hClass;
16294                 }
16295
16296                 // push catch arg on the stack, spill to a temp if necessary
16297                 // Note: can update HBtab->ebdHndBeg!
16298                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
16299             }
16300
16301             // Queue up the handler for importing
16302             //
16303             impImportBlockPending(hndBegBB);
16304
16305             if (HBtab->HasFilter())
16306             {
16307                 /* @VERIFICATION : Ideally the end of filter state should get
16308                    propagated to the catch handler, this is an incompleteness,
16309                    but is not a security/compliance issue, since the only
16310                    interesting state is the 'thisInit' state.
16311                    */
16312
16313                 verCurrentState.esStackDepth = 0;
16314
16315                 BasicBlock* filterBB = HBtab->ebdFilter;
16316
16317                 // push catch arg on the stack, spill to a temp if necessary
16318                 // Note: can update HBtab->ebdFilter!
16319                 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
16320                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
16321
16322                 impImportBlockPending(filterBB);
16323             }
16324         }
16325         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
16326         {
16327             /* Recursively process the handler block */
16328
16329             verCurrentState.esStackDepth = 0;
16330
16331             // Queue up the fault handler for importing
16332             //
16333             impImportBlockPending(HBtab->ebdHndBeg);
16334         }
16335
16336         // Now process our enclosing try index (if any)
16337         //
16338         tryIndex = HBtab->ebdEnclosingTryIndex;
16339         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
16340         {
16341             HBtab = nullptr;
16342         }
16343         else
16344         {
16345             HBtab = ehGetDsc(tryIndex);
16346         }
16347     }
16348
16349     // Restore the stack contents
16350     impRestoreStackState(&blockState);
16351 }
16352
16353 //***************************************************************
16354 // Import the instructions for the given basic block.  Perform
16355 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
16356 // time, or whose verification pre-state is changed.
16357
16358 #ifdef _PREFAST_
16359 #pragma warning(push)
16360 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
16361 #endif
16362 void Compiler::impImportBlock(BasicBlock* block)
16363 {
16364     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
16365     // handle them specially. In particular, there is no IL to import for them, but we do need
16366     // to mark them as imported and put their successors on the pending import list.
16367     if (block->bbFlags & BBF_INTERNAL)
16368     {
16369         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
16370         block->bbFlags |= BBF_IMPORTED;
16371
16372         const unsigned numSuccs = block->NumSucc();
16373         for (unsigned i = 0; i < numSuccs; i++)
16374         {
16375             impImportBlockPending(block->GetSucc(i));
16376         }
16377
16378         return;
16379     }
16380
16381     bool markImport;
16382
16383     assert(block);
16384
16385     /* Make the block globally available */
16386
16387     compCurBB = block;
16388
16389 #ifdef DEBUG
16390     /* Initialize the debug variables */
16391     impCurOpcName = "unknown";
16392     impCurOpcOffs = block->bbCodeOffs;
16393 #endif
16394
16395     /* Set the current stack state to the merged result */
16396     verResetCurrentState(block, &verCurrentState);
16397
16398     /* Now walk the code and import the IL into GenTrees */
16399
16400     struct FilterVerificationExceptionsParam
16401     {
16402         Compiler*   pThis;
16403         BasicBlock* block;
16404     };
16405     FilterVerificationExceptionsParam param;
16406
16407     param.pThis = this;
16408     param.block = block;
16409
16410     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
16411     {
16412         /* @VERIFICATION : For now, the only state propagation from try
16413            to its handler is "thisInit" state (stack is empty at start of try).
16414            In general, for state that we track in verification, we need to
16415            model the possibility that an exception might happen at any IL
16416            instruction, so we really need to merge all states that obtain
16417            between IL instructions in a try block into the start states of
16418            all handlers.
16419
16420            However we do not allow the 'this' pointer to be uninitialized when
16421            entering most kinds of try regions (only try/fault are allowed to have
16422            an uninitialized this pointer on entry to the try)
16423
16424            Fortunately, the stack is thrown away when an exception
16425            leads to a handler, so we don't have to worry about that.
16426            We DO, however, have to worry about the "thisInit" state.
16427            But only for the try/fault case.
16428
16429            The only allowed transition is from TIS_Uninit to TIS_Init.
16430
16431            So for a try/fault region for the fault handler block
16432            we will merge the start state of the try begin
16433            and the post-state of each block that is part of this try region
16434         */
16435
16436         // merge the start state of the try begin
16437         //
16438         if (pParam->block->bbFlags & BBF_TRY_BEG)
16439         {
16440             pParam->pThis->impVerifyEHBlock(pParam->block, true);
16441         }
16442
16443         pParam->pThis->impImportBlockCode(pParam->block);
16444
16445         // As discussed above:
16446         // merge the post-state of each block that is part of this try region
16447         //
16448         if (pParam->block->hasTryIndex())
16449         {
16450             pParam->pThis->impVerifyEHBlock(pParam->block, false);
16451         }
16452     }
16453     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
16454     {
16455         verHandleVerificationFailure(block DEBUGARG(false));
16456     }
16457     PAL_ENDTRY
16458
16459     if (compDonotInline())
16460     {
16461         return;
16462     }
16463
16464     assert(!compDonotInline());
16465
16466     markImport = false;
16467
16468 SPILLSTACK:
16469
16470     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
16471     bool        reimportSpillClique = false;
16472     BasicBlock* tgtBlock            = nullptr;
16473
16474     /* If the stack is non-empty, we might have to spill its contents */
16475
16476     if (verCurrentState.esStackDepth != 0)
16477     {
16478         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
16479                                   // on the stack, its lifetime is hard to determine, simply
16480                                   // don't reuse such temps.
16481
16482         GenTree* addStmt = nullptr;
16483
16484         /* Do the successors of 'block' have any other predecessors ?
16485            We do not want to do some of the optimizations related to multiRef
16486            if we can reimport blocks */
16487
16488         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
16489
16490         switch (block->bbJumpKind)
16491         {
16492             case BBJ_COND:
16493
16494                 /* Temporarily remove the 'jtrue' from the end of the tree list */
16495
16496                 assert(impTreeLast);
16497                 assert(impTreeLast->gtOper == GT_STMT);
16498                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
16499
16500                 addStmt     = impTreeLast;
16501                 impTreeLast = impTreeLast->gtPrev;
16502
16503                 /* Note if the next block has more than one ancestor */
16504
16505                 multRef |= block->bbNext->bbRefs;
16506
16507                 /* Does the next block have temps assigned? */
16508
16509                 baseTmp  = block->bbNext->bbStkTempsIn;
16510                 tgtBlock = block->bbNext;
16511
16512                 if (baseTmp != NO_BASE_TMP)
16513                 {
16514                     break;
16515                 }
16516
16517                 /* Try the target of the jump then */
16518
16519                 multRef |= block->bbJumpDest->bbRefs;
16520                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16521                 tgtBlock = block->bbJumpDest;
16522                 break;
16523
16524             case BBJ_ALWAYS:
16525                 multRef |= block->bbJumpDest->bbRefs;
16526                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16527                 tgtBlock = block->bbJumpDest;
16528                 break;
16529
16530             case BBJ_NONE:
16531                 multRef |= block->bbNext->bbRefs;
16532                 baseTmp  = block->bbNext->bbStkTempsIn;
16533                 tgtBlock = block->bbNext;
16534                 break;
16535
16536             case BBJ_SWITCH:
16537
16538                 BasicBlock** jmpTab;
16539                 unsigned     jmpCnt;
16540
16541                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
16542
16543                 assert(impTreeLast);
16544                 assert(impTreeLast->gtOper == GT_STMT);
16545                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
16546
16547                 addStmt     = impTreeLast;
16548                 impTreeLast = impTreeLast->gtPrev;
16549
16550                 jmpCnt = block->bbJumpSwt->bbsCount;
16551                 jmpTab = block->bbJumpSwt->bbsDstTab;
16552
16553                 do
16554                 {
16555                     tgtBlock = (*jmpTab);
16556
16557                     multRef |= tgtBlock->bbRefs;
16558
16559                     // Thanks to spill cliques, we should have assigned all or none
16560                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
16561                     baseTmp = tgtBlock->bbStkTempsIn;
16562                     if (multRef > 1)
16563                     {
16564                         break;
16565                     }
16566                 } while (++jmpTab, --jmpCnt);
16567
16568                 break;
16569
16570             case BBJ_CALLFINALLY:
16571             case BBJ_EHCATCHRET:
16572             case BBJ_RETURN:
16573             case BBJ_EHFINALLYRET:
16574             case BBJ_EHFILTERRET:
16575             case BBJ_THROW:
16576                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
16577                 break;
16578
16579             default:
16580                 noway_assert(!"Unexpected bbJumpKind");
16581                 break;
16582         }
16583
16584         assert(multRef >= 1);
16585
16586         /* Do we have a base temp number? */
16587
16588         bool newTemps = (baseTmp == NO_BASE_TMP);
16589
16590         if (newTemps)
16591         {
16592             /* Grab enough temps for the whole stack */
16593             baseTmp = impGetSpillTmpBase(block);
16594         }
16595
16596         /* Spill all stack entries into temps */
16597         unsigned level, tempNum;
16598
16599         JITDUMP("\nSpilling stack entries into temps\n");
16600         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
16601         {
16602             GenTree* tree = verCurrentState.esStack[level].val;
16603
16604             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
16605                the other. This should merge to a byref in unverifiable code.
16606                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
16607                successor would be imported assuming there was a TYP_I_IMPL on
16608                the stack. Thus the value would not get GC-tracked. Hence,
16609                change the temp to TYP_BYREF and reimport the successors.
16610                Note: We should only allow this in unverifiable code.
16611             */
16612             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
16613             {
16614                 lvaTable[tempNum].lvType = TYP_BYREF;
16615                 impReimportMarkSuccessors(block);
16616                 markImport = true;
16617             }
16618
16619 #ifdef _TARGET_64BIT_
16620             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
16621             {
16622                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
16623                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
16624                 {
16625                     // Merge the current state into the entry state of block;
16626                     // the call to verMergeEntryStates must have changed
16627                     // the entry state of the block by merging the int local var
16628                     // and the native-int stack entry.
16629                     bool changed = false;
16630                     if (verMergeEntryStates(tgtBlock, &changed))
16631                     {
16632                         impRetypeEntryStateTemps(tgtBlock);
16633                         impReimportBlockPending(tgtBlock);
16634                         assert(changed);
16635                     }
16636                     else
16637                     {
16638                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
16639                         break;
16640                     }
16641                 }
16642
16643                 // Some other block in the spill clique set this to "int", but now we have "native int".
16644                 // Change the type and go back to re-import any blocks that used the wrong type.
16645                 lvaTable[tempNum].lvType = TYP_I_IMPL;
16646                 reimportSpillClique      = true;
16647             }
16648             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
16649             {
16650                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
16651                 // Insert a sign-extension to "native int" so we match the clique.
16652                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
16653             }
16654
16655             // Consider the case where one branch left a 'byref' on the stack and the other leaves
16656             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
16657             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
16658             // behavior instead of asserting and then generating bad code (where we save/restore the
16659             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
16660             // imported already, we need to change the type of the local and reimport the spill clique.
16661             // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
16662             // the 'byref' size.
16663             if (!tiVerificationNeeded)
16664             {
16665                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
16666                 {
16667                     // Some other block in the spill clique set this to "int", but now we have "byref".
16668                     // Change the type and go back to re-import any blocks that used the wrong type.
16669                     lvaTable[tempNum].lvType = TYP_BYREF;
16670                     reimportSpillClique      = true;
16671                 }
16672                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
16673                 {
16674                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
16675                     // Insert a sign-extension to "native int" so we match the clique size.
16676                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
16677                 }
16678             }
16679 #endif // _TARGET_64BIT_
16680
16681 #if FEATURE_X87_DOUBLES
16682             // X87 stack doesn't differentiate between float/double
16683             // so promoting is no big deal.
16684             // For everybody else keep it as float until we have a collision and then promote
16685             // Just like for x64's TYP_INT<->TYP_I_IMPL
16686
16687             if (multRef > 1 && tree->gtType == TYP_FLOAT)
16688             {
16689                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16690             }
16691
16692 #else // !FEATURE_X87_DOUBLES
16693
16694             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
16695             {
16696                 // Some other block in the spill clique set this to "float", but now we have "double".
16697                 // Change the type and go back to re-import any blocks that used the wrong type.
16698                 lvaTable[tempNum].lvType = TYP_DOUBLE;
16699                 reimportSpillClique      = true;
16700             }
16701             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
16702             {
16703                 // Spill clique has decided this should be "double", but this block only pushes a "float".
16704                 // Insert a cast to "double" so we match the clique.
16705                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16706             }
16707
16708 #endif // FEATURE_X87_DOUBLES
16709
16710             /* If addStmt has a reference to tempNum (can only happen if we
16711                are spilling to the temps already used by a previous block),
16712                we need to spill addStmt */
16713
16714             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
16715             {
16716                 GenTree* addTree = addStmt->gtStmt.gtStmtExpr;
16717
16718                 if (addTree->gtOper == GT_JTRUE)
16719                 {
16720                     GenTree* relOp = addTree->gtOp.gtOp1;
16721                     assert(relOp->OperIsCompare());
16722
16723                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
16724
16725                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
16726                     {
16727                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
16728                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
16729                         type              = genActualType(lvaTable[temp].TypeGet());
16730                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
16731                     }
16732
16733                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
16734                     {
16735                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
16736                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
16737                         type              = genActualType(lvaTable[temp].TypeGet());
16738                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
16739                     }
16740                 }
16741                 else
16742                 {
16743                     assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
16744
16745                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
16746                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
16747                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
16748                 }
16749             }
16750
16751             /* Spill the stack entry, and replace with the temp */
16752
16753             if (!impSpillStackEntry(level, tempNum
16754 #ifdef DEBUG
16755                                     ,
16756                                     true, "Spill Stack Entry"
16757 #endif
16758                                     ))
16759             {
16760                 if (markImport)
16761                 {
16762                     BADCODE("bad stack state");
16763                 }
16764
16765                 // Oops. Something went wrong when spilling. Bad code.
16766                 verHandleVerificationFailure(block DEBUGARG(true));
16767
16768                 goto SPILLSTACK;
16769             }
16770         }
16771
16772         /* Put back the 'jtrue'/'switch' if we removed it earlier */
16773
16774         if (addStmt)
16775         {
16776             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16777         }
16778     }
16779
16780     // Some of the append/spill logic works on compCurBB
16781
16782     assert(compCurBB == block);
16783
16784     /* Save the tree list in the block */
16785     impEndTreeList(block);
16786
16787     // impEndTreeList sets BBF_IMPORTED on the block
16788     // We do *NOT* want to set it later than this because
16789     // impReimportSpillClique might clear it if this block is both a
16790     // predecessor and successor in the current spill clique
16791     assert(block->bbFlags & BBF_IMPORTED);
16792
16793     // If we had an int/native int or float/double collision, we need to re-import
16794     if (reimportSpillClique)
16795     {
16796         // This will re-import all the successors of block (as well as each of their predecessors)
16797         impReimportSpillClique(block);
16798
16799         // For blocks that haven't been imported yet, we still need to mark them as pending import.
16800         const unsigned numSuccs = block->NumSucc();
16801         for (unsigned i = 0; i < numSuccs; i++)
16802         {
16803             BasicBlock* succ = block->GetSucc(i);
16804             if ((succ->bbFlags & BBF_IMPORTED) == 0)
16805             {
16806                 impImportBlockPending(succ);
16807             }
16808         }
16809     }
16810     else // the normal case
16811     {
16812         // otherwise just import the successors of block
16813
16814         /* Does this block jump to any other blocks? */
16815         const unsigned numSuccs = block->NumSucc();
16816         for (unsigned i = 0; i < numSuccs; i++)
16817         {
16818             impImportBlockPending(block->GetSucc(i));
16819         }
16820     }
16821 }
16822 #ifdef _PREFAST_
16823 #pragma warning(pop)
16824 #endif
16825
16826 /*****************************************************************************/
16827 //
16828 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16829 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16830 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16831 // (its "pre-state").
16832
16833 void Compiler::impImportBlockPending(BasicBlock* block)
16834 {
16835 #ifdef DEBUG
16836     if (verbose)
16837     {
16838         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16839     }
16840 #endif
16841
16842     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16843     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16844     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16845
16846     // If the block has not been imported, add to pending set.
16847     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16848
16849     // Initialize bbEntryState just the first time we try to add this block to the pending list
16850     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set
16851     // We use NULL to indicate the 'common' state to avoid memory allocation
16852     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16853         (impGetPendingBlockMember(block) == 0))
16854     {
16855         verInitBBEntryState(block, &verCurrentState);
16856         assert(block->bbStkDepth == 0);
16857         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16858         assert(addToPending);
16859         assert(impGetPendingBlockMember(block) == 0);
16860     }
16861     else
16862     {
16863         // The stack should have the same height on entry to the block from all its predecessors.
16864         if (block->bbStkDepth != verCurrentState.esStackDepth)
16865         {
16866 #ifdef DEBUG
16867             char buffer[400];
16868             sprintf_s(buffer, sizeof(buffer),
16869                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16870                       "Previous depth was %d, current depth is %d",
16871                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16872                       verCurrentState.esStackDepth);
16873             buffer[400 - 1] = 0;
16874             NO_WAY(buffer);
16875 #else
16876             NO_WAY("Block entered with different stack depths");
16877 #endif
16878         }
16879
16880         // Additionally, if we need to verify, merge the verification state.
16881         if (tiVerificationNeeded)
16882         {
16883             // Merge the current state into the entry state of block; if this does not change the entry state
16884             // by merging, do not add the block to the pending-list.
16885             bool changed = false;
16886             if (!verMergeEntryStates(block, &changed))
16887             {
16888                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16889                 addToPending = true; // We will pop it off, and check the flag set above.
16890             }
16891             else if (changed)
16892             {
16893                 addToPending = true;
16894
16895                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16896             }
16897         }
16898
16899         if (!addToPending)
16900         {
16901             return;
16902         }
16903
16904         if (block->bbStkDepth > 0)
16905         {
16906             // We need to fix the types of any spill temps that might have changed:
16907             //   int->native int, float->double, int->byref, etc.
16908             impRetypeEntryStateTemps(block);
16909         }
16910
16911         // OK, we must add to the pending list, if it's not already in it.
16912         if (impGetPendingBlockMember(block) != 0)
16913         {
16914             return;
16915         }
16916     }
16917
16918     // Get an entry to add to the pending list
16919
16920     PendingDsc* dsc;
16921
16922     if (impPendingFree)
16923     {
16924         // We can reuse one of the freed up dscs.
16925         dsc            = impPendingFree;
16926         impPendingFree = dsc->pdNext;
16927     }
16928     else
16929     {
16930         // We have to create a new dsc
16931         dsc = new (this, CMK_Unknown) PendingDsc;
16932     }
16933
16934     dsc->pdBB                 = block;
16935     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16936     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16937
16938     // Save the stack trees for later
16939
16940     if (verCurrentState.esStackDepth)
16941     {
16942         impSaveStackState(&dsc->pdSavedStack, false);
16943     }
16944
16945     // Add the entry to the pending list
16946
16947     dsc->pdNext    = impPendingList;
16948     impPendingList = dsc;
16949     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16950
16951     // Various assertions now require us to consider the block as not imported (at least for
16952     // the final time...)
16953     block->bbFlags &= ~BBF_IMPORTED;
16954
16955 #ifdef DEBUG
16956     if (verbose && 0)
16957     {
16958         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16959     }
16960 #endif
16961 }
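// Illustrative sketch (editorial note, not part of the JIT): the stack-depth check in
// impImportBlockPending above is what rejects IL where a join point can be reached with
// different evaluation-stack depths, e.g. (hypothetical method, offsets approximate):
//
//     IL_0000:  ldc.i4.1
//     IL_0001:  brtrue.s   IL_0004   // taken edge reaches IL_0004 with an empty stack
//     IL_0003:  ldc.i4.7              // fall-through edge reaches IL_0004 with one item
//     IL_0004:  pop                   // join entered with depths 0 and 1 -> NO_WAY above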
16962
16963 /*****************************************************************************/
16964 //
16965 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16966 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16967 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16968
16969 void Compiler::impReimportBlockPending(BasicBlock* block)
16970 {
16971     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16972
16973     assert(block->bbFlags & BBF_IMPORTED);
16974
16975     // OK, we must add to the pending list, if it's not already in it.
16976     if (impGetPendingBlockMember(block) != 0)
16977     {
16978         return;
16979     }
16980
16981     // Get an entry to add to the pending list
16982
16983     PendingDsc* dsc;
16984
16985     if (impPendingFree)
16986     {
16987         // We can reuse one of the freed up dscs.
16988         dsc            = impPendingFree;
16989         impPendingFree = dsc->pdNext;
16990     }
16991     else
16992     {
16993         // We have to create a new dsc
16994         dsc = new (this, CMK_ImpStack) PendingDsc;
16995     }
16996
16997     dsc->pdBB = block;
16998
16999     if (block->bbEntryState)
17000     {
17001         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
17002         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
17003         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
17004     }
17005     else
17006     {
17007         dsc->pdThisPtrInit        = TIS_Bottom;
17008         dsc->pdSavedStack.ssDepth = 0;
17009         dsc->pdSavedStack.ssTrees = nullptr;
17010     }
17011
17012     // Add the entry to the pending list
17013
17014     dsc->pdNext    = impPendingList;
17015     impPendingList = dsc;
17016     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17017
17018     // Various assertions now require us to consider the block as not imported (at least for
17019     // the final time...)
17020     block->bbFlags &= ~BBF_IMPORTED;
17021
17022 #ifdef DEBUG
17023     if (verbose && 0)
17024     {
17025         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
17026     }
17027 #endif
17028 }
17029
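// Editorial note: BlockListNode uses a simple intrusive free list. FreeBlockListNode chains
// released nodes onto impBlockListNodeFreeList, and operator new pops from that chain before
// falling back to the compiler's arena, so the spill-clique walk below can allocate and free
// its worklist nodes cheaply.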
17030 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
17031 {
17032     if (comp->impBlockListNodeFreeList == nullptr)
17033     {
17034         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
17035     }
17036     else
17037     {
17038         BlockListNode* res             = comp->impBlockListNodeFreeList;
17039         comp->impBlockListNodeFreeList = res->m_next;
17040         return res;
17041     }
17042 }
17043
17044 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
17045 {
17046     node->m_next             = impBlockListNodeFreeList;
17047     impBlockListNodeFreeList = node;
17048 }
17049
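// The walker below computes the "spill clique" containing 'block' by alternating two closures:
// successors of known predecessor-members become successor-members, and (cheap) predecessors of
// known successor-members become predecessor-members, until neither set grows.
//
// Illustrative example (hypothetical block numbers and edges): if BB01 and BB02 both jump to
// BB03, and BB02 also jumps to BB04, then walking from BB01 visits BB03 and BB04 as
// successor-members and BB01 and BB02 as predecessor-members, so all of them end up agreeing
// on a single set of spill temps.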
17050 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
17051 {
17052     bool toDo = true;
17053
17054     noway_assert(!fgComputePredsDone);
17055     if (!fgCheapPredsValid)
17056     {
17057         fgComputeCheapPreds();
17058     }
17059
17060     BlockListNode* succCliqueToDo = nullptr;
17061     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
17062     while (toDo)
17063     {
17064         toDo = false;
17065         // Look at the successors of every member of the predecessor to-do list.
17066         while (predCliqueToDo != nullptr)
17067         {
17068             BlockListNode* node = predCliqueToDo;
17069             predCliqueToDo      = node->m_next;
17070             BasicBlock* blk     = node->m_blk;
17071             FreeBlockListNode(node);
17072
17073             const unsigned numSuccs = blk->NumSucc();
17074             for (unsigned succNum = 0; succNum < numSuccs; succNum++)
17075             {
17076                 BasicBlock* succ = blk->GetSucc(succNum);
17077                 // If it's not already in the clique, add it, and also add it
17078                 // as a member of the successor "toDo" set.
17079                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
17080                 {
17081                     callback->Visit(SpillCliqueSucc, succ);
17082                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
17083                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
17084                     toDo           = true;
17085                 }
17086             }
17087         }
17088         // Look at the predecessors of every member of the successor to-do list.
17089         while (succCliqueToDo != nullptr)
17090         {
17091             BlockListNode* node = succCliqueToDo;
17092             succCliqueToDo      = node->m_next;
17093             BasicBlock* blk     = node->m_blk;
17094             FreeBlockListNode(node);
17095
17096             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
17097             {
17098                 BasicBlock* predBlock = pred->block;
17099                 // If it's not already in the clique, add it, and also add it
17100                 // as a member of the predecessor "toDo" set.
17101                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
17102                 {
17103                     callback->Visit(SpillCliquePred, predBlock);
17104                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
17105                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
17106                     toDo           = true;
17107                 }
17108             }
17109         }
17110     }
17111
17112     // If this fails, it means we didn't walk the spill clique properly and somehow managed
17113     // to miss walking back to include the predecessor we started from.
17114     // The most likely cause is missing or out-of-date bbPreds.
17115     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
17116 }
17117
17118 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17119 {
17120     if (predOrSucc == SpillCliqueSucc)
17121     {
17122         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
17123         blk->bbStkTempsIn = m_baseTmp;
17124     }
17125     else
17126     {
17127         assert(predOrSucc == SpillCliquePred);
17128         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
17129         blk->bbStkTempsOut = m_baseTmp;
17130     }
17131 }
17132
17133 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17134 {
17135     // For Preds we could be a little smarter and just find the existing store
17136     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
17137     // just re-import the whole block (just like we do for successors)
17138
17139     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
17140     {
17141         // If we haven't imported this block and we're not going to (because it isn't on
17142         // the pending list) then just ignore it for now.
17143
17144         // This block has either never been imported (EntryState == NULL) or it failed
17145         // verification. Neither state requires us to force it to be imported now.
17146         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
17147         return;
17148     }
17149
17150     // For successors we have a valid verCurrentState, so just mark them for reimport
17151     // in the 'normal' way.
17152     // Unlike predecessors, we *DO* need to reimport the current block because the
17153     // initial import had the wrong entry state types.
17154     // Similarly, blocks that are currently on the pending list still need to call
17155     // impImportBlockPending to fix up their entry state.
17156     if (predOrSucc == SpillCliqueSucc)
17157     {
17158         m_pComp->impReimportMarkBlock(blk);
17159
17160         // Set the current stack state to that of the blk->bbEntryState
17161         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
17162         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
17163
17164         m_pComp->impImportBlockPending(blk);
17165     }
17166     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
17167     {
17168         // As described above, we are only visiting predecessors so they can
17169         // add the appropriate casts. Since we have already done that for the current
17170         // block, it does not need to be reimported.
17171         // Nor do we need to reimport blocks that are still pending but not yet
17172         // imported.
17173         //
17174         // For predecessors, we have no state to seed the EntryState, so we just have
17175         // to assume the existing one is correct.
17176         // If the block is also a successor, it will get the EntryState properly
17177         // updated when it is visited as a successor in the above "if" block.
17178         assert(predOrSucc == SpillCliquePred);
17179         m_pComp->impReimportBlockPending(blk);
17180     }
17181 }
17182
17183 // Re-type the incoming lclVar nodes to match the varDsc.
17184 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
17185 {
17186     if (blk->bbEntryState != nullptr)
17187     {
17188         EntryState* es = blk->bbEntryState;
17189         for (unsigned level = 0; level < es->esStackDepth; level++)
17190         {
17191             GenTree* tree = es->esStack[level].val;
17192             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
17193             {
17194                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
17195                 noway_assert(lclNum < lvaCount);
17196                 LclVarDsc* varDsc              = lvaTable + lclNum;
17197                 es->esStack[level].val->gtType = varDsc->TypeGet();
17198             }
17199         }
17200     }
17201 }
17202
17203 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
17204 {
17205     if (block->bbStkTempsOut != NO_BASE_TMP)
17206     {
17207         return block->bbStkTempsOut;
17208     }
17209
17210 #ifdef DEBUG
17211     if (verbose)
17212     {
17213         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
17214     }
17215 #endif // DEBUG
17216
17217     // Otherwise, choose one, and propagate to all members of the spill clique.
17218     // Grab enough temps for the whole stack.
17219     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
17220     SetSpillTempsBase callback(baseTmp);
17221
17222     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
17223     // to one spill clique, and similarly can only be the successor to one spill clique
17224     impWalkSpillCliqueFromPred(block, &callback);
17225
17226     return baseTmp;
17227 }
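// Illustrative example (hypothetical temp numbers): with two live stack entries at the clique
// boundary, lvaGrabTemps above might return a baseTmp of V10; every predecessor in the clique
// then spills its outgoing stack into V10/V11 and every successor reloads from those same temps,
// because SetSpillTempsBase::Visit records the one baseTmp in bbStkTempsOut/bbStkTempsIn for
// every member.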
17228
17229 void Compiler::impReimportSpillClique(BasicBlock* block)
17230 {
17231 #ifdef DEBUG
17232     if (verbose)
17233     {
17234         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
17235     }
17236 #endif // DEBUG
17237
17238     // If we get here, it is because this block is already part of a spill clique
17239     // and one predecessor had an outgoing live stack slot of type int, and this
17240     // block has an outgoing live stack slot of type native int.
17241     // We need to reset these before traversal because they have already been set
17242     // by the previous walk to determine all the members of the spill clique.
17243     impInlineRoot()->impSpillCliquePredMembers.Reset();
17244     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
17245
17246     ReimportSpillClique callback(this);
17247
17248     impWalkSpillCliqueFromPred(block, &callback);
17249 }
17250
17251 // Set the pre-state of "block" (which should not have a pre-state allocated) to
17252 // a copy of "srcState", cloning tree pointers as required.
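// The trees are cloned (gtCloneExpr below) rather than shared, so that subsequent importing,
// which may spill or otherwise reuse the trees currently on verCurrentState's stack, cannot
// alter the recorded pre-state.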
17253 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
17254 {
17255     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
17256     {
17257         block->bbEntryState = nullptr;
17258         return;
17259     }
17260
17261     block->bbEntryState = (EntryState*)compGetMem(sizeof(EntryState));
17262
17263     // block->bbEntryState.esRefcount = 1;
17264
17265     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
17266     block->bbEntryState->thisInitialized = TIS_Bottom;
17267
17268     if (srcState->esStackDepth > 0)
17269     {
17270         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
17271         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
17272
17273         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
17274         for (unsigned level = 0; level < srcState->esStackDepth; level++)
17275         {
17276             GenTree* tree                           = srcState->esStack[level].val;
17277             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
17278         }
17279     }
17280
17281     if (verTrackObjCtorInitState)
17282     {
17283         verSetThisInit(block, srcState->thisInitialized);
17284     }
17285
17286     return;
17287 }
17288
17289 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
17290 {
17291     assert(tis != TIS_Bottom); // Precondition.
17292     if (block->bbEntryState == nullptr)
17293     {
17294         block->bbEntryState = new (this, CMK_Unknown) EntryState();
17295     }
17296
17297     block->bbEntryState->thisInitialized = tis;
17298 }
17299
17300 /*
17301  * Resets the current state to the state at the start of the basic block
17302  */
17303 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
17304 {
17305
17306     if (block->bbEntryState == nullptr)
17307     {
17308         destState->esStackDepth    = 0;
17309         destState->thisInitialized = TIS_Bottom;
17310         return;
17311     }
17312
17313     destState->esStackDepth = block->bbEntryState->esStackDepth;
17314
17315     if (destState->esStackDepth > 0)
17316     {
17317         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
17318
17319         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
17320     }
17321
17322     destState->thisInitialized = block->bbThisOnEntry();
17323
17324     return;
17325 }
17326
17327 ThisInitState BasicBlock::bbThisOnEntry()
17328 {
17329     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
17330 }
17331
17332 unsigned BasicBlock::bbStackDepthOnEntry()
17333 {
17334     return (bbEntryState ? bbEntryState->esStackDepth : 0);
17335 }
17336
17337 void BasicBlock::bbSetStack(void* stackBuffer)
17338 {
17339     assert(bbEntryState);
17340     assert(stackBuffer);
17341     bbEntryState->esStack = (StackEntry*)stackBuffer;
17342 }
17343
17344 StackEntry* BasicBlock::bbStackOnEntry()
17345 {
17346     assert(bbEntryState);
17347     return bbEntryState->esStack;
17348 }
17349
17350 void Compiler::verInitCurrentState()
17351 {
17352     verTrackObjCtorInitState        = FALSE;
17353     verCurrentState.thisInitialized = TIS_Bottom;
17354
17355     if (tiVerificationNeeded)
17356     {
17357         // Track this ptr initialization
17358         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
17359         {
17360             verTrackObjCtorInitState        = TRUE;
17361             verCurrentState.thisInitialized = TIS_Uninit;
17362         }
17363     }
17364
17365     // initialize stack info
17366
17367     verCurrentState.esStackDepth = 0;
17368     assert(verCurrentState.esStack != nullptr);
17369
17370     // copy current state to entry state of first BB
17371     verInitBBEntryState(fgFirstBB, &verCurrentState);
17372 }
17373
17374 Compiler* Compiler::impInlineRoot()
17375 {
17376     if (impInlineInfo == nullptr)
17377     {
17378         return this;
17379     }
17380     else
17381     {
17382         return impInlineInfo->InlineRoot;
17383     }
17384 }
17385
17386 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
17387 {
17388     if (predOrSucc == SpillCliquePred)
17389     {
17390         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
17391     }
17392     else
17393     {
17394         assert(predOrSucc == SpillCliqueSucc);
17395         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
17396     }
17397 }
17398
17399 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
17400 {
17401     if (predOrSucc == SpillCliquePred)
17402     {
17403         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
17404     }
17405     else
17406     {
17407         assert(predOrSucc == SpillCliqueSucc);
17408         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
17409     }
17410 }
17411
17412 /*****************************************************************************
17413  *
17414  *  Convert the instrs ("import") into our internal format (trees). The
17415  *  basic flowgraph has already been constructed and is passed in.
17416  */
17417
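// In outline (editorial summary of the code below, not additional behavior):
//
//     verInitCurrentState();
//     mark leading BBF_INTERNAL blocks as imported and push the first real block;
//     while (impPendingList != nullptr)
//     {
//         dsc = pop head of impPendingList;               // and clear its pending-set bit
//         restore verCurrentState from dsc->pdSavedStack;
//         import dsc->pdBB (or convert it to a throw if it failed verification);
//     }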
17418 void Compiler::impImport(BasicBlock* method)
17419 {
17420 #ifdef DEBUG
17421     if (verbose)
17422     {
17423         printf("*************** In impImport() for %s\n", info.compFullName);
17424     }
17425 #endif
17426
17427     /* Allocate the stack contents */
17428
17429     if (info.compMaxStack <= _countof(impSmallStack))
17430     {
17431         /* Use local variable, don't waste time allocating on the heap */
17432
17433         impStkSize              = _countof(impSmallStack);
17434         verCurrentState.esStack = impSmallStack;
17435     }
17436     else
17437     {
17438         impStkSize              = info.compMaxStack;
17439         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17440     }
17441
17442     // initialize the entry state at start of method
17443     verInitCurrentState();
17444
17445     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
17446     Compiler* inlineRoot = impInlineRoot();
17447     if (this == inlineRoot) // These are only used on the root of the inlining tree.
17448     {
17449         // We have initialized these previously, but to size 0.  Make them larger.
17450         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
17451         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
17452         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
17453     }
17454     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
17455     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
17456     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
17457     impBlockListNodeFreeList = nullptr;
17458
17459 #ifdef DEBUG
17460     impLastILoffsStmt   = nullptr;
17461     impNestedStackSpill = false;
17462 #endif
17463     impBoxTemp = BAD_VAR_NUM;
17464
17465     impPendingList = impPendingFree = nullptr;
17466
17467     /* Add the entry-point to the worker-list */
17468
17469     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
17470     // from EH normalization.
17471     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
17472     // out.
17473     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
17474     {
17475         // Treat these as imported.
17476         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
17477         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
17478         method->bbFlags |= BBF_IMPORTED;
17479     }
17480
17481     impImportBlockPending(method);
17482
17483     /* Import blocks in the worker-list until there are no more */
17484
17485     while (impPendingList)
17486     {
17487         /* Remove the entry at the front of the list */
17488
17489         PendingDsc* dsc = impPendingList;
17490         impPendingList  = impPendingList->pdNext;
17491         impSetPendingBlockMember(dsc->pdBB, 0);
17492
17493         /* Restore the stack state */
17494
17495         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
17496         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
17497         if (verCurrentState.esStackDepth)
17498         {
17499             impRestoreStackState(&dsc->pdSavedStack);
17500         }
17501
17502         /* Add the entry to the free list for reuse */
17503
17504         dsc->pdNext    = impPendingFree;
17505         impPendingFree = dsc;
17506
17507         /* Now import the block */
17508
17509         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
17510         {
17511
17512 #ifdef _TARGET_64BIT_
17513             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
17514             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
17515             // method for further explanation on why we raise this exception instead of making the jitted
17516             // code throw the verification exception during execution.
17517             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
17518             {
17519                 BADCODE("Basic block marked as not verifiable");
17520             }
17521             else
17522 #endif // _TARGET_64BIT_
17523             {
17524                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
17525                 impEndTreeList(dsc->pdBB);
17526             }
17527         }
17528         else
17529         {
17530             impImportBlock(dsc->pdBB);
17531
17532             if (compDonotInline())
17533             {
17534                 return;
17535             }
17536             if (compIsForImportOnly() && !tiVerificationNeeded)
17537             {
17538                 return;
17539             }
17540         }
17541     }
17542
17543 #ifdef DEBUG
17544     if (verbose && info.compXcptnsCount)
17545     {
17546         printf("\nAfter impImport() added block for try,catch,finally");
17547         fgDispBasicBlocks();
17548         printf("\n");
17549     }
17550
17551     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
17552     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
17553     {
17554         block->bbFlags &= ~BBF_VISITED;
17555     }
17556 #endif
17557
17558     assert(!compIsForInlining() || !tiVerificationNeeded);
17559 }
17560
17561 // Checks if a typeinfo (usually stored in the type stack) is a struct.
17562 // The invariant here is that if it's not a ref or a method and has a class handle,
17563 // it's a valuetype.
17564 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
17565 {
17566     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
17567     {
17568         return true;
17569     }
17570     else
17571     {
17572         return false;
17573     }
17574 }
17575
17576 /*****************************************************************************
17577  *  Check to see if the tree is the address of a local or
17578     the address of a field in a local.
17579
17580     *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
17581
17582  */
17583
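// Editorial note: the walk below accepts shapes such as ADDR(LCL_VAR V01) and
// ADDR(FIELD(ADDR(FIELD(ADDR(LCL_VAR V01))))) -- i.e. "&v01" and "&v01.f1.f2" for a
// hypothetical local V01 -- and returns FALSE for static fields, whose FIELD node has a
// null object.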
17584 BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
17585 {
17586     if (tree->gtOper != GT_ADDR)
17587     {
17588         return FALSE;
17589     }
17590
17591     GenTree* op = tree->gtOp.gtOp1;
17592     while (op->gtOper == GT_FIELD)
17593     {
17594         op = op->gtField.gtFldObj;
17595         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
17596         {
17597             op = op->gtOp.gtOp1;
17598         }
17599         else
17600         {
17601             return false;
17602         }
17603     }
17604
17605     if (op->gtOper == GT_LCL_VAR)
17606     {
17607         *lclVarTreeOut = op;
17608         return TRUE;
17609     }
17610     else
17611     {
17612         return FALSE;
17613     }
17614 }
17615
17616 //------------------------------------------------------------------------
17617 // impMakeDiscretionaryInlineObservations: make observations that help
17618 // determine the profitability of a discretionary inline
17619 //
17620 // Arguments:
17621 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
17622 //    inlineResult -- InlineResult accumulating information about this inline
17623 //
17624 // Notes:
17625 //    If inlining or prejitting the root, this method also makes
17626 //    various observations about the method that factor into inline
17627 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
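//
//    The callsite frequency classification below buckets the call site as HOT (prejit root
//    or a maximally hot block), LOOP (block with a backward jump, excluding direct recursion),
//    WARM (nonzero profile weight), RARE (a run-rarely block, or a .cctor as indicated by
//    FLG_CCTOR), or BORING otherwise.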
17628
17629 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
17630 {
17631     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
17632            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
17633            );
17634
17635     // If we're really inlining, we should just have one result in play.
17636     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
17637
17638     // If this is a "forceinline" method, the JIT probably shouldn't have gone
17639     // to the trouble of estimating the native code size. Even if it did, it
17640     // shouldn't be relying on the result of this method.
17641     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
17642
17643     // Note if the caller contains NEWOBJ or NEWARR.
17644     Compiler* rootCompiler = impInlineRoot();
17645
17646     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
17647     {
17648         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
17649     }
17650
17651     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
17652     {
17653         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
17654     }
17655
17656     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
17657     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
17658
17659     if (isSpecialMethod)
17660     {
17661         if (calleeIsStatic)
17662         {
17663             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
17664         }
17665         else
17666         {
17667             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
17668         }
17669     }
17670     else if (!calleeIsStatic)
17671     {
17672         // Callee is an instance method.
17673         //
17674         // Check if the callee has the same 'this' as the root.
17675         if (pInlineInfo != nullptr)
17676         {
17677             GenTree* thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
17678             assert(thisArg);
17679             bool isSameThis = impIsThis(thisArg);
17680             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
17681         }
17682     }
17683
17684     // Note if the callee's class is a promotable struct
17685     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
17686     {
17687         lvaStructPromotionInfo structPromotionInfo;
17688         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
17689         if (structPromotionInfo.canPromote)
17690         {
17691             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
17692         }
17693     }
17694
17695 #ifdef FEATURE_SIMD
17696
17697     // Note if this method has SIMD args or return value
17698     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
17699     {
17700         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
17701     }
17702
17703 #endif // FEATURE_SIMD
17704
17705     // Roughly classify callsite frequency.
17706     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
17707
17708     // If this is a prejit root, or a maximally hot block...
17709     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
17710     {
17711         frequency = InlineCallsiteFrequency::HOT;
17712     }
17713     // No training data.  Look for loop-like things.
17714     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
17715     // However, give it to things nearby.
17716     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
17717              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
17718     {
17719         frequency = InlineCallsiteFrequency::LOOP;
17720     }
17721     else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
17722     {
17723         frequency = InlineCallsiteFrequency::WARM;
17724     }
17725     // Now modify the multiplier based on where we're called from.
17726     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
17727     {
17728         frequency = InlineCallsiteFrequency::RARE;
17729     }
17730     else
17731     {
17732         frequency = InlineCallsiteFrequency::BORING;
17733     }
17734
17735     // Also capture the block weight of the call site.  In the prejit
17736     // root case, assume there's some hot call site for this method.
17737     unsigned weight = 0;
17738
17739     if (pInlineInfo != nullptr)
17740     {
17741         weight = pInlineInfo->iciBlock->bbWeight;
17742     }
17743     else
17744     {
17745         weight = BB_MAX_WEIGHT;
17746     }
17747
17748     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
17749     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
17750 }
17751
17752 /*****************************************************************************
17753  This method makes a STATIC inlining decision based on the IL code.
17754  It should not make any inlining decision based on the context.
17755  If forceInline is true, then the inlining decision should not depend on
17756  performance heuristics (code size, etc.).
17757  */
17758
17759 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
17760                               CORINFO_METHOD_INFO*  methInfo,
17761                               bool                  forceInline,
17762                               InlineResult*         inlineResult)
17763 {
17764     unsigned codeSize = methInfo->ILCodeSize;
17765
17766     // We shouldn't have made up our minds yet...
17767     assert(!inlineResult->IsDecided());
17768
17769     if (methInfo->EHcount)
17770     {
17771         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
17772         return;
17773     }
17774
17775     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
17776     {
17777         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17778         return;
17779     }
17780
17781     // For now we don't inline varargs (import code can't handle it)
17782
17783     if (methInfo->args.isVarArg())
17784     {
17785         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17786         return;
17787     }
17788
17789     // Reject if it has too many locals.
17790     // This is currently an implementation limit due to fixed-size arrays in the
17791     // inline info, rather than a performance heuristic.
17792
17793     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17794
17795     if (methInfo->locals.numArgs > MAX_INL_LCLS)
17796     {
17797         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17798         return;
17799     }
17800
17801     // Make sure there aren't too many arguments.
17802     // This is currently an implementation limit due to fixed-size arrays in the
17803     // inline info, rather than a performance heuristic.
17804
17805     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17806
17807     if (methInfo->args.numArgs > MAX_INL_ARGS)
17808     {
17809         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17810         return;
17811     }
17812
17813     // Note force inline state
17814
17815     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17816
17817     // Note IL code size
17818
17819     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17820
17821     if (inlineResult->IsFailure())
17822     {
17823         return;
17824     }
17825
17826     // Make sure maxstack is not too big
17827
17828     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17829
17830     if (inlineResult->IsFailure())
17831     {
17832         return;
17833     }
17834 }
17835
17836 /*****************************************************************************
17837  */
17838
17839 void Compiler::impCheckCanInline(GenTree*               call,
17840                                  CORINFO_METHOD_HANDLE  fncHandle,
17841                                  unsigned               methAttr,
17842                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17843                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17844                                  InlineResult*          inlineResult)
17845 {
17846     // Either the EE or the JIT might throw exceptions below.
17847     // If that happens, just don't inline the method.
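    // The work is funneled through eeRunWithErrorTrap: every input and output lives in the
    // 'param' struct below so the callback can be a captureless lambda, and any exception the
    // EE or JIT raises simply makes eeRunWithErrorTrap return false, which we report as
    // CALLSITE_COMPILATION_ERROR at the end of this method.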
17848
17849     struct Param
17850     {
17851         Compiler*              pThis;
17852         GenTree*               call;
17853         CORINFO_METHOD_HANDLE  fncHandle;
17854         unsigned               methAttr;
17855         CORINFO_CONTEXT_HANDLE exactContextHnd;
17856         InlineResult*          result;
17857         InlineCandidateInfo**  ppInlineCandidateInfo;
17858     } param;
17859     memset(&param, 0, sizeof(param));
17860
17861     param.pThis                 = this;
17862     param.call                  = call;
17863     param.fncHandle             = fncHandle;
17864     param.methAttr              = methAttr;
17865     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17866     param.result                = inlineResult;
17867     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17868
17869     bool success = eeRunWithErrorTrap<Param>(
17870         [](Param* pParam) {
17871             DWORD                  dwRestrictions = 0;
17872             CorInfoInitClassResult initClassResult;
17873
17874 #ifdef DEBUG
17875             const char* methodName;
17876             const char* className;
17877             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17878
17879             if (JitConfig.JitNoInline())
17880             {
17881                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17882                 goto _exit;
17883             }
17884 #endif
17885
17886             /* Try to get the code address/size for the method */
17887
17888             CORINFO_METHOD_INFO methInfo;
17889             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17890             {
17891                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17892                 goto _exit;
17893             }
17894
17895             bool forceInline;
17896             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17897
17898             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17899
17900             if (pParam->result->IsFailure())
17901             {
17902                 assert(pParam->result->IsNever());
17903                 goto _exit;
17904             }
17905
17906             // Speculatively check if initClass() can be done.
17907             // If it can be done, we will try to inline the method. If inlining
17908             // succeeds, then we will do the non-speculative initClass() and commit it.
17909             // If this speculative call to initClass() fails, there is no point
17910             // trying to inline this method.
17911             initClassResult =
17912                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17913                                                            pParam->exactContextHnd /* context */,
17914                                                            TRUE /* speculative */);
17915
17916             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17917             {
17918                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17919                 goto _exit;
17920             }
17921
17922             // Give the EE the final say in whether to inline or not.
17923             // This should be last since, for verifiable code, this can be expensive.
17924
17925             /* VM Inline check also ensures that the method is verifiable if needed */
17926             CorInfoInline vmResult;
17927             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17928                                                                   &dwRestrictions);
17929
17930             if (vmResult == INLINE_FAIL)
17931             {
17932                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17933             }
17934             else if (vmResult == INLINE_NEVER)
17935             {
17936                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17937             }
17938
17939             if (pParam->result->IsFailure())
17940             {
17941                 // Make sure not to report this one.  It was already reported by the VM.
17942                 pParam->result->SetReported();
17943                 goto _exit;
17944             }
17945
17946             // check for unsupported inlining restrictions
17947             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17948
17949             if (dwRestrictions & INLINE_SAME_THIS)
17950             {
17951                 GenTree* thisArg = pParam->call->gtCall.gtCallObjp;
17952                 assert(thisArg);
17953
17954                 if (!pParam->pThis->impIsThis(thisArg))
17955                 {
17956                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17957                     goto _exit;
17958                 }
17959             }
17960
17961             /* Get the method properties */
17962
17963             CORINFO_CLASS_HANDLE clsHandle;
17964             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17965             unsigned clsAttr;
17966             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17967
17968             /* Get the return type */
17969
17970             var_types fncRetType;
17971             fncRetType = pParam->call->TypeGet();
17972
17973 #ifdef DEBUG
17974             var_types fncRealRetType;
17975             fncRealRetType = JITtype2varType(methInfo.args.retType);
17976
17977             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17978                    // <BUGNUM> VSW 288602 </BUGNUM>
17979                    // In case of IJW, we allow to assign a native pointer to a BYREF.
17980                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17981                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17982 #endif
17983
17984             //
17985             // Allocate an InlineCandidateInfo structure
17986             //
17987             InlineCandidateInfo* pInfo;
17988             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17989
17990             pInfo->dwRestrictions  = dwRestrictions;
17991             pInfo->methInfo        = methInfo;
17992             pInfo->methAttr        = pParam->methAttr;
17993             pInfo->clsHandle       = clsHandle;
17994             pInfo->clsAttr         = clsAttr;
17995             pInfo->fncRetType      = fncRetType;
17996             pInfo->exactContextHnd = pParam->exactContextHnd;
17997             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17998             pInfo->initClassResult = initClassResult;
17999
18000             *(pParam->ppInlineCandidateInfo) = pInfo;
18001
18002         _exit:;
18003         },
18004         &param);
18005     if (!success)
18006     {
18007         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
18008     }
18009 }
18010
18011 //------------------------------------------------------------------------
18012 // impInlineRecordArgInfo: record information about an inline candidate argument
18013 //
18014 // Arguments:
18015 //   pInlineInfo - inline info for the inline candidate
18016 //   curArgVal - tree for the caller actual argument value
18017 //   argNum - logical index of this argument
18018 //   inlineResult - result of ongoing inline evaluation
18019 //
18020 // Notes:
18021 //
18022 //   Checks for various inline blocking conditions and makes notes in
18023 //   the inline info arg table about the properties of the actual argument. These
18024 //   properties are used later by impFetchArg to determine how best to
18025 //   pass the argument into the inlinee.
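//
//   For example (illustrative only), an actual of the form "&someStructLocal" -- where
//   someStructLocal is a hypothetical struct-typed caller local -- is noted below both as
//   argIsByRefToStructLocal and as argIsInvariant.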
18026
18027 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
18028                                       GenTree*      curArgVal,
18029                                       unsigned      argNum,
18030                                       InlineResult* inlineResult)
18031 {
18032     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
18033
18034     if (curArgVal->gtOper == GT_MKREFANY)
18035     {
18036         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
18037         return;
18038     }
18039
18040     inlCurArgInfo->argNode = curArgVal;
18041
18042     GenTree* lclVarTree;
18043     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
18044     {
18045         inlCurArgInfo->argIsByRefToStructLocal = true;
18046 #ifdef FEATURE_SIMD
18047         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
18048         {
18049             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
18050         }
18051 #endif // FEATURE_SIMD
18052     }
18053
18054     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
18055     {
18056         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
18057         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
18058     }
18059
18060     if (curArgVal->gtOper == GT_LCL_VAR)
18061     {
18062         inlCurArgInfo->argIsLclVar = true;
18063
18064         /* Remember the "original" argument number */
18065         curArgVal->gtLclVar.gtLclILoffs = argNum;
18066     }
18067
18068     if ((curArgVal->OperKind() & GTK_CONST) ||
18069         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
18070     {
18071         inlCurArgInfo->argIsInvariant = true;
18072         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
18073         {
18074             // Abort inlining at this call site
18075             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
18076             return;
18077         }
18078     }
18079
18080     // If the arg is a local that is address-taken, we can't safely
18081     // directly substitute it into the inlinee.
18082     //
18083     // Previously we'd accomplish this by setting "argHasLdargaOp" but
18084     // that has a stronger meaning: that the arg value can change in
18085     // the method body. Using that flag prevents type propagation,
18086     // which is safe in this case.
18087     //
18088     // Instead mark the arg as having a caller local ref.
18089     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
18090     {
18091         inlCurArgInfo->argHasCallerLocalRef = true;
18092     }
18093
18094 #ifdef DEBUG
18095     if (verbose)
18096     {
18097         if (inlCurArgInfo->argIsThis)
18098         {
18099             printf("thisArg:");
18100         }
18101         else
18102         {
18103             printf("\nArgument #%u:", argNum);
18104         }
18105         if (inlCurArgInfo->argIsLclVar)
18106         {
18107             printf(" is a local var");
18108         }
18109         if (inlCurArgInfo->argIsInvariant)
18110         {
18111             printf(" is a constant");
18112         }
18113         if (inlCurArgInfo->argHasGlobRef)
18114         {
18115             printf(" has global refs");
18116         }
18117         if (inlCurArgInfo->argHasCallerLocalRef)
18118         {
18119             printf(" has caller local ref");
18120         }
18121         if (inlCurArgInfo->argHasSideEff)
18122         {
18123             printf(" has side effects");
18124         }
18125         if (inlCurArgInfo->argHasLdargaOp)
18126         {
18127             printf(" has ldarga effect");
18128         }
18129         if (inlCurArgInfo->argHasStargOp)
18130         {
18131             printf(" has starg effect");
18132         }
18133         if (inlCurArgInfo->argIsByRefToStructLocal)
18134         {
18135             printf(" is byref to a struct local");
18136         }
18137
18138         printf("\n");
18139         gtDispTree(curArgVal);
18140         printf("\n");
18141     }
18142 #endif
18143 }
18144
18145 //------------------------------------------------------------------------
18146 // impInlineInitVars: setup inline information for inlinee args and locals
18147 //
18148 // Arguments:
18149 //    pInlineInfo - inline info for the inline candidate
18150 //
18151 // Notes:
18152 //    This method primarily adds caller-supplied info to the inlArgInfo
18153 //    and sets up the lclVarInfo table.
18154 //
18155 //    For args, the inlArgInfo records properties of the actual argument
18156 //    including the tree node that produces the arg value. This node is
18157 //    usually the tree node present at the call, but may also differ in
18158 //    various ways:
18159 //    - when the call arg is a GT_RET_EXPR, we search back through the ret
18160 //      expr chain for the actual node. Note this will either be the original
18161 //      call (which will be a failed inline by this point), or the return
18162 //      expression from some set of inlines.
18163 //    - when argument type casting is needed the necessary casts are added
18164 //      around the argument node.
18165 //    - if an argument can be simplified by folding then the node here is the
18166 //      folded value.
18167 //
18168 //   The method may make observations that lead to marking this candidate as
18169 //   a failed inline. If this happens, the initialization is abandoned immediately
18170 //   to try to reduce the jit time cost of a failed inline.
18171
18172 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
18173 {
18174     assert(!compIsForInlining());
18175
18176     GenTree*             call         = pInlineInfo->iciCall;
18177     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
18178     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
18179     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
18180     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
18181     InlineResult*        inlineResult = pInlineInfo->inlineResult;
18182
18183     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
18184
18185     /* init the argument struct */
18186
18187     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
18188
18189     /* Get hold of the 'this' pointer and the argument list proper */
18190
18191     GenTree* thisArg = call->gtCall.gtCallObjp;
18192     GenTree* argList = call->gtCall.gtCallArgs;
18193     unsigned argCnt  = 0; // Count of the arguments
18194
18195     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
18196
18197     if (thisArg)
18198     {
18199         inlArgInfo[0].argIsThis = true;
18200         GenTree* actualThisArg  = thisArg->gtRetExprVal();
18201         impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, inlineResult);
18202
18203         if (inlineResult->IsFailure())
18204         {
18205             return;
18206         }
18207
18208         /* Increment the argument count */
18209         argCnt++;
18210     }
18211
18212     /* Record some information about each of the arguments */
18213     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
18214
18215 #if USER_ARGS_COME_LAST
18216     unsigned typeCtxtArg = thisArg ? 1 : 0;
18217 #else  // USER_ARGS_COME_LAST
18218     unsigned typeCtxtArg = methInfo->args.totalILArgs();
18219 #endif // USER_ARGS_COME_LAST
18220
18221     for (GenTree* argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
18222     {
18223         if (argTmp == argList && hasRetBuffArg)
18224         {
18225             continue;
18226         }
18227
18228         // Ignore the type context argument
18229         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
18230         {
18231             pInlineInfo->typeContextArg = typeCtxtArg;
18232             typeCtxtArg                 = 0xFFFFFFFF;
18233             continue;
18234         }
18235
18236         assert(argTmp->gtOper == GT_LIST);
18237         GenTree* arg       = argTmp->gtOp.gtOp1;
18238         GenTree* actualArg = arg->gtRetExprVal();
18239         impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
18240
18241         if (inlineResult->IsFailure())
18242         {
18243             return;
18244         }
18245
18246         /* Increment the argument count */
18247         argCnt++;
18248     }
18249
18250     /* Make sure we got the arg number right */
18251     assert(argCnt == methInfo->args.totalILArgs());
18252
18253 #ifdef FEATURE_SIMD
18254     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
18255 #endif // FEATURE_SIMD
18256
18257     /* We have typeless opcodes, get type information from the signature */
18258
18259     if (thisArg)
18260     {
18261         var_types sigType;
18262
18263         if (clsAttr & CORINFO_FLG_VALUECLASS)
18264         {
18265             sigType = TYP_BYREF;
18266         }
18267         else
18268         {
18269             sigType = TYP_REF;
18270         }
18271
18272         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
18273         lclVarInfo[0].lclHasLdlocaOp = false;
18274
18275 #ifdef FEATURE_SIMD
18276         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
18277         // the inlining multiplier) for anything in that assembly.
18278         // But we only need to normalize it if it is a TYP_STRUCT
18279         // (which we need to do even if we have already set foundSIMDType).
18280         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
18281         {
18282             if (sigType == TYP_STRUCT)
18283             {
18284                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
18285             }
18286             foundSIMDType = true;
18287         }
18288 #endif // FEATURE_SIMD
18289         lclVarInfo[0].lclTypeInfo = sigType;
18290
18291         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
18292                (thisArg->gtType == TYP_I_IMPL && // "this" is unmgd but the method's class doesn't care
18293                 (clsAttr & CORINFO_FLG_VALUECLASS)));
18294
18295         if (genActualType(thisArg->gtType) != genActualType(sigType))
18296         {
18297             if (sigType == TYP_REF)
18298             {
18299                 /* The argument cannot be bashed into a ref (see bug 750871) */
18300                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
18301                 return;
18302             }
18303
18304             /* This can only happen with byrefs <-> ints/shorts */
18305
18306             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
18307             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
18308
18309             if (sigType == TYP_BYREF)
18310             {
18311                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18312             }
18313             else if (thisArg->gtType == TYP_BYREF)
18314             {
18315                 assert(sigType == TYP_I_IMPL);
18316
18317                 /* If possible change the BYREF to an int */
18318                 if (thisArg->IsVarAddr())
18319                 {
18320                     thisArg->gtType              = TYP_I_IMPL;
18321                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18322                 }
18323                 else
18324                 {
18325                     /* Arguments 'int <- byref' cannot be bashed */
18326                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18327                     return;
18328                 }
18329             }
18330         }
18331     }
18332
18333     /* Init the types of the arguments and make sure the types
18334      * from the trees match the types in the signature */
18335
18336     CORINFO_ARG_LIST_HANDLE argLst;
18337     argLst = methInfo->args.args;
18338
18339     unsigned i;
18340     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
18341     {
18342         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
18343
18344         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
18345
18346 #ifdef FEATURE_SIMD
18347         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
18348         {
18349             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
18350             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
18351             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
18352             foundSIMDType = true;
18353             if (sigType == TYP_STRUCT)
18354             {
18355                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
18356                 sigType              = structType;
18357             }
18358         }
18359 #endif // FEATURE_SIMD
18360
18361         lclVarInfo[i].lclTypeInfo    = sigType;
18362         lclVarInfo[i].lclHasLdlocaOp = false;
18363
18364         /* Does the tree type match the signature type? */
18365
18366         GenTree* inlArgNode = inlArgInfo[i].argNode;
18367
18368         if (sigType != inlArgNode->gtType)
18369         {
18370             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
18371                but in bad IL cases with caller-callee signature mismatches we can see other types.
18372                Intentionally reject cases with mismatches so the jit is more flexible when
18373                encountering bad IL. */
18374
18375             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
18376                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
18377                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
18378
18379             if (!isPlausibleTypeMatch)
18380             {
18381                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
18382                 return;
18383             }
18384
18385             /* Is it a narrowing or widening cast?
18386              * Widening casts are ok since the value computed is already
18387              * normalized to an int (on the IL stack) */
18388
18389             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
18390             {
18391                 if (sigType == TYP_BYREF)
18392                 {
18393                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18394                 }
18395                 else if (inlArgNode->gtType == TYP_BYREF)
18396                 {
18397                     assert(varTypeIsIntOrI(sigType));
18398
18399                     /* If possible bash the BYREF to an int */
18400                     if (inlArgNode->IsVarAddr())
18401                     {
18402                         inlArgNode->gtType           = TYP_I_IMPL;
18403                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18404                     }
18405                     else
18406                     {
18407                         /* Arguments 'int <- byref' cannot be changed */
18408                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18409                         return;
18410                     }
18411                 }
18412                 else if (genTypeSize(sigType) < EA_PTRSIZE)
18413                 {
18414                     /* Narrowing cast */
18415
18416                     if (inlArgNode->gtOper == GT_LCL_VAR &&
18417                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
18418                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
18419                     {
18420                         /* We don't need to insert a cast here as the variable
18421                            was assigned a normalized value of the right type */
18422
18423                         continue;
18424                     }
18425
18426                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
18427
18428                     inlArgInfo[i].argIsLclVar = false;
18429
18430                     /* Try to fold the node in case we have constant arguments */
18431
18432                     if (inlArgInfo[i].argIsInvariant)
18433                     {
18434                         inlArgNode            = gtFoldExprConst(inlArgNode);
18435                         inlArgInfo[i].argNode = inlArgNode;
18436                         assert(inlArgNode->OperIsConst());
18437                     }
18438                 }
18439 #ifdef _TARGET_64BIT_
18440                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
18441                 {
18442                     // This should only happen for int -> native int widening
18443                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
18444
18445                     inlArgInfo[i].argIsLclVar = false;
18446
18447                     /* Try to fold the node in case we have constant arguments */
18448
18449                     if (inlArgInfo[i].argIsInvariant)
18450                     {
18451                         inlArgNode            = gtFoldExprConst(inlArgNode);
18452                         inlArgInfo[i].argNode = inlArgNode;
18453                         assert(inlArgNode->OperIsConst());
18454                     }
18455                 }
18456 #endif // _TARGET_64BIT_
18457             }
18458         }
18459     }
18460
18461     /* Init the types of the local variables */
18462
18463     CORINFO_ARG_LIST_HANDLE localsSig;
18464     localsSig = methInfo->locals.args;
18465
18466     for (i = 0; i < methInfo->locals.numArgs; i++)
18467     {
18468         bool      isPinned;
18469         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
18470
18471         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
18472         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
18473         lclVarInfo[i + argCnt].lclTypeInfo    = type;
18474
18475         if (varTypeIsGC(type))
18476         {
18477             pInlineInfo->numberOfGcRefLocals++;
18478         }
18479
18480         if (isPinned)
18481         {
18482             // Pinned locals may cause inlines to fail.
18483             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
18484             if (inlineResult->IsFailure())
18485             {
18486                 return;
18487             }
18488         }
18489
18490         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
18491
18492         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
18493         // out on the inline.
18494         if (type == TYP_STRUCT)
18495         {
18496             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
18497             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
18498             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
18499             {
18500                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
18501                 if (inlineResult->IsFailure())
18502                 {
18503                     return;
18504                 }
18505
18506                 // Do further notification in the case where the call site is rare; some policies do
18507                 // not track the relative hotness of call sites for "always" inline cases.
18508                 if (pInlineInfo->iciBlock->isRunRarely())
18509                 {
18510                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
18511                     if (inlineResult->IsFailure())
18512                     {
18513
18514                         return;
18515                     }
18516                 }
18517             }
18518         }
18519
18520         localsSig = info.compCompHnd->getArgNext(localsSig);
18521
18522 #ifdef FEATURE_SIMD
18523         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
18524         {
18525             foundSIMDType = true;
18526             if (featureSIMD && type == TYP_STRUCT)
18527             {
18528                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
18529                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
18530             }
18531         }
18532 #endif // FEATURE_SIMD
18533     }
18534
18535 #ifdef FEATURE_SIMD
18536     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
18537     {
18538         foundSIMDType = true;
18539     }
18540     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
18541 #endif // FEATURE_SIMD
18542 }
18543
18544 //------------------------------------------------------------------------
18545 // impInlineFetchLocal: get a local var that represents an inlinee local
18546 //
18547 // Arguments:
18548 //    lclNum -- number of the inlinee local
18549 //    reason -- debug string describing purpose of the local var
18550 //
18551 // Returns:
18552 //    Number of the local to use
18553 //
18554 // Notes:
18555 //    This method is invoked only for locals actually used in the
18556 //    inlinee body.
18557 //
18558 //    Allocates a new temp if necessary, and copies key properties
18559 //    over from the inlinee local var info.
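//
//    For example (illustrative only; numbers are hypothetical): the first use
//    of inlinee local #2 of type int grabs a fresh caller temp, say V10, sets
//    lvaTable[V10].lvType to TYP_INT, and caches V10 in lclTmpNum[2]; any
//    later use of local #2 maps straight back to V10.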
18560
18561 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
18562 {
18563     assert(compIsForInlining());
18564
18565     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
18566
18567     if (tmpNum == BAD_VAR_NUM)
18568     {
18569         const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
18570         const var_types      lclTyp       = inlineeLocal.lclTypeInfo;
18571
18572         // The lifetime of this local might span multiple BBs.
18573         // So it is a long lifetime local.
18574         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
18575
18576         // Copy over key info
18577         lvaTable[tmpNum].lvType                 = lclTyp;
18578         lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
18579         lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
18580         lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
18581         lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
18582
18583         // Copy over class handle for ref types. Note this may be a
18584         // shared type -- someday perhaps we can get the exact
18585         // signature and pass in a more precise type.
18586         if (lclTyp == TYP_REF)
18587         {
18588             lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
18589         }
18590
18591         if (inlineeLocal.lclVerTypeInfo.IsStruct())
18592         {
18593             if (varTypeIsStruct(lclTyp))
18594             {
18595                 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18596             }
18597             else
18598             {
18599                 // This is a wrapped primitive.  Make sure the verstate knows that
18600                 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
18601             }
18602         }
18603
18604 #ifdef DEBUG
18605         // Sanity check that we're properly prepared for gc ref locals.
18606         if (varTypeIsGC(lclTyp))
18607         {
18608             // Since there are gc locals we should have seen them earlier
18609             // and if there was a return value, set up the spill temp.
18610             assert(impInlineInfo->HasGcRefLocals());
18611             assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
18612         }
18613         else
18614         {
18615             // Make sure all pinned locals count as gc refs.
18616             assert(!inlineeLocal.lclIsPinned);
18617         }
18618 #endif // DEBUG
18619     }
18620
18621     return tmpNum;
18622 }
18623
18624 //------------------------------------------------------------------------
18625 // impInlineFetchArg: return tree node for argument value in an inlinee
18626 //
18627 // Arguments:
18628 //    lclNum -- argument number in inlinee IL
18629 //    inlArgInfo -- argument info for inlinee
18630 //    lclVarInfo -- var info for inlinee
18631 //
18632 // Returns:
18633 //    Tree for the argument's value. Often an inlinee-scoped temp
18634 //    GT_LCL_VAR but can be other tree kinds, if the argument
18635 //    expression from the caller can be directly substituted into the
18636 //    inlinee body.
18637 //
18638 // Notes:
18639 //    Must be used only for arguments -- use impInlineFetchLocal for
18640 //    inlinee locals.
18641 //
18642 //    Direct substitution is performed when the formal argument cannot
18643 //    change value in the inlinee body (no starg or ldarga), and the
18644 //    actual argument expression's value cannot be changed if it is
18645 //    substituted it into the inlinee body.
18646 //
18647 //    Even if an inlinee-scoped temp is returned here, it may later be
18648 //    "bashed" to a caller-supplied tree when arguments are actually
18649 //    passed (see fgInlinePrependStatements). Bashing can happen if
18650 //    the argument ends up being single use and other conditions are
18651 //    met. So the contents of the tree returned here may not end up
18652 //    being the ones ultimately used for the argument.
18653 //
18654 //    This method will side effect inlArgInfo. It should only be called
18655 //    for actual uses of the argument in the inlinee.
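//
//    For example (illustrative only; names are hypothetical): for a call
//    'Callee(5)' where the inlinee never writes the parameter, each use of
//    the parameter becomes a clone of the constant 5; for
//    'Callee(SomeSideEffectingCall())' the argument is spilled to a temp and
//    each use becomes a GT_LCL_VAR referencing that temp.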
18656
18657 GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
18658 {
18659     // Cache the relevant arg and lcl info for this argument.
18660     // We will modify argInfo but not lclVarInfo.
18661     InlArgInfo&          argInfo          = inlArgInfo[lclNum];
18662     const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
18663     const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
18664     const var_types      lclTyp           = lclInfo.lclTypeInfo;
18665     GenTree*             op1              = nullptr;
18666
18667     if (argInfo.argIsInvariant && !argCanBeModified)
18668     {
18669         // Directly substitute constants or addresses of locals
18670         //
18671         // Clone the constant. Note that we cannot directly use
18672         // argNode in the trees even if !argInfo.argIsUsed as this
18673         // would introduce aliasing between inlArgInfo[].argNode and
18674         // impInlineExpr. Then gtFoldExpr() could change it, causing
18675         // further references to the argument working off of the
18676         // bashed copy.
18677         op1 = gtCloneExpr(argInfo.argNode);
18678         PREFIX_ASSUME(op1 != nullptr);
18679         argInfo.argTmpNum = BAD_VAR_NUM;
18680     }
18681     else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
18682     {
18683         // Directly substitute unaliased caller locals for args that cannot be modified
18684         //
18685         // Use the caller-supplied node if this is the first use.
18686         op1               = argInfo.argNode;
18687         argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
18688
18689         // Use an equivalent copy if this is the second or subsequent
18690         // use, or if we need to retype.
18691         //
18692         // Note argument type mismatches that prevent inlining should
18693         // have been caught in impInlineInitVars.
18694         if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
18695         {
18696             assert(op1->gtOper == GT_LCL_VAR);
18697             assert(lclNum == op1->gtLclVar.gtLclILoffs);
18698
18699             var_types newTyp = lclTyp;
18700
18701             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
18702             {
18703                 newTyp = genActualType(lclTyp);
18704             }
18705
18706             // Create a new lcl var node - remember the argument lclNum
18707             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
18708         }
18709     }
18710     else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
18711     {
18712         /* Argument is a by-ref address to a struct, a normed struct, or its field.
18713            In these cases, don't spill the byref to a local, simply clone the tree and use it.
18714            This way we will increase the chance for this byref to be optimized away by
18715            a subsequent "dereference" operation.
18716
18717            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
18718            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
18719            For example, if the caller is:
18720                 ldloca.s   V_1  // V_1 is a local struct
18721                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
18722            and the callee being inlined has:
18723                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
18724                     ldarga.s   ptrToInts
18725                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
18726            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
18727            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
18728         */
18729         assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
18730         op1 = gtCloneExpr(argInfo.argNode);
18731     }
18732     else
18733     {
18734         /* Argument is a complex expression - it must be evaluated into a temp */
18735
18736         if (argInfo.argHasTmp)
18737         {
18738             assert(argInfo.argIsUsed);
18739             assert(argInfo.argTmpNum < lvaCount);
18740
18741             /* Create a new lcl var node - remember the argument lclNum */
18742             op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
18743
18744             /* This is the second or later use of this argument,
18745             so we have to use the temp (instead of the actual arg) */
18746             argInfo.argBashTmpNode = nullptr;
18747         }
18748         else
18749         {
18750             /* First time use */
18751             assert(!argInfo.argIsUsed);
18752
18753             /* Reserve a temp for the expression.
18754             * Use a large size node as we may change it later */
18755
18756             const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
18757
18758             lvaTable[tmpNum].lvType = lclTyp;
18759
18760             // For ref types, determine the type of the temp.
18761             if (lclTyp == TYP_REF)
18762             {
18763                 if (!argCanBeModified)
18764                 {
18765                     // If the arg can't be modified in the method
18766                     // body, use the type of the value, if
18767                     // known. Otherwise, use the declared type.
18768                     lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18769                 }
18770                 else
18771                 {
18772                     // Arg might be modified, use the declared type of
18773                     // the argument.
18774                     lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18775                 }
18776             }
18777
18778             assert(lvaTable[tmpNum].lvAddrExposed == 0);
18779             if (argInfo.argHasLdargaOp)
18780             {
18781                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
18782             }
18783
18784             if (lclInfo.lclVerTypeInfo.IsStruct())
18785             {
18786                 if (varTypeIsStruct(lclTyp))
18787                 {
18788                     lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18789                 }
18790                 else
18791                 {
18792                     // This is a wrapped primitive.  Make sure the verstate knows that
18793                     lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
18794                 }
18795             }
18796
18797             argInfo.argHasTmp = true;
18798             argInfo.argTmpNum = tmpNum;
18799
18800             // If we require strict exception order, then arguments must
18801             // be evaluated in sequence before the body of the inlined method.
18802             // So we need to evaluate them to a temp.
18803             // Also, if arguments have global or local references, we need to
18804             // evaluate them to a temp before the inlined body as the
18805             // inlined body may be modifying the global ref.
18806             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
18807             // if it is a struct, because it requires some additional handling.
18808
18809             if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
18810                 !argInfo.argHasCallerLocalRef)
18811             {
18812                 /* Get a *LARGE* LCL_VAR node */
18813                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
18814
18815                 /* Record op1 as the very first use of this argument.
18816                 If there are no further uses of the arg, we may be
18817                 able to use the actual arg node instead of the temp.
18818                 If we do see any further uses, we will clear this. */
18819                 argInfo.argBashTmpNode = op1;
18820             }
18821             else
18822             {
18823                 /* Get a small LCL_VAR node */
18824                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
18825                 /* No bashing of this argument */
18826                 argInfo.argBashTmpNode = nullptr;
18827             }
18828         }
18829     }
18830
18831     // Mark this argument as used.
18832     argInfo.argIsUsed = true;
18833
18834     return op1;
18835 }
18836
18837 /******************************************************************************
18838  Is this the original "this" argument to the call being inlined?
18839
18840  Note that we do not inline methods with "starg 0", and so we do not need to
18841  worry about it.
18842 */
18843
18844 BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
18845 {
18846     assert(compIsForInlining());
18847     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
18848 }
18849
18850 //-----------------------------------------------------------------------------
18851 // This function checks if a dereference in the inlinee can guarantee that
18852 // the "this" is non-NULL.
18853 // If we haven't hit a branch or a side effect, and we are dereferencing
18854 // from 'this' to access a field or to make a GTF_CALL_NULLCHECK call,
18855 // then we can avoid a separate null pointer check.
18856 //
18857 // "additionalTreesToBeEvaluatedBefore"
18858 // is the set of pending trees that have not yet been added to the statement list,
18859 // and which have been removed from verCurrentState.esStack[]
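//
// For example (illustrative only): when inlining a method whose body begins
// with 'return this.someField;', the field access itself will fault if 'this'
// is null, so no separate explicit null check needs to be generated.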
18860
18861 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree*    additionalTreesToBeEvaluatedBefore,
18862                                                                   GenTree*    variableBeingDereferenced,
18863                                                                   InlArgInfo* inlArgInfo)
18864 {
18865     assert(compIsForInlining());
18866     assert(opts.OptEnabled(CLFLG_INLINING));
18867
18868     BasicBlock* block = compCurBB;
18869
18870     GenTree* stmt;
18871     GenTree* expr;
18872
18873     if (block != fgFirstBB)
18874     {
18875         return FALSE;
18876     }
18877
18878     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
18879     {
18880         return FALSE;
18881     }
18882
18883     if (additionalTreesToBeEvaluatedBefore &&
18884         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
18885     {
18886         return FALSE;
18887     }
18888
18889     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
18890     {
18891         expr = stmt->gtStmt.gtStmtExpr;
18892
18893         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
18894         {
18895             return FALSE;
18896         }
18897     }
18898
18899     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
18900     {
18901         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
18902         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
18903         {
18904             return FALSE;
18905         }
18906     }
18907
18908     return TRUE;
18909 }
18910
18911 //------------------------------------------------------------------------
18912 // impMarkInlineCandidate: determine if this call can be subsequently inlined
18913 //
18914 // Arguments:
18915 //    callNode -- call under scrutiny
18916 //    exactContextHnd -- context handle for inlining
18917 //    exactContextNeedsRuntimeLookup -- true if context required runtime lookup
18918 //    callInfo -- call info from VM
18919 //
18920 // Notes:
18921 //    If callNode is an inline candidate, this method sets the flag
18922 //    GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
18923 //    filled in the associated InlineCandidateInfo.
18924 //
18925 //    If callNode is not an inline candidate, and the reason is
18926 //    something that is inherent to the method being called, the
18927 //    method may be marked as "noinline" to short-circuit any
18928 //    future assessments of calls to this method.
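//
//    For example (illustrative only): a call to a synchronized method is
//    rejected here with CALLEE_IS_SYNCHRONIZED; since that property is
//    inherent to the callee, the callee can be flagged so that later call
//    sites skip the same assessment.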
18929
18930 void Compiler::impMarkInlineCandidate(GenTree*               callNode,
18931                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
18932                                       bool                   exactContextNeedsRuntimeLookup,
18933                                       CORINFO_CALL_INFO*     callInfo)
18934 {
18935     // Let the strategy know there's another call
18936     impInlineRoot()->m_inlineStrategy->NoteCall();
18937
18938     if (!opts.OptEnabled(CLFLG_INLINING))
18939     {
18940         /* XXX Mon 8/18/2008
18941          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
18942          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
18943          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
18944          * figure out why we did not set MAXOPT for this compile.
18945          */
18946         assert(!compIsForInlining());
18947         return;
18948     }
18949
18950     if (compIsForImportOnly())
18951     {
18952         // Don't bother creating the inline candidate during verification.
18953         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18954         // that leads to the creation of multiple instances of Compiler.
18955         return;
18956     }
18957
18958     GenTreeCall* call = callNode->AsCall();
18959     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18960
18961     // Don't inline if not optimizing root method
18962     if (opts.compDbgCode)
18963     {
18964         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18965         return;
18966     }
18967
18968     // Don't inline if inlining into root method is disabled.
18969     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18970     {
18971         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18972         return;
18973     }
18974
18975     // Inlining candidate determination needs to honor only IL tail prefix.
18976     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18977     if (call->IsTailPrefixedCall())
18978     {
18979         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18980         return;
18981     }
18982
18983     // Tail recursion elimination takes precedence over inlining.
18984     // TODO: We may want to do some of the additional checks from fgMorphCall
18985     // here to reduce the chance we don't inline a call that won't be optimized
18986     // as a fast tail call or turned into a loop.
18987     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18988     {
18989         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18990         return;
18991     }
18992
18993     if (call->IsVirtual())
18994     {
18995         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18996         return;
18997     }
18998
18999     /* Ignore helper calls */
19000
19001     if (call->gtCallType == CT_HELPER)
19002     {
19003         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
19004         return;
19005     }
19006
19007     /* Ignore indirect calls */
19008     if (call->gtCallType == CT_INDIRECT)
19009     {
19010         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
19011         return;
19012     }
19013
19014     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
19015      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
19016      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
19017
19018     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
19019     unsigned              methAttr;
19020
19021     // Reuse method flags from the original callInfo if possible
19022     if (fncHandle == callInfo->hMethod)
19023     {
19024         methAttr = callInfo->methodFlags;
19025     }
19026     else
19027     {
19028         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
19029     }
19030
19031 #ifdef DEBUG
19032     if (compStressCompile(STRESS_FORCE_INLINE, 0))
19033     {
19034         methAttr |= CORINFO_FLG_FORCEINLINE;
19035     }
19036 #endif
19037
19038     // Check for COMPlus_AggressiveInlining
19039     if (compDoAggressiveInlining)
19040     {
19041         methAttr |= CORINFO_FLG_FORCEINLINE;
19042     }
19043
19044     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
19045     {
19046         /* Don't bother inline blocks that are in the filter region */
19047         if (bbInCatchHandlerILRange(compCurBB))
19048         {
19049 #ifdef DEBUG
19050             if (verbose)
19051             {
19052                 printf("\nWill not inline blocks that are in the catch handler region\n");
19053             }
19054
19055 #endif
19056
19057             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
19058             return;
19059         }
19060
19061         if (bbInFilterILRange(compCurBB))
19062         {
19063 #ifdef DEBUG
19064             if (verbose)
19065             {
19066                 printf("\nWill not inline blocks that are in the filter region\n");
19067             }
19068 #endif
19069
19070             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
19071             return;
19072         }
19073     }
19074
19075     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
19076
19077     if (opts.compNeedSecurityCheck)
19078     {
19079         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
19080         return;
19081     }
19082
19083     /* Check if we tried to inline this method before */
19084
19085     if (methAttr & CORINFO_FLG_DONT_INLINE)
19086     {
19087         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
19088         return;
19089     }
19090
19091     /* Cannot inline synchronized methods */
19092
19093     if (methAttr & CORINFO_FLG_SYNCH)
19094     {
19095         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
19096         return;
19097     }
19098
19099     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
19100
19101     if (methAttr & CORINFO_FLG_SECURITYCHECK)
19102     {
19103         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
19104         return;
19105     }
19106
19107     InlineCandidateInfo* inlineCandidateInfo = nullptr;
19108     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
19109
19110     if (inlineResult.IsFailure())
19111     {
19112         return;
19113     }
19114
19115     // The old value should be NULL
19116     assert(call->gtInlineCandidateInfo == nullptr);
19117
19118     // The new value should not be NULL.
19119     assert(inlineCandidateInfo != nullptr);
19120     inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
19121
19122     call->gtInlineCandidateInfo = inlineCandidateInfo;
19123
19124     // Mark the call node as inline candidate.
19125     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
19126
19127     // Let the strategy know there's another candidate.
19128     impInlineRoot()->m_inlineStrategy->NoteCandidate();
19129
19130     // Since we're not actually inlining yet, and this call site is
19131     // still just an inline candidate, there's nothing to report.
19132     inlineResult.SetReported();
19133 }
19134
19135 /******************************************************************************/
19136 // Returns true if the given intrinsic will be implemented by target-specific
19137 // instructions
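//
// For example (illustrative): on AMD64, CORINFO_INTRINSIC_Sqrt and
// CORINFO_INTRINSIC_Abs are expanded to SSE2 instructions, while
// CORINFO_INTRINSIC_Sin is not a target intrinsic there and instead ends up
// as a call to the System.Math implementation (see
// IsIntrinsicImplementedByUserCall below).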
19138
19139 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
19140 {
19141 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
19142     switch (intrinsicId)
19143     {
19144         // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
19145         // instructions to directly compute round/ceiling/floor.
19146         //
19147         // TODO: Because the x86 backend only targets SSE for floating-point code,
19148         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
19149         //       implemented those intrinsics as x87 instructions). If this poses
19150         //       a CQ problem, it may be necessary to change the implementation of
19151         //       the helper calls to decrease call overhead or switch back to the
19152         //       x87 instructions. This is tracked by #7097.
19153         case CORINFO_INTRINSIC_Sqrt:
19154         case CORINFO_INTRINSIC_Abs:
19155             return true;
19156
19157         case CORINFO_INTRINSIC_Round:
19158         case CORINFO_INTRINSIC_Ceiling:
19159         case CORINFO_INTRINSIC_Floor:
19160             return compSupports(InstructionSet_SSE41);
19161
19162         default:
19163             return false;
19164     }
19165 #elif defined(_TARGET_ARM64_)
19166     switch (intrinsicId)
19167     {
19168         case CORINFO_INTRINSIC_Sqrt:
19169         case CORINFO_INTRINSIC_Abs:
19170         case CORINFO_INTRINSIC_Round:
19171         case CORINFO_INTRINSIC_Floor:
19172         case CORINFO_INTRINSIC_Ceiling:
19173             return true;
19174
19175         default:
19176             return false;
19177     }
19178 #elif defined(_TARGET_ARM_)
19179     switch (intrinsicId)
19180     {
19181         case CORINFO_INTRINSIC_Sqrt:
19182         case CORINFO_INTRINSIC_Abs:
19183         case CORINFO_INTRINSIC_Round:
19184             return true;
19185
19186         default:
19187             return false;
19188     }
19189 #elif defined(_TARGET_X86_)
19190     switch (intrinsicId)
19191     {
19192         case CORINFO_INTRINSIC_Sin:
19193         case CORINFO_INTRINSIC_Cos:
19194         case CORINFO_INTRINSIC_Sqrt:
19195         case CORINFO_INTRINSIC_Abs:
19196         case CORINFO_INTRINSIC_Round:
19197             return true;
19198
19199         default:
19200             return false;
19201     }
19202 #else
19203     // TODO: This portion of the logic is not implemented for other architectures.
19204     // The reason for returning true is that on all other architectures the only
19205     // intrinsics enabled are target intrinsics.
19206     return true;
19207 #endif //_TARGET_AMD64_
19208 }
19209
19210 /******************************************************************************/
19211 // Returns true if the given intrinsic will be implemented by calling System.Math
19212 // methods.
19213
19214 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
19215 {
19216     // Currently, if a math intrinsic is not implemented by target-specific
19217     // instructions, it will be implemented by a System.Math call. In the
19218     // future, if we turn to implementing some of them with helper calls,
19219     // this predicate needs to be revisited.
19220     return !IsTargetIntrinsic(intrinsicId);
19221 }
19222
19223 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
19224 {
19225     switch (intrinsicId)
19226     {
19227         case CORINFO_INTRINSIC_Sin:
19228         case CORINFO_INTRINSIC_Cbrt:
19229         case CORINFO_INTRINSIC_Sqrt:
19230         case CORINFO_INTRINSIC_Abs:
19231         case CORINFO_INTRINSIC_Cos:
19232         case CORINFO_INTRINSIC_Round:
19233         case CORINFO_INTRINSIC_Cosh:
19234         case CORINFO_INTRINSIC_Sinh:
19235         case CORINFO_INTRINSIC_Tan:
19236         case CORINFO_INTRINSIC_Tanh:
19237         case CORINFO_INTRINSIC_Asin:
19238         case CORINFO_INTRINSIC_Asinh:
19239         case CORINFO_INTRINSIC_Acos:
19240         case CORINFO_INTRINSIC_Acosh:
19241         case CORINFO_INTRINSIC_Atan:
19242         case CORINFO_INTRINSIC_Atan2:
19243         case CORINFO_INTRINSIC_Atanh:
19244         case CORINFO_INTRINSIC_Log10:
19245         case CORINFO_INTRINSIC_Pow:
19246         case CORINFO_INTRINSIC_Exp:
19247         case CORINFO_INTRINSIC_Ceiling:
19248         case CORINFO_INTRINSIC_Floor:
19249             return true;
19250         default:
19251             return false;
19252     }
19253 }
19254
19255 bool Compiler::IsMathIntrinsic(GenTree* tree)
19256 {
19257     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
19258 }
19259
19260 //------------------------------------------------------------------------
19261 // impDevirtualizeCall: Attempt to change a virtual vtable call into a
19262 //   normal call
19263 //
19264 // Arguments:
19265 //     call -- the call node to examine/modify
19266 //     method   -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
19267 //     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
19268 //     contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
19269 //     exactContextHnd -- [OUT] updated context handle iff call devirtualized
19270 //
19271 // Notes:
19272 //     Virtual calls in IL will always "invoke" the base class method.
19273 //
19274 //     This transformation looks for evidence that the type of 'this'
19275 //     in the call is exactly known, is a final class or would invoke
19276 //     a final method, and if that and other safety checks pan out,
19277 //     modifies the call and the call info to create a direct call.
19278 //
19279 //     This transformation is initially done in the importer and not
19280 //     in some subsequent optimization pass because we want it to be
19281 //     upstream of inline candidate identification.
19282 //
19283 //     However, later phases may supply improved type information that
19284 //     can enable further devirtualization. We currently reinvoke this
19285 //     code after inlining, if the return value of the inlined call is
19286 //     the 'this obj' of a subsequent virtual call.
19287 //
19288 //     If devirtualization succeeds and the call's this object is the
19289 //     result of a box, the jit will ask the EE for the unboxed entry
19290 //     point. If this exists, the jit will see if it can rework the box
19291 //     to instead make a local copy. If that is doable, the call is
19292 //     updated to invoke the unboxed entry on the local copy.
19293 //
19294 void Compiler::impDevirtualizeCall(GenTreeCall*            call,
19295                                    CORINFO_METHOD_HANDLE*  method,
19296                                    unsigned*               methodFlags,
19297                                    CORINFO_CONTEXT_HANDLE* contextHandle,
19298                                    CORINFO_CONTEXT_HANDLE* exactContextHandle)
19299 {
19300     assert(call != nullptr);
19301     assert(method != nullptr);
19302     assert(methodFlags != nullptr);
19303     assert(contextHandle != nullptr);
19304
19305     // This should be a virtual vtable or virtual stub call.
19306     assert(call->IsVirtual());
19307
19308     // Bail if not optimizing
19309     if (opts.MinOpts())
19310     {
19311         return;
19312     }
19313
19314     // Bail if debuggable codegen
19315     if (opts.compDbgCode)
19316     {
19317         return;
19318     }
19319
19320 #if defined(DEBUG)
19321     // Bail if devirt is disabled.
19322     if (JitConfig.JitEnableDevirtualization() == 0)
19323     {
19324         return;
19325     }
19326
19327     const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
19328 #endif // DEBUG
19329
19330     // Fetch information about the virtual method we're calling.
19331     CORINFO_METHOD_HANDLE baseMethod        = *method;
19332     unsigned              baseMethodAttribs = *methodFlags;
19333
19334     if (baseMethodAttribs == 0)
19335     {
19336         // For late devirt we may not have method attributes, so fetch them.
19337         baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19338     }
19339     else
19340     {
19341 #if defined(DEBUG)
19342         // Validate that callInfo has up to date method flags
19343         const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19344
19345         // All the base method attributes should agree, save that
19346         // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
19347         // because of concurrent jitting activity.
19348         //
19349         // Note we don't look at this particular flag bit below, and
19350         // later on (if we do try and inline) we will rediscover why
19351         // the method can't be inlined, so there's no danger here in
19352         // seeing this particular flag bit in different states between
19353         // the cached and fresh values.
19354         if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
19355         {
19356             assert(!"mismatched method attributes");
19357         }
19358 #endif // DEBUG
19359     }
19360
19361     // In R2R mode, we might see virtual stub calls to
19362     // non-virtuals. For instance cases where the non-virtual method
19363     // is in a different assembly but is called via CALLVIRT. For
19364     // version resilience we must allow for the fact that the method
19365     // might become virtual in some update.
19366     //
19367     // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
19368     // regular call+nullcheck upstream, so we won't reach this
19369     // point.
19370     if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
19371     {
19372         assert(call->IsVirtualStub());
19373         assert(opts.IsReadyToRun());
19374         JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
19375         return;
19376     }
19377
19378     // See what we know about the type of 'this' in the call.
19379     GenTree*             thisObj       = call->gtCallObjp->gtEffectiveVal(false);
19380     GenTree*             actualThisObj = nullptr;
19381     bool                 isExact       = false;
19382     bool                 objIsNonNull  = false;
19383     CORINFO_CLASS_HANDLE objClass      = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
19384
19385     // See if we have special knowledge that can get us a type or a better type.
19386     if ((objClass == nullptr) || !isExact)
19387     {
19388         actualThisObj = thisObj;
19389
19390         // Walk back through any return expression placeholders
19391         while (actualThisObj->OperGet() == GT_RET_EXPR)
19392         {
19393             actualThisObj = actualThisObj->gtRetExpr.gtInlineCandidate;
19394         }
19395
19396         // See if we landed on a call to a special intrinsic method
19397         if (actualThisObj->IsCall())
19398         {
19399             GenTreeCall* thisObjCall = actualThisObj->AsCall();
19400             if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
19401             {
19402                 assert(thisObjCall->gtCallType == CT_USER_FUNC);
19403                 CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
19404                 CORINFO_CLASS_HANDLE  specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
19405                 if (specialObjClass != nullptr)
19406                 {
19407                     objClass     = specialObjClass;
19408                     isExact      = true;
19409                     objIsNonNull = true;
19410                 }
19411             }
19412         }
19413     }
19414
19415     // Bail if we know nothing.
19416     if (objClass == nullptr)
19417     {
19418         JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
19419         return;
19420     }
19421
19422     // Fetch information about the class that introduced the virtual method.
19423     CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
19424     const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
19425
19426 #if !defined(FEATURE_CORECLR)
19427     // If base class is not beforefieldinit then devirtualizing may
19428     // cause us to miss a base class init trigger. Spec says we don't
19429     // need a trigger for ref class callvirts but desktop seems to
19430     // have one anyway. So defer.
19431     if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
19432     {
19433         JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
19434         return;
19435     }
19436 #endif // FEATURE_CORECLR
19437
19438     // Is the call an interface call?
19439     const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
19440
19441     // If the objClass is sealed (final), then we may be able to devirtualize.
19442     const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
19443     const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
19444
19445 #if defined(DEBUG)
19446     const char* callKind       = isInterface ? "interface" : "virtual";
19447     const char* objClassNote   = "[?]";
19448     const char* objClassName   = "?objClass";
19449     const char* baseClassName  = "?baseClass";
19450     const char* baseMethodName = "?baseMethod";
19451
19452     if (verbose || doPrint)
19453     {
19454         objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
19455         objClassName   = info.compCompHnd->getClassName(objClass);
19456         baseClassName  = info.compCompHnd->getClassName(baseClass);
19457         baseMethodName = eeGetMethodName(baseMethod, nullptr);
19458
19459         if (verbose)
19460         {
19461             printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
19462                    "    class for 'this' is %s%s (attrib %08x)\n"
19463                    "    base method is %s::%s\n",
19464                    callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
19465         }
19466     }
19467 #endif // defined(DEBUG)
19468
19469     // Bail if obj class is an interface.
19470     // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
19471     //   IL_021d:  ldloc.0
19472     //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
19473     if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
19474     {
19475         JITDUMP("--- obj class is interface, sorry\n");
19476         return;
19477     }
19478
19479     if (isInterface)
19480     {
19481         assert(call->IsVirtualStub());
19482         JITDUMP("--- base class is interface\n");
19483     }
19484
19485     // Fetch the method that would be called based on the declared type of 'this'
19486     CORINFO_CONTEXT_HANDLE ownerType     = *contextHandle;
19487     CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
19488
19489     // If we failed to get a handle, we can't devirtualize.  This can
19490     // happen when prejitting, if the devirtualization crosses
19491     // servicing bubble boundaries.
19492     if (derivedMethod == nullptr)
19493     {
19494         JITDUMP("--- no derived method, sorry\n");
19495         return;
19496     }
19497
19498     // Fetch method attributes to see if method is marked final.
19499     const DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
19500     const bool  derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
19501
19502 #if defined(DEBUG)
19503     const char* derivedClassName  = "?derivedClass";
19504     const char* derivedMethodName = "?derivedMethod";
19505
19506     const char* note = "speculative";
19507     if (isExact)
19508     {
19509         note = "exact";
19510     }
19511     else if (objClassIsFinal)
19512     {
19513         note = "final class";
19514     }
19515     else if (derivedMethodIsFinal)
19516     {
19517         note = "final method";
19518     }
19519
19520     if (verbose || doPrint)
19521     {
19522         derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
19523         if (verbose)
19524         {
19525             printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
19526             gtDispTree(call);
19527         }
19528     }
19529 #endif // defined(DEBUG)
19530
19531     if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
19532     {
19533         // Type is not exact, and neither the class nor the method is final.
19534         //
19535         // We could speculatively devirtualize, but there's no
19536         // reason to believe the derived method is the one that
19537         // is likely to be invoked.
19538         //
19539         // If there's currently no further overriding (that is, at
19540         // the time of jitting, objClass has no subclasses that
19541         // override this method), then perhaps we'd be willing to
19542         // make a bet...?
19543         JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
19544         return;
19545     }
19546
19547     // For interface calls we must have an exact type or final class.
19548     if (isInterface && !isExact && !objClassIsFinal)
19549     {
19550         JITDUMP("    Class not final or exact for interface, no devirtualization\n");
19551         return;
19552     }
19553
19554     JITDUMP("    %s; can devirtualize\n", note);
19555
19556     // Make the updates.
19557     call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
19558     call->gtFlags &= ~GTF_CALL_VIRT_STUB;
19559     call->gtCallMethHnd = derivedMethod;
19560     call->gtCallType    = CT_USER_FUNC;
19561
19562     // Virtual calls include an implicit null check, which we may
19563     // now need to make explicit.
19564     if (!objIsNonNull)
19565     {
19566         call->gtFlags |= GTF_CALL_NULLCHECK;
19567     }
19568
19569     // Clear the inline candidate info (may be non-null since
19570     // it's a union field used for other things by virtual
19571     // stubs)
19572     call->gtInlineCandidateInfo = nullptr;
19573
19574 #if defined(DEBUG)
19575     if (verbose)
19576     {
19577         printf("... after devirt...\n");
19578         gtDispTree(call);
19579     }
19580
19581     if (doPrint)
19582     {
19583         printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
19584                baseMethodName, derivedClassName, derivedMethodName, note);
19585     }
19586 #endif // defined(DEBUG)
19587
19588     // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
19589     if (thisObj->IsBoxedValue())
19590     {
19591         JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
19592
19593         // Note for some shared methods the unboxed entry point requires an extra parameter.
19594         // We defer optimizing if so.
19595         bool                  requiresInstMethodTableArg = false;
19596         CORINFO_METHOD_HANDLE unboxedEntryMethod =
19597             info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
19598
19599         if (unboxedEntryMethod != nullptr)
19600         {
19601             // Since the call is the only consumer of the box, and the box is passed to it
19602             // via an interior pointer, we know the box can't escape.
19603             //
19604             // So, revise the box to simply create a local copy, use the address of that copy
19605             // as the this pointer, and update the entry point to the unboxed entry.
19606             //
19607             // Ideally, we then inline the boxed method and, if it turns out not to modify
19608             // the copy, we can undo the copy too.
19609             if (requiresInstMethodTableArg)
19610             {
19611                 // We can likely handle this case by grabbing the argument passed to
19612                 // the newobj in the box. But defer for now.
19613                 JITDUMP("Found unboxed entry point, but it needs method table arg, deferring\n");
19614             }
19615             else
19616             {
19617                 JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
19618                 GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
19619
19620                 if (localCopyThis != nullptr)
19621                 {
19622                     JITDUMP("Success! invoking unboxed entry point on local copy\n");
19623                     call->gtCallObjp    = localCopyThis;
19624                     call->gtCallMethHnd = unboxedEntryMethod;
19625                     derivedMethod       = unboxedEntryMethod;
19626                 }
19627                 else
19628                 {
19629                     JITDUMP("Sorry, failed to undo the box\n");
19630                 }
19631             }
19632         }
19633         else
19634         {
19635             // Many of the low-level methods on value classes won't have unboxed entries,
19636             // as they need access to the type of the object.
19637             //
19638             // Note this may be a cue for us to stack allocate the boxed object, since
19639             // we probably know that these objects don't escape.
19640             JITDUMP("Sorry, failed to find unboxed entry point\n");
19641         }
19642     }
19643
19644     // Fetch the class that introduced the derived method.
19645     //
19646     // Note this may not equal objClass, if there is a
19647     // final method that objClass inherits.
19648     CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
19649
19650     // Need to update call info too. This is fragile
19651     // but hopefully the derived method conforms to
19652     // the base in most other ways.
19653     *method        = derivedMethod;
19654     *methodFlags   = derivedMethodAttribs;
19655     *contextHandle = MAKE_METHODCONTEXT(derivedMethod);
19656
19657     // Update context handle.
19658     if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
19659     {
19660         *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
19661     }
19662
19663 #ifdef FEATURE_READYTORUN_COMPILER
19664     if (opts.IsReadyToRun())
19665     {
19666         // For R2R, getCallInfo triggers bookkeeping on the zap
19667         // side so we need to call it here.
19668         //
19669         // First, cons up a suitable resolved token.
19670         CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
19671
19672         derivedResolvedToken.tokenScope   = info.compScopeHnd;
19673         derivedResolvedToken.tokenContext = *contextHandle;
19674         derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
19675         derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
19676         derivedResolvedToken.hClass       = derivedClass;
19677         derivedResolvedToken.hMethod      = derivedMethod;
19678
19679         // Look up the new call info.
19680         CORINFO_CALL_INFO derivedCallInfo;
19681         eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
19682
19683         // Update the call.
19684         call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
19685         call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
19686         call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
19687     }
19688 #endif // FEATURE_READYTORUN_COMPILER
19689 }
19690
19691 //------------------------------------------------------------------------
19692 // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
19693 //   to an intrinsic returns an exact type
19694 //
19695 // Arguments:
19696 //     methodHnd -- handle for the special intrinsic method
19697 //
19698 // Returns:
19699 //     Exact class handle returned by the intrinsic call, if known.
19700 //     Nullptr if not known, or not likely to lead to beneficial optimization.
19701
19702 CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
19703 {
19704     JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
19705
19706     CORINFO_CLASS_HANDLE result = nullptr;
19707
19708     // See what intrinsic we have...
19709     const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
19710     switch (ni)
19711     {
19712         case NI_System_Collections_Generic_EqualityComparer_get_Default:
19713         {
19714             // Expect one class generic parameter; figure out which it is.
19715             CORINFO_SIG_INFO sig;
19716             info.compCompHnd->getMethodSig(methodHnd, &sig);
19717             assert(sig.sigInst.classInstCount == 1);
19718             CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
19719             assert(typeHnd != nullptr);
19720
19721             // Lookup can be incorrect when we have __Canon, as it won't appear
19722             // to implement any interface types.
19723             //
19724             // And if we do not have a final type, devirt & inlining is
19725             // unlikely to result in much simplification.
19726             //
19727             // We can use CORINFO_FLG_FINAL to screen out both of these cases.
19728             const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
19729             const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
19730
19731             if (isFinalType)
19732             {
19733                 result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
19734                 JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
19735                         result != nullptr ? eeGetClassName(result) : "unknown");
19736             }
19737             else
19738             {
19739                 JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
19740             }
19741
19742             break;
19743         }
19744
19745         default:
19746         {
19747             JITDUMP("This special intrinsic not handled, sorry...\n");
19748             break;
19749         }
19750     }
19751
19752     return result;
19753 }
19754
19755 //------------------------------------------------------------------------
19756 // impAllocateToken: create a CORINFO_RESOLVED_TOKEN in jit-allocated memory and initialize it.
19757 //
19758 // Arguments:
19759 //    token - init value for the allocated token.
19760 //
19761 // Return Value:
19762 //    pointer to the token in jit-allocated memory.
19763 CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
19764 {
19765     CORINFO_RESOLVED_TOKEN* memory = (CORINFO_RESOLVED_TOKEN*)compGetMem(sizeof(token));
19766     *memory                        = token;
19767     return memory;
19768 }
19769
19770 //------------------------------------------------------------------------
19771 // SpillRetExprHelper: iterate through the argument trees of a call and spill ret_expr nodes to local variables.
19772 //
19773 class SpillRetExprHelper
19774 {
19775 public:
19776     SpillRetExprHelper(Compiler* comp) : comp(comp)
19777     {
19778     }
19779
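    // StoreRetExprResultsInArgs: walk the call's argument trees (including the
    // 'this' argument) and spill any GT_RET_EXPR nodes found in them to temps.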
19780     void StoreRetExprResultsInArgs(GenTreeCall* call)
19781     {
19782         GenTree* args = call->gtCallArgs;
19783         if (args != nullptr)
19784         {
19785             comp->fgWalkTreePre(&args, SpillRetExprVisitor, this);
19786         }
19787         GenTree* thisArg = call->gtCallObjp;
19788         if (thisArg != nullptr)
19789         {
19790             comp->fgWalkTreePre(&thisArg, SpillRetExprVisitor, this);
19791         }
19792     }
19793
19794 private:
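    // SpillRetExprVisitor: fgWalkTreePre callback. Skips subtrees without GTF_CALL
    // set (a GT_RET_EXPR always carries GTF_CALL) and spills each GT_RET_EXPR it finds.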
19795     static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
19796     {
19797         assert((pTree != nullptr) && (*pTree != nullptr));
19798         GenTree* tree = *pTree;
19799         if ((tree->gtFlags & GTF_CALL) == 0)
19800         {
19801             // Trees with ret_expr are marked as GTF_CALL.
19802             return Compiler::WALK_SKIP_SUBTREES;
19803         }
19804         if (tree->OperGet() == GT_RET_EXPR)
19805         {
19806             SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
19807             walker->StoreRetExprAsLocalVar(pTree);
19808         }
19809         return Compiler::WALK_CONTINUE;
19810     }
19811
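    // StoreRetExprAsLocalVar: assign the GT_RET_EXPR at *pRetExpr to a fresh temp
    // and replace the use with a node that reads that temp.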
19812     void StoreRetExprAsLocalVar(GenTree** pRetExpr)
19813     {
19814         GenTree* retExpr = *pRetExpr;
19815         assert(retExpr->OperGet() == GT_RET_EXPR);
19816         JITDUMP("Store return expression %u  as a local var.\n", retExpr->gtTreeID);
19817         unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
19818         comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
19819         *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
19820     }
19821
19822 private:
19823     Compiler* comp;
19824 };
19825
19826 //------------------------------------------------------------------------
19827 // addFatPointerCandidate: mark the call as a fat pointer candidate and note on the method that it has one.
19828 //                         Spill ret_expr nodes in the call node, because they can't be cloned.
19829 //
19830 // Arguments:
19831 //    call - fat calli candidate
19832 //
19833 void Compiler::addFatPointerCandidate(GenTreeCall* call)
19834 {
19835     setMethodHasFatPointer();
19836     call->SetFatPointerCandidate();
19837     SpillRetExprHelper helper(this);
19838     helper.StoreRetExprResultsInArgs(call);
19839 }