Merge pull request #16413 from briansull/more-vso-566984
[platform/upstream/coreclr.git] / src / jit / importer.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
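// The macros above all report a verification failure via verRaiseVerifyExceptionIfNeeded().
// Verify() only reports; VerifyOrReturn() also returns from the enclosing (void) function;
// VerifyOrReturnSpeculative() returns false on failure and, when 'speculative' is true,
// does so without reporting anything.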
61
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
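    // We report bad IL only when the depth has reached the declared maxstack and, in
    // addition, either the allocated stack buffer (impStkSize) is exhausted or this
    // block has not been imported before.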
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
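// Pushes a null object reference (an integer constant zero of type TYP_REF, with
// verification type TI_NULL) onto the stack.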
148 inline void Compiler::impPushNullObjRefOnStack()
149 {
150     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
151 }
152
153 // This method gets called when we run into unverifiable code
154 // (and we are verifying the method)
155
156 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
157                                                           DEBUGARG(unsigned line))
158 {
159     // Remember that the code is not verifiable
160     // Note that the method may yet pass canSkipMethodVerification(),
161     // and so the presence of unverifiable code may not be an issue.
162     tiIsVerifiableCode = FALSE;
163
164 #ifdef DEBUG
165     const char* tail = strrchr(file, '\\');
166     if (tail)
167     {
168         file = tail + 1;
169     }
170
171     if (JitConfig.JitBreakOnUnsafeCode())
172     {
173         assert(!"Unsafe code detected");
174     }
175 #endif
176
177     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
178             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
179
180     if (verNeedsVerification() || compIsForImportOnly())
181     {
182         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
183                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
184         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
185     }
186 }
187
188 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
189                                                                     DEBUGARG(unsigned line))
190 {
191     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
192             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
193
194 #ifdef DEBUG
195     //    BreakIfDebuggerPresent();
196     if (getBreakOnBadCode())
197     {
198         assert(!"Typechecking error");
199     }
200 #endif
201
202     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
203     UNREACHABLE();
204 }
205
206 // Helper function that tells us whether the IL instruction at the given code address
207 // consumes an address from the top of the stack. We use it to avoid unnecessarily
208 // marking locals as address-taken (lvAddrTaken).
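// For example (illustrative IL), in a sequence such as
//
//          ldloca.0
//          ldfld   int32 SomeStruct::someField
//
// the ldfld consumes the address pushed by ldloca.0 (provided the field is not a
// 'small' type), so the local does not need to be marked as address-taken.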
209 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
210 {
211     assert(!compIsForInlining());
212
213     OPCODE opcode;
214
215     opcode = (OPCODE)getU1LittleEndian(codeAddr);
216
217     switch (opcode)
218     {
219         // case CEE_LDFLDA: We're leaving this one out because, if you have a sequence
220         // like
221         //
222         //          ldloca.0
223         //          ldflda whatever
224         //
225         // on a primitive-like struct, you end up after morphing with the address of a local
226         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
227         // for structs that contain other structs, which isn't a case we handle very
228         // well right now for other reasons.
229
230         case CEE_LDFLD:
231         {
232             // We won't collapse small fields. This is probably not the right place to have this
233             // check, but we're only using the function for this purpose, and it is easy to factor
234             // it out if we need to do so.
235
236             CORINFO_RESOLVED_TOKEN resolvedToken;
237             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
238
239             CORINFO_CLASS_HANDLE clsHnd;
240             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
241
242             // Preserve 'small' int types
243             if (!varTypeIsSmall(lclTyp))
244             {
245                 lclTyp = genActualType(lclTyp);
246             }
247
248             if (varTypeIsSmall(lclTyp))
249             {
250                 return false;
251             }
252
253             return true;
254         }
255         default:
256             break;
257     }
258
259     return false;
260 }
261
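// Resolves the metadata token found at 'addr' into pResolvedToken, using the current
// token lookup context and scope. When verifying, eeTryResolveToken is used and a
// failure is reported as a verification error; otherwise the token is resolved directly
// via the EE.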
262 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
263 {
264     pResolvedToken->tokenContext = impTokenLookupContextHandle;
265     pResolvedToken->tokenScope   = info.compScopeHnd;
266     pResolvedToken->token        = getU4LittleEndian(addr);
267     pResolvedToken->tokenType    = kind;
268
269     if (!tiVerificationNeeded)
270     {
271         info.compCompHnd->resolveToken(pResolvedToken);
272     }
273     else
274     {
275         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
276     }
277 }
278
279 /*****************************************************************************
280  *
281  *  Pop one tree from the stack.
282  */
283
284 StackEntry Compiler::impPopStack()
285 {
286     if (verCurrentState.esStackDepth == 0)
287     {
288         BADCODE("stack underflow");
289     }
290
291 #ifdef DEBUG
292 #if VERBOSE_VERIFY
293     if (VERBOSE && tiVerificationNeeded)
294     {
295         JITDUMP("\n");
296         printf(TI_DUMP_PADDING);
297         printf("About to pop from the stack: ");
298         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
299         ti.Dump();
300     }
301 #endif // VERBOSE_VERIFY
302 #endif // DEBUG
303
304     return verCurrentState.esStack[--verCurrentState.esStackDepth];
305 }
306
307 /*****************************************************************************
308  *
309  *  Peek at the n'th (0-based) tree from the top of the stack.
310  */
311
312 StackEntry& Compiler::impStackTop(unsigned n)
313 {
314     if (verCurrentState.esStackDepth <= n)
315     {
316         BADCODE("stack underflow");
317     }
318
319     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
320 }
321
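// Returns the current depth of the importer's evaluation stack.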
322 unsigned Compiler::impStackHeight()
323 {
324     return verCurrentState.esStackDepth;
325 }
326
327 /*****************************************************************************
328  *  Some of the trees are spilled specially, and need special handling when
329  *  they are unspilled or copied. The following function enumerates the
330  *  operators that are possible after spilling.
331  */
332
333 #ifdef DEBUG // only used in asserts
334 static bool impValidSpilledStackEntry(GenTree* tree)
335 {
336     if (tree->gtOper == GT_LCL_VAR)
337     {
338         return true;
339     }
340
341     if (tree->OperIsConst())
342     {
343         return true;
344     }
345
346     return false;
347 }
348 #endif
349
350 /*****************************************************************************
351  *
352  *  The following logic is used to save/restore stack contents.
353  *  If 'copy' is true, then we make a copy of the trees on the stack. These
354  *  all have to be cloneable/spilled values.
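 *  If 'copy' is false, the saved entries simply share the same GenTree nodes as
 *  the live stack (the entries are memcpy'd, not cloned).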
355  */
356
357 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
358 {
359     savePtr->ssDepth = verCurrentState.esStackDepth;
360
361     if (verCurrentState.esStackDepth)
362     {
363         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
364         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
365
366         if (copy)
367         {
368             StackEntry* table = savePtr->ssTrees;
369
370             /* Make a fresh copy of all the stack entries */
371
372             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
373             {
374                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
375                 GenTree* tree     = verCurrentState.esStack[level].val;
376
377                 assert(impValidSpilledStackEntry(tree));
378
379                 switch (tree->gtOper)
380                 {
381                     case GT_CNS_INT:
382                     case GT_CNS_LNG:
383                     case GT_CNS_DBL:
384                     case GT_CNS_STR:
385                     case GT_LCL_VAR:
386                         table->val = gtCloneExpr(tree);
387                         break;
388
389                     default:
390                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
391                         break;
392                 }
393             }
394         }
395         else
396         {
397             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
398         }
399     }
400 }
401
402 void Compiler::impRestoreStackState(SavedStack* savePtr)
403 {
404     verCurrentState.esStackDepth = savePtr->ssDepth;
405
406     if (verCurrentState.esStackDepth)
407     {
408         memcpy(verCurrentState.esStack, savePtr->ssTrees,
409                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
410     }
411 }
412
413 /*****************************************************************************
414  *
415  *  Get the tree list started for a new basic block.
416  */
417 inline void Compiler::impBeginTreeList()
418 {
419     assert(impTreeList == nullptr && impTreeLast == nullptr);
420
421     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
422 }
423
424 /*****************************************************************************
425  *
426  *  Store the given start and end stmt in the given basic block. This is
427  *  mostly called by impEndTreeList(BasicBlock *block). It is called
428  *  directly only for handling CEE_LEAVEs out of finally-protected try regions.
429  */
430
431 inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
432 {
433     assert(firstStmt->gtOper == GT_STMT);
434     assert(lastStmt->gtOper == GT_STMT);
435
436     /* Make the list circular, so that we can easily walk it backwards */
437
438     firstStmt->gtPrev = lastStmt;
439
440     /* Store the tree list in the basic block */
441
442     block->bbTreeList = firstStmt;
443
444     /* The block should not already be marked as imported */
445     assert((block->bbFlags & BBF_IMPORTED) == 0);
446
447     block->bbFlags |= BBF_IMPORTED;
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the current tree list in the given basic block.
453  */
454
455 inline void Compiler::impEndTreeList(BasicBlock* block)
456 {
457     assert(impTreeList->gtOper == GT_BEG_STMTS);
458
459     GenTree* firstTree = impTreeList->gtNext;
460
461     if (!firstTree)
462     {
463         /* The block should not already be marked as imported */
464         assert((block->bbFlags & BBF_IMPORTED) == 0);
465
466         // Empty block. Just mark it as imported
467         block->bbFlags |= BBF_IMPORTED;
468     }
469     else
470     {
471         // Ignore the GT_BEG_STMTS
472         assert(firstTree->gtPrev == impTreeList);
473
474         impEndTreeList(block, firstTree, impTreeLast);
475     }
476
477 #ifdef DEBUG
478     if (impLastILoffsStmt != nullptr)
479     {
480         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
481         impLastILoffsStmt                          = nullptr;
482     }
483
484     impTreeList = impTreeLast = nullptr;
485 #endif
486 }
487
488 /*****************************************************************************
489  *
490  *  Check that storing the given tree doesn't mess up the semantic order. Note
491  *  that this has only limited value as we can only check [0..chkLevel).
492  */
493
494 inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
495 {
496 #ifndef DEBUG
497     return;
498 #else
499     assert(stmt->gtOper == GT_STMT);
500
501     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
502     {
503         chkLevel = verCurrentState.esStackDepth;
504     }
505
506     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
507     {
508         return;
509     }
510
511     GenTree* tree = stmt->gtStmt.gtStmtExpr;
512
513     // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack
514
515     if (tree->gtFlags & GTF_CALL)
516     {
517         for (unsigned level = 0; level < chkLevel; level++)
518         {
519             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
520         }
521     }
522
523     if (tree->gtOper == GT_ASG)
524     {
525         // For an assignment to a local variable, all references of that
526         // variable have to be spilled. If it is aliased, all calls and
527         // indirect accesses have to be spilled
528
529         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
530         {
531             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
532             for (unsigned level = 0; level < chkLevel; level++)
533             {
534                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
535                 assert(!lvaTable[lclNum].lvAddrExposed ||
536                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
537             }
538         }
539
540         // If the access may be to global memory, all side effects have to be spilled.
541
542         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
543         {
544             for (unsigned level = 0; level < chkLevel; level++)
545             {
546                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
547             }
548         }
549     }
550 #endif
551 }
552
553 /*****************************************************************************
554  *
555  *  Append the given GT_STMT node to the current block's tree list.
556  *  [0..chkLevel) is the portion of the stack which we will check for
557  *    interference with stmt and spill if needed.
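 *  chkLevel may also be CHECK_SPILL_ALL, meaning the whole stack is checked, or
 *  CHECK_SPILL_NONE, meaning no interference check (and no spilling) is performed.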
558  */
559
560 inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
561 {
562     assert(stmt->gtOper == GT_STMT);
563     noway_assert(impTreeLast != nullptr);
564
565     /* If the statement being appended has any side-effects, check the stack
566        to see if anything needs to be spilled to preserve correct ordering. */
567
568     GenTree* expr  = stmt->gtStmt.gtStmtExpr;
569     unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
570
571     // Assignments to (unaliased) locals don't count as side effects, as
572     // we handle them specially using impSpillLclRefs(). Temp locals should
573     // be fine too.
574
575     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
576         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
577     {
578         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
579         assert(flags == (op2Flags | GTF_ASG));
580         flags = op2Flags;
581     }
582
583     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
584     {
585         chkLevel = verCurrentState.esStackDepth;
586     }
587
588     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
589     {
590         assert(chkLevel <= verCurrentState.esStackDepth);
591
592         if (flags)
593         {
594             // If there is a call, we have to spill global refs
595             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
596
597             if (expr->gtOper == GT_ASG)
598             {
599                 GenTree* lhs = expr->gtGetOp1();
600                 // If we are assigning to a global ref, we have to spill global refs on stack.
601                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
602                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
603                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
604                 if (!expr->OperIsBlkOp())
605                 {
606                     // If we are assigning to a global ref, we have to spill global refs on stack
607                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
608                     {
609                         spillGlobEffects = true;
610                     }
611                 }
612                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
613                          ((lhs->OperGet() == GT_LCL_VAR) &&
614                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
615                 {
616                     spillGlobEffects = true;
617                 }
618             }
619
620             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
621         }
622         else
623         {
624             impSpillSpecialSideEff();
625         }
626     }
627
628     impAppendStmtCheck(stmt, chkLevel);
629
630     /* Point 'prev' at the previous node, so that we can walk backwards */
631
632     stmt->gtPrev = impTreeLast;
633
634     /* Append the expression statement to the list */
635
636     impTreeLast->gtNext = stmt;
637     impTreeLast         = stmt;
638
639 #ifdef FEATURE_SIMD
640     impMarkContiguousSIMDFieldAssignments(stmt);
641 #endif
642
643     /* Once we set impCurStmtOffs in an appended tree, we are ready to
644        report the following offsets. So reset impCurStmtOffs */
645
646     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
647     {
648         impCurStmtOffsSet(BAD_IL_OFFSET);
649     }
650
651 #ifdef DEBUG
652     if (impLastILoffsStmt == nullptr)
653     {
654         impLastILoffsStmt = stmt;
655     }
656
657     if (verbose)
658     {
659         printf("\n\n");
660         gtDispTree(stmt);
661     }
662 #endif
663 }
664
665 /*****************************************************************************
666  *
667  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
668  */
669
670 inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
671 {
672     assert(stmt->gtOper == GT_STMT);
673     assert(stmtBefore->gtOper == GT_STMT);
674
675     GenTree* stmtPrev  = stmtBefore->gtPrev;
676     stmt->gtPrev       = stmtPrev;
677     stmt->gtNext       = stmtBefore;
678     stmtPrev->gtNext   = stmt;
679     stmtBefore->gtPrev = stmt;
680 }
681
682 /*****************************************************************************
683  *
684  *  Append the given expression tree to the current block's tree list.
685  *  Return the newly created statement.
686  */
687
688 GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
689 {
690     assert(tree);
691
692     /* Allocate an 'expression statement' node */
693
694     GenTree* expr = gtNewStmt(tree, offset);
695
696     /* Append the statement to the current block's stmt list */
697
698     impAppendStmt(expr, chkLevel);
699
700     return expr;
701 }
702
703 /*****************************************************************************
704  *
705  *  Insert the given expression tree before GT_STMT "stmtBefore"
706  */
707
708 void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
709 {
710     assert(stmtBefore->gtOper == GT_STMT);
711
712     /* Allocate an 'expression statement' node */
713
714     GenTree* expr = gtNewStmt(tree, offset);
715
716     /* Insert the statement before "stmtBefore" in the current block's stmt list */
717
718     impInsertStmtBefore(expr, stmtBefore);
719 }
720
721 /*****************************************************************************
722  *
723  *  Append an assignment of the given value to a temp to the current tree list.
724  *  curLevel is the stack level for which the spill to the temp is being done.
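 *  If pAfterStmt is non-null, the new statement is instead inserted after *pAfterStmt
 *  in 'block', and *pAfterStmt is updated to point to the inserted statement.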
725  */
726
727 void Compiler::impAssignTempGen(unsigned    tmp,
728                                 GenTree*    val,
729                                 unsigned    curLevel,
730                                 GenTree**   pAfterStmt, /* = NULL */
731                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
732                                 BasicBlock* block       /* = NULL */
733                                 )
734 {
735     GenTree* asg = gtNewTempAssign(tmp, val);
736
737     if (!asg->IsNothingNode())
738     {
739         if (pAfterStmt)
740         {
741             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
742             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
743         }
744         else
745         {
746             impAppendTree(asg, curLevel, impCurStmtOffs);
747         }
748     }
749 }
750
751 /*****************************************************************************
752  * same as above, but handle the valueclass case too
753  */
754
755 void Compiler::impAssignTempGen(unsigned             tmpNum,
756                                 GenTree*             val,
757                                 CORINFO_CLASS_HANDLE structType,
758                                 unsigned             curLevel,
759                                 GenTree**            pAfterStmt, /* = NULL */
760                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
761                                 BasicBlock*          block       /* = NULL */
762                                 )
763 {
764     GenTree* asg;
765
766     if (varTypeIsStruct(val))
767     {
768         assert(tmpNum < lvaCount);
769         assert(structType != NO_CLASS_HANDLE);
770
771         // If the method is non-verifiable, the assert below may not hold, so at
772         // least ignore it when verification is turned on, since any block that
773         // tries to use the temp would have failed verification.
774         var_types varType = lvaTable[tmpNum].lvType;
775         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
776         lvaSetStruct(tmpNum, structType, false);
777
778         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
779         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
780         // that has been passed in for the value being assigned to the temp, in which case we
781         // need to set 'val' to that same type.
782         // Note also that if we always normalized the types of any node that might be a struct
783         // type, this would not be necessary - but that requires additional JIT/EE interface
784         // calls that may not actually be required - e.g. if we only access a field of a struct.
785
786         val->gtType = lvaTable[tmpNum].lvType;
787
788         GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
789         asg          = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
790     }
791     else
792     {
793         asg = gtNewTempAssign(tmpNum, val);
794     }
795
796     if (!asg->IsNothingNode())
797     {
798         if (pAfterStmt)
799         {
800             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
801             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
802         }
803         else
804         {
805             impAppendTree(asg, curLevel, impCurStmtOffs);
806         }
807     }
808 }
809
810 /*****************************************************************************
811  *
812  *  Pop the given number of values from the stack and return a list node with
813  *  their values.
814  *  The 'prefixTree' argument may optionally contain an argument
815  *  list that is prepended to the list returned from this function.
816  *
817  *  The notion of prepended is a bit misleading in that the list is backwards
818  *  from the way I would expect: The first element popped is at the end of
819  *  the returned list, and prefixTree is 'before' that, meaning closer to
820  *  the end of the list.  To get to prefixTree, you have to walk to the
821  *  end of the list.
822  *
823  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
824  *  such we reverse its meaning such that returnValue has a reversed
825  *  prefixTree at the head of the list.
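 *
 *  For illustration: popping three arguments a1, a2 and a3 (with a3 on top of the
 *  stack) produces the list a1 -> a2 -> a3. With ARG_ORDER_L2R a prefixTree of
 *  p1 -> p2 ends up at the tail (a1 -> a2 -> a3 -> p1 -> p2), while with
 *  ARG_ORDER_R2L it is reversed and placed at the head (p2 -> p1 -> a1 -> a2 -> a3).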
826  */
827
828 GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
829 {
830     assert(sig == nullptr || count == sig->numArgs);
831
832     CORINFO_CLASS_HANDLE structType;
833     GenTreeArgList*      treeList;
834
835     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
836     {
837         treeList = nullptr;
838     }
839     else
840     { // ARG_ORDER_L2R
841         treeList = prefixTree;
842     }
843
844     while (count--)
845     {
846         StackEntry se   = impPopStack();
847         typeInfo   ti   = se.seTypeInfo;
848         GenTree*   temp = se.val;
849
850         if (varTypeIsStruct(temp))
851         {
852             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
853             assert(ti.IsType(TI_STRUCT));
854             structType = ti.GetClassHandleForValueClass();
855 #ifdef DEBUG
856             if (verbose)
857             {
858                 printf("Calling impNormStructVal on:\n");
859                 gtDispTree(temp);
860             }
861 #endif
862             temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
863 #ifdef DEBUG
864             if (verbose)
865             {
866                 printf("resulting tree:\n");
867                 gtDispTree(temp);
868             }
869 #endif
870         }
871
872         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
873         treeList = gtNewListNode(temp, treeList);
874     }
875
876     if (sig != nullptr)
877     {
878         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
879             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
880         {
881             // Make sure that all valuetypes (including enums) that we push are loaded.
882             // This is to guarantee that if a GC is triggered from the prestub of this method,
883             // all valuetypes in the method signature are already loaded.
884             // We need to be able to find the size of the valuetypes, but we cannot
885             // do a class-load from within GC.
886             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
887         }
888
889         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
890         CORINFO_CLASS_HANDLE    argClass;
891         CORINFO_CLASS_HANDLE    argRealClass;
892         GenTreeArgList*         args;
893
894         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
895         {
896             PREFIX_ASSUME(args != nullptr);
897
898             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
899
900             // insert implied casts (from float to double or double to float)
901
902             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
903             {
904                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
905             }
906             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
907             {
908                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
909             }
910
911             // insert any widening or narrowing casts for backwards compatibility
912
913             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
914
915             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
916                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
917             {
918                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
919                 // but it stopped working in Whidbey when we started passing simple valuetypes as their underlying
920                 // primitive types.
921                 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
922                 // details).
923                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
924                 {
925                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
926                 }
927
928                 // Make sure that all valuetypes (including enums) that we push are loaded.
929                 // This is to guarantee that if a GC is triggered from the prestub of this method,
930                 // all valuetypes in the method signature are already loaded.
931                 // We need to be able to find the size of the valuetypes, but we cannot
932                 // do a class-load from within GC.
933                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
934             }
935
936             argLst = info.compCompHnd->getArgNext(argLst);
937         }
938     }
939
940     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
941     {
942         // Prepend the prefixTree
943
944         // Simple in-place reversal to place treeList
945         // at the end of a reversed prefixTree
946         while (prefixTree != nullptr)
947         {
948             GenTreeArgList* next = prefixTree->Rest();
949             prefixTree->Rest()   = treeList;
950             treeList             = prefixTree;
951             prefixTree           = next;
952         }
953     }
954     return treeList;
955 }
956
957 /*****************************************************************************
958  *
959  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
960  *  The first "skipReverseCount" items are not reversed.
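 *  For illustration: popping four values a1..a4 (with a4 on top of the stack) with
 *  skipReverseCount == 2 yields a1 -> a2 -> a4 -> a3, while skipReverseCount == 0
 *  reverses the whole list, yielding a4 -> a3 -> a2 -> a1.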
961  */
962
963 GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
964
965 {
966     assert(skipReverseCount <= count);
967
968     GenTreeArgList* list = impPopList(count, sig);
969
970     // reverse the list
971     if (list == nullptr || skipReverseCount == count)
972     {
973         return list;
974     }
975
976     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
977     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
978
979     if (skipReverseCount == 0)
980     {
981         ptr = list;
982     }
983     else
984     {
985         lastSkipNode = list;
986         // Get to the first node that needs to be reversed
987         for (unsigned i = 0; i < skipReverseCount - 1; i++)
988         {
989             lastSkipNode = lastSkipNode->Rest();
990         }
991
992         PREFIX_ASSUME(lastSkipNode != nullptr);
993         ptr = lastSkipNode->Rest();
994     }
995
996     GenTreeArgList* reversedList = nullptr;
997
998     do
999     {
1000         GenTreeArgList* tmp = ptr->Rest();
1001         ptr->Rest()         = reversedList;
1002         reversedList        = ptr;
1003         ptr                 = tmp;
1004     } while (ptr != nullptr);
1005
1006     if (skipReverseCount)
1007     {
1008         lastSkipNode->Rest() = reversedList;
1009         return list;
1010     }
1011     else
1012     {
1013         return reversedList;
1014     }
1015 }
1016
1017 /*****************************************************************************
1018    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1019    class of type 'clsHnd'.  It returns the tree that should be appended to the
1020    statement list that represents the assignment.
1021    Temp assignments may be appended to impTreeList if spilling is necessary.
1022    curLevel is the stack level for which a spill may be being done.
1023  */
1024
1025 GenTree* Compiler::impAssignStruct(GenTree*             dest,
1026                                    GenTree*             src,
1027                                    CORINFO_CLASS_HANDLE structHnd,
1028                                    unsigned             curLevel,
1029                                    GenTree**            pAfterStmt, /* = NULL */
1030                                    BasicBlock*          block       /* = NULL */
1031                                    )
1032 {
1033     assert(varTypeIsStruct(dest));
1034
1035     while (dest->gtOper == GT_COMMA)
1036     {
1037         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1038
1039         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1040         if (pAfterStmt)
1041         {
1042             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1043         }
1044         else
1045         {
1046             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1047         }
1048
1049         // set dest to the second thing
1050         dest = dest->gtOp.gtOp2;
1051     }
1052
1053     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1054            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1055
1056     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1057         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1058     {
1059         // Make this a NOP
1060         return gtNewNothingNode();
1061     }
1062
1063     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1064     // or re-creating a Blk node if it is.
1065     GenTree* destAddr;
1066
1067     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1068     {
1069         destAddr = dest->gtOp.gtOp1;
1070     }
1071     else
1072     {
1073         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1074     }
1075
1076     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1077 }
1078
1079 /*****************************************************************************/
1080
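// impAssignStructPtr: Create the assignment of the struct value 'src' to the location
// addressed by 'destAddr'.
//
// A call (or RET_EXPR) that uses a hidden return buffer gets 'destAddr' inserted as its
// retbuf argument and is returned directly; a GT_MKREFANY source is split into separate
// assignments of its data-pointer and type fields; a GT_COMMA source has its first
// operand appended as a side effect before recursing on its second operand. Otherwise
// an assignment node (possibly a block assignment) is returned, to be appended by the
// caller.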
1081 GenTree* Compiler::impAssignStructPtr(GenTree*             destAddr,
1082                                       GenTree*             src,
1083                                       CORINFO_CLASS_HANDLE structHnd,
1084                                       unsigned             curLevel,
1085                                       GenTree**            pAfterStmt, /* = NULL */
1086                                       BasicBlock*          block       /* = NULL */
1087                                       )
1088 {
1089     var_types destType;
1090     GenTree*  dest      = nullptr;
1091     unsigned  destFlags = 0;
1092
1093 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1094     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1095     // TODO-ARM-BUG: Does ARM need this?
1096     // TODO-ARM64-BUG: Does ARM64 need this?
1097     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1098            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1099            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1100            (src->TypeGet() != TYP_STRUCT &&
1101             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1102 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1103     assert(varTypeIsStruct(src));
1104
1105     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1106            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1107            src->gtOper == GT_COMMA ||
1108            (src->TypeGet() != TYP_STRUCT &&
1109             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1110 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1111     if (destAddr->OperGet() == GT_ADDR)
1112     {
1113         GenTree* destNode = destAddr->gtGetOp1();
1114         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1115         // will be morphed, don't insert an OBJ(ADDR).
1116         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1117 #ifndef LEGACY_BACKEND
1118             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1119 #endif // !LEGACY_BACKEND
1120                 )
1121         {
1122             dest = destNode;
1123         }
1124         destType = destNode->TypeGet();
1125     }
1126     else
1127     {
1128         destType = src->TypeGet();
1129     }
1130
1131     var_types asgType = src->TypeGet();
1132
1133     if (src->gtOper == GT_CALL)
1134     {
1135         if (src->AsCall()->TreatAsHasRetBufArg(this))
1136         {
1137             // Case of call returning a struct via hidden retbuf arg
1138
1139             // insert the return value buffer into the argument list as first byref parameter
1140             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1141
1142             // now returns void, not a struct
1143             src->gtType = TYP_VOID;
1144
1145             // return the morphed call node
1146             return src;
1147         }
1148         else
1149         {
1150             // Case of call returning a struct in one or more registers.
1151
1152             var_types returnType = (var_types)src->gtCall.gtReturnType;
1153
1154             // We won't use a return buffer, so change src->gtType to 'returnType'
1155             src->gtType = genActualType(returnType);
1156
1157             // First we try to change this to "LclVar/LclFld = call"
1158             //
1159             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1160             {
1161                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1162                 // That is, the IR will be of the form lclVar = call for multi-reg return
1163                 //
1164                 GenTree* lcl = destAddr->gtOp.gtOp1;
1165                 if (src->AsCall()->HasMultiRegRetVal())
1166                 {
1167                     // Mark the struct LclVar as used in a MultiReg return context
1168                     //  which currently makes it non promotable.
1169                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1170                     // handle multireg returns.
1171                     lcl->gtFlags |= GTF_DONT_CSE;
1172                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1173                 }
1174                 else // The call result is not a multireg return
1175                 {
1176                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1177                     lcl->ChangeOper(GT_LCL_FLD);
1178                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1179                     lcl->gtType = src->gtType;
1180                     asgType     = src->gtType;
1181                 }
1182
1183                 dest = lcl;
1184
1185 #if defined(_TARGET_ARM_)
1186                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1187                 // but that method has not been updated to include ARM.
1188                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1189                 lcl->gtFlags |= GTF_DONT_CSE;
1190 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1191                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1192                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1193
1194                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1195                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1196                 // handle multireg returns.
1197                 // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
1198                 // non-multireg returns.
1199                 lcl->gtFlags |= GTF_DONT_CSE;
1200                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1201 #endif
1202             }
1203             else // we don't have a GT_ADDR of a GT_LCL_VAR
1204             {
1205                 // !!! The destination could be on stack. !!!
1206                 // This flag will let us choose the correct write barrier.
1207                 asgType   = returnType;
1208                 destFlags = GTF_IND_TGTANYWHERE;
1209             }
1210         }
1211     }
1212     else if (src->gtOper == GT_RET_EXPR)
1213     {
1214         GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1215         noway_assert(call->gtOper == GT_CALL);
1216
1217         if (call->HasRetBufArg())
1218         {
1219             // insert the return value buffer into the argument list as first byref parameter
1220             call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1221
1222             // now returns void, not a struct
1223             src->gtType  = TYP_VOID;
1224             call->gtType = TYP_VOID;
1225
1226             // We already have appended the write to 'dest' GT_CALL's args
1227             // So now we just return an empty node (pruning the GT_RET_EXPR)
1228             return src;
1229         }
1230         else
1231         {
1232             // Case of inline method returning a struct in one or more registers.
1233             //
1234             var_types returnType = (var_types)call->gtReturnType;
1235
1236             // We won't need a return buffer
1237             asgType      = returnType;
1238             src->gtType  = genActualType(returnType);
1239             call->gtType = src->gtType;
1240
1241             // If we've changed the type, and it no longer matches a local destination,
1242             // we must use an indirection.
1243             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1244             {
1245                 dest = nullptr;
1246             }
1247
1248             // !!! The destination could be on stack. !!!
1249             // This flag will let us choose the correct write barrier.
1250             destFlags = GTF_IND_TGTANYWHERE;
1251         }
1252     }
1253     else if (src->OperIsBlk())
1254     {
1255         asgType = impNormStructType(structHnd);
1256         if (src->gtOper == GT_OBJ)
1257         {
1258             assert(src->gtObj.gtClass == structHnd);
1259         }
1260     }
1261     else if (src->gtOper == GT_INDEX)
1262     {
1263         asgType = impNormStructType(structHnd);
1264         assert(src->gtIndex.gtStructElemClass == structHnd);
1265     }
1266     else if (src->gtOper == GT_MKREFANY)
1267     {
1268         // Since we are assigning the result of a GT_MKREFANY,
1269         // "destAddr" must point to a refany.
1270
1271         GenTree* destAddrClone;
1272         destAddr =
1273             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1274
1275         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1276         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1277         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1278         GenTree*       ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1279         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1280         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1281         GenTree* typeSlot =
1282             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1283
1284         // append the assign of the pointer value
1285         GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1286         if (pAfterStmt)
1287         {
1288             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1289         }
1290         else
1291         {
1292             impAppendTree(asg, curLevel, impCurStmtOffs);
1293         }
1294
1295         // return the assign of the type value, to be appended
1296         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1297     }
1298     else if (src->gtOper == GT_COMMA)
1299     {
1300         // The second thing is the struct or its address.
1301         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1302         if (pAfterStmt)
1303         {
1304             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1305         }
1306         else
1307         {
1308             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1309         }
1310
1311         // Evaluate the second thing using recursion.
1312         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1313     }
1314     else if (src->IsLocal())
1315     {
1316         asgType = src->TypeGet();
1317     }
1318     else if (asgType == TYP_STRUCT)
1319     {
1320         asgType     = impNormStructType(structHnd);
1321         src->gtType = asgType;
1322 #ifdef LEGACY_BACKEND
1323         if (asgType == TYP_STRUCT)
1324         {
1325             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1326             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1327         }
1328 #endif
1329     }
1330     if (dest == nullptr)
1331     {
1332         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1333         // if this is a known struct type.
1334         if (asgType == TYP_STRUCT)
1335         {
1336             dest = gtNewObjNode(structHnd, destAddr);
1337             gtSetObjGcInfo(dest->AsObj());
1338             // Although an obj as a call argument was always assumed to be a globRef
1339             // (which is itself overly conservative), that is not true of the operands
1340             // of a block assignment.
1341             dest->gtFlags &= ~GTF_GLOB_REF;
1342             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1343         }
1344         else if (varTypeIsStruct(asgType))
1345         {
1346             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1347         }
1348         else
1349         {
1350             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1351         }
1352     }
1353     else
1354     {
1355         dest->gtType = asgType;
1356     }
1357
1358     dest->gtFlags |= destFlags;
1359     destFlags = dest->gtFlags;
1360
1361     // return an assignment node, to be appended
1362     GenTree* asgNode = gtNewAssignNode(dest, src);
1363     gtBlockOpInit(asgNode, dest, src, false);
1364
1365     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1366     // of assignments.
1367     if ((destFlags & GTF_DONT_CSE) == 0)
1368     {
1369         dest->gtFlags &= ~(GTF_DONT_CSE);
1370     }
1371     return asgNode;
1372 }
1373
1374 /*****************************************************************************
1375    Given a struct value, and the class handle for that structure, return
1376    the expression for the address for that structure value.
1377
1378    willDeref - whether the caller guarantees to dereference the returned pointer.
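
   For a GT_OBJ with willDeref, the existing address operand is returned directly.
   A call, RET_EXPR, MKREFANY, SIMD HW intrinsic, or a GT_OBJ without willDeref is
   first assigned to a new temp, and the address of that temp is returned. A GT_COMMA
   is handled by recursing on its second operand.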
1379 */
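//
// For example (illustrative): for a GT_OBJ with willDeref == true this simply returns the
// OBJ's address operand; for a GT_CALL or GT_RET_EXPR the value is first spilled to a temp
// and the address of that temp local is returned.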
1380
1381 GenTree* Compiler::impGetStructAddr(GenTree*             structVal,
1382                                     CORINFO_CLASS_HANDLE structHnd,
1383                                     unsigned             curLevel,
1384                                     bool                 willDeref)
1385 {
1386     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1387
1388     var_types type = structVal->TypeGet();
1389
1390     genTreeOps oper = structVal->gtOper;
1391
1392     if (oper == GT_OBJ && willDeref)
1393     {
1394         assert(structVal->gtObj.gtClass == structHnd);
1395         return (structVal->gtObj.Addr());
1396     }
1397     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
1398              structVal->OperIsSimdHWIntrinsic())
1399     {
1400         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1401
1402         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1403
1404         // The 'return value' is now the temp itself
1405
1406         type          = genActualType(lvaTable[tmpNum].TypeGet());
1407         GenTree* temp = gtNewLclvNode(tmpNum, type);
1408         temp          = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1409         return temp;
1410     }
1411     else if (oper == GT_COMMA)
1412     {
1413         assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1414
1415         GenTree* oldTreeLast  = impTreeLast;
1416         structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1417         structVal->gtType     = TYP_BYREF;
1418
1419         if (oldTreeLast != impTreeLast)
1420         {
1421             // Some temp assignment statement was placed on the statement list
1422             // for Op2, but that would be out of order with op1, so we need to
1423             // spill op1 onto the statement list after whatever was last
1424             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1425             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1426             structVal->gtOp.gtOp1 = gtNewNothingNode();
1427         }
1428
1429         return (structVal);
1430     }
1431
1432     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1433 }
1434
1435 //------------------------------------------------------------------------
1436 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1437 //                    and optionally determine the GC layout of the struct.
1438 //
1439 // Arguments:
1440 //    structHnd       - The class handle for the struct type of interest.
1441 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1442 //                      into which the gcLayout will be written.
1443 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1444 //                      which will be set to the number of GC fields in the struct.
1445 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1446 //                      type, set to the SIMD base type
1447 //
1448 // Return Value:
1449 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1450 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1451 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1452 //
1453 // Assumptions:
1454 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1455 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1456 //
1457 // Notes:
1458 //    Normalizing the type involves examining the struct type to determine if it should
1459 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1460 //    for full enregistration, e.g. TYP_SIMD16.
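//    For example (illustrative): on a SIMD-capable target, a 16-byte struct with no GC fields
//    (such as System.Numerics.Vector4) would normalize to TYP_SIMD16, while a struct containing
//    an object reference remains TYP_STRUCT.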
1461
1462 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1463                                       BYTE*                gcLayout,
1464                                       unsigned*            pNumGCVars,
1465                                       var_types*           pSimdBaseType)
1466 {
1467     assert(structHnd != NO_CLASS_HANDLE);
1468
1469     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1470     var_types   structType  = TYP_STRUCT;
1471
1472     // On CoreCLR the check for GC pointers includes a "may" to account for the special
1473     // ByRef-like structs such as Span<T>. The added check for "CONTAINS_STACK_PTR" is the relevant bit:
1474     // when it is set, the struct contains a ByRef that could be either a GC pointer or a native
1475     // pointer.
1476     const bool mayContainGCPtrs =
1477         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1478
1479 #ifdef FEATURE_SIMD
1480     // Check to see if this is a SIMD type.
1481     if (featureSIMD && !mayContainGCPtrs)
1482     {
1483         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1484
1485         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1486         {
1487             unsigned int sizeBytes;
1488             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1489             if (simdBaseType != TYP_UNKNOWN)
1490             {
1491                 assert(sizeBytes == originalSize);
1492                 structType = getSIMDTypeForSize(sizeBytes);
1493                 if (pSimdBaseType != nullptr)
1494                 {
1495                     *pSimdBaseType = simdBaseType;
1496                 }
1497                 // Also indicate that we use floating point registers.
1498                 compFloatingPointUsed = true;
1499             }
1500         }
1501     }
1502 #endif // FEATURE_SIMD
1503
1504     // Fetch GC layout info if requested
1505     if (gcLayout != nullptr)
1506     {
1507         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1508
1509         // Verify that the quick test up above via the class attributes gave a
1510         // safe view of the type's GCness.
1511         //
1512         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1513         // does not report any gc fields.
1514
1515         assert(mayContainGCPtrs || (numGCVars == 0));
1516
1517         if (pNumGCVars != nullptr)
1518         {
1519             *pNumGCVars = numGCVars;
1520         }
1521     }
1522     else
1523     {
1524         // Can't safely ask for number of GC pointers without also
1525         // asking for layout.
1526         assert(pNumGCVars == nullptr);
1527     }
1528
1529     return structType;
1530 }
1531
1532 //****************************************************************************
1533 //  Given a TYP_STRUCT value 'structVal', make sure it is 'canonical'; that is,
1534 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1535 //
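//  For example (illustrative): a struct-typed GT_LCL_VAR becomes OBJ(ADDR(LCL_VAR)), and a
//  struct-returning GT_CALL is first spilled to a temp whose address is then wrapped in an OBJ.
//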
1536 GenTree* Compiler::impNormStructVal(GenTree*             structVal,
1537                                     CORINFO_CLASS_HANDLE structHnd,
1538                                     unsigned             curLevel,
1539                                     bool                 forceNormalization /*=false*/)
1540 {
1541     assert(forceNormalization || varTypeIsStruct(structVal));
1542     assert(structHnd != NO_CLASS_HANDLE);
1543     var_types structType = structVal->TypeGet();
1544     bool      makeTemp   = false;
1545     if (structType == TYP_STRUCT)
1546     {
1547         structType = impNormStructType(structHnd);
1548     }
1549     bool                 alreadyNormalized = false;
1550     GenTreeLclVarCommon* structLcl         = nullptr;
1551
1552     genTreeOps oper = structVal->OperGet();
1553     switch (oper)
1554     {
1555         // GT_RETURN and GT_MKREFANY don't capture the handle.
1556         case GT_RETURN:
1557             break;
1558         case GT_MKREFANY:
1559             alreadyNormalized = true;
1560             break;
1561
1562         case GT_CALL:
1563             structVal->gtCall.gtRetClsHnd = structHnd;
1564             makeTemp                      = true;
1565             break;
1566
1567         case GT_RET_EXPR:
1568             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1569             makeTemp                         = true;
1570             break;
1571
1572         case GT_ARGPLACE:
1573             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1574             break;
1575
1576         case GT_INDEX:
1577             // This will be transformed to an OBJ later.
1578             alreadyNormalized                    = true;
1579             structVal->gtIndex.gtStructElemClass = structHnd;
1580             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1581             break;
1582
1583         case GT_FIELD:
1584             // Wrap it in a GT_OBJ.
1585             structVal->gtType = structType;
1586             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1587             break;
1588
1589         case GT_LCL_VAR:
1590         case GT_LCL_FLD:
1591             structLcl = structVal->AsLclVarCommon();
1592             // Wrap it in a GT_OBJ.
1593             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1594             __fallthrough;
1595
1596         case GT_OBJ:
1597         case GT_BLK:
1598         case GT_DYN_BLK:
1599         case GT_ASG:
1600             // These should already have the appropriate type.
1601             assert(structVal->gtType == structType);
1602             alreadyNormalized = true;
1603             break;
1604
1605         case GT_IND:
1606             assert(structVal->gtType == structType);
1607             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1608             alreadyNormalized = true;
1609             break;
1610
1611 #ifdef FEATURE_SIMD
1612         case GT_SIMD:
1613             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1614             break;
1615 #endif // FEATURE_SIMD
1616 #ifdef FEATURE_HW_INTRINSICS
1617         case GT_HWIntrinsic:
1618             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1619             break;
1620 #endif
1621
1622         case GT_COMMA:
1623         {
1624             // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node.
1625             GenTree* blockNode = structVal->gtOp.gtOp2;
1626             assert(blockNode->gtType == structType);
1627
1628             // Is this GT_COMMA(op1, GT_COMMA())?
1629             GenTree* parent = structVal;
1630             if (blockNode->OperGet() == GT_COMMA)
1631             {
1632                 // Find the last node in the comma chain.
1633                 do
1634                 {
1635                     assert(blockNode->gtType == structType);
1636                     parent    = blockNode;
1637                     blockNode = blockNode->gtOp.gtOp2;
1638                 } while (blockNode->OperGet() == GT_COMMA);
1639             }
1640
1641             if (blockNode->OperGet() == GT_FIELD)
1642             {
1643                 // If we have a GT_FIELD then wrap it in a GT_OBJ.
1644                 blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
1645             }
1646
1647 #ifdef FEATURE_SIMD
1648             if (blockNode->OperIsSIMDorSimdHWintrinsic())
1649             {
1650                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1651                 alreadyNormalized  = true;
1652             }
1653             else
1654 #endif
1655             {
1656                 noway_assert(blockNode->OperIsBlk());
1657
1658                 // Sink the GT_COMMA below the blockNode addr.
1659                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1660                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1661                 //
1662                 // In case of a chained GT_COMMA case, we sink the last
1663                 // GT_COMMA below the blockNode addr.
1664                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1665                 assert(blockNodeAddr->gtType == TYP_BYREF);
1666                 GenTree* commaNode    = parent;
1667                 commaNode->gtType     = TYP_BYREF;
1668                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1669                 blockNode->gtOp.gtOp1 = commaNode;
1670                 if (parent == structVal)
1671                 {
1672                     structVal = blockNode;
1673                 }
1674                 alreadyNormalized = true;
1675             }
1676         }
1677         break;
1678
1679         default:
1680             noway_assert(!"Unexpected node in impNormStructVal()");
1681             break;
1682     }
1683     structVal->gtType  = structType;
1684     GenTree* structObj = structVal;
1685
1686     if (!alreadyNormalized || forceNormalization)
1687     {
1688         if (makeTemp)
1689         {
1690             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1691
1692             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1693
1694             // The structVal is now the temp itself
1695
1696             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1697             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1698             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1699         }
1700         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1701         {
1702             // Wrap it in a GT_OBJ
1703             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1704         }
1705     }
1706
1707     if (structLcl != nullptr)
1708     {
1709         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1710         // so we don't set GTF_EXCEPT here.
1711         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1712         {
1713             structObj->gtFlags &= ~GTF_GLOB_REF;
1714         }
1715     }
1716     else
1717     {
1718         // In general an OBJ is an indirection and could raise an exception.
1719         structObj->gtFlags |= GTF_EXCEPT;
1720     }
1721     return (structObj);
1722 }
1723
1724 /******************************************************************************/
1725 // Given a type token, generate code that will evaluate to the correct
1726 // handle representation of that token (type handle, field handle, or method handle)
1727 //
1728 // For most cases, the handle is determined at compile-time, and the code
1729 // generated is simply an embedded handle.
1730 //
1731 // Run-time lookup is required if the enclosing method is shared between instantiations
1732 // and the token refers to formal type parameters whose instantiation is not known
1733 // at compile-time.
1734 //
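// For example (illustrative): 'ldtoken System.String' can be embedded directly as a
// compile-time handle, whereas 'ldtoken' on a type parameter T inside a method shared across
// instantiations requires a runtime lookup through the generic context.
//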
1735 GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1736                                     BOOL*                   pRuntimeLookup /* = NULL */,
1737                                     BOOL                    mustRestoreHandle /* = FALSE */,
1738                                     BOOL                    importParent /* = FALSE */)
1739 {
1740     assert(!fgGlobalMorph);
1741
1742     CORINFO_GENERICHANDLE_RESULT embedInfo;
1743     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1744
1745     if (pRuntimeLookup)
1746     {
1747         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1748     }
1749
1750     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1751     {
1752         switch (embedInfo.handleType)
1753         {
1754             case CORINFO_HANDLETYPE_CLASS:
1755                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1756                 break;
1757
1758             case CORINFO_HANDLETYPE_METHOD:
1759                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1760                 break;
1761
1762             case CORINFO_HANDLETYPE_FIELD:
1763                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1764                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1765                 break;
1766
1767             default:
1768                 break;
1769         }
1770     }
1771
1772     // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
1773     GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1774                                       embedInfo.compileTimeHandle);
1775
1776     // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
1777     if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
1778     {
1779         result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
1780     }
1781
1782     return result;
1783 }
1784
1785 GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1786                                    CORINFO_LOOKUP*         pLookup,
1787                                    unsigned                handleFlags,
1788                                    void*                   compileTimeHandle)
1789 {
1790     if (!pLookup->lookupKind.needsRuntimeLookup)
1791     {
1792         // No runtime lookup is required.
1793         // Access is a direct reference, or a memory-indirect reference through a fixed address
1794
1795         CORINFO_GENERIC_HANDLE handle       = nullptr;
1796         void*                  pIndirection = nullptr;
1797         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1798
1799         if (pLookup->constLookup.accessType == IAT_VALUE)
1800         {
1801             handle = pLookup->constLookup.handle;
1802         }
1803         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1804         {
1805             pIndirection = pLookup->constLookup.addr;
1806         }
1807         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1808     }
1809     else if (compIsForInlining())
1810     {
1811         // Don't import runtime lookups when inlining
1812         // Inlining has to be aborted in such a case
1813         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1814         return nullptr;
1815     }
1816     else
1817     {
1818         // Need to use dictionary-based access which depends on the typeContext
1819         // which is only available at runtime, not at compile-time.
1820
1821         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1822     }
1823 }
1824
1825 #ifdef FEATURE_READYTORUN_COMPILER
1826 GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1827                                              unsigned              handleFlags,
1828                                              void*                 compileTimeHandle)
1829 {
1830     CORINFO_GENERIC_HANDLE handle       = nullptr;
1831     void*                  pIndirection = nullptr;
1832     assert(pLookup->accessType != IAT_PPVALUE);
1833
1834     if (pLookup->accessType == IAT_VALUE)
1835     {
1836         handle = pLookup->handle;
1837     }
1838     else if (pLookup->accessType == IAT_PVALUE)
1839     {
1840         pIndirection = pLookup->addr;
1841     }
1842     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1843 }
1844
1845 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1846     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1847     CorInfoHelpFunc         helper,
1848     var_types               type,
1849     GenTreeArgList*         args /* =NULL*/,
1850     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1851 {
1852     CORINFO_CONST_LOOKUP lookup;
1853     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1854     {
1855         return nullptr;
1856     }
1857
1858     GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
1859
1860     op1->setEntryPoint(lookup);
1861
1862     return op1;
1863 }
1864 #endif
1865
1866 GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1867 {
1868     GenTree* op1 = nullptr;
1869
1870     switch (pCallInfo->kind)
1871     {
1872         case CORINFO_CALL:
1873             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1874
1875 #ifdef FEATURE_READYTORUN_COMPILER
1876             if (opts.IsReadyToRun())
1877             {
1878                 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1879             }
1880             else
1881             {
1882                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1883             }
1884 #endif
1885             break;
1886
1887         case CORINFO_CALL_CODE_POINTER:
1888             if (compIsForInlining())
1889             {
1890                 // Don't import runtime lookups when inlining
1891                 // Inlining has to be aborted in such a case
1892                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1893                 return nullptr;
1894             }
1895
1896             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1897             break;
1898
1899         default:
1900             noway_assert(!"unknown call kind");
1901             break;
1902     }
1903
1904     return op1;
1905 }
1906
1907 //------------------------------------------------------------------------
1908 // getRuntimeContextTree: find pointer to context for runtime lookup.
1909 //
1910 // Arguments:
1911 //    kind - lookup kind.
1912 //
1913 // Return Value:
1914 //    Return GenTree pointer to generic shared context.
1915 //
1916 // Notes:
1917 //    Reports about generic context using.
1918 //    Records that the generic context is in use (see lvaGenericsContextUseCount).
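//    For CORINFO_LOOKUP_THISOBJ the context is read off the method table of 'this';
//    otherwise (METHODPARAM / CLASSPARAM) it is the hidden instantiation argument
//    passed in info.compTypeCtxtArg.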
1919 GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1920 {
1921     GenTree* ctxTree = nullptr;
1922
1923     // Collectible types require that, for shared generic code, any use of the generic context parameter
1924     // is reported. (This is a conservative approach; we could detect some cases, particularly when the
1925     // context parameter is 'this', where we don't need the eager reporting logic.)
1926     lvaGenericsContextUseCount++;
1927
1928     if (kind == CORINFO_LOOKUP_THISOBJ)
1929     {
1930         // this Object
1931         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1932
1933         // Vtable pointer of this object
1934         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1935         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1936         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1937     }
1938     else
1939     {
1940         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1941
1942         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1943     }
1944     return ctxTree;
1945 }
1946
1947 /*****************************************************************************/
1948 /* Import a dictionary lookup to access a handle in code shared between
1949    generic instantiations.
1950    The lookup depends on the typeContext which is only available at
1951    runtime, and not at compile-time.
1952    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1953    The cases are:
1954
1955    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1956       instantiation-specific handle, and the tokens to lookup the handle.
1957    2. pLookup->indirections != CORINFO_USEHELPER :
1958       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1959           to get the handle.
1960       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1961           If it is non-NULL, it is the handle required. Else, call a helper
1962           to lookup the handle.
1963  */
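
/* Illustrative shape for case 2b with two indirections (a sketch, not exhaustive):

      handle = *(*(ctx + offsets[0]) + offsets[1]);
      result = (handle != nullptr) ? handle : helper(ctx, signature);
 */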
1964
1965 GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1966                                           CORINFO_LOOKUP*         pLookup,
1967                                           void*                   compileTimeHandle)
1968 {
1969
1970     // This method can only be called from the importer instance of the Compiler.
1971     // In other words, it cannot be called by the Compiler instance created for the inlinee.
1972     assert(!compIsForInlining());
1973
1974     GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1975
1976     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1977     // It's available only via the run-time helper function
1978     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1979     {
1980 #ifdef FEATURE_READYTORUN_COMPILER
1981         if (opts.IsReadyToRun())
1982         {
1983             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1984                                              gtNewArgList(ctxTree), &pLookup->lookupKind);
1985         }
1986 #endif
1987         GenTree* argNode =
1988             gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
1989         GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
1990
1991         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
1992     }
1993
1994     // Slot pointer
1995     GenTree* slotPtrTree = ctxTree;
1996
1997     if (pRuntimeLookup->testForNull)
1998     {
1999         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2000                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2001     }
2002
2003     GenTree* indOffTree = nullptr;
2004
2005     // Apply the repeated indirections
2006     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2007     {
2008         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2009         {
2010             indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2011                                       nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
2012         }
2013
2014         if (i != 0)
2015         {
2016             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2017             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2018             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2019         }
2020
2021         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2022         {
2023             slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
2024         }
2025
2026         if (pRuntimeLookup->offsets[i] != 0)
2027         {
2028             slotPtrTree =
2029                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2030         }
2031     }
2032
2033     // No null test required
2034     if (!pRuntimeLookup->testForNull)
2035     {
2036         if (pRuntimeLookup->indirections == 0)
2037         {
2038             return slotPtrTree;
2039         }
2040
2041         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2042         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2043
2044         if (!pRuntimeLookup->testForFixup)
2045         {
2046             return slotPtrTree;
2047         }
2048
2049         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2050
2051         unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2052         impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2053
2054         GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2055         // downcast the pointer to a TYP_INT on 64-bit targets
2056         slot = impImplicitIorI4Cast(slot, TYP_INT);
2057         // Use a GT_AND to check for the lowest bit and indirect if it is set
2058         GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2059         GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2060         relop->gtFlags |= GTF_RELOP_QMARK;
2061
2062         // slot = GT_IND(slot - 1)
2063         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2064         GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2065         GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2066         indir->gtFlags |= GTF_IND_NONFAULTING;
2067         indir->gtFlags |= GTF_IND_INVARIANT;
2068
2069         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2070         GenTree* asg   = gtNewAssignNode(slot, indir);
2071         GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2072         GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2073         impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2074
2075         return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2076     }
2077
2078     assert(pRuntimeLookup->indirections != 0);
2079
2080     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2081
2082     // Extract the handle
2083     GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2084     handle->gtFlags |= GTF_IND_NONFAULTING;
2085
2086     GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2087                                        nullptr DEBUGARG("impRuntimeLookup typehandle"));
2088
2089     // Call to helper
2090     GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2091
2092     GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2093     GenTree*        helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2094
2095     // Check for null and possibly call helper
2096     GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2097     relop->gtFlags |= GTF_RELOP_QMARK;
2098
2099     GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2100                                                        gtNewNothingNode(), // do nothing if nonnull
2101                                                        helperCall);
2102
2103     GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2104
2105     unsigned tmp;
2106     if (handleCopy->IsLocal())
2107     {
2108         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2109     }
2110     else
2111     {
2112         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2113     }
2114
2115     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2116     return gtNewLclvNode(tmp, TYP_I_IMPL);
2117 }
2118
2119 /******************************************************************************
2120  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2121  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2122  *     else, grab a new temp.
2123  *  For structs (which can be pushed on the stack using obj, etc),
2124  *  special handling is needed
2125  */
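
/*  For example (illustrative): spilling stack entry 'level' when it holds a call appends
 *      tmpN = call(...)
 *  to the statement list and replaces esStack[level].val with a use of tmpN.
 */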
2126
2127 struct RecursiveGuard
2128 {
2129 public:
2130     RecursiveGuard()
2131     {
2132         m_pAddress = nullptr;
2133     }
2134
2135     ~RecursiveGuard()
2136     {
2137         if (m_pAddress)
2138         {
2139             *m_pAddress = false;
2140         }
2141     }
2142
2143     void Init(bool* pAddress, bool bInitialize)
2144     {
2145         assert(pAddress && *pAddress == false && "Recursive guard violation");
2146         m_pAddress = pAddress;
2147
2148         if (bInitialize)
2149         {
2150             *m_pAddress = true;
2151         }
2152     }
2153
2154 protected:
2155     bool* m_pAddress;
2156 };
2157
2158 bool Compiler::impSpillStackEntry(unsigned level,
2159                                   unsigned tnum
2160 #ifdef DEBUG
2161                                   ,
2162                                   bool        bAssertOnRecursion,
2163                                   const char* reason
2164 #endif
2165                                   )
2166 {
2167
2168 #ifdef DEBUG
2169     RecursiveGuard guard;
2170     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2171 #endif
2172
2173     GenTree* tree = verCurrentState.esStack[level].val;
2174
2175     /* Allocate a temp if we haven't been asked to use a particular one */
2176
2177     if (tiVerificationNeeded)
2178     {
2179         // Ignore bad temp requests (they will happen with bad code and will be
2180         // caught when importing the destblock)
2181         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2182         {
2183             return false;
2184         }
2185     }
2186     else
2187     {
2188         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2189         {
2190             return false;
2191         }
2192     }
2193
2194     bool isNewTemp = false;
2195
2196     if (tnum == BAD_VAR_NUM)
2197     {
2198         tnum      = lvaGrabTemp(true DEBUGARG(reason));
2199         isNewTemp = true;
2200     }
2201     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2202     {
2203         // if verification is needed and tnum's type is incompatible with
2204         // type on that stack, we grab a new temp. This is safe since
2205         // we will throw a verification exception in the dest block.
2206
2207         var_types valTyp = tree->TypeGet();
2208         var_types dstTyp = lvaTable[tnum].TypeGet();
2209
2210         // if the two types are different, we return. This will only happen with bad code and will
2211         // be caught when importing the destblock. We still allow int/byref and float/double differences.
2212         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2213             !(
2214 #ifndef _TARGET_64BIT_
2215                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2216 #endif // !_TARGET_64BIT_
2217                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2218         {
2219             if (verNeedsVerification())
2220             {
2221                 return false;
2222             }
2223         }
2224     }
2225
2226     /* Assign the spilled entry to the temp */
2227     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2228
2229     // If temp is newly introduced and a ref type, grab what type info we can.
2230     if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2231     {
2232         CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2233         lvaSetClass(tnum, tree, stkHnd);
2234     }
2235
2236     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2237     var_types type                     = genActualType(lvaTable[tnum].TypeGet());
2238     GenTree*  temp                     = gtNewLclvNode(tnum, type);
2239     verCurrentState.esStack[level].val = temp;
2240
2241     return true;
2242 }
2243
2244 /*****************************************************************************
2245  *
2246  *  Ensure that the stack has only spilled values
2247  */
2248
2249 void Compiler::impSpillStackEnsure(bool spillLeaves)
2250 {
2251     assert(!spillLeaves || opts.compDbgCode);
2252
2253     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2254     {
2255         GenTree* tree = verCurrentState.esStack[level].val;
2256
2257         if (!spillLeaves && tree->OperIsLeaf())
2258         {
2259             continue;
2260         }
2261
2262         // Temps introduced by the importer itself don't need to be spilled
2263
2264         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2265
2266         if (isTempLcl)
2267         {
2268             continue;
2269         }
2270
2271         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2272     }
2273 }
2274
2275 void Compiler::impSpillEvalStack()
2276 {
2277     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2278     {
2279         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2280     }
2281 }
2282
2283 /*****************************************************************************
2284  *
2285  *  If the stack contains any trees with side effects in them, assign those
2286  *  trees to temps and append the assignments to the statement list.
2287  *  On return the stack is guaranteed to be empty.
2288  */
2289
2290 inline void Compiler::impEvalSideEffects()
2291 {
2292     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2293     verCurrentState.esStackDepth = 0;
2294 }
2295
2296 /*****************************************************************************
2297  *
2298  *  If the stack contains any trees with side effects in them, assign those
2299  *  trees to temps and replace them on the stack with refs to their temps.
2300  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2301  */
2302
2303 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2304 {
2305     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2306
2307     /* Before we make any appends to the tree list we must spill the
2308      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2309
2310     impSpillSpecialSideEff();
2311
2312     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2313     {
2314         chkLevel = verCurrentState.esStackDepth;
2315     }
2316
2317     assert(chkLevel <= verCurrentState.esStackDepth);
2318
2319     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2320
2321     for (unsigned i = 0; i < chkLevel; i++)
2322     {
2323         GenTree* tree = verCurrentState.esStack[i].val;
2324
2325         GenTree* lclVarTree;
2326
2327         if ((tree->gtFlags & spillFlags) != 0 ||
2328             (spillGlobEffects &&                        // Only consider the following when spillGlobEffects == TRUE
2329              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2330              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2331                                            // lvAddrTaken flag.
2332         {
2333             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2334         }
2335     }
2336 }
2337
2338 /*****************************************************************************
2339  *
2340  *  If the stack contains any trees with special side effects in them, assign
2341  *  those trees to temps and replace them on the stack with refs to their temps.
2342  */
2343
2344 inline void Compiler::impSpillSpecialSideEff()
2345 {
2346     // Only exception objects need to be carefully handled
2347
2348     if (!compCurBB->bbCatchTyp)
2349     {
2350         return;
2351     }
2352
2353     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2354     {
2355         GenTree* tree = verCurrentState.esStack[level].val;
2356         // If the subtree contains a reference to the exception object (GT_CATCH_ARG), spill this stack entry.
2357         if (gtHasCatchArg(tree))
2358         {
2359             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2360         }
2361     }
2362 }
2363
2364 /*****************************************************************************
2365  *
2366  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2367  */
2368
2369 void Compiler::impSpillValueClasses()
2370 {
2371     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2372     {
2373         GenTree* tree = verCurrentState.esStack[level].val;
2374
2375         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2376         {
2377             // Tree walk was aborted, which means that we found a
2378             // value class on the stack.  Need to spill that
2379             // stack entry.
2380
2381             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2382         }
2383     }
2384 }
2385
2386 /*****************************************************************************
2387  *
2388  *  Callback that checks if a tree node is TYP_STRUCT
2389  */
2390
2391 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
2392 {
2393     fgWalkResult walkResult = WALK_CONTINUE;
2394
2395     if ((*pTree)->gtType == TYP_STRUCT)
2396     {
2397         // Abort the walk and indicate that we found a value class
2398
2399         walkResult = WALK_ABORT;
2400     }
2401
2402     return walkResult;
2403 }
2404
2405 /*****************************************************************************
2406  *
2407  *  If the stack contains any trees with references to local #lclNum, assign
2408  *  those trees to temps and replace them on the stack with refs to
2409  *  their temps.
2410  */
2411
2412 void Compiler::impSpillLclRefs(ssize_t lclNum)
2413 {
2414     /* Before we make any appends to the tree list we must spill the
2415      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2416
2417     impSpillSpecialSideEff();
2418
2419     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2420     {
2421         GenTree* tree = verCurrentState.esStack[level].val;
2422
2423         /* If the tree may throw an exception, and the block has a handler,
2424            then we need to spill assignments to the local if the local is
2425            live on entry to the handler.
2426            Just spill 'em all without considering the liveness */
2427
2428         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2429
2430         /* Skip the tree if it doesn't have an affected reference,
2431            unless xcptnCaught */
2432
2433         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2434         {
2435             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2436         }
2437     }
2438 }
2439
2440 /*****************************************************************************
2441  *
2442  *  Push catch arg onto the stack.
2443  *  If there are jumps to the beginning of the handler, insert basic block
2444  *  and spill catch arg to a temp. Update the handler block if necessary.
2445  *
2446  *  Returns the basic block of the actual handler.
2447  */
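
/*  Illustrative: when a spill block is needed, a new block containing "tmpN = CATCH_ARG" is
 *  inserted before the handler, and the handler then sees a use of tmpN pushed on the
 *  evaluation stack instead of the raw GT_CATCH_ARG node.
 */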
2448
2449 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2450 {
2451     // Do not inject the basic block twice on reimport. This should be
2452     // hit only under JIT stress. See if the block is the one we injected.
2453     // Note that EH canonicalization can inject internal blocks here. We might
2454     // be able to re-use such a block (but we don't, right now).
2455     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2456         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2457     {
2458         GenTree* tree = hndBlk->bbTreeList;
2459
2460         if (tree != nullptr && tree->gtOper == GT_STMT)
2461         {
2462             tree = tree->gtStmt.gtStmtExpr;
2463             assert(tree != nullptr);
2464
2465             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2466                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2467             {
2468                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2469
2470                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2471
2472                 return hndBlk->bbNext;
2473             }
2474         }
2475
2476         // If we get here, it must have been some other kind of internal block. It's possible that
2477         // someone prepended something to our injected block, but that's unlikely.
2478     }
2479
2480     /* Push the exception address value on the stack */
2481     GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2482
2483     /* Mark the node as having a side-effect - i.e. cannot be
2484      * moved around since it is tied to a fixed location (EAX) */
2485     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2486
2487 #if defined(JIT32_GCENCODER)
2488     const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2489 #else
2490     const bool forceInsertNewBlock                                     = compStressCompile(STRESS_CATCH_ARG, 5);
2491 #endif // defined(JIT32_GCENCODER)
2492
2493     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2494     if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2495     {
2496         if (hndBlk->bbRefs == 1)
2497         {
2498             hndBlk->bbRefs++;
2499         }
2500
2501         /* Create extra basic block for the spill */
2502         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2503         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2504         newBlk->setBBWeight(hndBlk->bbWeight);
2505         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2506
2507         /* Account for the new link we are about to create */
2508         hndBlk->bbRefs++;
2509
2510         /* Spill into a temp */
2511         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2512         lvaTable[tempNum].lvType = TYP_REF;
2513         arg                      = gtNewTempAssign(tempNum, arg);
2514
2515         hndBlk->bbStkTempsIn = tempNum;
2516
2517         /* Report the debug info. impImportBlockCode won't treat
2518          * the actual handler as an exception block and thus won't do it for us. */
2519         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2520         {
2521             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2522             arg            = gtNewStmt(arg, impCurStmtOffs);
2523         }
2524
2525         fgInsertStmtAtEnd(newBlk, arg);
2526
2527         arg = gtNewLclvNode(tempNum, TYP_REF);
2528     }
2529
2530     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2531
2532     return hndBlk;
2533 }
2534
2535 /*****************************************************************************
2536  *
2537  *  Given a tree, clone it. *pClone is set to the cloned tree.
2538  *  Returns the original tree if the cloning was easy,
2539  *   else returns the temp to which the tree had to be spilled to.
2540  *   else returns the temp to which the tree had to be spilled.
2541  */
2542
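/*  Typical usage (illustrative sketch):
 *
 *      GenTree* opClone;
 *      op = impCloneExpr(op, &opClone, structHnd, curLevel, nullptr DEBUGARG("reason"));
 *      // 'op' and 'opClone' may now each be used once in the trees being built.
 */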
2543 GenTree* Compiler::impCloneExpr(GenTree*             tree,
2544                                 GenTree**            pClone,
2545                                 CORINFO_CLASS_HANDLE structHnd,
2546                                 unsigned             curLevel,
2547                                 GenTree** pAfterStmt DEBUGARG(const char* reason))
2548 {
2549     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2550     {
2551         GenTree* clone = gtClone(tree, true);
2552
2553         if (clone)
2554         {
2555             *pClone = clone;
2556             return tree;
2557         }
2558     }
2559
2560     /* Store the operand in a temp and return the temp */
2561
2562     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2563
2564     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2565     // return a struct type. It also may modify the struct type to a more
2566     // specialized type (e.g. a SIMD type).  So we will get the type from
2567     // the lclVar AFTER calling impAssignTempGen().
2568
2569     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2570     var_types type = genActualType(lvaTable[temp].TypeGet());
2571
2572     *pClone = gtNewLclvNode(temp, type);
2573     return gtNewLclvNode(temp, type);
2574 }
2575
2576 /*****************************************************************************
2577  * Remember the IL offset (including stack-empty info) for the trees we will
2578  * generate now.
2579  */
2580
2581 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2582 {
2583     if (compIsForInlining())
2584     {
2585         GenTree* callStmt = impInlineInfo->iciStmt;
2586         assert(callStmt->gtOper == GT_STMT);
2587         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2588     }
2589     else
2590     {
2591         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2592         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2593         impCurStmtOffs    = offs | stkBit;
2594     }
2595 }
2596
2597 /*****************************************************************************
2598  * Returns current IL offset with stack-empty and call-instruction info incorporated
2599  */
2600 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2601 {
2602     if (compIsForInlining())
2603     {
2604         return BAD_IL_OFFSET;
2605     }
2606     else
2607     {
2608         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2609         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2610         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2611         return offs | stkBit | callInstructionBit;
2612     }
2613 }
2614
2615 //------------------------------------------------------------------------
2616 // impCanSpillNow: check whether it is possible to spill all values from the eval stack to local variables.
2617 //
2618 // Arguments:
2619 //    prevOpcode - last importer opcode
2620 //
2621 // Return Value:
2622 //    true if it is legal to spill now; false if the previous opcode could start a sequence that we do not want to divide.
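//
// Notes:
//    For example (illustrative), the IL pattern "newarr; dup; ldtoken <field>; call InitializeArray"
//    must not be split by a spill, or impInitializeArrayIntrinsic would no longer recognize it.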
2623 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2624 {
2625     // Don't spill after ldtoken, newarr, or newobj, because it could be part of the InitializeArray sequence.
2626     // Avoid breaking up the sequence so that impInitializeArrayIntrinsic can still succeed.
2627     return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
2628 }
2629
2630 /*****************************************************************************
2631  *
2632  *  Remember the instr offset for the statements
2633  *
2634  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2635  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2636  *  as some of the trees corresponding to code up to impCurOpcOffs might
2637  *  still be sitting on the stack.
2638  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2639  *  This should be called when an opcode finally/explicitly causes
2640  *  impAppendTree(tree) to be called (as opposed to being called because of
2641  *  a spill caused by the opcode)
2642  */
2643
2644 #ifdef DEBUG
2645
2646 void Compiler::impNoteLastILoffs()
2647 {
2648     if (impLastILoffsStmt == nullptr)
2649     {
2650         // We should have added a statement for the current basic block
2651         // Is this assert correct ?
2652
2653         assert(impTreeLast);
2654         assert(impTreeLast->gtOper == GT_STMT);
2655
2656         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2657     }
2658     else
2659     {
2660         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2661         impLastILoffsStmt                          = nullptr;
2662     }
2663 }
2664
2665 #endif // DEBUG
2666
2667 /*****************************************************************************
2668  * We don't create any GenTree (excluding spills) for a branch.
2669  * For debugging info, we need a placeholder so that we can note
2670  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2671  */
2672
2673 void Compiler::impNoteBranchOffs()
2674 {
2675     if (opts.compDbgCode)
2676     {
2677         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2678     }
2679 }
2680
2681 /*****************************************************************************
2682  * Locate the next stmt boundary for which we need to record info.
2683  * We will have to spill the stack at such boundaries if it is not
2684  * already empty.
2685  * Returns the next stmt boundary (after the start of the block)
2686  */
2687
2688 unsigned Compiler::impInitBlockLineInfo()
2689 {
2690     /* Assume the block does not correspond with any IL offset. This prevents
2691        us from reporting extra offsets. Extra mappings can cause confusing
2692        stepping, especially if the extra mapping is a jump-target, and the
2693        debugger does not ignore extra mappings, but instead rewinds to the
2694        nearest known offset */
2695
2696     impCurStmtOffsSet(BAD_IL_OFFSET);
2697
2698     if (compIsForInlining())
2699     {
2700         return ~0;
2701     }
2702
2703     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2704
2705     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2706     {
2707         impCurStmtOffsSet(blockOffs);
2708     }
2709
2710     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2711     {
2712         impCurStmtOffsSet(blockOffs);
2713     }
2714
2715     /* Always report IL offset 0 or some tests get confused.
2716        Probably a good idea anyway. */
2717
2718     if (blockOffs == 0)
2719     {
2720         impCurStmtOffsSet(blockOffs);
2721     }
2722
2723     if (!info.compStmtOffsetsCount)
2724     {
2725         return ~0;
2726     }
2727
2728     /* Find the lowest explicit stmt boundary within the block */
2729
2730     /* Start looking at an entry that is based on our instr offset */
2731
2732     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2733
2734     if (index >= info.compStmtOffsetsCount)
2735     {
2736         index = info.compStmtOffsetsCount - 1;
2737     }
2738
2739     /* If we've guessed too far, back up */
2740
2741     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2742     {
2743         index--;
2744     }
2745
2746     /* If we guessed short, advance ahead */
2747
2748     while (info.compStmtOffsets[index] < blockOffs)
2749     {
2750         index++;
2751
2752         if (index == info.compStmtOffsetsCount)
2753         {
2754             return info.compStmtOffsetsCount;
2755         }
2756     }
2757
2758     assert(index < info.compStmtOffsetsCount);
2759
2760     if (info.compStmtOffsets[index] == blockOffs)
2761     {
2762         /* There is an explicit boundary for the start of this basic block.
2763            So we will start with bbCodeOffs. Else we will wait until we
2764            get to the next explicit boundary */
2765
2766         impCurStmtOffsSet(blockOffs);
2767
2768         index++;
2769     }
2770
2771     return index;
2772 }
2773
2774 /*****************************************************************************/
2775
2776 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2777 {
2778     switch (opcode)
2779     {
2780         case CEE_CALL:
2781         case CEE_CALLI:
2782         case CEE_CALLVIRT:
2783             return true;
2784
2785         default:
2786             return false;
2787     }
2788 }
2789
2790 /*****************************************************************************/
2791
2792 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2793 {
2794     switch (opcode)
2795     {
2796         case CEE_CALL:
2797         case CEE_CALLI:
2798         case CEE_CALLVIRT:
2799         case CEE_JMP:
2800         case CEE_NEWOBJ:
2801         case CEE_NEWARR:
2802             return true;
2803
2804         default:
2805             return false;
2806     }
2807 }
2808
2809 /*****************************************************************************/
2810
2811 // One might think it is worth caching these values, but results indicate
2812 // that it isn't.
2813 // In addition, caching them causes SuperPMI to be unable to completely
2814 // encapsulate an individual method context.
2815 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2816 {
2817     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2818     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2819     return refAnyClass;
2820 }
2821
2822 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2823 {
2824     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2825     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2826     return typeHandleClass;
2827 }
2828
2829 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2830 {
2831     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2832     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2833     return argIteratorClass;
2834 }
2835
2836 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2837 {
2838     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2839     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2840     return stringClass;
2841 }
2842
2843 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2844 {
2845     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2846     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2847     return objectClass;
2848 }
2849
2850 /*****************************************************************************
2851  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2852  *  set its type to TYP_BYREF when we create it. We know if it can be
2853  *  changed to TYP_I_IMPL only at the point where we use it
2854  */
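// Illustrative example: when an "&local" value produced by ldloca is consumed in a context that
// expects a native int (for instance, integer arithmetic or a store into a TYP_I_IMPL location),
// the importer calls this helper on the operand(s) so the address node is retyped from TYP_BYREF
// to TYP_I_IMPL.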
2855
2856 /* static */
2857 void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
2858 {
2859     if (tree1->IsVarAddr())
2860     {
2861         tree1->gtType = TYP_I_IMPL;
2862     }
2863
2864     if (tree2 && tree2->IsVarAddr())
2865     {
2866         tree2->gtType = TYP_I_IMPL;
2867     }
2868 }
2869
2870 /*****************************************************************************
2871  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2872  *  to make that an explicit cast in our trees, so any implicit casts that
2873  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2874  *  turned into explicit casts here.
2875  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2876  */
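// Illustrative example: on a 64-bit target, passing a TYP_INT value where the consumer expects a
// native int (TYP_I_IMPL) goes through this helper; a GT_CNS_INT is simply retyped in place, while
// any other TYP_INT operand is wrapped in an explicit GT_CAST to TYP_I_IMPL (and vice versa when
// narrowing a TYP_I_IMPL/TYP_BYREF/TYP_REF value to TYP_INT).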
2877
2878 GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
2879 {
2880     var_types currType   = genActualType(tree->gtType);
2881     var_types wantedType = genActualType(dstTyp);
2882
2883     if (wantedType != currType)
2884     {
2885         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2886         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2887         {
2888             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2889             {
2890                 tree->gtType = TYP_I_IMPL;
2891             }
2892         }
2893 #ifdef _TARGET_64BIT_
2894         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2895         {
2896             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2897             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2898         }
2899         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2900         {
2901             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2902             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2903         }
2904 #endif // _TARGET_64BIT_
2905     }
2906
2907     return tree;
2908 }
2909
2910 /*****************************************************************************
2911  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2912  *  but we want to make that an explicit cast in our trees, so any implicit casts
2913  *  that exist in the IL are turned into explicit casts here.
2914  */
2915
2916 GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
2917 {
2918 #ifndef LEGACY_BACKEND
2919     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2920     {
2921         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2922     }
2923 #endif // !LEGACY_BACKEND
2924
2925     return tree;
2926 }
2927
2928 //------------------------------------------------------------------------
2929 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2930 //    with a GT_COPYBLK node.
2931 //
2932 // Arguments:
2933 //    sig - The InitializeArray signature.
2934 //
2935 // Return Value:
2936 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2937 //    nullptr otherwise.
2938 //
2939 // Notes:
2940 //    The function recognizes the following IL pattern:
2941 //      ldc <length> or a list of ldc <lower bound>/<length>
2942 //      newarr or newobj
2943 //      dup
2944 //      ldtoken <field handle>
2945 //      call InitializeArray
2946 //    The lower bounds need not be constant except when the array rank is 1.
2947 //    The function recognizes all kinds of arrays thus enabling a small runtime
2948 //    such as CoreRT to skip providing an implementation for InitializeArray.
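//
//    Illustrative source-level example (not from this file): a C# array initializer such as
//      static readonly int[] s_data = new int[] { 1, 2, 3, 4 };
//    is typically compiled into this newarr/dup/ldtoken/call InitializeArray sequence, with the
//    raw element bytes stored in a metadata RVA field that getArrayInitializationData exposes.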
2949
2950 GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2951 {
2952     assert(sig->numArgs == 2);
2953
2954     GenTree* fieldTokenNode = impStackTop(0).val;
2955     GenTree* arrayLocalNode = impStackTop(1).val;
2956
2957     //
2958     // Verify that the field token is known and valid.  Note that it's also
2959     // possible for the token to come from reflection, in which case we cannot do
2960     // the optimization and must therefore revert to calling the helper.  You can
2961     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2962     //
2963
2964     // Check to see if the ldtoken helper call is what we see here.
2965     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2966         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2967     {
2968         return nullptr;
2969     }
2970
2971     // Strip helper call away
2972     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2973
2974     if (fieldTokenNode->gtOper == GT_IND)
2975     {
2976         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2977     }
2978
2979     // Check for constant
2980     if (fieldTokenNode->gtOper != GT_CNS_INT)
2981     {
2982         return nullptr;
2983     }
2984
2985     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2986     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2987     {
2988         return nullptr;
2989     }
2990
2991     //
2992     // We need to get the number of elements in the array and the size of each element.
2993     // We verify that the newarr statement is exactly what we expect it to be.
2994     // If it's not, then we just return nullptr and don't optimize this call
2995     //
2996
2997     //
2998     // It is possible that we don't have any statements in the block yet
2999     //
3000     if (impTreeLast->gtOper != GT_STMT)
3001     {
3002         assert(impTreeLast->gtOper == GT_BEG_STMTS);
3003         return nullptr;
3004     }
3005
3006     //
3007     // We start by looking at the last statement, making sure it's an assignment, and
3008     // that the target of the assignment is the array passed to InitializeArray.
3009     //
3010     GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
3011     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
3012         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
3013         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
3014     {
3015         return nullptr;
3016     }
3017
3018     //
3019     // Make sure that the object being assigned is a helper call.
3020     //
3021
3022     GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
3023     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
3024     {
3025         return nullptr;
3026     }
3027
3028     //
3029     // Verify that it is one of the new array helpers.
3030     //
3031
3032     bool isMDArray = false;
3033
3034     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
3035         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
3036         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3037         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3038 #ifdef FEATURE_READYTORUN_COMPILER
3039         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3040         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3041 #endif
3042             )
3043     {
3044         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3045         {
3046             return nullptr;
3047         }
3048
3049         isMDArray = true;
3050     }
3051
3052     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3053
3054     //
3055     // Make sure we found a compile time handle to the array
3056     //
3057
3058     if (!arrayClsHnd)
3059     {
3060         return nullptr;
3061     }
3062
3063     unsigned rank = 0;
3064     S_UINT32 numElements;
3065
3066     if (isMDArray)
3067     {
3068         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3069
3070         if (rank == 0)
3071         {
3072             return nullptr;
3073         }
3074
3075         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3076         assert(tokenArg != nullptr);
3077         GenTreeArgList* numArgsArg = tokenArg->Rest();
3078         assert(numArgsArg != nullptr);
3079         GenTreeArgList* argsArg = numArgsArg->Rest();
3080         assert(argsArg != nullptr);
3081
3082         //
3083         // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3084         // so at least one length must be present and the rank can't exceed 32 so there can
3085         // be at most 64 arguments - 32 lengths and 32 lower bounds.
3086         //
3087
3088         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3089             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3090         {
3091             return nullptr;
3092         }
3093
3094         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3095         bool     lowerBoundsSpecified;
3096
3097         if (numArgs == rank * 2)
3098         {
3099             lowerBoundsSpecified = true;
3100         }
3101         else if (numArgs == rank)
3102         {
3103             lowerBoundsSpecified = false;
3104
3105             //
3106             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3107             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3108             // we get a SDArray as well, see the for loop below.
3109             //
3110
3111             if (rank == 1)
3112             {
3113                 isMDArray = false;
3114             }
3115         }
3116         else
3117         {
3118             return nullptr;
3119         }
3120
3121         //
3122         // The rank is known to be at least 1 so we can start with numElements being 1
3123         // to avoid the need to special case the first dimension.
3124         //
3125
3126         numElements = S_UINT32(1);
3127
3128         struct Match
3129         {
3130             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3131             {
3132                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3133                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3134             }
3135
3136             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3137             {
3138                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3139                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3140                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3141             }
3142
3143             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3144             {
3145                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3146                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3147             }
3148
3149             static bool IsComma(GenTree* tree)
3150             {
3151                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3152             }
3153         };
3154
3155         unsigned argIndex = 0;
3156         GenTree* comma;
3157
3158         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3159         {
3160             if (lowerBoundsSpecified)
3161             {
3162                 //
3163                 // In general lower bounds can be ignored because they're not needed to
3164                 // calculate the total number of elements. But for single dimensional arrays
3165                 // we need to know if the lower bound is 0 because in this case the runtime
3166                 // creates a SDArray and this affects the way the array data offset is calculated.
3167                 //
3168
3169                 if (rank == 1)
3170                 {
3171                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3172                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3173                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3174
3175                     if (lowerBoundNode->IsIntegralConst(0))
3176                     {
3177                         isMDArray = false;
3178                     }
3179                 }
3180
3181                 comma = comma->gtGetOp2();
3182                 argIndex++;
3183             }
3184
3185             GenTree* lengthNodeAssign = comma->gtGetOp1();
3186             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3187             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3188
3189             if (!lengthNode->IsCnsIntOrI())
3190             {
3191                 return nullptr;
3192             }
3193
3194             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3195             argIndex++;
3196         }
3197
3198         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3199
3200         if (argIndex != numArgs)
3201         {
3202             return nullptr;
3203         }
3204     }
3205     else
3206     {
3207         //
3208         // Make sure there are exactly two arguments:  the array class and
3209         // the number of elements.
3210         //
3211
3212         GenTree* arrayLengthNode;
3213
3214         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3215 #ifdef FEATURE_READYTORUN_COMPILER
3216         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3217         {
3218             // Array length is 1st argument for readytorun helper
3219             arrayLengthNode = args->Current();
3220         }
3221         else
3222 #endif
3223         {
3224             // Array length is 2nd argument for regular helper
3225             arrayLengthNode = args->Rest()->Current();
3226         }
3227
3228         //
3229         // Make sure that the number of elements looks valid.
3230         //
3231         if (arrayLengthNode->gtOper != GT_CNS_INT)
3232         {
3233             return nullptr;
3234         }
3235
3236         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3237
3238         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3239         {
3240             return nullptr;
3241         }
3242     }
3243
3244     CORINFO_CLASS_HANDLE elemClsHnd;
3245     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3246
3247     //
3248     // Note that genTypeSize will return zero for non primitive types, which is exactly
3249     // what we want (size will then be 0, and we will catch this in the conditional below).
3250     // Note that we don't expect this to fail for valid binaries, so we assert in the
3251     // non-verification case (the verification case should not assert but rather correctly
3252     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3253     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3254     // why.
3255     //
3256
3257     S_UINT32 elemSize(genTypeSize(elementType));
3258     S_UINT32 size = elemSize * S_UINT32(numElements);
3259
3260     if (size.IsOverflow())
3261     {
3262         return nullptr;
3263     }
3264
3265     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3266     {
3267         assert(verNeedsVerification());
3268         return nullptr;
3269     }
3270
3271     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3272     if (!initData)
3273     {
3274         return nullptr;
3275     }
3276
3277     //
3278     // At this point we are ready to commit to implementing the InitializeArray
3279     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3280     // return the struct assignment node.
3281     //
3282
3283     impPopStack();
3284     impPopStack();
3285
3286     const unsigned blkSize = size.Value();
3287     unsigned       dataOffset;
3288
3289     if (isMDArray)
3290     {
3291         dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3292     }
3293     else
3294     {
3295         dataOffset = eeGetArrayDataOffset(elementType);
3296     }
3297
3298     GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3299     GenTree* blk = gtNewBlockVal(dst, blkSize);
3300     GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
3301
3302     return gtNewBlkOpNode(blk,     // dst
3303                           src,     // src
3304                           blkSize, // size
3305                           false,   // volatile
3306                           true);   // copyBlock
3307 }
3308
3309 //------------------------------------------------------------------------
3310 // impIntrinsic: possibly expand intrinsic call into alternate IR sequence
3311 //
3312 // Arguments:
3313 //    newobjThis - for constructor calls, the tree for the newly allocated object
3314 //    clsHnd - handle for the intrinsic method's class
3315 //    method - handle for the intrinsic method
3316 //    sig    - signature of the intrinsic method
3317 //    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
3318 //    memberRef - the token for the intrinsic method
3319 //    readonlyCall - true if call has a readonly prefix
3320 //    tailCall - true if call is in tail position
3321 //    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
3322 //       if call is not constrained
3323 //    constraintCallThisTransform -- this transform to apply for a constrained call
3324 //    pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
3325 //       for "traditional" jit intrinsics
3326 //    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
3327 //       that is amenable to special downstream optimization opportunities
3328 //
3329 // Returns:
3330 //    IR tree to use in place of the call, or nullptr if the jit should treat
3331 //    the intrinsic call like a normal call.
3332 //
3333 //    pIntrinsicID set to non-illegal value if the call is recognized as a
3334 //    traditional jit intrinsic, even if the intrinsic is not expanded.
3335 //
3336 //    isSpecial set true if the expansion is subject to special
3337 //    optimizations later in the jit processing
3338 //
3339 // Notes:
3340 //    On success the IR tree may be a call to a different method or an inline
3341 //    sequence. If it is a call, then the intrinsic processing here is responsible
3342 //    for handling all the special cases, as upon return to impImportCall
3343 //    expanded intrinsics bypass most of the normal call processing.
3344 //
3345 //    Intrinsics are generally not recognized in minopts and debug codegen.
3346 //
3347 //    However, certain traditional intrinsics are identified as "must expand"
3348 //    if there is no fallback implementation to invoke; these must be handled
3349 //    in all codegen modes.
3350 //
3351 //    New style intrinsics (where the fallback implementation is in IL) are
3352 //    identified as "must expand" if they are invoked from within their
3353 //    own method bodies.
3354 //
3355
3356 GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
3357                                 CORINFO_CLASS_HANDLE    clsHnd,
3358                                 CORINFO_METHOD_HANDLE   method,
3359                                 CORINFO_SIG_INFO*       sig,
3360                                 unsigned                methodFlags,
3361                                 int                     memberRef,
3362                                 bool                    readonlyCall,
3363                                 bool                    tailCall,
3364                                 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
3365                                 CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
3366                                 CorInfoIntrinsics*      pIntrinsicID,
3367                                 bool*                   isSpecialIntrinsic)
3368 {
3369     assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
3370
3371     bool              mustExpand  = false;
3372     bool              isSpecial   = false;
3373     CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
3374     NamedIntrinsic    ni          = NI_Illegal;
3375
3376     if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
3377     {
3378         intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3379     }
3380
3381     if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
3382     {
3383         // Recursive calls to JIT intrinsics are must-expand by convention.
3384         mustExpand = mustExpand || gtIsRecursiveCall(method);
3385
3386         if (intrinsicID == CORINFO_INTRINSIC_Illegal)
3387         {
3388             ni = lookupNamedIntrinsic(method);
3389
3390 #ifdef FEATURE_HW_INTRINSICS
3391             if (ni > NI_HW_INTRINSIC_START && ni < NI_HW_INTRINSIC_END)
3392             {
3393                 return impHWIntrinsic(ni, method, sig, mustExpand);
3394             }
3395 #endif // FEATURE_HW_INTRINSICS
3396         }
3397     }
3398
3399     *pIntrinsicID = intrinsicID;
3400
3401 #ifndef _TARGET_ARM_
3402     genTreeOps interlockedOperator;
3403 #endif
3404
3405     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3406     {
3407         // must be done regardless of DbgCode and MinOpts
3408         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3409     }
3410 #ifdef _TARGET_64BIT_
3411     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3412     {
3413         // must be done regardless of DbgCode and MinOpts
3414         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3415     }
3416 #else
3417     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3418 #endif
3419
3420     GenTree* retNode = nullptr;
3421
3422     // Under debug and minopts, only expand what is required.
3423     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3424     {
3425         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3426         return retNode;
3427     }
3428
3429     var_types callType = JITtype2varType(sig->retType);
3430
3431     /* First do the intrinsics which are always smaller than a call */
3432
3433     switch (intrinsicID)
3434     {
3435         GenTree* op1;
3436         GenTree* op2;
3437
3438         case CORINFO_INTRINSIC_Sin:
3439         case CORINFO_INTRINSIC_Cbrt:
3440         case CORINFO_INTRINSIC_Sqrt:
3441         case CORINFO_INTRINSIC_Abs:
3442         case CORINFO_INTRINSIC_Cos:
3443         case CORINFO_INTRINSIC_Round:
3444         case CORINFO_INTRINSIC_Cosh:
3445         case CORINFO_INTRINSIC_Sinh:
3446         case CORINFO_INTRINSIC_Tan:
3447         case CORINFO_INTRINSIC_Tanh:
3448         case CORINFO_INTRINSIC_Asin:
3449         case CORINFO_INTRINSIC_Asinh:
3450         case CORINFO_INTRINSIC_Acos:
3451         case CORINFO_INTRINSIC_Acosh:
3452         case CORINFO_INTRINSIC_Atan:
3453         case CORINFO_INTRINSIC_Atan2:
3454         case CORINFO_INTRINSIC_Atanh:
3455         case CORINFO_INTRINSIC_Log10:
3456         case CORINFO_INTRINSIC_Pow:
3457         case CORINFO_INTRINSIC_Exp:
3458         case CORINFO_INTRINSIC_Ceiling:
3459         case CORINFO_INTRINSIC_Floor:
3460             retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
3461             break;
3462
3463 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3464         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3465         case CORINFO_INTRINSIC_InterlockedAdd32:
3466             interlockedOperator = GT_LOCKADD;
3467             goto InterlockedBinOpCommon;
3468         case CORINFO_INTRINSIC_InterlockedXAdd32:
3469             interlockedOperator = GT_XADD;
3470             goto InterlockedBinOpCommon;
3471         case CORINFO_INTRINSIC_InterlockedXchg32:
3472             interlockedOperator = GT_XCHG;
3473             goto InterlockedBinOpCommon;
3474
3475 #ifdef _TARGET_64BIT_
3476         case CORINFO_INTRINSIC_InterlockedAdd64:
3477             interlockedOperator = GT_LOCKADD;
3478             goto InterlockedBinOpCommon;
3479         case CORINFO_INTRINSIC_InterlockedXAdd64:
3480             interlockedOperator = GT_XADD;
3481             goto InterlockedBinOpCommon;
3482         case CORINFO_INTRINSIC_InterlockedXchg64:
3483             interlockedOperator = GT_XCHG;
3484             goto InterlockedBinOpCommon;
3485 #endif // _TARGET_64BIT_
3486
3487         InterlockedBinOpCommon:
3488             assert(callType != TYP_STRUCT);
3489             assert(sig->numArgs == 2);
3490
3491             op2 = impPopStack().val;
3492             op1 = impPopStack().val;
3493
3494             // This creates:
3495             //   val
3496             // XAdd
3497             //   addr
3498             //     field (for example)
3499             //
3500             // In the case where the first argument is the address of a local, we might
3501             // want to make this *not* make the var address-taken -- but atomic instructions
3502             // on a local are probably pretty useless anyway, so we probably don't care.
3503
3504             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3505             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3506             retNode = op1;
3507             break;
3508 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3509
3510         case CORINFO_INTRINSIC_MemoryBarrier:
3511
3512             assert(sig->numArgs == 0);
3513
3514             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3515             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3516             retNode = op1;
3517             break;
3518
3519 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3520         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3521         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3522 #ifdef _TARGET_64BIT_
3523         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3524 #endif
3525         {
3526             assert(callType != TYP_STRUCT);
3527             assert(sig->numArgs == 3);
3528             GenTree* op3;
3529
3530             op3 = impPopStack().val; // comparand
3531             op2 = impPopStack().val; // value
3532             op1 = impPopStack().val; // location
3533
3534             GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3535
3536             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3537             retNode = node;
3538             break;
3539         }
3540 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3541
3542         case CORINFO_INTRINSIC_StringLength:
3543             op1 = impPopStack().val;
3544             if (!opts.MinOpts() && !opts.compDbgCode)
3545             {
3546                 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3547                 op1                   = arrLen;
3548             }
3549             else
3550             {
3551                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3552                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3553                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3554                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3555             }
3556
3557             // Getting the length of a null string should throw
3558             op1->gtFlags |= GTF_EXCEPT;
3559
3560             retNode = op1;
3561             break;
3562
3563         case CORINFO_INTRINSIC_StringGetChar:
3564             op2 = impPopStack().val;
3565             op1 = impPopStack().val;
3566             op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
3567             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3568             retNode = op1;
3569             break;
3570
3571         case CORINFO_INTRINSIC_InitializeArray:
3572             retNode = impInitializeArrayIntrinsic(sig);
3573             break;
3574
3575         case CORINFO_INTRINSIC_Array_Address:
3576         case CORINFO_INTRINSIC_Array_Get:
3577         case CORINFO_INTRINSIC_Array_Set:
3578             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3579             break;
3580
3581         case CORINFO_INTRINSIC_GetTypeFromHandle:
3582             op1 = impStackTop(0).val;
3583             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3584                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3585             {
3586                 op1 = impPopStack().val;
3587                 // Change call to return RuntimeType directly.
3588                 op1->gtType = TYP_REF;
3589                 retNode     = op1;
3590             }
3591             // Call the regular function.
3592             break;
3593
3594         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3595             op1 = impStackTop(0).val;
3596             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3597                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3598             {
3599                 // Old tree
3600                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3601                 //
3602                 // New tree
3603                 // TreeToGetNativeTypeHandle
3604
3605                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3606                 // to that helper.
3607
3608                 op1 = impPopStack().val;
3609
3610                 // Get native TypeHandle argument to old helper
3611                 op1 = op1->gtCall.gtCallArgs;
3612                 assert(op1->OperIsList());
3613                 assert(op1->gtOp.gtOp2 == nullptr);
3614                 op1     = op1->gtOp.gtOp1;
3615                 retNode = op1;
3616             }
3617             // Call the regular function.
3618             break;
3619
3620 #ifndef LEGACY_BACKEND
3621         case CORINFO_INTRINSIC_Object_GetType:
3622         {
3623             JITDUMP("\n impIntrinsic: call to Object.GetType\n");
3624             op1 = impStackTop(0).val;
3625
3626             // If we're calling GetType on a boxed value, just get the type directly.
3627             if (op1->IsBoxedValue())
3628             {
3629                 JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
3630
3631                 // Try and clean up the box. Obtain the handle we
3632                 // were going to pass to the newobj.
3633                 GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
3634
3635                 if (boxTypeHandle != nullptr)
3636                 {
3637                     // Note we don't need to play the TYP_STRUCT games here like
3638                     // we do for LDTOKEN since the return value of this operator is Type,
3639                     // not RuntimeTypeHandle.
3640                     impPopStack();
3641                     GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
3642                     GenTree*        runtimeType =
3643                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3644                     retNode = runtimeType;
3645                 }
3646             }
3647
3648             // If we have a constrained callvirt with a "box this" transform
3649             // we know we have a value class and hence an exact type.
3650             //
3651             // If so, instead of boxing and then extracting the type, just
3652             // construct the type directly.
3653             if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
3654                 (constraintCallThisTransform == CORINFO_BOX_THIS))
3655             {
3656                 // Ensure this is one of the simple box cases (in particular, rule out nullables).
3657                 const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
3658                 const bool            isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
3659
3660                 if (isSafeToOptimize)
3661                 {
3662                     JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
3663                     impPopStack();
3664                     GenTree* typeHandleOp =
3665                         impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
3666                     if (typeHandleOp == nullptr)
3667                     {
3668                         assert(compDonotInline());
3669                         return nullptr;
3670                     }
3671                     GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
3672                     GenTree*        runtimeType =
3673                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3674                     retNode = runtimeType;
3675                 }
3676             }
3677
3678 #ifdef DEBUG
3679             if (retNode != nullptr)
3680             {
3681                 JITDUMP("Optimized result for call to GetType is\n");
3682                 if (verbose)
3683                 {
3684                     gtDispTree(retNode);
3685                 }
3686             }
3687 #endif
3688
3689             // Else expand as an intrinsic, unless the call is constrained,
3690             // in which case we defer expansion to allow impImportCall to do the
3691             // special constraint processing.
3692             if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
3693             {
3694                 JITDUMP("Expanding as special intrinsic\n");
3695                 impPopStack();
3696                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3697
3698                 // Set the CALL flag to indicate that the operator is implemented by a call.
3699                 // Set also the EXCEPTION flag because the native implementation of
3700                 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3701                 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3702                 retNode = op1;
3703                 // Might be further optimizable, so arrange to leave a mark behind
3704                 isSpecial = true;
3705             }
3706
3707             if (retNode == nullptr)
3708             {
3709                 JITDUMP("Leaving as normal call\n");
3710                 // Might be further optimizable, so arrange to leave a mark behind
3711                 isSpecial = true;
3712             }
3713
3714             break;
3715         }
3716
3717 #endif
3718         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3719         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3720         // substitution.  The parameter byref will be assigned into the newly allocated object.
3721         case CORINFO_INTRINSIC_ByReference_Ctor:
3722         {
3723             // Remove call to constructor and directly assign the byref passed
3724             // to the call to the first slot of the ByReference struct.
3725             op1                                    = impPopStack().val;
3726             GenTree*             thisptr           = newobjThis;
3727             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3728             GenTree*             field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3729             GenTree*             assign            = gtNewAssignNode(field, op1);
3730             GenTree*             byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3731             assert(byReferenceStruct != nullptr);
3732             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3733             retNode = assign;
3734             break;
3735         }
3736         // Implement ptr value getter for ByReference struct.
3737         case CORINFO_INTRINSIC_ByReference_Value:
3738         {
3739             op1                         = impPopStack().val;
3740             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3741             GenTree*             field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3742             retNode                     = field;
3743             break;
3744         }
3745         case CORINFO_INTRINSIC_Span_GetItem:
3746         case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3747         {
3748             // Have the index and a pointer to the Span<T> (s) on the stack. Expand to:
3749             //
3750             // For Span<T>
3751             //   Comma
3752             //     BoundsCheck(index, s->_length)
3753             //     s->_pointer + index * sizeof(T)
3754             //
3755             // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
3756             //
3757             // Signature should show one class type parameter, which
3758             // we need to examine.
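            // Illustrative concrete instance of the expansion above: for Span<int> (elemSize == 4),
            // s[i] becomes COMMA(BoundsCheck(i, s->_length), s->_pointer + (native int)i * 4).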
3759             assert(sig->sigInst.classInstCount == 1);
3760             CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3761             const unsigned       elemSize    = info.compCompHnd->getClassSize(spanElemHnd);
3762             assert(elemSize > 0);
3763
3764             const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3765
3766             JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3767                     info.compCompHnd->getClassName(spanElemHnd), elemSize);
3768
3769             GenTree* index          = impPopStack().val;
3770             GenTree* ptrToSpan      = impPopStack().val;
3771             GenTree* indexClone     = nullptr;
3772             GenTree* ptrToSpanClone = nullptr;
3773
3774 #if defined(DEBUG)
3775             if (verbose)
3776             {
3777                 printf("with ptr-to-span\n");
3778                 gtDispTree(ptrToSpan);
3779                 printf("and index\n");
3780                 gtDispTree(index);
3781             }
3782 #endif // defined(DEBUG)
3783
3784             // We need to use both index and ptr-to-span twice, so clone or spill.
3785             index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3786                                  nullptr DEBUGARG("Span.get_Item index"));
3787             ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3788                                      nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3789
3790             // Bounds check
3791             CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
3792             const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3793             GenTree*             length       = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
3794             GenTree*             boundsCheck  = new (this, GT_ARR_BOUNDS_CHECK)
3795                 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3796
3797             // Element access
3798             GenTree*             indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3799             GenTree*             sizeofNode  = gtNewIconNode(elemSize);
3800             GenTree*             mulNode     = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3801             CORINFO_FIELD_HANDLE ptrHnd      = info.compCompHnd->getFieldInClass(clsHnd, 0);
3802             const unsigned       ptrOffset   = info.compCompHnd->getFieldOffset(ptrHnd);
3803             GenTree*             data        = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
3804             GenTree*             result      = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3805
3806             // Prepare result
3807             var_types resultType = JITtype2varType(sig->retType);
3808             assert(resultType == result->TypeGet());
3809             retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3810
3811             break;
3812         }
3813
3814         case CORINFO_INTRINSIC_GetRawHandle:
3815         {
3816             noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3817             CORINFO_RESOLVED_TOKEN resolvedToken;
3818             resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3819             resolvedToken.tokenScope   = info.compScopeHnd;
3820             resolvedToken.token        = memberRef;
3821             resolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
3822
3823             CORINFO_GENERICHANDLE_RESULT embedInfo;
3824             info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3825
3826             GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3827                                                  embedInfo.compileTimeHandle);
3828             if (rawHandle == nullptr)
3829             {
3830                 return nullptr;
3831             }
3832
3833             noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3834
3835             unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3836             impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3837
3838             GenTree*  lclVar     = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3839             GenTree*  lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3840             var_types resultType = JITtype2varType(sig->retType);
3841             retNode              = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3842
3843             break;
3844         }
3845
3846         case CORINFO_INTRINSIC_TypeEQ:
3847         case CORINFO_INTRINSIC_TypeNEQ:
3848         {
3849             JITDUMP("Importing Type.op_*Equality intrinsic\n");
3850             op1              = impStackTop(1).val;
3851             op2              = impStackTop(0).val;
3852             GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
3853             if (optTree != nullptr)
3854             {
3855                 // Success, clean up the evaluation stack.
3856                 impPopStack();
3857                 impPopStack();
3858
3859                 // See if we can optimize even further, to a handle compare.
3860                 optTree = gtFoldTypeCompare(optTree);
3861
3862                 // See if we can now fold a handle compare to a constant.
3863                 optTree = gtFoldExpr(optTree);
3864
3865                 retNode = optTree;
3866             }
3867             else
3868             {
3869                 // Retry optimizing these later
3870                 isSpecial = true;
3871             }
3872             break;
3873         }
3874
3875         case CORINFO_INTRINSIC_GetCurrentManagedThread:
3876         case CORINFO_INTRINSIC_GetManagedThreadId:
3877         {
3878             // Retry optimizing these during morph
3879             isSpecial = true;
3880             break;
3881         }
3882
3883         default:
3884             /* Unknown intrinsic */
3885             intrinsicID = CORINFO_INTRINSIC_Illegal;
3886             break;
3887     }
3888
3889     // Look for new-style jit intrinsics by name
3890     if (ni != NI_Illegal)
3891     {
3892         assert(retNode == nullptr);
3893         switch (ni)
3894         {
3895             case NI_System_Enum_HasFlag:
3896             {
3897                 GenTree* thisOp  = impStackTop(1).val;
3898                 GenTree* flagOp  = impStackTop(0).val;
3899                 GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
3900
3901                 if (optTree != nullptr)
3902                 {
3903                     // Optimization successful. Pop the stack for real.
3904                     impPopStack();
3905                     impPopStack();
3906                     retNode = optTree;
3907                 }
3908                 else
3909                 {
3910                     // Retry optimizing this during morph.
3911                     isSpecial = true;
3912                 }
3913
3914                 break;
3915             }
3916
3917             case NI_MathF_Round:
3918             case NI_Math_Round:
3919             {
3920                 // Math.Round and MathF.Round used to be traditional JIT intrinsics. In order
3921                 // to simplify the transition, we will just treat it as if it was still the
3922                 // old intrinsic, CORINFO_INTRINSIC_Round. This should end up flowing properly
3923                 // everywhere else.
3924
3925                 retNode = impMathIntrinsic(method, sig, callType, CORINFO_INTRINSIC_Round, tailCall);
3926                 break;
3927             }
3928
3929             case NI_System_Collections_Generic_EqualityComparer_get_Default:
3930             {
3931                 // Flag for later handling during devirtualization.
3932                 isSpecial = true;
3933                 break;
3934             }
3935
3936             default:
3937                 break;
3938         }
3939     }
3940
3941     if (mustExpand)
3942     {
3943         if (retNode == nullptr)
3944         {
3945             NO_WAY("JIT must expand the intrinsic!");
3946         }
3947     }
3948
3949     // Optionally report if this intrinsic is special
3950     // (that is, potentially re-optimizable during morph).
3951     if (isSpecialIntrinsic != nullptr)
3952     {
3953         *isSpecialIntrinsic = isSpecial;
3954     }
3955
3956     return retNode;
3957 }
3958
3959 GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
3960                                     CORINFO_SIG_INFO*     sig,
3961                                     var_types             callType,
3962                                     CorInfoIntrinsics     intrinsicID,
3963                                     bool                  tailCall)
3964 {
3965     GenTree* op1;
3966     GenTree* op2;
3967
3968     assert(callType != TYP_STRUCT);
3969     assert((intrinsicID == CORINFO_INTRINSIC_Sin) || intrinsicID == CORINFO_INTRINSIC_Cbrt ||
3970            (intrinsicID == CORINFO_INTRINSIC_Sqrt) || (intrinsicID == CORINFO_INTRINSIC_Abs) ||
3971            (intrinsicID == CORINFO_INTRINSIC_Cos) || (intrinsicID == CORINFO_INTRINSIC_Round) ||
3972            (intrinsicID == CORINFO_INTRINSIC_Cosh) || (intrinsicID == CORINFO_INTRINSIC_Sinh) ||
3973            (intrinsicID == CORINFO_INTRINSIC_Tan) || (intrinsicID == CORINFO_INTRINSIC_Tanh) ||
3974            (intrinsicID == CORINFO_INTRINSIC_Asin) || (intrinsicID == CORINFO_INTRINSIC_Asinh) ||
3975            (intrinsicID == CORINFO_INTRINSIC_Acos) || (intrinsicID == CORINFO_INTRINSIC_Acosh) ||
3976            (intrinsicID == CORINFO_INTRINSIC_Atan) || (intrinsicID == CORINFO_INTRINSIC_Atan2) ||
3977            (intrinsicID == CORINFO_INTRINSIC_Atanh) || (intrinsicID == CORINFO_INTRINSIC_Log10) ||
3978            (intrinsicID == CORINFO_INTRINSIC_Pow) || (intrinsicID == CORINFO_INTRINSIC_Exp) ||
3979            (intrinsicID == CORINFO_INTRINSIC_Ceiling) || (intrinsicID == CORINFO_INTRINSIC_Floor));
3980
3981     op1 = nullptr;
3982
3983 #if defined(LEGACY_BACKEND)
3984     if (IsTargetIntrinsic(intrinsicID))
3985 #elif !defined(_TARGET_X86_)
3986     // Intrinsics that are not implemented directly by target instructions will
3987     // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3988     // don't do this optimization, because
3989     //  a) of back-compatibility concerns on desktop .NET 4.6 / 4.6.1, and
3990     //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
3991     //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3992     if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3993 #else
3994     // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3995     // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3996     // code generation for certain EH constructs.
3997     if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3998 #endif
3999     {
4000         switch (sig->numArgs)
4001         {
4002             case 1:
4003                 op1 = impPopStack().val;
4004
4005 #if FEATURE_X87_DOUBLES
4006
4007                 // X87 stack doesn't differentiate between float/double
4008                 // so it doesn't need a cast, but everybody else does
4009                 // Just double-check that it is at least an FP type
4010                 noway_assert(varTypeIsFloating(op1));
4011
4012 #else // FEATURE_X87_DOUBLES
4013
4014                 if (op1->TypeGet() != callType)
4015                 {
4016                     op1 = gtNewCastNode(callType, op1, callType);
4017                 }
4018
4019 #endif // FEATURE_X87_DOUBLES
4020
4021                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
4022                 break;
4023
4024             case 2:
4025                 op2 = impPopStack().val;
4026                 op1 = impPopStack().val;
4027
4028 #if FEATURE_X87_DOUBLES
4029
4030                 // X87 stack doesn't differentiate between float/double
4031                 // so it doesn't need a cast, but everybody else does
4032                 // Just double-check that it is at least an FP type
4033                 noway_assert(varTypeIsFloating(op2));
4034                 noway_assert(varTypeIsFloating(op1));
4035
4036 #else // FEATURE_X87_DOUBLES
4037
4038                 if (op2->TypeGet() != callType)
4039                 {
4040                     op2 = gtNewCastNode(callType, op2, callType);
4041                 }
4042                 if (op1->TypeGet() != callType)
4043                 {
4044                     op1 = gtNewCastNode(callType, op1, callType);
4045                 }
4046
4047 #endif // FEATURE_X87_DOUBLES
4048
4049                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
4050                 break;
4051
4052             default:
4053                 NO_WAY("Unsupported number of args for Math Intrinsic");
4054         }
4055
4056 #ifndef LEGACY_BACKEND
4057         if (IsIntrinsicImplementedByUserCall(intrinsicID))
4058         {
4059             op1->gtFlags |= GTF_CALL;
4060         }
4061 #endif
4062     }
4063
4064     return op1;
4065 }
4066
4067 //------------------------------------------------------------------------
4068 // lookupNamedIntrinsic: map method to jit named intrinsic value
4069 //
4070 // Arguments:
4071 //    method -- method handle for method
4072 //
4073 // Return Value:
4074 //    Id for the named intrinsic, or Illegal if none.
4075 //
4076 // Notes:
4077 //    method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
4078 //    otherwise it is not a named jit intrinsic.
4079 //
4080
4081 NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
4082 {
4083     NamedIntrinsic result = NI_Illegal;
4084
4085     const char* className     = nullptr;
4086     const char* namespaceName = nullptr;
4087     const char* methodName    = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
4088
4089     if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
4090     {
4091         return result;
4092     }
4093
4094     if (strcmp(namespaceName, "System") == 0)
4095     {
4096         if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
4097         {
4098             result = NI_System_Enum_HasFlag;
4099         }
4100         else if ((strcmp(className, "MathF") == 0) && (strcmp(methodName, "Round") == 0))
4101         {
4102             result = NI_MathF_Round;
4103         }
4104         else if ((strcmp(className, "Math") == 0) && (strcmp(methodName, "Round") == 0))
4105         {
4106             result = NI_Math_Round;
4107         }
4108     }
4109     else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
4110     {
4111         if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
4112         {
4113             result = NI_System_Collections_Generic_EqualityComparer_get_Default;
4114         }
4115     }
4116
4117 #ifdef FEATURE_HW_INTRINSICS
4118 #if defined(_TARGET_XARCH_)
4119     if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.X86") == 0)
4120     {
4121         InstructionSet isa = lookupHWIntrinsicISA(className);
4122         result             = lookupHWIntrinsic(methodName, isa);
4123     }
4124 #elif defined(_TARGET_ARM64_)
4125     if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.Arm.Arm64") == 0)
4126     {
4127         result = lookupHWIntrinsic(className, methodName);
4128     }
4129 #else // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4130 #error Unsupported platform
4131 #endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4132 #endif // FEATURE_HW_INTRINSICS
4133     return result;
4134 }
4135
4136 /*****************************************************************************/
4137
4138 GenTree* Compiler::impArrayAccessIntrinsic(
4139     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
4140 {
4141     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
4142        the following, as it generates fatter code.
4143     */
4144
4145     if (compCodeOpt() == SMALL_CODE)
4146     {
4147         return nullptr;
4148     }
4149
4150     /* These intrinsics generate fatter (but faster) code and are only
4151        done if we don't need SMALL_CODE */
4152
4153     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
4154
4155     // The rank 1 case is special because it has to handle two array formats,
4156     // so we simply do not handle that case here.
4157     if (rank > GT_ARR_MAX_RANK || rank <= 1)
4158     {
4159         return nullptr;
4160     }
4161
4162     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
4163     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
4164
4165     // For the ref case, we will only be able to inline if the types match
4166     // (the verifier checks for this; we don't care for the non-verified case) and the
4167     // type is final (so we don't need to do the cast).
4168     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
4169     {
4170         // Get the call site signature
4171         CORINFO_SIG_INFO LocalSig;
4172         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
4173         assert(LocalSig.hasThis());
4174
4175         CORINFO_CLASS_HANDLE actualElemClsHnd;
4176
4177         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4178         {
4179             // Fetch the last argument, the one that indicates the type we are setting.
4180             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
4181             for (unsigned r = 0; r < rank; r++)
4182             {
4183                 argType = info.compCompHnd->getArgNext(argType);
4184             }
4185
4186             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
4187             actualElemClsHnd = argInfo.GetClassHandle();
4188         }
4189         else
4190         {
4191             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
4192
4193             // Fetch the return type
4194             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
4195             assert(retInfo.IsByRef());
4196             actualElemClsHnd = retInfo.GetClassHandle();
4197         }
4198
4199         // if it's not final, we can't do the optimization
4200         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
4201         {
4202             return nullptr;
4203         }
4204     }
4205
4206     unsigned arrayElemSize;
4207     if (elemType == TYP_STRUCT)
4208     {
4209         assert(arrElemClsHnd);
4210
4211         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
4212     }
4213     else
4214     {
4215         arrayElemSize = genTypeSize(elemType);
4216     }
4217
4218     if ((unsigned char)arrayElemSize != arrayElemSize)
4219     {
4220         // arrayElemSize would be truncated as an unsigned char.
4221         // This means the array element is too large. Don't do the optimization.
4222         return nullptr;
4223     }
4224
4225     GenTree* val = nullptr;
4226
4227     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4228     {
4229         // Assignment of a struct is more work, and there are more gets than sets.
4230         if (elemType == TYP_STRUCT)
4231         {
4232             return nullptr;
4233         }
4234
4235         val = impPopStack().val;
4236         assert(genActualType(elemType) == genActualType(val->gtType) ||
4237                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
4238                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
4239                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
4240     }
4241
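    // The rank is stored as an unsigned char in GenTreeArrElem, so make sure GT_ARR_MAX_RANK
    // (an upper bound on 'rank') fits in an unsigned char.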
4242     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
4243
4244     GenTree* inds[GT_ARR_MAX_RANK];
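    // The index operands were pushed left to right, so pop them in reverse order to restore
    // the original dimension order in inds[].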
4245     for (unsigned k = rank; k > 0; k--)
4246     {
4247         inds[k - 1] = impPopStack().val;
4248     }
4249
4250     GenTree* arr = impPopStack().val;
4251     assert(arr->gtType == TYP_REF);
4252
4253     GenTree* arrElem =
4254         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
4255                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
4256
4257     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
4258     {
4259         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
4260     }
4261
4262     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4263     {
4264         assert(val != nullptr);
4265         return gtNewAssignNode(arrElem, val);
4266     }
4267     else
4268     {
4269         return arrElem;
4270     }
4271 }
4272
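/*****************************************************************************
 *  Merge the current verification state into the entry state recorded for
 *  'block'.  Returns FALSE if the two states are incompatible; '*changed' is
 *  set when the block's entry state is widened by the merge.
 */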
4273 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
4274 {
4275     unsigned i;
4276
4277     // do some basic checks first
4278     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
4279     {
4280         return FALSE;
4281     }
4282
4283     if (verCurrentState.esStackDepth > 0)
4284     {
4285         // merge stack types
4286         StackEntry* parentStack = block->bbStackOnEntry();
4287         StackEntry* childStack  = verCurrentState.esStack;
4288
4289         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
4290         {
4291             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
4292             {
4293                 return FALSE;
4294             }
4295         }
4296     }
4297
4298     // merge initialization status of this ptr
4299
4300     if (verTrackObjCtorInitState)
4301     {
4302         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
4303         assert(verCurrentState.thisInitialized != TIS_Bottom);
4304
4305         // If the successor block's thisInit state is unknown, copy it from the current state.
4306         if (block->bbThisOnEntry() == TIS_Bottom)
4307         {
4308             *changed = true;
4309             verSetThisInit(block, verCurrentState.thisInitialized);
4310         }
4311         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
4312         {
4313             if (block->bbThisOnEntry() != TIS_Top)
4314             {
4315                 *changed = true;
4316                 verSetThisInit(block, TIS_Top);
4317
4318                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
4319                 {
4320                     // The block is bad. Control can flow through the block to any handler that catches the
4321                     // verification exception, but the importer ignores bad blocks and therefore won't model
4322                     // this flow in the normal way. To complete the merge into the bad block, the new state
4323                     // needs to be manually pushed to the handlers that may be reached after the verification
4324                     // exception occurs.
4325                     //
4326                     // Usually, the new state was already propagated to the relevant handlers while processing
4327                     // the predecessors of the bad block. The exception is when the bad block is at the start
4328                     // of a try region, meaning it is protected by additional handlers that do not protect its
4329                     // predecessors.
4330                     //
4331                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
4332                     {
4333                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
4334                         // recursive calls back into this code path (if successors of the current bad block are
4335                         // also bad blocks).
4336                         //
4337                         ThisInitState origTIS           = verCurrentState.thisInitialized;
4338                         verCurrentState.thisInitialized = TIS_Top;
4339                         impVerifyEHBlock(block, true);
4340                         verCurrentState.thisInitialized = origTIS;
4341                     }
4342                 }
4343             }
4344         }
4345     }
4346     else
4347     {
4348         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4349     }
4350
4351     return TRUE;
4352 }
4353
4354 /*****************************************************************************
4355  * 'logMsg' is true if a log message needs to be logged. false if the caller has
4356  *   already logged it (presumably in a more detailed fashion than done here)
4357  * 'bVerificationException' is true for a verification exception, false for a
4358  *   "call unauthorized by host" exception.
4359  */
4360
4361 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4362 {
4363     block->bbJumpKind = BBJ_THROW;
4364     block->bbFlags |= BBF_FAILED_VERIFICATION;
4365
4366     impCurStmtOffsSet(block->bbCodeOffs);
4367
4368 #ifdef DEBUG
4369     // we need this since BeginTreeList asserts otherwise
4370     impTreeList = impTreeLast = nullptr;
4371     block->bbFlags &= ~BBF_IMPORTED;
4372
4373     if (logMsg)
4374     {
4375         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4376                 block->bbCodeOffs, block->bbCodeOffsEnd));
4377         if (verbose)
4378         {
4379             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4380         }
4381     }
4382
4383     if (JitConfig.DebugBreakOnVerificationFailure())
4384     {
4385         DebugBreak();
4386     }
4387 #endif
4388
4389     impBeginTreeList();
4390
4391     // if the stack is non-empty evaluate all the side-effects
4392     if (verCurrentState.esStackDepth > 0)
4393     {
4394         impEvalSideEffects();
4395     }
4396     assert(verCurrentState.esStackDepth == 0);
4397
4398     GenTree* op1 =
4399         gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4400     // verCurrentState.esStackDepth = 0;
4401     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4402
4403     // The inliner is not able to handle methods that require a throw block, so
4404     // make sure this method never gets inlined.
4405     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4406 }
4407
4408 /*****************************************************************************
4409  *
4410  */
4411 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4412
4413 {
4414     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4415     // slightly different mechanism in which it calls the JIT to perform IL verification:
4416     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4417     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4418     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4419     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
4420 // up the exception; instead it embeds a throw inside the offending basic block and lets the
4421 // failure occur at runtime when the jitted method executes.
4422     //
4423     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4424     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
4425     // just try to find out whether to fail this method before even actually jitting it.  So, in case
4426     // we detect these two conditions, instead of generating a throw statement inside the offending
4427 // basic block, we immediately fail to JIT and notify the VM so that the IsVerifiable() predicate
4428 // returns false, making RyuJIT behave the same way JIT64 does.
4429     //
4430     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4431     // RyuJIT for the time being until we completely replace JIT64.
4432 // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4433
4434     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4435     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
4436     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4437     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4438     // be turned off during importation).
4439     CLANG_FORMAT_COMMENT_ANCHOR;
4440
4441 #ifdef _TARGET_64BIT_
4442
4443 #ifdef DEBUG
4444     bool canSkipVerificationResult =
4445         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4446     assert(tiVerificationNeeded || canSkipVerificationResult);
4447 #endif // DEBUG
4448
4449     // Add the non verifiable flag to the compiler
4450     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4451     {
4452         tiIsVerifiableCode = FALSE;
4453     }
4454 #endif //_TARGET_64BIT_
4455     verResetCurrentState(block, &verCurrentState);
4456     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4457
4458 #ifdef DEBUG
4459     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4460 #endif                   // DEBUG
4461 }
4462
4463 /******************************************************************************/
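// Build a typeInfo for the given CorInfoType, using 'clsHnd' for any additional class information.
// Returns the error typeInfo() for types that are not supported here (e.g. pointers and void).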
4464 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4465 {
4466     assert(ciType < CORINFO_TYPE_COUNT);
4467
4468     typeInfo tiResult;
4469     switch (ciType)
4470     {
4471         case CORINFO_TYPE_STRING:
4472         case CORINFO_TYPE_CLASS:
4473             tiResult = verMakeTypeInfo(clsHnd);
4474             if (!tiResult.IsType(TI_REF))
4475             { // type must be consistent with element type
4476                 return typeInfo();
4477             }
4478             break;
4479
4480 #ifdef _TARGET_64BIT_
4481         case CORINFO_TYPE_NATIVEINT:
4482         case CORINFO_TYPE_NATIVEUINT:
4483             if (clsHnd)
4484             {
4485                 // If we have more precise information, use it
4486                 return verMakeTypeInfo(clsHnd);
4487             }
4488             else
4489             {
4490                 return typeInfo::nativeInt();
4491             }
4492             break;
4493 #endif // _TARGET_64BIT_
4494
4495         case CORINFO_TYPE_VALUECLASS:
4496         case CORINFO_TYPE_REFANY:
4497             tiResult = verMakeTypeInfo(clsHnd);
4498             // type must be consistent with element type;
4499             if (!tiResult.IsValueClass())
4500             {
4501                 return typeInfo();
4502             }
4503             break;
4504         case CORINFO_TYPE_VAR:
4505             return verMakeTypeInfo(clsHnd);
4506
4507         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4508         case CORINFO_TYPE_VOID:
4509             return typeInfo();
4510             break;
4511
4512         case CORINFO_TYPE_BYREF:
4513         {
4514             CORINFO_CLASS_HANDLE childClassHandle;
4515             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4516             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4517         }
4518         break;
4519
4520         default:
4521             if (clsHnd)
4522             { // If we have more precise information, use it
4523                 return typeInfo(TI_STRUCT, clsHnd);
4524             }
4525             else
4526             {
4527                 return typeInfo(JITtype2tiType(ciType));
4528             }
4529     }
4530     return tiResult;
4531 }
4532
4533 /******************************************************************************/
4534
4535 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4536 {
4537     if (clsHnd == nullptr)
4538     {
4539         return typeInfo();
4540     }
4541
4542     // Byrefs should only occur in method and local signatures, which are accessed
4543     // using ICorClassInfo and ICorClassInfo.getChildType.
4544     // So findClass() and getClassAttribs() should not be called for byrefs
4545
4546     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4547     {
4548         assert(!"Did findClass() return a Byref?");
4549         return typeInfo();
4550     }
4551
4552     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4553
4554     if (attribs & CORINFO_FLG_VALUECLASS)
4555     {
4556         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4557
4558         // Meta-data validation should ensure that CORINFO_TYPE_BYREF should
4559         // not occur here, so we may want to change this to an assert instead.
4560         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4561         {
4562             return typeInfo();
4563         }
4564
4565 #ifdef _TARGET_64BIT_
4566         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4567         {
4568             return typeInfo::nativeInt();
4569         }
4570 #endif // _TARGET_64BIT_
4571
4572         if (t != CORINFO_TYPE_UNDEF)
4573         {
4574             return (typeInfo(JITtype2tiType(t)));
4575         }
4576         else if (bashStructToRef)
4577         {
4578             return (typeInfo(TI_REF, clsHnd));
4579         }
4580         else
4581         {
4582             return (typeInfo(TI_STRUCT, clsHnd));
4583         }
4584     }
4585     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4586     {
4587         // See comment in _typeInfo.h for why we do it this way.
4588         return (typeInfo(TI_REF, clsHnd, true));
4589     }
4590     else
4591     {
4592         return (typeInfo(TI_REF, clsHnd));
4593     }
4594 }
4595
4596 /******************************************************************************/
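// Returns TRUE if 'ti' is a single dimensional array type (a null object reference also counts).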
4597 BOOL Compiler::verIsSDArray(typeInfo ti)
4598 {
4599     if (ti.IsNullObjRef())
4600     { // nulls are SD arrays
4601         return TRUE;
4602     }
4603
4604     if (!ti.IsType(TI_REF))
4605     {
4606         return FALSE;
4607     }
4608
4609     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4610     {
4611         return FALSE;
4612     }
4613     return TRUE;
4614 }
4615
4616 /******************************************************************************/
4617 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4618 /* Returns an error type if anything goes wrong */
4619
4620 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4621 {
4622     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4623
4624     if (!verIsSDArray(arrayObjectType))
4625     {
4626         return typeInfo();
4627     }
4628
4629     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4630     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4631
4632     return verMakeTypeInfo(ciType, childClassHandle);
4633 }
4634
4635 /*****************************************************************************
4636  */
4637 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4638 {
4639     CORINFO_CLASS_HANDLE classHandle;
4640     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4641
4642     var_types type = JITtype2varType(ciType);
4643     if (varTypeIsGC(type))
4644     {
4645         // For efficiency, getArgType only returns something in classHandle for
4646         // value types.  For other types that have additional type info, you
4647         // have to call back explicitly
4648         classHandle = info.compCompHnd->getArgClass(sig, args);
4649         if (!classHandle)
4650         {
4651             NO_WAY("Could not figure out Class specified in argument or local signature");
4652         }
4653     }
4654
4655     return verMakeTypeInfo(ciType, classHandle);
4656 }
4657
4658 /*****************************************************************************/
4659
4660 // This does the expensive check to figure out whether the method
4661 // needs to be verified. It is called only when we fail verification,
4662 // just before throwing the verification exception.
4663
4664 BOOL Compiler::verNeedsVerification()
4665 {
4666     // If we have previously determined that verification is NOT needed
4667     // (for example in Compiler::compCompile), that means verification is really not needed.
4668     // Return the same decision we made before.
4669     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4670
4671     if (!tiVerificationNeeded)
4672     {
4673         return tiVerificationNeeded;
4674     }
4675
4676     assert(tiVerificationNeeded);
4677
4678     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4679     // obtain the answer.
4680     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4681         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4682
4683     // canSkipVerification will return one of the following three values:
4684     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4685     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4686     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4687     //     but need to insert a callout to the VM to ask during runtime
4688     //     whether to skip verification or not.
4689
4690     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4691     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4692     {
4693         tiRuntimeCalloutNeeded = true;
4694     }
4695
4696     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4697     {
4698         // Dev10 706080 - Testers don't like the assert, so just silence it
4699         // by not using the macros that invoke debugAssert.
4700         badCode();
4701     }
4702
4703     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4704     // The following line means we will NOT do jit time verification if canSkipVerification
4705     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4706     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4707     return tiVerificationNeeded;
4708 }
4709
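// Returns TRUE if 'ti' is a byref, or a value class that contains stack pointers and therefore
// must be treated as byref-like.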
4710 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4711 {
4712     if (ti.IsByRef())
4713     {
4714         return TRUE;
4715     }
4716     if (!ti.IsType(TI_STRUCT))
4717     {
4718         return FALSE;
4719     }
4720     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4721 }
4722
4723 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4724 {
4725     if (ti.IsPermanentHomeByRef())
4726     {
4727         return TRUE;
4728     }
4729     else
4730     {
4731         return FALSE;
4732     }
4733 }
4734
4735 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4736 {
4737     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4738             || ti.IsUnboxedGenericTypeVar() ||
4739             (ti.IsType(TI_STRUCT) &&
4740              // exclude byreflike structs
4741              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4742 }
4743
4744 // Is it a boxed value type?
4745 bool Compiler::verIsBoxedValueType(typeInfo ti)
4746 {
4747     if (ti.GetType() == TI_REF)
4748     {
4749         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4750         return !!eeIsValueClass(clsHnd);
4751     }
4752     else
4753     {
4754         return false;
4755     }
4756 }
4757
4758 /*****************************************************************************
4759  *
4760  *  Check if a TailCall is legal.
4761  */
4762
4763 bool Compiler::verCheckTailCallConstraint(
4764     OPCODE                  opcode,
4765     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4766     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4767     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4768                                                        // return false to the caller.
4769                                                        // If false, it will throw.
4770     )
4771 {
4772     DWORD            mflags;
4773     CORINFO_SIG_INFO sig;
4774     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4775                                    // this counter is used to keep track of how many items have been
4776                                    // virtually popped
4777
4778     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4779     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4780     unsigned              methodClassFlgs = 0;
4781
4782     assert(impOpcodeIsCallOpcode(opcode));
4783
4784     if (compIsForInlining())
4785     {
4786         return false;
4787     }
4788
4789     // for calli, VerifyOrReturn that this is not a virtual method
4790     if (opcode == CEE_CALLI)
4791     {
4792         /* Get the call sig */
4793         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4794
4795         // We don't know the target method, so we have to infer the flags, or
4796         // assume the worst-case.
4797         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4798     }
4799     else
4800     {
4801         methodHnd = pResolvedToken->hMethod;
4802
4803         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4804
4805         // When verifying generic code we pair the method handle with its
4806         // owning class to get the exact method signature.
4807         methodClassHnd = pResolvedToken->hClass;
4808         assert(methodClassHnd);
4809
4810         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4811
4812         // opcode specific check
4813         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4814     }
4815
4816     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4817     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4818
4819     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4820     {
4821         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4822     }
4823
4824     // check compatibility of the arguments
4825     unsigned int argCount;
4826     argCount = sig.numArgs;
4827     CORINFO_ARG_LIST_HANDLE args;
4828     args = sig.args;
4829     while (argCount--)
4830     {
4831         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4832
4833         // check that the argument is not a byref for tailcalls
4834         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4835
4836         // For unsafe code, we might have parameters containing pointer to the stack location.
4837         // Disallow the tailcall for this kind.
4838         CORINFO_CLASS_HANDLE classHandle;
4839         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4840         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4841
4842         args = info.compCompHnd->getArgNext(args);
4843     }
4844
4845     // update popCount
4846     popCount += sig.numArgs;
4847
4848     // check for 'this' which is on non-static methods, not called via NEWOBJ
4849     if (!(mflags & CORINFO_FLG_STATIC))
4850     {
4851         // Always update the popCount.
4852         // This is crucial for the stack calculation to be correct.
4853         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4854         popCount++;
4855
4856         if (opcode == CEE_CALLI)
4857         {
4858             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4859             // on the stack.
4860             if (tiThis.IsValueClass())
4861             {
4862                 tiThis.MakeByRef();
4863             }
4864             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4865         }
4866         else
4867         {
4868             // Check type compatibility of the this argument
4869             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4870             if (tiDeclaredThis.IsValueClass())
4871             {
4872                 tiDeclaredThis.MakeByRef();
4873             }
4874
4875             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4876         }
4877     }
4878
4879     // Tail calls on constrained calls should be illegal too:
4880     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4881     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4882
4883     // Get the exact view of the signature for an array method
4884     if (sig.retType != CORINFO_TYPE_VOID)
4885     {
4886         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4887         {
4888             assert(opcode != CEE_CALLI);
4889             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4890         }
4891     }
4892
4893     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4894     typeInfo tiCallerRetType =
4895         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4896
4897     // void return type gets morphed into the error type, so we have to treat them specially here
4898     if (sig.retType == CORINFO_TYPE_VOID)
4899     {
4900         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4901                                   speculative);
4902     }
4903     else
4904     {
4905         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4906                                                    NormaliseForStack(tiCallerRetType), true),
4907                                   "tailcall return mismatch", speculative);
4908     }
4909
4910     // for tailcall, stack must be empty
4911     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4912
4913     return true; // Yes, tailcall is legal
4914 }
4915
4916 /*****************************************************************************
4917  *
4918  *  Checks the IL verification rules for the call
4919  */
4920
4921 void Compiler::verVerifyCall(OPCODE                  opcode,
4922                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4923                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4924                              bool                    tailCall,
4925                              bool                    readonlyCall,
4926                              const BYTE*             delegateCreateStart,
4927                              const BYTE*             codeAddr,
4928                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4929 {
4930     DWORD             mflags;
4931     CORINFO_SIG_INFO* sig      = nullptr;
4932     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4933                                     // this counter is used to keep track of how many items have been
4934                                     // virtually popped
4935
4936     // for calli, VerifyOrReturn that this is not a virtual method
4937     if (opcode == CEE_CALLI)
4938     {
4939         Verify(false, "Calli not verifiable");
4940         return;
4941     }
4942
4943     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4944     mflags = callInfo->verMethodFlags;
4945
4946     sig = &callInfo->verSig;
4947
4948     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4949     {
4950         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4951     }
4952
4953     // opcode specific check
4954     unsigned methodClassFlgs = callInfo->classFlags;
4955     switch (opcode)
4956     {
4957         case CEE_CALLVIRT:
4958             // cannot do callvirt on valuetypes
4959             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4960             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4961             break;
4962
4963         case CEE_NEWOBJ:
4964         {
4965             assert(!tailCall); // Importer should not allow this
4966             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4967                            "newobj must be on instance");
4968
4969             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4970             {
4971                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4972                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4973                 typeInfo tiDeclaredFtn =
4974                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4975                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4976
4977                 assert(popCount == 0);
4978                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4979                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4980
4981                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4982                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4983                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4984                                "delegate object type mismatch");
4985
4986                 CORINFO_CLASS_HANDLE objTypeHandle =
4987                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4988
4989                 // the method signature must be compatible with the delegate's invoke method
4990
4991                 // check that for virtual functions, the type of the object used to get the
4992                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4993                 // since this is a bit of work to determine in general, we pattern match stylized
4994                 // code sequences
4995
4996                 // the delegate creation code check, which used to be done later, is now done here
4997                 // so we can read delegateMethodRef directly from
4998                 // the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
4999                 // we then use it in our call to isCompatibleDelegate().
5000
5001                 mdMemberRef delegateMethodRef = mdMemberRefNil;
5002                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
5003                                "must create delegates with certain IL");
5004
5005                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
5006                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
5007                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
5008                 delegateResolvedToken.token        = delegateMethodRef;
5009                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
5010                 info.compCompHnd->resolveToken(&delegateResolvedToken);
5011
5012                 CORINFO_CALL_INFO delegateCallInfo;
5013                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
5014                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
5015
5016                 BOOL isOpenDelegate = FALSE;
5017                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
5018                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
5019                                                                       &isOpenDelegate),
5020                                "function incompatible with delegate");
5021
5022                 // check the constraints on the target method
5023                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
5024                                "delegate target has unsatisfied class constraints");
5025                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
5026                                                                             tiActualFtn.GetMethod()),
5027                                "delegate target has unsatisfied method constraints");
5028
5029                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
5030                 // for additional verification rules for delegates
5031                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
5032                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
5033                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5034                 {
5035
5036                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
5037 #ifdef DEBUG
5038                         && StrictCheckForNonVirtualCallToVirtualMethod()
5039 #endif
5040                             )
5041                     {
5042                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5043                         {
5044                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
5045                                                verIsBoxedValueType(tiActualObj),
5046                                            "The 'this' parameter to the call must be either the calling method's "
5047                                            "'this' parameter or "
5048                                            "a boxed value type.");
5049                         }
5050                     }
5051                 }
5052
5053                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
5054                 {
5055                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
5056
5057                     Verify(targetIsStatic || !isOpenDelegate,
5058                            "Unverifiable creation of an open instance delegate for a protected member.");
5059
5060                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
5061                                                                 ? info.compClassHnd
5062                                                                 : tiActualObj.GetClassHandleForObjRef();
5063
5064                     // In the case of protected methods, it is a requirement that the 'this'
5065                     // pointer be a subclass of the current context.  Perform this check.
5066                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5067                            "Accessing protected method through wrong type.");
5068                 }
5069                 goto DONE_ARGS;
5070             }
5071         }
5072         // fall thru to default checks
5073         default:
5074             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
5075     }
5076     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
5077                    "can only newobj a delegate constructor");
5078
5079     // check compatibility of the arguments
5080     unsigned int argCount;
5081     argCount = sig->numArgs;
5082     CORINFO_ARG_LIST_HANDLE args;
5083     args = sig->args;
5084     while (argCount--)
5085     {
5086         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
5087
5088         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
5089         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
5090
5091         args = info.compCompHnd->getArgNext(args);
5092     }
5093
5094 DONE_ARGS:
5095
5096     // update popCount
5097     popCount += sig->numArgs;
5098
5099     // check for 'this' which is on non-static methods, not called via NEWOBJ
5100     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
5101     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
5102     {
5103         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5104         popCount++;
5105
5106         // If it is null, we assume we can access it (since it will AV shortly)
5107         // If it is anything but a reference class, there is no hierarchy, so
5108         // again, we don't need the precise instance class to compute 'protected' access
5109         if (tiThis.IsType(TI_REF))
5110         {
5111             instanceClassHnd = tiThis.GetClassHandleForObjRef();
5112         }
5113
5114         // Check type compatibility of the this argument
5115         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
5116         if (tiDeclaredThis.IsValueClass())
5117         {
5118             tiDeclaredThis.MakeByRef();
5119         }
5120
5121         // If this is a call to the base class .ctor, set thisPtr Init for
5122         // this block.
5123         if (mflags & CORINFO_FLG_CONSTRUCTOR)
5124         {
5125             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
5126                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
5127             {
5128                 assert(verCurrentState.thisInitialized !=
5129                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
5130                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
5131                                "Call to base class constructor when 'this' is possibly initialized");
5132                 // Otherwise, 'this' is now initialized.
5133                 verCurrentState.thisInitialized = TIS_Init;
5134                 tiThis.SetInitialisedObjRef();
5135             }
5136             else
5137             {
5138                 // We allow direct calls to value type constructors
5139                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
5140                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
5141                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
5142                                "Bad call to a constructor");
5143             }
5144         }
5145
5146         if (pConstrainedResolvedToken != nullptr)
5147         {
5148             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
5149
5150             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
5151
5152             // We just dereference this and test for equality
5153             tiThis.DereferenceByRef();
5154             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
5155                            "this type mismatch with constrained type operand");
5156
5157             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
5158             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
5159         }
5160
5161         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
5162         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
5163         {
5164             tiDeclaredThis.SetIsReadonlyByRef();
5165         }
5166
5167         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
5168
5169         if (tiThis.IsByRef())
5170         {
5171             // Find the actual type where the method exists (as opposed to what is declared
5172             // in the metadata). This is to prevent passing a byref as the "this" argument
5173             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
5174
5175             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
5176             VerifyOrReturn(eeIsValueClass(actualClassHnd),
5177                            "Call to base type of valuetype (which is never a valuetype)");
5178         }
5179
5180         // Rules for non-virtual call to a non-final virtual method:
5181
5182         // Define:
5183         // The "this" pointer is considered to be "possibly written" if
5184         //   1. Its address have been taken (LDARGA 0) anywhere in the method.
5185         //   (or)
5186         //   2. It has been stored to (STARG.0) anywhere in the method.
5187
5188         // A non-virtual call to a non-final virtual method is only allowed if
5189         //   1. The this pointer passed to the callee is an instance of a boxed value type.
5190         //   (or)
5191         //   2. The this pointer passed to the callee is the current method's this pointer.
5192         //      (and) The current method's this pointer is not "possibly written".
5193
5194         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
5195         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
5196         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
5197         // harder and more error prone.
5198
5199         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
5200 #ifdef DEBUG
5201             && StrictCheckForNonVirtualCallToVirtualMethod()
5202 #endif
5203                 )
5204         {
5205             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5206             {
5207                 VerifyOrReturn(
5208                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
5209                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
5210                     "a boxed value type.");
5211             }
5212         }
5213     }
5214
5215     // check any constraints on the callee's class and type parameters
5216     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
5217                    "method has unsatisfied class constraints");
5218     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
5219                    "method has unsatisfied method constraints");
5220
5221     if (mflags & CORINFO_FLG_PROTECTED)
5222     {
5223         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5224                        "Can't access protected method");
5225     }
5226
5227     // Get the exact view of the signature for an array method
5228     if (sig->retType != CORINFO_TYPE_VOID)
5229     {
5230         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
5231     }
5232
5233     // "readonly." prefixed calls only allowed for the Address operation on arrays.
5234     // The methods supported by array types are under the control of the EE
5235     // so we can trust that only the Address operation returns a byref.
5236     if (readonlyCall)
5237     {
5238         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
5239         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
5240                        "unexpected use of readonly prefix");
5241     }
5242
5243     // Verify the tailcall
5244     if (tailCall)
5245     {
5246         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
5247     }
5248 }
5249
5250 /*****************************************************************************
5251  *  Checks that a delegate creation is done using the following pattern:
5252  *     dup
5253  *     ldvirtftn targetMemberRef
5254  *  OR
5255  *     ldftn targetMemberRef
5256  *
5257  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
5258  *  not in this basic block)
5259  *
5260  *  targetMemberRef is read from the code sequence.
5261  *  targetMemberRef is validated iff verificationNeeded.
5262  */
5263
5264 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
5265                                         const BYTE*  codeAddr,
5266                                         mdMemberRef& targetMemberRef)
5267 {
5268     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5269     {
5270         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
5271         return TRUE;
5272     }
5273     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
5274     {
5275         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
5276         return TRUE;
5277     }
5278
5279     return FALSE;
5280 }
5281
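// Verify an indirect store: 'tiTo' is the pointer being stored through, 'value' is the value being
// stored, and 'instrType' is the type implied by the ST*IND instruction. Returns the pointed-to type.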
5282 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
5283 {
5284     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
5285     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
5286     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
5287     if (!tiCompatibleWith(value, normPtrVal, true))
5288     {
5289         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
5290         compUnsafeCastUsed = true;
5291     }
5292     return ptrVal;
5293 }
5294
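// Verify an indirect load: 'ptr' must be a byref whose target type is consistent with 'instrType'.
// Returns the dereferenced type (the error typeInfo() when 'ptr' is not a byref).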
5295 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
5296 {
5297     assert(!instrType.IsStruct());
5298
5299     typeInfo ptrVal;
5300     if (ptr.IsByRef())
5301     {
5302         ptrVal = DereferenceByRef(ptr);
5303         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
5304         {
5305             Verify(false, "bad pointer");
5306             compUnsafeCastUsed = true;
5307         }
5308         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
5309         {
5310             Verify(false, "pointer not consistent with instr");
5311             compUnsafeCastUsed = true;
5312         }
5313     }
5314     else
5315     {
5316         Verify(false, "pointer not byref");
5317         compUnsafeCastUsed = true;
5318     }
5319
5320     return ptrVal;
5321 }
5322
5323 // Verify that the field is used properly.  'tiThis' is NULL for statics,
5324 // 'fieldFlags' is the fields attributes, and mutator is TRUE if it is a
5325 // ld*flda or a st*fld.
5326 // 'enclosingClass' is given if we are accessing a field in some specific type.
5327
5328 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
5329                               const CORINFO_FIELD_INFO& fieldInfo,
5330                               const typeInfo*           tiThis,
5331                               BOOL                      mutator,
5332                               BOOL                      allowPlainStructAsThis)
5333 {
5334     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5335     unsigned             fieldFlags     = fieldInfo.fieldFlags;
5336     CORINFO_CLASS_HANDLE instanceClass =
5337         info.compClassHnd; // for statics, we imagine the instance is the current class.
5338
5339     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5340     if (mutator)
5341     {
5342         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
5343         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5344         {
5345             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5346                        info.compIsStatic == isStaticField,
5347                    "bad use of initonly field (set or address taken)");
5348         }
5349     }
5350
5351     if (tiThis == nullptr)
5352     {
5353         Verify(isStaticField, "used static opcode with non-static field");
5354     }
5355     else
5356     {
5357         typeInfo tThis = *tiThis;
5358
5359         if (allowPlainStructAsThis && tThis.IsValueClass())
5360         {
5361             tThis.MakeByRef();
5362         }
5363
5364         // If it is null, we assume we can access it (since it will AV shortly)
5365         // If it is anything but a reference class, there is no hierarchy, so
5366         // again, we don't need the precise instance class to compute 'protected' access
5367         if (tiThis->IsType(TI_REF))
5368         {
5369             instanceClass = tiThis->GetClassHandleForObjRef();
5370         }
5371
5372         // Note that even if the field is static, we require that the this pointer
5373         // satisfy the same constraints as a non-static field.  This happens to
5374         // be simpler and seems reasonable
5375         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5376         if (tiDeclaredThis.IsValueClass())
5377         {
5378             tiDeclaredThis.MakeByRef();
5379
5380             // we allow read-only tThis, on any field access (even stores!), because if the
5381             // class implementor wants to prohibit stores he should make the field private.
5382             // we do this by setting the read-only bit on the type we compare tThis to.
5383             tiDeclaredThis.SetIsReadonlyByRef();
5384         }
5385         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5386         {
5387             // Any field access is legal on "uninitialized" this pointers.
5388             // The easiest way to implement this is to simply set the
5389             // initialized bit for the duration of the type check on the
5390             // field access only.  It does not change the state of the "this"
5391             // for the function as a whole. Note that the "tThis" is a copy
5392             // of the original "this" type (*tiThis) passed in.
5393             tThis.SetInitialisedObjRef();
5394         }
5395
5396         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5397     }
5398
5399     // Presently the JIT does not check that we don't store or take the address of init-only fields
5400     // since we cannot guarantee their immutability and it is not a security issue.
5401
5402     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5403     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5404                    "field has unsatisfied class constraints");
5405     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5406     {
5407         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5408                "Accessing protected method through wrong type.");
5409     }
5410 }
5411
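// Verify that the two operands of a compare or conditional branch opcode have compatible types.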
5412 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5413 {
5414     if (tiOp1.IsNumberType())
5415     {
5416 #ifdef _TARGET_64BIT_
5417         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5418 #else  // !_TARGET_64BIT_
5419         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5420         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5421         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5422         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5423 #endif // !_TARGET_64BIT_
5424     }
5425     else if (tiOp1.IsObjRef())
5426     {
5427         switch (opcode)
5428         {
5429             case CEE_BEQ_S:
5430             case CEE_BEQ:
5431             case CEE_BNE_UN_S:
5432             case CEE_BNE_UN:
5433             case CEE_CEQ:
5434             case CEE_CGT_UN:
5435                 break;
5436             default:
5437                 Verify(FALSE, "Cond not allowed on object types");
5438         }
5439         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5440     }
5441     else if (tiOp1.IsByRef())
5442     {
5443         Verify(tiOp2.IsByRef(), "Cond type mismatch");
5444     }
5445     else
5446     {
5447         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5448     }
5449 }
5450
5451 void Compiler::verVerifyThisPtrInitialised()
5452 {
5453     if (verTrackObjCtorInitState)
5454     {
5455         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5456     }
5457 }
5458
5459 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5460 {
5461     // Either target == context, in which case we are calling an alternate .ctor,
5462     // or target is the immediate parent of context.
5463
5464     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
5465 }
5466
5467 GenTree* Compiler::impImportLdvirtftn(GenTree*                thisPtr,
5468                                       CORINFO_RESOLVED_TOKEN* pResolvedToken,
5469                                       CORINFO_CALL_INFO*      pCallInfo)
5470 {
5471     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5472     {
5473         NO_WAY("Virtual call to a function added via EnC is not supported");
5474     }
5475
5476     // CoreRT generic virtual method
5477     if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5478     {
5479         GenTree* runtimeMethodHandle = nullptr;
5480         if (pCallInfo->exactContextNeedsRuntimeLookup)
5481         {
5482             runtimeMethodHandle =
5483                 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5484         }
5485         else
5486         {
5487             runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5488         }
5489         return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5490                                    gtNewArgList(thisPtr, runtimeMethodHandle));
5491     }
5492
5493 #ifdef FEATURE_READYTORUN_COMPILER
5494     if (opts.IsReadyToRun())
5495     {
5496         if (!pCallInfo->exactContextNeedsRuntimeLookup)
5497         {
5498             GenTreeCall* call =
5499                 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5500
5501             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5502
5503             return call;
5504         }
5505
5506         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5507         if (IsTargetAbi(CORINFO_CORERT_ABI))
5508         {
5509             GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5510
5511             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5512                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5513         }
5514     }
5515 #endif
5516
5517     // Get the exact descriptor for the static callsite
5518     GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5519     if (exactTypeDesc == nullptr)
5520     { // compDonotInline()
5521         return nullptr;
5522     }
5523
5524     GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
5525     if (exactMethodDesc == nullptr)
5526     { // compDonotInline()
5527         return nullptr;
5528     }
5529
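         // Build the helper argument list by prepending, so the final order is
         // (thisPtr, exactTypeDesc, exactMethodDesc) - the order in which they are
         // passed to the CORINFO_HELP_VIRTUAL_FUNC_PTR helper below.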
5530     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5531
5532     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5533
5534     helpArgs = gtNewListNode(thisPtr, helpArgs);
5535
5536     // Call helper function.  This gets the target address of the final destination callsite.
5537
5538     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5539 }
5540
5541 //------------------------------------------------------------------------
5542 // impImportAndPushBox: build and import a value-type box
5543 //
5544 // Arguments:
5545 //   pResolvedToken - resolved token from the box operation
5546 //
5547 // Return Value:
5548 //   None.
5549 //
5550 // Side Effects:
5551 //   The value to be boxed is popped from the stack, and a tree for
5552 //   the boxed value is pushed. This method may create upstream
5553 //   statements, spill side effecting trees, and create new temps.
5554 //
5555 //   If importing an inlinee, we may also discover the inline must
5556 //   fail. If so there is no new value pushed on the stack. Callers
5557 //   should use CompDoNotInline after calling this method to see if
5558 //   ongoing importation should be aborted.
5559 //
5560 // Notes:
5561 //   Boxing of ref classes results in the same value as the value on
5562 //   the top of the stack, so is handled inline in impImportBlockCode
5563 //   for the CEE_BOX case. Only value or primitive type boxes make it
5564 //   here.
5565 //
5566 //   Boxing for nullable types is done via a helper call; boxing
5567 //   of other value types is expanded inline or handled via helper
5568 //   call, depending on the jit's codegen mode.
5569 //
5570 //   When the jit is operating in size and time constrained modes,
5571 //   using a helper call here can save jit time and code size. But it
5572 //   also may inhibit cleanup optimizations that could have also had a
5573 //   even greater benefit effect on code size and jit time. An optimal
5574 //   strategy may need to peek ahead and see if it is easy to tell how
5575 //   the box is being used. For now, we defer.
5576
5577 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5578 {
5579     // Spill any special side effects
5580     impSpillSpecialSideEff();
5581
5582     // Get the expression to box from the stack.
5583     GenTree*             op1       = nullptr;
5584     GenTree*             op2       = nullptr;
5585     StackEntry           se        = impPopStack();
5586     CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
5587     GenTree*             exprToBox = se.val;
5588
5589     // Look at what helper we should use.
5590     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5591
5592     // Determine what expansion to prefer.
5593     //
5594     // In size/time/debuggable constrained modes, the helper call
5595     // expansion for box is generally smaller and is preferred, unless
5596     // the value to box is a struct that comes from a call. In that
5597     // case the call can construct its return value directly into the
5598     // box payload, saving possibly some up-front zeroing.
5599     //
5600     // Currently primitive type boxes always get inline expanded. We may
5601     // want to do the same for small structs if they don't come from
5602     // calls and don't have GC pointers, since explicitly copying such
5603     // structs is cheap.
5604     JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
5605     bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
5606     bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
5607     bool expandInline    = canExpandInline && !optForSize;
5608
5609     if (expandInline)
5610     {
5611         JITDUMP(" inline allocate/copy sequence\n");
5612
5613         // we are doing 'normal' boxing.  This means that we can inline the box operation
5614         // Box(expr) gets morphed into
5615         // temp = new(clsHnd)
5616         // cpobj(temp+4, expr, clsHnd)
5617         // push temp
5618         // The code paths differ slightly below for structs and primitives because
5619         // "cpobj" differs in these cases.  In one case you get
5620         //    impAssignStructPtr(temp+4, expr, clsHnd)
5621         // and the other you get
5622         //    *(temp+4) = expr
5623
5624         if (opts.MinOpts() || opts.compDbgCode)
5625         {
5626             // For minopts/debug code, try and minimize the total number
5627             // of box temps by reusing an existing temp when possible.
5628             if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5629             {
5630                 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
5631             }
5632         }
5633         else
5634         {
5635             // When optimizing, use a new temp for each box operation
5636             // since we then know the exact class of the box temp.
5637             impBoxTemp                  = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
5638             lvaTable[impBoxTemp].lvType = TYP_REF;
5639             const bool isExact          = true;
5640             lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
5641         }
5642
5643         // The box temp needs to stay in use until this box expression is appended to
5644         // some other node.  We approximate this by keeping it alive until
5645         // the opcode stack becomes empty.
5646         impBoxTempInUse = true;
5647
5648 #ifdef FEATURE_READYTORUN_COMPILER
5649         bool usingReadyToRunHelper = false;
5650
5651         if (opts.IsReadyToRun())
5652         {
5653             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5654             usingReadyToRunHelper = (op1 != nullptr);
5655         }
5656
5657         if (!usingReadyToRunHelper)
5658 #endif
5659         {
5660             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5661             // and the newfast call with a single call to a dynamic R2R cell that will:
5662             //      1) Load the context
5663             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5664             //      3) Allocate and return the new object for boxing
5665             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5666
5667             // Ensure that the value class is restored
5668             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5669             if (op2 == nullptr)
5670             {
5671                 // We must be backing out of an inline.
5672                 assert(compDonotInline());
5673                 return;
5674             }
5675
5676             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
5677                                     pResolvedToken->hClass, TYP_REF, op2);
5678         }
5679
5680         /* Remember that this basic block contains 'new' of an object, and so does this method */
5681         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5682         optMethodFlags |= OMF_HAS_NEWOBJ;
5683
5684         GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
5685
5686         GenTree* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5687
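             // Compute the address of the box payload: the boxed data lives just past
             // the method table pointer, at [boxTemp + TARGET_POINTER_SIZE].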
5688         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5689         op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
5690         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5691
5692         if (varTypeIsStruct(exprToBox))
5693         {
5694             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5695             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5696         }
5697         else
5698         {
5699             var_types lclTyp = exprToBox->TypeGet();
5700             if (lclTyp == TYP_BYREF)
5701             {
5702                 lclTyp = TYP_I_IMPL;
5703             }
5704             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5705             if (impIsPrimitive(jitType))
5706             {
5707                 lclTyp = JITtype2varType(jitType);
5708             }
5709             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5710                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5711             var_types srcTyp = exprToBox->TypeGet();
5712             var_types dstTyp = lclTyp;
5713
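                 // Insert a cast if the type of the value on the stack does not match the
                 // payload type (the assert above guarantees both are integral or both are
                 // floating point).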
5714             if (srcTyp != dstTyp)
5715             {
5716                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5717                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5718                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5719             }
5720             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5721         }
5722
5723         // Spill eval stack to flush out any pending side effects.
5724         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
5725
5726         // Set up this copy as a second assignment.
5727         GenTree* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5728
5729         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5730
5731         // Record that this is a "box" node and keep track of the matching parts.
5732         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
5733
5734         // If it is a value class, mark the "box" node.  We can use this information
5735         // to optimise several cases:
5736         //    "box(x) == null" --> false
5737         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5738         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5739
5740         op1->gtFlags |= GTF_BOX_VALUE;
5741         assert(op1->IsBoxedValue());
5742         assert(asg->gtOper == GT_ASG);
5743     }
5744     else
5745     {
5746         // Don't optimize, just call the helper and be done with it.
5747         JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
5748         assert(operCls != nullptr);
5749
5750         // Ensure that the value class is restored
5751         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5752         if (op2 == nullptr)
5753         {
5754             // We must be backing out of an inline.
5755             assert(compDonotInline());
5756             return;
5757         }
5758
5759         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5760         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, args);
5761     }
5762
5763     /* Push the result back on the stack, */
5764     /* even if clsHnd is a value class we want the TI_REF */
5765     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5766     impPushOnStack(op1, tiRetVal);
5767 }
5768
5769 //------------------------------------------------------------------------
5770 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5771 //
5772 // Arguments:
5773 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5774 //                     by a call to CEEInfo::resolveToken().
5775 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5776 //                by a call to CEEInfo::getCallInfo().
5777 //
5778 // Assumptions:
5779 //    The multi-dimensional array constructor arguments (array dimensions) are
5780 //    pushed on the IL stack on entry to this method.
5781 //
5782 // Notes:
5783 //    Multi-dimensional array constructors are imported as calls to a JIT
5784 //    helper, not as regular calls.
5785
5786 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5787 {
5788     GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
5789     if (classHandle == nullptr)
5790     { // compDonotInline()
5791         return;
5792     }
5793
5794     assert(pCallInfo->sig.numArgs);
5795
5796     GenTree*        node;
5797     GenTreeArgList* args;
5798
5799     //
5800     // There are two different JIT helpers that can be used to allocate
5801     // multi-dimensional arrays:
5802     //
5803     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5804     //      This variant is deprecated. It should be eventually removed.
5805     //
5806     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5807     //      pointer to block of int32s. This variant is more portable.
5808     //
5809     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5810     //      unconditionally would require a ReadyToRun version bump.
5811     //
5812     CLANG_FORMAT_COMMENT_ANCHOR;
5813
5814     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5815     {
5816
5817         // Reuse the temp used to pass the array dimensions to avoid bloating
5818         // the stack frame in case there are multiple calls to multi-dim array
5819         // constructors within a single method.
5820         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5821         {
5822             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5823             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5824             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5825         }
5826
5827         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5828         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5829         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5830             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5831
5832         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5833         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5834         // to one allocation at a time.
5835         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5836
5837         //
5838         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5839         //  - Array class handle
5840         //  - Number of dimension arguments
5841         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
5842         //
5843
5844         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5845         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5846
5847         // Pop the dimension arguments from the stack one at a time and store them
5848         // into the lvaNewObjArrayArgs temp.
5849         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5850         {
5851             GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5852
5853             GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5854             dest          = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5855             dest          = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5856                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5857             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5858
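                 // Chain each dimension store onto the address expression with GT_COMMA so
                 // that all of the stores are evaluated before the address is passed to the
                 // helper.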
5859             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5860         }
5861
5862         args = gtNewArgList(node);
5863
5864         // pass number of arguments to the helper
5865         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5866
5867         args = gtNewListNode(classHandle, args);
5868
5869         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
5870     }
5871     else
5872     {
5873         //
5874         // The varargs helper needs the type and method handles as last
5875         //      and last-1 param (this is a cdecl call, so args will be
5876         // pushed in reverse order on the CPU stack)
5877         //
5878
5879         args = gtNewArgList(classHandle);
5880
5881         // pass number of arguments to the helper
5882         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5883
5884         unsigned argFlags = 0;
5885         args              = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
5886
5887         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
5888
5889         // varargs, so we pop the arguments
5890         node->gtFlags |= GTF_CALL_POP_ARGS;
5891
5892 #ifdef DEBUG
5893         // At the present time we don't track Caller pop arguments
5894         // that have GC references in them
5895         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5896         {
5897             assert(temp->Current()->gtType != TYP_REF);
5898         }
5899 #endif
5900     }
5901
5902     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5903     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5904
5905     // Remember that this basic block contains 'new' of a md array
5906     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5907
5908     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5909 }
5910
5911 GenTree* Compiler::impTransformThis(GenTree*                thisPtr,
5912                                     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5913                                     CORINFO_THIS_TRANSFORM  transform)
5914 {
5915     switch (transform)
5916     {
5917         case CORINFO_DEREF_THIS:
5918         {
5919             GenTree* obj = thisPtr;
5920
5921             // This does a LDIND on the obj, which should be a byref pointing to a ref.
5922             impBashVarAddrsToI(obj);
5923             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5924             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5925
5926             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5927             // ldind could point anywhere, for example a boxed class static int.
5928             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5929
5930             return obj;
5931         }
5932
5933         case CORINFO_BOX_THIS:
5934         {
5935             // Constraint calls where there might be no
5936             // unboxed entry point require us to implement the call via helper.
5937             // These only occur when a possible target of the call
5938             // may have inherited an implementation of an interface
5939             // method from System.Object or System.ValueType.  The EE does not provide us with
5940             // "unboxed" versions of these methods.
5941
5942             GenTree* obj = thisPtr;
5943
5944             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5945             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5946             obj->gtFlags |= GTF_EXCEPT;
5947
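                 // If the constrained type is a primitive, the GT_OBJ built above can be
                 // simplified to a plain indirection of that primitive type before boxing.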
5948             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5949             var_types   objType = JITtype2varType(jitTyp);
5950             if (impIsPrimitive(jitTyp))
5951             {
5952                 if (obj->OperIsBlk())
5953                 {
5954                     obj->ChangeOperUnchecked(GT_IND);
5955
5956                     // Obj could point anywhere, for example a boxed class static int.
5957                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5958                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5959                 }
5960
5961                 obj->gtType = JITtype2varType(jitTyp);
5962                 assert(varTypeIsArithmetic(obj->gtType));
5963             }
5964
5965             // This pushes on the dereferenced byref
5966             // This is then used immediately to box.
5967             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5968
5969             // This pops off the byref-to-a-value-type remaining on the stack and
5970             // replaces it with a boxed object.
5971             // This is then used as the object to the virtual call immediately below.
5972             impImportAndPushBox(pConstrainedResolvedToken);
5973             if (compDonotInline())
5974             {
5975                 return nullptr;
5976             }
5977
5978             obj = impPopStack().val;
5979             return obj;
5980         }
5981         case CORINFO_NO_THIS_TRANSFORM:
5982         default:
5983             return thisPtr;
5984     }
5985 }
5986
5987 //------------------------------------------------------------------------
5988 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5989 //
5990 // Return Value:
5991 //    true if PInvoke inlining should be enabled in current method, false otherwise
5992 //
5993 // Notes:
5994 //    Checks a number of ambient conditions where we could pinvoke but choose not to
5995
5996 bool Compiler::impCanPInvokeInline()
5997 {
5998     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5999            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
6000         ;
6001 }
6002
6003 //------------------------------------------------------------------------
6004 // impCanPInvokeInlineCallSite: basic legality checks using information
6005 // from a call to see if the call qualifies as an inline pinvoke.
6006 //
6007 // Arguments:
6008 //    block      - block containing the call, or for inlinees, block
6009 //                 containing the call being inlined
6010 //
6011 // Return Value:
6012 //    true if this call can legally qualify as an inline pinvoke, false otherwise
6013 //
6014 // Notes:
6015 //    For runtimes that support exception handling interop there are
6016 //    restrictions on using inline pinvoke in handler regions.
6017 //
6018 //    * We have to disable pinvoke inlining inside of filters because
6019 //    in case the main execution (i.e. in the try block) is inside
6020 //    unmanaged code, we cannot reuse the inlined stub (we still need
6021 //    the original state until we are in the catch handler)
6022 //
6023 //    * We disable pinvoke inlining inside handlers since the GSCookie
6024 //    is in the inlined Frame (see
6025 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
6026 //    this would not protect framelets/return-address of handlers.
6027 //
6028 //    These restrictions are currently also in place for CoreCLR but
6029 //    can be relaxed when coreclr/#8459 is addressed.
6030
6031 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
6032 {
6033     if (block->hasHndIndex())
6034     {
6035         return false;
6036     }
6037
6038     // The remaining limitations do not apply to CoreRT
6039     if (IsTargetAbi(CORINFO_CORERT_ABI))
6040     {
6041         return true;
6042     }
6043
6044 #ifdef _TARGET_AMD64_
6045     // On x64, we disable pinvoke inlining inside of try regions.
6046     // Here is the comment from JIT64 explaining why:
6047     //
6048     //   [VSWhidbey: 611015] - because the jitted code links in the
6049     //   Frame (instead of the stub) we rely on the Frame not being
6050     //   'active' until inside the stub.  This normally happens by the
6051     //   stub setting the return address pointer in the Frame object
6052     //   inside the stub.  On a normal return, the return address
6053     //   pointer is zeroed out so the Frame can be safely re-used, but
6054     //   if an exception occurs, nobody zeros out the return address
6055     //   pointer.  Thus if we re-used the Frame object, it would go
6056     //   'active' as soon as we link it into the Frame chain.
6057     //
6058     //   Technically we only need to disable PInvoke inlining if we're
6059     //   in a handler or if we're in a try body with a catch or
6060     //   filter/except where other non-handler code in this method
6061     //   might run and try to re-use the dirty Frame object.
6062     //
6063     //   A desktop test case where this seems to matter is
6064     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
6065     if (block->hasTryIndex())
6066     {
6067         return false;
6068     }
6069 #endif // _TARGET_AMD64_
6070
6071     return true;
6072 }
6073
6074 //------------------------------------------------------------------------
6075 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
6076 // whether it can be expressed as an inline pinvoke.
6077 //
6078 // Arguments:
6079 //    call       - tree for the call
6080 //    methHnd    - handle for the method being called (may be null)
6081 //    sig        - signature of the method being called
6082 //    mflags     - method flags for the method being called
6083 //    block      - block containing the call, or for inlinees, block
6084 //                 containing the call being inlined
6085 //
6086 // Notes:
6087 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
6088 //
6089 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
6090 //   call passes a combination of legality and profitability checks.
6091 //
6092 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
6093
6094 void Compiler::impCheckForPInvokeCall(
6095     GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
6096 {
6097     CorInfoUnmanagedCallConv unmanagedCallConv;
6098
6099     // If VM flagged it as Pinvoke, flag the call node accordingly
6100     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
6101     {
6102         call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
6103     }
6104
6105     if (methHnd)
6106     {
6107         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
6108         {
6109             return;
6110         }
6111
6112         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
6113     }
6114     else
6115     {
6116         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
6117         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
6118         {
6119             // Used by the IL Stubs.
6120             callConv = CORINFO_CALLCONV_C;
6121         }
6122         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
6123         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
6124         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
6125         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
6126
6127         assert(!call->gtCallCookie);
6128     }
6129
6130     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
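         // Inline pinvoke expansion only handles the C, STDCALL and THISCALL unmanaged
         // calling conventions; for anything else, leave the call as a regular
         // (non-inlined) pinvoke.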
6131         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
6132     {
6133         return;
6134     }
6135     optNativeCallCount++;
6136
6137     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
6138     {
6139         // PInvoke CALLI in IL stubs must be inlined
6140     }
6141     else
6142     {
6143         // Check legality
6144         if (!impCanPInvokeInlineCallSite(block))
6145         {
6146             return;
6147         }
6148
6149         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
6150         // profitability checks
6151         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
6152         {
6153             if (!impCanPInvokeInline())
6154             {
6155                 return;
6156             }
6157
6158             // Size-speed tradeoff: don't use inline pinvoke at rarely
6159             // executed call sites.  The non-inline version is more
6160             // compact.
6161             if (block->isRunRarely())
6162             {
6163                 return;
6164             }
6165         }
6166
6167         // The expensive check should be last
6168         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
6169         {
6170             return;
6171         }
6172     }
6173
6174     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
6175
6176     call->gtFlags |= GTF_CALL_UNMANAGED;
6177     info.compCallUnmanaged++;
6178
6179     // AMD64 convention is same for native and managed
6180     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
6181     {
6182         call->gtFlags |= GTF_CALL_POP_ARGS;
6183     }
6184
6185     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
6186     {
6187         call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
6188     }
6189 }
6190
6191 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
6192 {
6193     var_types callRetTyp = JITtype2varType(sig->retType);
6194
6195     /* The function pointer is on top of the stack - It may be a
6196      * complex expression. As it is evaluated after the args,
6197      * it may cause registered args to be spilled. Simply spill it.
6198      */
6199
6200     // Ignore this trivial case.
6201     if (impStackTop().val->gtOper != GT_LCL_VAR)
6202     {
6203         impSpillStackEntry(verCurrentState.esStackDepth - 1,
6204                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
6205     }
6206
6207     /* Get the function pointer */
6208
6209     GenTree* fptr = impPopStack().val;
6210
6211     // The function pointer is typically sized to match the target pointer size.
6212     // However, stubgen IL optimization can change LDC.I8 to LDC.I4
6213     // See ILCodeStream::LowerOpcode
6214     assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
6215
6216 #ifdef DEBUG
6217     // This temporary must never be converted to a double in stress mode,
6218     // because that can introduce a call to the cast helper after the
6219     // arguments have already been evaluated.
6220
6221     if (fptr->OperGet() == GT_LCL_VAR)
6222     {
6223         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
6224     }
6225 #endif
6226
6227     /* Create the call node */
6228
6229     GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6230
6231     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6232
6233     return call;
6234 }
6235
6236 /*****************************************************************************/
6237
6238 void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
6239 {
6240     assert(call->gtFlags & GTF_CALL_UNMANAGED);
6241
6242     /* Since we push the arguments in reverse order (i.e. right -> left)
6243      * spill any side effects from the stack
6244      *
6245      * OBS: If there is only one side effect we do not need to spill it,
6246      *      thus we have to spill all side effects except the last one.
6247      */
6248
6249     unsigned lastLevelWithSideEffects = UINT_MAX;
6250
6251     unsigned argsToReverse = sig->numArgs;
6252
6253     // For "thiscall", the first argument goes in a register. Since its
6254     // order does not need to be changed, we do not need to spill it
6255
6256     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6257     {
6258         assert(argsToReverse);
6259         argsToReverse--;
6260     }
6261
6262 #ifndef _TARGET_X86_
6263     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
6264     argsToReverse = 0;
6265 #endif
6266
6267     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
6268     {
6269         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
6270         {
6271             assert(lastLevelWithSideEffects == UINT_MAX);
6272
6273             impSpillStackEntry(level,
6274                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
6275         }
6276         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
6277         {
6278             if (lastLevelWithSideEffects != UINT_MAX)
6279             {
6280                 /* We had a previous side effect - must spill it */
6281                 impSpillStackEntry(lastLevelWithSideEffects,
6282                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
6283
6284                 /* Record the level for the current side effect in case we will spill it */
6285                 lastLevelWithSideEffects = level;
6286             }
6287             else
6288             {
6289                 /* This is the first side effect encountered - record its level */
6290
6291                 lastLevelWithSideEffects = level;
6292             }
6293         }
6294     }
6295
6296     /* The argument list is now "clean" - no out-of-order side effects
6297      * Pop the argument list in reverse order */
6298
6299     GenTree* args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
6300
6301     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6302     {
6303         GenTree* thisPtr = args->Current();
6304         impBashVarAddrsToI(thisPtr);
6305         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
6306     }
6307
6308     if (args)
6309     {
6310         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6311     }
6312 }
6313
6314 //------------------------------------------------------------------------
6315 // impInitClass: Build a node to initialize the class before accessing the
6316 //               field if necessary
6317 //
6318 // Arguments:
6319 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6320 //                     by a call to CEEInfo::resolveToken().
6321 //
6322 // Return Value: If needed, a pointer to the node that will perform the class
6323 //               initialization.  Otherwise, nullptr.
6324 //
6325
6326 GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6327 {
6328     CorInfoInitClassResult initClassResult =
6329         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
6330
6331     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6332     {
6333         return nullptr;
6334     }
6335     BOOL runtimeLookup;
6336
6337     GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6338
6339     if (node == nullptr)
6340     {
6341         assert(compDonotInline());
6342         return nullptr;
6343     }
6344
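         // If the class handle requires a runtime lookup, call the generic
         // CORINFO_HELP_INITCLASS helper on the looked-up handle; otherwise use the
         // cheaper shared static-base helper for this exact class.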
6345     if (runtimeLookup)
6346     {
6347         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6348     }
6349     else
6350     {
6351         // Call the shared non-GC static helper, as it's the fastest.
6352         node = fgGetSharedCCtor(pResolvedToken->hClass);
6353     }
6354
6355     return node;
6356 }
6357
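     //------------------------------------------------------------------------
     // impImportStaticReadOnlyField: Read the jit-time value of a static read-only
     //    field and return it as a constant node.
     //
     // Arguments:
     //    fldAddr - address of the field's data
     //    lclTyp  - type of the field
     //
     // Return Value:
     //    A constant node holding the field's current value, or nullptr for an
     //    unexpected type.
     //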
6358 GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6359 {
6360     GenTree* op1 = nullptr;
6361
6362     switch (lclTyp)
6363     {
6364         int     ival;
6365         __int64 lval;
6366         double  dval;
6367
6368         case TYP_BOOL:
6369             ival = *((bool*)fldAddr);
6370             goto IVAL_COMMON;
6371
6372         case TYP_BYTE:
6373             ival = *((signed char*)fldAddr);
6374             goto IVAL_COMMON;
6375
6376         case TYP_UBYTE:
6377             ival = *((unsigned char*)fldAddr);
6378             goto IVAL_COMMON;
6379
6380         case TYP_SHORT:
6381             ival = *((short*)fldAddr);
6382             goto IVAL_COMMON;
6383
6384         case TYP_USHORT:
6385             ival = *((unsigned short*)fldAddr);
6386             goto IVAL_COMMON;
6387
6388         case TYP_UINT:
6389         case TYP_INT:
6390             ival = *((int*)fldAddr);
6391         IVAL_COMMON:
6392             op1 = gtNewIconNode(ival);
6393             break;
6394
6395         case TYP_LONG:
6396         case TYP_ULONG:
6397             lval = *((__int64*)fldAddr);
6398             op1  = gtNewLconNode(lval);
6399             break;
6400
6401         case TYP_FLOAT:
6402             dval = *((float*)fldAddr);
6403             op1  = gtNewDconNode(dval);
6404 #if !FEATURE_X87_DOUBLES
6405             // X87 stack doesn't differentiate between float/double
6406             // so R4 is treated as R8, but everybody else does
6407             op1->gtType = TYP_FLOAT;
6408 #endif // FEATURE_X87_DOUBLES
6409             break;
6410
6411         case TYP_DOUBLE:
6412             dval = *((double*)fldAddr);
6413             op1  = gtNewDconNode(dval);
6414             break;
6415
6416         default:
6417             assert(!"Unexpected lclTyp");
6418             break;
6419     }
6420
6421     return op1;
6422 }
6423
6424 GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6425                                               CORINFO_ACCESS_FLAGS    access,
6426                                               CORINFO_FIELD_INFO*     pFieldInfo,
6427                                               var_types               lclTyp)
6428 {
6429     GenTree* op1;
6430
6431     switch (pFieldInfo->fieldAccessor)
6432     {
6433         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6434         {
6435             assert(!compIsForInlining());
6436
6437             // We first call a special helper to get the statics base pointer
6438             op1 = impParentClassTokenToHandle(pResolvedToken);
6439
6440             // compIsForInlining() is false, so we should never get NULL here.
6441             assert(op1 != nullptr);
6442
6443             var_types type = TYP_BYREF;
6444
6445             switch (pFieldInfo->helper)
6446             {
6447                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6448                     type = TYP_I_IMPL;
6449                     break;
6450                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6451                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6452                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6453                     break;
6454                 default:
6455                     assert(!"unknown generic statics helper");
6456                     break;
6457             }
6458
6459             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6460
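                 // Add the field's offset to the statics base returned by the helper, and
                 // attach a field sequence so later phases can identify this static field
                 // access.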
6461             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6462             op1              = gtNewOperNode(GT_ADD, type, op1,
6463                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6464         }
6465         break;
6466
6467         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6468         {
6469 #ifdef FEATURE_READYTORUN_COMPILER
6470             if (opts.IsReadyToRun())
6471             {
6472                 unsigned callFlags = 0;
6473
6474                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6475                 {
6476                     callFlags |= GTF_CALL_HOISTABLE;
6477                 }
6478
6479                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6480                 op1->gtFlags |= callFlags;
6481
6482                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6483             }
6484             else
6485 #endif
6486             {
6487                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6488             }
6489
6490             {
6491                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6492                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6493                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6494             }
6495             break;
6496         }
6497
6498         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6499         {
6500 #ifdef FEATURE_READYTORUN_COMPILER
6501             noway_assert(opts.IsReadyToRun());
6502             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6503             assert(kind.needsRuntimeLookup);
6504
6505             GenTree*        ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6506             GenTreeArgList* args    = gtNewArgList(ctxTree);
6507
6508             unsigned callFlags = 0;
6509
6510             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6511             {
6512                 callFlags |= GTF_CALL_HOISTABLE;
6513             }
6514             var_types type = TYP_BYREF;
6515             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6516             op1->gtFlags |= callFlags;
6517
6518             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6519             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6520             op1              = gtNewOperNode(GT_ADD, type, op1,
6521                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6522 #else
6523             unreached();
6524 #endif // FEATURE_READYTORUN_COMPILER
6525         }
6526         break;
6527
6528         default:
6529         {
6530             if (!(access & CORINFO_ACCESS_ADDRESS))
6531             {
6532                 // In future, it may be better to just create the right tree here instead of folding it later.
6533                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6534
6535                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6536                 {
6537                     op1->gtFlags |= GTF_FLD_INITCLASS;
6538                 }
6539
6540                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6541                 {
6542                     op1->gtType = TYP_REF; // points at boxed object
6543                     FieldSeqNode* firstElemFldSeq =
6544                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6545                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6546                                         new (this, GT_CNS_INT)
6547                                             GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
6548
6549                     if (varTypeIsStruct(lclTyp))
6550                     {
6551                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
6552                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
6553                     }
6554                     else
6555                     {
6556                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6557                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6558                     }
6559                 }
6560
6561                 return op1;
6562             }
6563             else
6564             {
6565                 void** pFldAddr = nullptr;
6566                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6567
6568                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6569
6570                 /* Create the data member node */
6571                 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
6572                                           fldSeq);
6573
6574                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6575                 {
6576                     op1->gtFlags |= GTF_ICON_INITCLASS;
6577                 }
6578
6579                 if (pFldAddr != nullptr)
6580                 {
6581                     // There are two cases here: either the static is RVA-based, in which
6582                     // case the type of the FIELD node is not a GC type and the handle to
6583                     // the RVA is a TYP_I_IMPL; or the FIELD node is a GC type and the handle
6584                     // to it is a TYP_BYREF into the GC heap, because handles to statics now
6585                     // go into the large object heap.
6586
6587                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6588                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6589                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6590                 }
6591             }
6592             break;
6593         }
6594     }
6595
6596     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6597     {
6598         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6599
6600         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6601
6602         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6603                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
6604     }
6605
6606     if (!(access & CORINFO_ACCESS_ADDRESS))
6607     {
6608         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6609         op1->gtFlags |= GTF_GLOB_REF;
6610     }
6611
6612     return op1;
6613 }
6614
6615 // In general, try to call this before most of the verification work.  Most people expect the access
6616 // exceptions before the verification exceptions.  If you do this afterwards, that usually doesn't happen.  It
6617 // turns out that if you can't access something, we also consider you unverifiable for other reasons.
6618 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6619 {
6620     if (result != CORINFO_ACCESS_ALLOWED)
6621     {
6622         impHandleAccessAllowedInternal(result, helperCall);
6623     }
6624 }
6625
6626 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6627 {
6628     switch (result)
6629     {
6630         case CORINFO_ACCESS_ALLOWED:
6631             break;
6632         case CORINFO_ACCESS_ILLEGAL:
6633             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6634             // method is verifiable.  Otherwise, delay the exception to runtime.
6635             if (compIsForImportOnly())
6636             {
6637                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6638             }
6639             else
6640             {
6641                 impInsertHelperCall(helperCall);
6642             }
6643             break;
6644         case CORINFO_ACCESS_RUNTIME_CHECK:
6645             impInsertHelperCall(helperCall);
6646             break;
6647     }
6648 }
6649
6650 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6651 {
6652     // Construct the argument list
6653     GenTreeArgList* args = nullptr;
6654     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6655     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6656     {
6657         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6658         GenTree*                  currentArg = nullptr;
6659         switch (helperArg.argType)
6660         {
6661             case CORINFO_HELPER_ARG_TYPE_Field:
6662                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6663                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6664                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6665                 break;
6666             case CORINFO_HELPER_ARG_TYPE_Method:
6667                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6668                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6669                 break;
6670             case CORINFO_HELPER_ARG_TYPE_Class:
6671                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6672                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6673                 break;
6674             case CORINFO_HELPER_ARG_TYPE_Module:
6675                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6676                 break;
6677             case CORINFO_HELPER_ARG_TYPE_Const:
6678                 currentArg = gtNewIconNode(helperArg.constant);
6679                 break;
6680             default:
6681                 NO_WAY("Illegal helper arg type");
6682         }
6683         args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6684     }
6685
6686     /* TODO-Review:
6687      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6688      * Also, consider sticking this in the first basic block.
6689      */
6690     GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
6691     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6692 }
6693
6694 // Checks whether the return types of caller and callee are compatible
6695 // so that callee can be tail called. Note that here we don't check
6696 // compatibility in IL Verifier sense, but on the lines of return type
6697 // sizes are equal and get returned in the same return register.
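     // As an illustration: a caller declared to return an int should not tail call a
     // callee declared to return a short, because the callee's caller (not the callee)
     // is expected to normalize small return values; with a tail call the possibly
     // un-normalized short would flow directly to a call site expecting a full int.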
6698 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6699                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6700                                             var_types            calleeRetType,
6701                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6702 {
6703     // Note that we cannot relax this condition with genActualType() as the
6704     // calling convention dictates that the caller of a function with a small
6705     // typed return value is responsible for normalizing the return value.
6706     if (callerRetType == calleeRetType)
6707     {
6708         return true;
6709     }
6710
6711     // If the class handles are the same and not null, the return types are compatible.
6712     if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
6713     {
6714         return true;
6715     }
6716
6717 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6718     // Jit64 compat:
6719     if (callerRetType == TYP_VOID)
6720     {
6721         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6722         //     tail.call
6723         //     pop
6724         //     ret
6725         //
6726         // Note that the above IL pattern is not valid as per IL verification rules.
6727         // Therefore, only full trust code can take advantage of this pattern.
6728         return true;
6729     }
6730
6731     // These checks return true if the return value type sizes are the same and
6732     // get returned in the same return register i.e. caller doesn't need to normalize
6733     // return value. Some of the tail calls permitted by below checks would have
6734     // been rejected by IL Verifier before we reached here.  Therefore, only full
6735     // trust code can make those tail calls.
6736     unsigned callerRetTypeSize = 0;
6737     unsigned calleeRetTypeSize = 0;
6738     bool     isCallerRetTypMBEnreg =
6739         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6740     bool isCalleeRetTypMBEnreg =
6741         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6742
6743     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6744     {
6745         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6746     }
6747 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6748
6749     return false;
6750 }
6751
6752 // For prefixFlags
6753 enum
6754 {
6755     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6756     PREFIX_TAILCALL_IMPLICIT =
6757         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6758     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6759     PREFIX_VOLATILE    = 0x00000100,
6760     PREFIX_UNALIGNED   = 0x00001000,
6761     PREFIX_CONSTRAINED = 0x00010000,
6762     PREFIX_READONLY    = 0x00100000
6763 };
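// For example, an opportunistic tail call that the importer decides to attempt sets
// PREFIX_TAILCALL_IMPLICIT, so a test such as (prefixFlags & PREFIX_TAILCALL) != 0 matches
// both forms, while (prefixFlags & PREFIX_TAILCALL_EXPLICIT) matches only "tail."-prefixed calls.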
6764
6765 /********************************************************************************
6766  *
6767  * Returns true if the current opcode and the opcodes following it correspond
6768  * to a supported tail call IL pattern.
6769  *
6770  */
6771 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6772                                       OPCODE      curOpcode,
6773                                       const BYTE* codeAddrOfNextOpcode,
6774                                       const BYTE* codeEnd,
6775                                       bool        isRecursive,
6776                                       bool*       isCallPopAndRet /* = nullptr */)
6777 {
6778     // Bail out if the current opcode is not a call.
6779     if (!impOpcodeIsCallOpcode(curOpcode))
6780     {
6781         return false;
6782     }
6783
6784 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6785     // If shared ret tail opt is not enabled, we will enable
6786     // it for recursive methods.
6787     if (isRecursive)
6788 #endif
6789     {
6790         // We can actually handle the case where the ret is in a fall-through block, as long as that is the only
6791         // part of the sequence. Make sure we don't go past the end of the IL, however.
6792         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6793     }
6794
6795     // Bail out if there is no next opcode after call
6796     if (codeAddrOfNextOpcode >= codeEnd)
6797     {
6798         return false;
6799     }
6800
6801     // Scan the opcodes to look for the following IL patterns if either
6802     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6803     //  ii) if tail prefixed, IL verification is not needed for the method.
6804     //
6805     // Only in the above two cases can we allow the tail call patterns below,
6806     // which violate the ECMA spec.
6807     //
6808     // Pattern1:
6809     //       call
6810     //       nop*
6811     //       ret
6812     //
6813     // Pattern2:
6814     //       call
6815     //       nop*
6816     //       pop
6817     //       nop*
6818     //       ret
6819     int    cntPop = 0;
6820     OPCODE nextOpcode;
6821
6822 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6823     do
6824     {
6825         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6826         codeAddrOfNextOpcode += sizeof(__int8);
6827     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6828              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6829              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6830                                                                                          // one pop seen so far.
6831 #else
6832     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6833 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6834
6835     if (isCallPopAndRet)
6836     {
6837         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6838         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6839     }
6840
6841 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6842     // Jit64 Compat:
6843     // Tail call IL pattern could be either of the following
6844     // 1) call/callvirt/calli + ret
6845     // 2) call/callvirt/calli + pop + ret in a method returning void.
6846     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6847 #else
6848     return (nextOpcode == CEE_RET) && (cntPop == 0);
6849 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6850 }
6851
6852 /*****************************************************************************
6853  *
6854  * Determine whether the call could be converted to an implicit tail call
6855  *
6856  */
6857 bool Compiler::impIsImplicitTailCallCandidate(
6858     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6859 {
6860
6861 #if FEATURE_TAILCALL_OPT
6862     if (!opts.compTailCallOpt)
6863     {
6864         return false;
6865     }
6866
6867     if (opts.compDbgCode || opts.MinOpts())
6868     {
6869         return false;
6870     }
6871
6872     // must not be tail prefixed
6873     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6874     {
6875         return false;
6876     }
6877
6878 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6879     // The block containing the call must be marked as BBJ_RETURN.
6880     // We allow shared ret tail call optimization on recursive calls even under
6881     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6882     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6883         return false;
6884 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6885
6886     // must be call+ret or call+pop+ret
6887     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6888     {
6889         return false;
6890     }
6891
6892     return true;
6893 #else
6894     return false;
6895 #endif // FEATURE_TAILCALL_OPT
6896 }
6897
6898 //------------------------------------------------------------------------
6899 // impImportCall: import a call-inspiring opcode
6900 //
6901 // Arguments:
6902 //    opcode                    - opcode that inspires the call
6903 //    pResolvedToken            - resolved token for the call target
6904 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6905 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6906 //    prefixFlags               - IL prefix flags for the call
6907 //    callInfo                  - EE supplied info for the call
6908 //    rawILOffset               - IL offset of the opcode
6909 //
6910 // Returns:
6911 //    Type of the call's return value.
6912 //    If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
6913 //    However we can't assert for this here yet because there are cases we miss. See issue #13272.
6914 //
6915 //
6916 // Notes:
6917 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6918 //
6919 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6920 //    uninitialized object.
6921
6922 #ifdef _PREFAST_
6923 #pragma warning(push)
6924 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6925 #endif
6926
6927 var_types Compiler::impImportCall(OPCODE                  opcode,
6928                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6929                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6930                                   GenTree*                newobjThis,
6931                                   int                     prefixFlags,
6932                                   CORINFO_CALL_INFO*      callInfo,
6933                                   IL_OFFSET               rawILOffset)
6934 {
6935     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6936
6937     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6938     var_types              callRetTyp                     = TYP_COUNT;
6939     CORINFO_SIG_INFO*      sig                            = nullptr;
6940     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6941     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6942     unsigned               clsFlags                       = 0;
6943     unsigned               mflags                         = 0;
6944     unsigned               argFlags                       = 0;
6945     GenTree*               call                           = nullptr;
6946     GenTreeArgList*        args                           = nullptr;
6947     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6948     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6949     bool                   exactContextNeedsRuntimeLookup = false;
6950     bool                   canTailCall                    = true;
6951     const char*            szCanTailCallFailReason        = nullptr;
6952     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6953     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6954
6955     CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
6956
6957     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6958     // do that before tailcalls, but that is probably not the intended
6959     // semantic. So just disallow tailcalls from synchronized methods.
6960     // Also, popping arguments in a varargs function is more work and NYI.
6961     // If we have a security object, we have to keep our frame around for callers
6962     // to see any imperative security.
6963     if (info.compFlags & CORINFO_FLG_SYNCH)
6964     {
6965         canTailCall             = false;
6966         szCanTailCallFailReason = "Caller is synchronized";
6967     }
6968 #if !FEATURE_FIXED_OUT_ARGS
6969     else if (info.compIsVarArgs)
6970     {
6971         canTailCall             = false;
6972         szCanTailCallFailReason = "Caller is varargs";
6973     }
6974 #endif // FEATURE_FIXED_OUT_ARGS
6975     else if (opts.compNeedSecurityCheck)
6976     {
6977         canTailCall             = false;
6978         szCanTailCallFailReason = "Caller requires a security check.";
6979     }
6980
6981     // We only need to cast the return value of pinvoke inlined calls that return small types
6982
6983     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6984     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6985     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6986     // the time being that the callee might be compiled by the other JIT and thus the return
6987     // value will need to be widened by us (or not widened at all...)
6988
6989     // ReadyToRun code sticks with default calling convention that does not widen small return types.
6990
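    // For example, a pinvoke callee declared to return 'byte' may leave garbage in the upper
    // bits of the return register; setting checkForSmallType here is what lets the importer
    // cast such a small-typed return value back down before the caller uses it.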
6991     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6992     bool bIntrinsicImported = false;
6993
6994     CORINFO_SIG_INFO calliSig;
6995     GenTreeArgList*  extraArg = nullptr;
6996
6997     /*-------------------------------------------------------------------------
6998      * First create the call node
6999      */
7000
7001     if (opcode == CEE_CALLI)
7002     {
7003         /* Get the call site sig */
7004         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
7005
7006         callRetTyp = JITtype2varType(calliSig.retType);
7007
7008         call = impImportIndirectCall(&calliSig, ilOffset);
7009
7010         // We don't know the target method, so we have to infer the flags, or
7011         // assume the worst-case.
7012         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
7013
7014 #ifdef DEBUG
7015         if (verbose)
7016         {
7017             unsigned structSize =
7018                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
7019             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7020                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7021         }
7022 #endif
7023         // This should be checked in impImportBlockCode.
7024         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
7025
7026         sig = &calliSig;
7027
7028 #ifdef DEBUG
7029         // We cannot lazily obtain the signature of a CALLI call because it has no method
7030         // handle that we can use, so we need to save its full call signature here.
7031         assert(call->gtCall.callSig == nullptr);
7032         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7033         *call->gtCall.callSig = calliSig;
7034 #endif // DEBUG
7035
7036         if (IsTargetAbi(CORINFO_CORERT_ABI))
7037         {
7038             bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
7039                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
7040                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
7041                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
7042             if (managedCall)
7043             {
7044                 addFatPointerCandidate(call->AsCall());
7045             }
7046         }
7047     }
7048     else // (opcode != CEE_CALLI)
7049     {
7050         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
7051
7052         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
7053         // supply the instantiation parameters necessary to make direct calls to underlying
7054         // shared generic code, rather than calling through instantiating stubs.  If the
7055         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
7056         // must indeed pass an instantiation parameter.
7057
7058         methHnd = callInfo->hMethod;
7059
7060         sig        = &(callInfo->sig);
7061         callRetTyp = JITtype2varType(sig->retType);
7062
7063         mflags = callInfo->methodFlags;
7064
7065 #ifdef DEBUG
7066         if (verbose)
7067         {
7068             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
7069             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7070                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7071         }
7072 #endif
7073         if (compIsForInlining())
7074         {
7075             /* Does this call site have security boundary restrictions? */
7076
7077             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
7078             {
7079                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
7080                 return TYP_UNDEF;
7081             }
7082
7083             /* Does the inlinee need a security check token on the frame */
7084
7085             if (mflags & CORINFO_FLG_SECURITYCHECK)
7086             {
7087                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7088                 return TYP_UNDEF;
7089             }
7090
7091             /* Does the inlinee use StackCrawlMark */
7092
7093             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
7094             {
7095                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
7096                 return TYP_UNDEF;
7097             }
7098
7099             /* For now ignore delegate invoke */
7100
7101             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7102             {
7103                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
7104                 return TYP_UNDEF;
7105             }
7106
7107             /* For now ignore varargs */
7108             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7109             {
7110                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
7111                 return TYP_UNDEF;
7112             }
7113
7114             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7115             {
7116                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
7117                 return TYP_UNDEF;
7118             }
7119
7120             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
7121             {
7122                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
7123                 return TYP_UNDEF;
7124             }
7125         }
7126
7127         clsHnd = pResolvedToken->hClass;
7128
7129         clsFlags = callInfo->classFlags;
7130
7131 #ifdef DEBUG
7132         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
7133
7134         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
7135         // These should be in mscorlib.h, and available through a JIT/EE interface call.
7136         const char* modName;
7137         const char* className;
7138         const char* methodName;
7139         if ((className = eeGetClassName(clsHnd)) != nullptr &&
7140             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
7141             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
7142         {
7143             return impImportJitTestLabelMark(sig->numArgs);
7144         }
7145 #endif // DEBUG
7146
7147         // <NICE> Factor this into getCallInfo </NICE>
7148         bool isSpecialIntrinsic = false;
7149         if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
7150         {
7151             const bool isTail = canTailCall && (tailCall != 0);
7152
7153             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, readonlyCall, isTail,
7154                                 pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID, &isSpecialIntrinsic);
7155
7156             if (compDonotInline())
7157             {
7158                 return TYP_UNDEF;
7159             }
7160
7161             if (call != nullptr)
7162             {
7163                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
7164                        (clsFlags & CORINFO_FLG_FINAL));
7165
7166 #ifdef FEATURE_READYTORUN_COMPILER
7167                 if (call->OperGet() == GT_INTRINSIC)
7168                 {
7169                     if (opts.IsReadyToRun())
7170                     {
7171                         noway_assert(callInfo->kind == CORINFO_CALL);
7172                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
7173                     }
7174                     else
7175                     {
7176                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
7177                     }
7178                 }
7179 #endif
7180
7181                 bIntrinsicImported = true;
7182                 goto DONE_CALL;
7183             }
7184         }
7185
7186 #ifdef FEATURE_SIMD
7187         if (featureSIMD)
7188         {
7189             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
7190             if (call != nullptr)
7191             {
7192                 bIntrinsicImported = true;
7193                 goto DONE_CALL;
7194             }
7195         }
7196 #endif // FEATURE_SIMD
7197
7198         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
7199         {
7200             NO_WAY("Virtual call to a function added via EnC is not supported");
7201         }
7202
7203         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
7204             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7205             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
7206         {
7207             BADCODE("Bad calling convention");
7208         }
7209
7210         //-------------------------------------------------------------------------
7211         //  Construct the call node
7212         //
7213         // Work out what sort of call we're making.
7214         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
7215
7216         constraintCallThisTransform    = callInfo->thisTransform;
7217         exactContextHnd                = callInfo->contextHandle;
7218         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
7219
7220         // A recursive call is treated as a loop back to the beginning of the method.
7221         if (gtIsRecursiveCall(methHnd))
7222         {
7223 #ifdef DEBUG
7224             if (verbose)
7225             {
7226                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
7227                         fgFirstBB->bbNum, compCurBB->bbNum);
7228             }
7229 #endif
7230             fgMarkBackwardJump(fgFirstBB, compCurBB);
7231         }
7232
7233         switch (callInfo->kind)
7234         {
7235
7236             case CORINFO_VIRTUALCALL_STUB:
7237             {
7238                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7239                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7240                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
7241                 {
7242
7243                     if (compIsForInlining())
7244                     {
7245                         // Don't import runtime lookups when inlining
7246                         // Inlining has to be aborted in such a case
7247                         /* XXX Fri 3/20/2009
7248                          * By the way, this would never succeed.  If the handle lookup is into the generic
7249                          * dictionary for a candidate, you'll generate different dictionary offsets and the
7250                          * inlined code will crash.
7251                          *
7252                          * To anyone reviewing this code: when could this ever succeed in the future?  It'll
7253                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
7254                          * failing here.
7255                          */
7256                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
7257                         return TYP_UNDEF;
7258                     }
7259
7260                     GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
7261                     assert(!compDonotInline());
7262
7263                     // This is the rough code to set up an indirect stub call
7264                     assert(stubAddr != nullptr);
7265
7266                     // The stubAddr may be a
7267                     // complex expression. As it is evaluated after the args,
7268                     // it may cause registered args to be spilled. Simply spill it.
7269
7270                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
7271                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
7272                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7273
7274                     // Create the actual call node
7275
7276                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7277                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7278
7279                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
7280
7281                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
7282                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7283
7284 #ifdef _TARGET_X86_
7285                     // No tailcalls allowed for these yet...
7286                     canTailCall             = false;
7287                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
7288 #endif
7289                 }
7290                 else
7291                 {
7292                     // OK, the stub is available at compile time.
7293
7294                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7295                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
7296                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7297                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
7298                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
7299                     {
7300                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
7301                     }
7302                 }
7303
7304 #ifdef FEATURE_READYTORUN_COMPILER
7305                 if (opts.IsReadyToRun())
7306                 {
7307                     // Null check is sometimes needed for ready to run to handle
7308                     // non-virtual <-> virtual changes between versions
7309                     if (callInfo->nullInstanceCheck)
7310                     {
7311                         call->gtFlags |= GTF_CALL_NULLCHECK;
7312                     }
7313                 }
7314 #endif
7315
7316                 break;
7317             }
7318
7319             case CORINFO_VIRTUALCALL_VTABLE:
7320             {
7321                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7322                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7323                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7324                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
7325                 break;
7326             }
7327
7328             case CORINFO_VIRTUALCALL_LDVIRTFTN:
7329             {
7330                 if (compIsForInlining())
7331                 {
7332                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
7333                     return TYP_UNDEF;
7334                 }
7335
7336                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7337                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7338                 // OK, we've been told to call via LDVIRTFTN, so just
7339                 // take the call now....
7340
7341                 args = impPopList(sig->numArgs, sig);
7342
7343                 GenTree* thisPtr = impPopStack().val;
7344                 thisPtr          = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7345                 assert(thisPtr != nullptr);
7346
7347                 // Clone the (possibly transformed) "this" pointer
7348                 GenTree* thisPtrCopy;
7349                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7350                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
7351
7352                 GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7353                 assert(fptr != nullptr);
7354
7355                 thisPtr = nullptr; // can't reuse it
7356
7357                 // Now make an indirect call through the function pointer
7358
7359                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7360                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7361                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7362
7363                 // Create the actual call node
7364
7365                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7366                 call->gtCall.gtCallObjp = thisPtrCopy;
7367                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7368
7369                 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7370                 {
7371                     // CoreRT generic virtual method: need to handle potential fat function pointers
7372                     addFatPointerCandidate(call->AsCall());
7373                 }
7374 #ifdef FEATURE_READYTORUN_COMPILER
7375                 if (opts.IsReadyToRun())
7376                 {
7377                     // Null check is needed for ready to run to handle
7378                     // non-virtual <-> virtual changes between versions
7379                     call->gtFlags |= GTF_CALL_NULLCHECK;
7380                 }
7381 #endif
7382
7383                 // Since we are jumping over some code, check that it's OK to skip that code
7384                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7385                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7386                 goto DONE;
7387             }
7388
7389             case CORINFO_CALL:
7390             {
7391                 // This is for a non-virtual, non-interface etc. call
7392                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7393
7394                 // We remove the nullcheck for the GetType call intrinsic.
7395                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7396                 // and intrinsics.
7397                 if (callInfo->nullInstanceCheck &&
7398                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7399                 {
7400                     call->gtFlags |= GTF_CALL_NULLCHECK;
7401                 }
7402
7403 #ifdef FEATURE_READYTORUN_COMPILER
7404                 if (opts.IsReadyToRun())
7405                 {
7406                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7407                 }
7408 #endif
7409                 break;
7410             }
7411
7412             case CORINFO_CALL_CODE_POINTER:
7413             {
7414                 // The EE has asked us to call by computing a code pointer and then doing an
7415                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
7416
7417                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7418                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7419
7420                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7421                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7422
7423                 GenTree* fptr =
7424                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7425
7426                 if (compDonotInline())
7427                 {
7428                     return TYP_UNDEF;
7429                 }
7430
7431                 // Now make an indirect call through the function pointer
7432
7433                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7434                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7435                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7436
7437                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7438                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7439                 if (callInfo->nullInstanceCheck)
7440                 {
7441                     call->gtFlags |= GTF_CALL_NULLCHECK;
7442                 }
7443
7444                 break;
7445             }
7446
7447             default:
7448                 assert(!"unknown call kind");
7449                 break;
7450         }
7451
7452         //-------------------------------------------------------------------------
7453         // Set more flags
7454
7455         PREFIX_ASSUME(call != nullptr);
7456
7457         if (mflags & CORINFO_FLG_NOGCCHECK)
7458         {
7459             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7460         }
7461
7462         // Mark the call if it's one of the ones we may treat as an intrinsic
7463         if (isSpecialIntrinsic)
7464         {
7465             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7466         }
7467     }
7468     assert(sig);
7469     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7470
7471     /* Some sanity checks */
7472
7473     // CALL_VIRT and NEWOBJ must have a THIS pointer
7474     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7475     // static bit and hasThis are negations of one another
7476     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7477     assert(call != nullptr);
7478
7479     /*-------------------------------------------------------------------------
7480      * Check special-cases etc
7481      */
7482
7483     /* Special case - Check if it is a call to Delegate.Invoke(). */
7484
7485     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7486     {
7487         assert(!compIsForInlining());
7488         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7489         assert(mflags & CORINFO_FLG_FINAL);
7490
7491         /* Set the delegate flag */
7492         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7493
7494         if (callInfo->secureDelegateInvoke)
7495         {
7496             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7497         }
7498
7499         if (opcode == CEE_CALLVIRT)
7500         {
7501             assert(mflags & CORINFO_FLG_FINAL);
7502
7503             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7504             assert(call->gtFlags & GTF_CALL_NULLCHECK);
7505             call->gtFlags &= ~GTF_CALL_NULLCHECK;
7506         }
7507     }
7508
7509     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7510     actualMethodRetTypeSigClass = sig->retTypeSigClass;
7511     if (varTypeIsStruct(callRetTyp))
7512     {
7513         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
7514         call->gtType = callRetTyp;
7515     }
7516
7517 #if !FEATURE_VARARG
7518     /* Check for varargs */
7519     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7520         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7521     {
7522         BADCODE("Varargs not supported.");
7523     }
7524 #endif // !FEATURE_VARARG
7525
7526 #ifdef UNIX_X86_ABI
7527     if (call->gtCall.callSig == nullptr)
7528     {
7529         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7530         *call->gtCall.callSig = *sig;
7531     }
7532 #endif // UNIX_X86_ABI
7533
7534     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7535         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7536     {
7537         assert(!compIsForInlining());
7538
7539         /* Set the right flags */
7540
7541         call->gtFlags |= GTF_CALL_POP_ARGS;
7542         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7543
7544         /* Can't allow tailcall for varargs as it is caller-pop. The caller
7545            will be expecting to pop a certain number of arguments, but if we
7546            tailcall to a function with a different number of arguments, we
7547            are hosed. There are ways around this (caller remembers esp value,
7548            varargs is not caller-pop, etc), but not worth it. */
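        // For example, on x86 our caller pushed our arguments and will pop them itself (e.g. an
        // "add esp, 12" after the call site); if we tail called into a target whose arguments
        // occupy a different amount of stack, that fixed adjustment would no longer match.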
7549         CLANG_FORMAT_COMMENT_ANCHOR;
7550
7551 #ifdef _TARGET_X86_
7552         if (canTailCall)
7553         {
7554             canTailCall             = false;
7555             szCanTailCallFailReason = "Callee is varargs";
7556         }
7557 #endif
7558
7559         /* Get the total number of arguments - this is already correct
7560          * for CALLI - for methods we have to get it from the call site */
7561
7562         if (opcode != CEE_CALLI)
7563         {
7564 #ifdef DEBUG
7565             unsigned numArgsDef = sig->numArgs;
7566 #endif
7567             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7568
7569 #ifdef DEBUG
7570             // We cannot lazily obtain the signature of a vararg call because using its method
7571             // handle will give us only the declared argument list, not the full argument list.
7572             assert(call->gtCall.callSig == nullptr);
7573             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7574             *call->gtCall.callSig = *sig;
7575 #endif
7576
7577             // For vararg calls we must be sure to load the return type of the
7578             // method actually being called, as well as the return type specified
7579             // in the vararg signature. With type equivalency, these types
7580             // may not be the same.
7581             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7582             {
7583                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7584                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7585                     sig->retType != CORINFO_TYPE_VAR)
7586                 {
7587                     // Make sure that all valuetypes (including enums) that we push are loaded.
7588                     // This is to guarantee that if a GC is triggered from the prestub of this method,
7589                     // all valuetypes in the method signature are already loaded.
7590                     // We need to be able to find the size of the valuetypes, but we cannot
7591                     // do a class-load from within GC.
7592                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7593                 }
7594             }
7595
7596             assert(numArgsDef <= sig->numArgs);
7597         }
7598
7599         /* We will have "cookie" as the last argument but we cannot push
7600          * it on the operand stack because we may overflow, so we append it
7601          * to the arg list right after we pop the other arguments */
7602     }
7603
7604     if (mflags & CORINFO_FLG_SECURITYCHECK)
7605     {
7606         assert(!compIsForInlining());
7607
7608         // Need security prolog/epilog callouts when there is
7609         // imperative security in the method. This is to give security a
7610         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7611
7612         if (compIsForInlining())
7613         {
7614             // Cannot handle this if the method being imported is itself an inlinee,
7615             // because an inlinee method does not have its own frame.
7616
7617             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7618             return TYP_UNDEF;
7619         }
7620         else
7621         {
7622             tiSecurityCalloutNeeded = true;
7623
7624             // If the current method calls a method which needs a security check,
7625             // (i.e. the method being compiled has imperative security)
7626             // we need to reserve a slot for the security object in
7627             // the current method's stack frame
7628             opts.compNeedSecurityCheck = true;
7629         }
7630     }
7631
7632     //--------------------------- Inline NDirect ------------------------------
7633
7634     // For inline cases we technically should look at both the current
7635     // block and the call site block (or just the latter if we've
7636     // fused the EH trees). However the block-related checks pertain to
7637     // EH and we currently won't inline a method with EH. So for
7638     // inlinees, just checking the call site block is sufficient.
7639     {
7640         // New lexical block here to avoid compilation errors because of GOTOs.
7641         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7642         impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
7643     }
7644
7645     if (call->gtFlags & GTF_CALL_UNMANAGED)
7646     {
7647         // We set up the unmanaged call by linking the frame, disabling GC, etc
7648         // This needs to be cleaned up on return
7649         if (canTailCall)
7650         {
7651             canTailCall             = false;
7652             szCanTailCallFailReason = "Callee is native";
7653         }
7654
7655         checkForSmallType = true;
7656
7657         impPopArgsForUnmanagedCall(call, sig);
7658
7659         goto DONE;
7660     }
7661     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7662                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7663                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7664                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7665     {
7666         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7667         {
7668             // Normally this only happens with inlining.
7669             // However, a generic method (or type) being NGENd into another module
7670             // can run into this issue as well.  There's not an easy fall-back for NGEN,
7671             // so instead we fall back to JIT.
7672             if (compIsForInlining())
7673             {
7674                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7675             }
7676             else
7677             {
7678                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7679             }
7680
7681             return TYP_UNDEF;
7682         }
7683
7684         GenTree* cookie = eeGetPInvokeCookie(sig);
7685
7686         // This cookie is required to be either a simple GT_CNS_INT or
7687         // an indirection of a GT_CNS_INT
7688         //
7689         GenTree* cookieConst = cookie;
7690         if (cookie->gtOper == GT_IND)
7691         {
7692             cookieConst = cookie->gtOp.gtOp1;
7693         }
7694         assert(cookieConst->gtOper == GT_CNS_INT);
7695
7696         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7697         // we won't allow this tree to participate in any CSE logic
7698         //
7699         cookie->gtFlags |= GTF_DONT_CSE;
7700         cookieConst->gtFlags |= GTF_DONT_CSE;
7701
7702         call->gtCall.gtCallCookie = cookie;
7703
7704         if (canTailCall)
7705         {
7706             canTailCall             = false;
7707             szCanTailCallFailReason = "PInvoke calli";
7708         }
7709     }
7710
7711     /*-------------------------------------------------------------------------
7712      * Create the argument list
7713      */
7714
7715     //-------------------------------------------------------------------------
7716     // Special case - for varargs we have an implicit last argument
7717
7718     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7719     {
7720         assert(!compIsForInlining());
7721
7722         void *varCookie, *pVarCookie;
7723         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7724         {
7725             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7726             return TYP_UNDEF;
7727         }
7728
7729         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7730         assert((!varCookie) != (!pVarCookie));
7731         GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
7732
7733         assert(extraArg == nullptr);
7734         extraArg = gtNewArgList(cookie);
7735     }
7736
7737     //-------------------------------------------------------------------------
7738     // Extra arg for shared generic code and array methods
7739     //
7740     // Extra argument containing instantiation information is passed in the
7741     // following circumstances:
7742     // (a) To the "Address" method on array classes; the extra parameter is
7743     //     the array's type handle (a TypeDesc)
7744     // (b) To shared-code instance methods in generic structs; the extra parameter
7745     //     is the struct's type handle (a vtable ptr)
7746     // (c) To shared-code per-instantiation non-generic static methods in generic
7747     //     classes and structs; the extra parameter is the type handle
7748     // (d) To shared-code generic methods; the extra parameter is an
7749     //     exact-instantiation MethodDesc
7750     //
7751     // We also set the exact type context associated with the call so we can
7752     // inline the call correctly later on.
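    // For example (case (c) above), a call to a static method on a generic class whose code is
    // shared across instantiations, say G<__Canon>, made at the exact instantiation G<string>,
    // passes the G<string> type handle as this hidden extra argument.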
7753
7754     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7755     {
7756         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7757         if (clsHnd == nullptr)
7758         {
7759             NO_WAY("CALLI on parameterized type");
7760         }
7761
7762         assert(opcode != CEE_CALLI);
7763
7764         GenTree* instParam;
7765         BOOL     runtimeLookup;
7766
7767         // Instantiated generic method
7768         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7769         {
7770             CORINFO_METHOD_HANDLE exactMethodHandle =
7771                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7772
7773             if (!exactContextNeedsRuntimeLookup)
7774             {
7775 #ifdef FEATURE_READYTORUN_COMPILER
7776                 if (opts.IsReadyToRun())
7777                 {
7778                     instParam =
7779                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7780                     if (instParam == nullptr)
7781                     {
7782                         assert(compDonotInline());
7783                         return TYP_UNDEF;
7784                     }
7785                 }
7786                 else
7787 #endif
7788                 {
7789                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7790                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7791                 }
7792             }
7793             else
7794             {
7795                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7796                 if (instParam == nullptr)
7797                 {
7798                     assert(compDonotInline());
7799                     return TYP_UNDEF;
7800                 }
7801             }
7802         }
7803
7804         // otherwise must be an instance method in a generic struct,
7805         // a static method in a generic type, or a runtime-generated array method
7806         else
7807         {
7808             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7809             CORINFO_CLASS_HANDLE exactClassHandle =
7810                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7811
7812             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7813             {
7814                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7815                 return TYP_UNDEF;
7816             }
7817
7818             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7819             {
7820                 // We indicate "readonly" to the Address operation by using a null
7821                 // instParam.
7822                 instParam = gtNewIconNode(0, TYP_REF);
7823             }
7824             else if (!exactContextNeedsRuntimeLookup)
7825             {
7826 #ifdef FEATURE_READYTORUN_COMPILER
7827                 if (opts.IsReadyToRun())
7828                 {
7829                     instParam =
7830                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7831                     if (instParam == nullptr)
7832                     {
7833                         assert(compDonotInline());
7834                         return TYP_UNDEF;
7835                     }
7836                 }
7837                 else
7838 #endif
7839                 {
7840                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7841                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7842                 }
7843             }
7844             else
7845             {
7846                 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
7847                 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
7848                 // with which the call was constrained. We embed pConstrainedResolvedToken as the extra argument
7849                 if (pConstrainedResolvedToken)
7850                 {
7851                     instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
7852                                                  FALSE /* importParent */);
7853                 }
7854                 else
7855                 {
7856                     instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7857                 }
7858
7859                 if (instParam == nullptr)
7860                 {
7861                     assert(compDonotInline());
7862                     return TYP_UNDEF;
7863                 }
7864             }
7865         }
7866
7867         assert(extraArg == nullptr);
7868         extraArg = gtNewArgList(instParam);
7869     }
7870
7871     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7872     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7873     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7874     // exactContextHnd is not currently required when inlining shared generic code into shared
7875     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7876     // (e.g. anything marked needsRuntimeLookup)
7877     if (exactContextNeedsRuntimeLookup)
7878     {
7879         exactContextHnd = nullptr;
7880     }
7881
7882     if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
7883     {
7884         // Only verifiable cases are supported.
7885         // dup; ldvirtftn; newobj; or ldftn; newobj.
7886         // The IL could contain an unverifiable sequence; in that case the optimization should not be done.
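        // Illustrative shape of the verifiable sequence (type and method names are placeholders):
        //     ldarg.0
        //     dup
        //     ldvirtftn  instance void SomeClass::SomeMethod()
        //     newobj     instance void SomeDelegate::.ctor(object, native int)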
7887         if (impStackHeight() > 0)
7888         {
7889             typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
7890             if (delegateTypeInfo.IsToken())
7891             {
7892                 ldftnToken = delegateTypeInfo.GetToken();
7893             }
7894         }
7895     }
7896
7897     //-------------------------------------------------------------------------
7898     // The main group of arguments
7899
7900     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
7901
7902     if (args)
7903     {
7904         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7905     }
7906
7907     //-------------------------------------------------------------------------
7908     // The "this" pointer
7909
7910     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7911     {
7912         GenTree* obj;
7913
7914         if (opcode == CEE_NEWOBJ)
7915         {
7916             obj = newobjThis;
7917         }
7918         else
7919         {
7920             obj = impPopStack().val;
7921             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7922             if (compDonotInline())
7923             {
7924                 return TYP_UNDEF;
7925             }
7926         }
7927
7928         // Store the "this" value in the call
7929         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7930         call->gtCall.gtCallObjp = obj;
7931
7932         // Is this a virtual or interface call?
7933         if (call->gtCall.IsVirtual())
7934         {
7935             // only true object pointers can be virtual
7936             assert(obj->gtType == TYP_REF);
7937
7938             // See if we can devirtualize.
7939             impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
7940                                 &exactContextHnd);
7941         }
7942
7943         if (impIsThis(obj))
7944         {
7945             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7946         }
7947     }
7948
7949     //-------------------------------------------------------------------------
7950     // The "this" pointer for "newobj"
7951
7952     if (opcode == CEE_NEWOBJ)
7953     {
7954         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7955         {
7956             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7957             // This is a 'new' of a variable-sized object, where
7958             // the constructor is to return the object.  In this case
7959             // the constructor claims to return VOID but we know it
7960             // actually returns the new object.
7961             assert(callRetTyp == TYP_VOID);
7962             callRetTyp   = TYP_REF;
7963             call->gtType = TYP_REF;
7964             impSpillSpecialSideEff();
7965
7966             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7967         }
7968         else
7969         {
7970             if (clsFlags & CORINFO_FLG_DELEGATE)
7971             {
7972                 // The new inliner morphs the delegate constructor call here in impImportCall.
7973                 // This will allow us to inline the call to the delegate constructor.
7974                 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
7975             }
7976
7977             if (!bIntrinsicImported)
7978             {
7979
7980 #if defined(DEBUG) || defined(INLINE_DATA)
7981
7982                 // Keep track of the raw IL offset of the call
7983                 call->gtCall.gtRawILOffset = rawILOffset;
7984
7985 #endif // defined(DEBUG) || defined(INLINE_DATA)
7986
7987                 // Is it an inline candidate?
7988                 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7989             }
7990
7991             // append the call node.
7992             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7993
7994             // Now push the value of the 'new' onto the stack
7995
7996             // This is a 'new' of a non-variable sized object.
7997             // Append the new node (op1) to the statement list,
7998             // and then push the local holding the value of this
7999             // new instruction on the stack.
8000
8001             if (clsFlags & CORINFO_FLG_VALUECLASS)
8002             {
8003                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
8004
8005                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
8006                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
8007             }
8008             else
8009             {
8010                 if (newobjThis->gtOper == GT_COMMA)
8011                 {
8012                     // In coreclr the callout can be inserted even if verification is disabled
8013                     // so we cannot rely on tiVerificationNeeded alone
8014
8015                     // We must have inserted the callout. Get the real newobj.
8016                     newobjThis = newobjThis->gtOp.gtOp2;
8017                 }
8018
8019                 assert(newobjThis->gtOper == GT_LCL_VAR);
8020                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
8021             }
8022         }
8023         return callRetTyp;
8024     }
8025
8026 DONE:
8027
8028     if (tailCall)
8029     {
8030         // This check cannot be performed for implicit tail calls, because
8031         // impIsImplicitTailCallCandidate() does not check whether the return
8032         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
8033         // As a result, in a case like the following we may find that
8034         // the type stack is non-empty when Callee() is considered for implicit
8035         // tail calling:
8036         //      int Caller(..) { .... void Callee(); ret val; ... }
8037         //
8038         // Note that we cannot check return type compatibility before impImportCall(),
8039         // as we don't have the required info, or we would need to duplicate some of
8040         // the logic of impImportCall().
8041         //
8042         // For implicit tail calls, we perform this check after return types are
8043         // known to be compatible.
8044         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
8045         {
8046             BADCODE("Stack should be empty after tailcall");
8047         }
8048
8049         // Note that we cannot relax this condition with genActualType(), as
8050         // the calling convention dictates that the caller of a function with
8051         // a small-typed return value is responsible for normalizing the return value.
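8051         // Hypothetical example: if Callee() returns a 'short' while Caller() is declared to
8051         // return an 'int', a tail call would skip the widening that the caller is expected
8051         // to perform, so the return types would be rejected as incompatible below.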
8052
8053         if (canTailCall &&
8054             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
8055                                           callInfo->sig.retTypeClass))
8056         {
8057             canTailCall             = false;
8058             szCanTailCallFailReason = "Return types are not tail call compatible";
8059         }
8060
8061         // Stack empty check for implicit tail calls.
8062         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
8063         {
8064 #ifdef _TARGET_AMD64_
8065             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
8066             // in JIT64, not an InvalidProgramException.
8067             Verify(false, "Stack should be empty after tailcall");
8068 #else  // !_TARGET_AMD64_
8069             BADCODE("Stack should be empty after tailcall");
8070 #endif // !_TARGET_AMD64_
8071         }
8072
8073         // assert(compCurBB is not a catch, finally or filter block);
8074         // assert(compCurBB is not a try block protected by a finally block);
8075
8076         // Check for permission to tailcall
8077         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
8078
8079         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
8080
8081         if (canTailCall)
8082         {
8083             // For true virtual or indirect calls, don't pass in a callee handle.
8084             CORINFO_METHOD_HANDLE exactCalleeHnd =
8085                 ((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
8086             GenTree* thisArg = call->gtCall.gtCallObjp;
8087
8088             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
8089             {
8090                 canTailCall = true;
8091                 if (explicitTailCall)
8092                 {
8093                     // In case of explicit tail calls, mark it so that it is not considered
8094                     // for in-lining.
8095                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
8096 #ifdef DEBUG
8097                     if (verbose)
8098                     {
8099                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
8100                         printTreeID(call);
8101                         printf("\n");
8102                     }
8103 #endif
8104                 }
8105                 else
8106                 {
8107 #if FEATURE_TAILCALL_OPT
8108                     // Must be an implicit tail call.
8109                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
8110
8111                     // It is possible that a call node is both an inline candidate and marked
8112                     // for opportunistic tail calling.  Inlining happens before morphing of
8113                     // trees.  If inlining of an inline candidate gets aborted for whatever
8114                     // reason, it will survive to the morphing stage, at which point it will be
8115                     // transformed into a tail call after performing additional checks.
8116
8117                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
8118 #ifdef DEBUG
8119                     if (verbose)
8120                     {
8121                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
8122                         printTreeID(call);
8123                         printf("\n");
8124                     }
8125 #endif
8126
8127 #else //! FEATURE_TAILCALL_OPT
8128                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
8129
8130 #endif // FEATURE_TAILCALL_OPT
8131                 }
8132
8133                 // we can't report success just yet...
8134             }
8135             else
8136             {
8137                 canTailCall = false;
8138 // canTailCall reported its reasons already
8139 #ifdef DEBUG
8140                 if (verbose)
8141                 {
8142                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
8143                     printTreeID(call);
8144                     printf("\n");
8145                 }
8146 #endif
8147             }
8148         }
8149         else
8150         {
8151             // If this assert fires it means that canTailCall was set to false without setting a reason!
8152             assert(szCanTailCallFailReason != nullptr);
8153
8154 #ifdef DEBUG
8155             if (verbose)
8156             {
8157                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
8158                 printTreeID(call);
8159                 printf(": %s\n", szCanTailCallFailReason);
8160             }
8161 #endif
8162             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
8163                                                      szCanTailCallFailReason);
8164         }
8165     }
8166
8167     // Note: we assume that small return types are already normalized by the managed callee
8168     // or by the pinvoke stub for calls to unmanaged code.
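8168     // (The widening cast for the inlined-pinvoke-stub case is inserted further below, under
8168     // the checkForSmallType handling, just before the call result is pushed on the stack.)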
8169
8170     if (!bIntrinsicImported)
8171     {
8172         //
8173         // Things that need to be checked when bIntrinsicImported is false.
8174         //
8175
8176         assert(call->gtOper == GT_CALL);
8177         assert(sig != nullptr);
8178
8179         // Tail calls require us to save the call site's sig info so we can obtain an argument
8180         // copying thunk from the EE later on.
8181         if (call->gtCall.callSig == nullptr)
8182         {
8183             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8184             *call->gtCall.callSig = *sig;
8185         }
8186
8187         if (compIsForInlining() && opcode == CEE_CALLVIRT)
8188         {
8189             GenTree* callObj = call->gtCall.gtCallObjp;
8190             assert(callObj != nullptr);
8191
8192             if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
8193                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
8194                                                                    impInlineInfo->inlArgInfo))
8195             {
8196                 impInlineInfo->thisDereferencedFirst = true;
8197             }
8198         }
8199
8200 #if defined(DEBUG) || defined(INLINE_DATA)
8201
8202         // Keep track of the raw IL offset of the call
8203         call->gtCall.gtRawILOffset = rawILOffset;
8204
8205 #endif // defined(DEBUG) || defined(INLINE_DATA)
8206
8207         // Is it an inline candidate?
8208         impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8209     }
8210
8211 DONE_CALL:
8212     // Push or append the result of the call
8213     if (callRetTyp == TYP_VOID)
8214     {
8215         if (opcode == CEE_NEWOBJ)
8216         {
8217             // we actually did push something, so don't spill the thing we just pushed.
8218             assert(verCurrentState.esStackDepth > 0);
8219             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
8220         }
8221         else
8222         {
8223             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8224         }
8225     }
8226     else
8227     {
8228         impSpillSpecialSideEff();
8229
8230         if (clsFlags & CORINFO_FLG_ARRAY)
8231         {
8232             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8233         }
8234
8235         // Find the return type used for verification by interpreting the method signature.
8236         // NB: we are clobbering the already established sig.
8237         if (tiVerificationNeeded)
8238         {
8239             // Actually, we never get the sig for the original method.
8240             sig = &(callInfo->verSig);
8241         }
8242
8243         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
8244         tiRetVal.NormaliseForStack();
8245
8246         // The CEE_READONLY prefix modifies the verification semantics of an Address
8247         // operation on an array type.
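8247         // (Sketch of the effect: a 'readonly.'-prefixed call to the array Address() helper
8247         //  yields a byref that verification treats as read-only, which is what
8247         //  SetIsReadonlyByRef() records below.)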
8248         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
8249         {
8250             tiRetVal.SetIsReadonlyByRef();
8251         }
8252
8253         if (tiVerificationNeeded)
8254         {
8255             // We assume all calls return permanent home byrefs. If they
8256             // didn't they wouldn't be verifiable. This is also covering
8257             // the Address() helper for multidimensional arrays.
8258             if (tiRetVal.IsByRef())
8259             {
8260                 tiRetVal.SetIsPermanentHomeByRef();
8261             }
8262         }
8263
8264         if (call->IsCall())
8265         {
8266             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
8267
8268             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
8269             if (varTypeIsStruct(callRetTyp))
8270             {
8271                 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
8272             }
8273
8274             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
8275             {
8276                 assert(opts.OptEnabled(CLFLG_INLINING));
8277                 assert(!fatPointerCandidate); // We should not try to inline calli.
8278
8279                 // Make the call its own tree (spill the stack if needed).
8280                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8281
8282                 // TODO: Still using the widened type.
8283                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
8284             }
8285             else
8286             {
8287                 if (fatPointerCandidate)
8288                 {
8289                     // fatPointer candidates should be in statements of the form call() or var = call().
8290                     // This form lets us find statements with fat calls without walking whole trees,
8291                     // and avoids problems with cutting trees apart.
8292                     assert(!bIntrinsicImported);
8293                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
8294                     if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
8295                     {
8296                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
8297                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
8298                         varDsc->lvVerTypeInfo = tiRetVal;
8299                         impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
8300                         // impAssignTempGen can change the src arg list and return type for a call that returns a struct.
8301                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
8302                         call           = gtNewLclvNode(calliSlot, type);
8303                     }
8304                 }
8305
8306                 // For non-candidates we must also spill, since we
8307                 // might have locals live on the eval stack that this
8308                 // call can modify.
8309                 //
8310                 // Suppress this for certain well-known call targets
8311                 // that we know won't modify locals, e.g. calls that are
8312                 // recognized in gtCanOptimizeTypeEquality. Otherwise
8313                 // we may break key fragile pattern matches later on.
8314                 bool spillStack = true;
8315                 if (call->IsCall())
8316                 {
8317                     GenTreeCall* callNode = call->AsCall();
8318                     if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
8319                     {
8320                         spillStack = false;
8321                     }
8322                     else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8323                     {
8324                         spillStack = false;
8325                     }
8326                 }
8327
8328                 if (spillStack)
8329                 {
8330                     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8331                 }
8332             }
8333         }
8334
8335         if (!bIntrinsicImported)
8336         {
8337             //-------------------------------------------------------------------------
8338             //
8339             /* If the call is of a small type and the callee is managed, the callee will normalize the result
8340                 before returning.
8341                 However, we need to normalize small type values returned by unmanaged
8342                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8343                 if we use the shorter inlined pinvoke stub. */
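8343             // A sketch of the shape produced below (assuming, e.g., callRetTyp == TYP_SHORT):
8343             //     call  ==>  CAST(int <- short, call)
8343             // i.e. the result is re-normalized (sign- or zero-extended) before being pushed.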
8344
8345             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8346             {
8347                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
8348             }
8349         }
8350
8351         impPushOnStack(call, tiRetVal);
8352     }
8353
8354     // VSD functions get a new call target each time we call getCallInfo, so clear the cache.
8355     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8356     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8357     //  callInfoCache.uncacheCallInfo();
8358
8359     return callRetTyp;
8360 }
8361 #ifdef _PREFAST_
8362 #pragma warning(pop)
8363 #endif
8364
8365 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8366 {
8367     CorInfoType corType = methInfo->args.retType;
8368
8369     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8370     {
8371         // We have some kind of STRUCT being returned
8372
8373         structPassingKind howToReturnStruct = SPK_Unknown;
8374
8375         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8376
8377         if (howToReturnStruct == SPK_ByReference)
8378         {
8379             return true;
8380         }
8381     }
8382
8383     return false;
8384 }
8385
8386 #ifdef DEBUG
8387 //
8388 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8389 {
8390     TestLabelAndNum tlAndN;
8391     if (numArgs == 2)
8392     {
8393         tlAndN.m_num  = 0;
8394         StackEntry se = impPopStack();
8395         assert(se.seTypeInfo.GetType() == TI_INT);
8396         GenTree* val = se.val;
8397         assert(val->IsCnsIntOrI());
8398         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8399     }
8400     else if (numArgs == 3)
8401     {
8402         StackEntry se = impPopStack();
8403         assert(se.seTypeInfo.GetType() == TI_INT);
8404         GenTree* val = se.val;
8405         assert(val->IsCnsIntOrI());
8406         tlAndN.m_num = val->AsIntConCommon()->IconValue();
8407         se           = impPopStack();
8408         assert(se.seTypeInfo.GetType() == TI_INT);
8409         val = se.val;
8410         assert(val->IsCnsIntOrI());
8411         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8412     }
8413     else
8414     {
8415         assert(false);
8416     }
8417
8418     StackEntry expSe = impPopStack();
8419     GenTree*   node  = expSe.val;
8420
8421     // There are a small number of special cases, where we actually put the annotation on a subnode.
8422     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8423     {
8424         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8425         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8426         // offset within the static field block whose address is returned by the helper call.
8427         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
8428         GenTree* helperCall = nullptr;
8429         assert(node->OperGet() == GT_IND);
8430         tlAndN.m_num -= 100;
8431         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8432         GetNodeTestData()->Remove(node);
8433     }
8434     else
8435     {
8436         GetNodeTestData()->Set(node, tlAndN);
8437     }
8438
8439     impPushOnStack(node, expSe.seTypeInfo);
8440     return node->TypeGet();
8441 }
8442 #endif // DEBUG
8443
8444 //-----------------------------------------------------------------------------------
8445 //  impFixupCallStructReturn: For a call node that returns a struct type either
8446 //  adjust the return type to an enregisterable type, or set the flag to indicate
8447 //  struct return via retbuf arg.
8448 //
8449 //  Arguments:
8450 //    call       -  GT_CALL GenTree node
8451 //    retClsHnd  -  Class handle of return type of the call
8452 //
8453 //  Return Value:
8454 //    Returns new GenTree node after fixing struct return of call node
8455 //
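8455 //  Notes:
8455 //    A rough sketch of the possible outcomes (based on the code below): either the call's
8455 //    gtReturnType is retyped to the single register type (e.g., a struct wrapping an int is
8455 //    returned as TYP_INT), or GTF_CALL_M_RETBUFFARG is set for a hidden retbuf return, or,
8455 //    for multi-reg returns that are neither tail calls nor inline candidates, the call is
8455 //    forced into the form 'tmp = call' via impAssignMultiRegTypeToVar.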
8456 GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8457 {
8458     if (!varTypeIsStruct(call))
8459     {
8460         return call;
8461     }
8462
8463     call->gtRetClsHnd = retClsHnd;
8464
8465 #if FEATURE_MULTIREG_RET
8466     // Initialize Return type descriptor of call node
8467     ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8468     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8469 #endif // FEATURE_MULTIREG_RET
8470
8471 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8472
8473     // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
8474     assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8475
8476     // The return type will remain as the incoming struct type unless normalized to a
8477     // single eightbyte return type below.
8478     call->gtReturnType = call->gtType;
8479
8480     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8481     if (retRegCount != 0)
8482     {
8483         if (retRegCount == 1)
8484         {
8485             // struct returned in a single register
8486             call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8487         }
8488         else
8489         {
8490             // must be a struct returned in two registers
8491             assert(retRegCount == 2);
8492
8493             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8494             {
8495                 // Force a call returning multi-reg struct to be always of the IR form
8496                 //   tmp = call
8497                 //
8498                 // No need to assign a multi-reg struct to a local var if:
8499                 //  - It is a tail call or
8500                 //  - The call is marked for in-lining later
8501                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8502             }
8503         }
8504     }
8505     else
8506     {
8507         // struct not returned in registers, i.e. returned via a hidden retbuf arg.
8508         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8509     }
8510
8511 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8512
8513     // Check for TYP_STRUCT type that wraps a primitive type
8514     // Such structs are returned using a single register
8515     // and we change the return type on those calls here.
8516     //
8517     structPassingKind howToReturnStruct;
8518     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8519
8520     if (howToReturnStruct == SPK_ByReference)
8521     {
8522         assert(returnType == TYP_UNKNOWN);
8523         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8524     }
8525     else
8526     {
8527         assert(returnType != TYP_UNKNOWN);
8528         call->gtReturnType = returnType;
8529
8530         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
8531         if ((returnType == TYP_LONG) && (compLongUsed == false))
8532         {
8533             compLongUsed = true;
8534         }
8535         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
8536         {
8537             compFloatingPointUsed = true;
8538         }
8539
8540 #if FEATURE_MULTIREG_RET
8541         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8542         assert(retRegCount != 0);
8543
8544         if (retRegCount >= 2)
8545         {
8546             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8547             {
8548                 // Force a call returning multi-reg struct to be always of the IR form
8549                 //   tmp = call
8550                 //
8551                 // No need to assign a multi-reg struct to a local var if:
8552                 //  - It is a tail call or
8553                 //  - The call is marked for in-lining later
8554                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8555             }
8556         }
8557 #endif // FEATURE_MULTIREG_RET
8558     }
8559
8560 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8561
8562     return call;
8563 }
8564
8565 /*****************************************************************************
8566    For struct return values, re-type the operand in the case where the ABI
8567    does not use a struct return buffer.
8568    Note that this method is only called when not targeting x86 (!_TARGET_X86_).
8569  */
8570
8571 GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
8572 {
8573     assert(varTypeIsStruct(info.compRetType));
8574     assert(info.compRetBuffArg == BAD_VAR_NUM);
8575
8576 #if defined(_TARGET_XARCH_)
8577
8578 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8579     // No VarArgs for CoreCLR on x64 Unix
8580     assert(!info.compIsVarArgs);
8581
8582     // Is method returning a multi-reg struct?
8583     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8584     {
8585         // In case of multi-reg struct return, we force IR to be one of the following:
8586         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
8587         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
8588
8589         if (op->gtOper == GT_LCL_VAR)
8590         {
8591             // Make sure that this struct stays in memory and doesn't get promoted.
8592             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
8593             lvaTable[lclNum].lvIsMultiRegRet = true;
8594
8595             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8596             op->gtFlags |= GTF_DONT_CSE;
8597
8598             return op;
8599         }
8600
8601         if (op->gtOper == GT_CALL)
8602         {
8603             return op;
8604         }
8605
8606         return impAssignMultiRegTypeToVar(op, retClsHnd);
8607     }
8608 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8609     assert(info.compRetNativeType != TYP_STRUCT);
8610 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8611
8612 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8613
8614     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8615     {
8616         if (op->gtOper == GT_LCL_VAR)
8617         {
8618             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8619             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8620             // Make sure this struct type stays as struct so that we can return it as an HFA
8621             lvaTable[lclNum].lvIsMultiRegRet = true;
8622
8623             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8624             op->gtFlags |= GTF_DONT_CSE;
8625
8626             return op;
8627         }
8628
8629         if (op->gtOper == GT_CALL)
8630         {
8631             if (op->gtCall.IsVarargs())
8632             {
8633                 // We cannot tail call because control needs to return to fixup the calling
8634                 // convention for result return.
8635                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8636                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8637             }
8638             else
8639             {
8640                 return op;
8641             }
8642         }
8643         return impAssignMultiRegTypeToVar(op, retClsHnd);
8644     }
8645
8646 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8647
8648     // Is method returning a multi-reg struct?
8649     if (IsMultiRegReturnedType(retClsHnd))
8650     {
8651         if (op->gtOper == GT_LCL_VAR)
8652         {
8653             // This LCL_VAR stays as a TYP_STRUCT
8654             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8655
8656             // Make sure this struct type is not struct promoted
8657             lvaTable[lclNum].lvIsMultiRegRet = true;
8658
8659             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8660             op->gtFlags |= GTF_DONT_CSE;
8661
8662             return op;
8663         }
8664
8665         if (op->gtOper == GT_CALL)
8666         {
8667             if (op->gtCall.IsVarargs())
8668             {
8669                 // We cannot tail call because control needs to return to fixup the calling
8670                 // convention for result return.
8671                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8672                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8673             }
8674             else
8675             {
8676                 return op;
8677             }
8678         }
8679         return impAssignMultiRegTypeToVar(op, retClsHnd);
8680     }
8681
8682 #endif // _TARGET_XARCH_ || (FEATURE_MULTIREG_RET && (_TARGET_ARM_ || _TARGET_ARM64_))
8683
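8683 // Illustrative sketch of the retyping below (assumed shapes): for example,
8683 //     GT_OBJ(GT_ADDR(lclVar))  ==>  GT_LCL_FLD typed as info.compRetNativeType
8683 // while a GT_CALL that really returns the struct in a register is left as-is.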
8684 REDO_RETURN_NODE:
8685     // Adjust the type away from struct to integral,
8686     // with no normalization.
8687     if (op->gtOper == GT_LCL_VAR)
8688     {
8689         op->ChangeOper(GT_LCL_FLD);
8690     }
8691     else if (op->gtOper == GT_OBJ)
8692     {
8693         GenTree* op1 = op->AsObj()->Addr();
8694
8695         // We will fold away OBJ/ADDR
8696         // except for OBJ/ADDR/INDEX
8697         //     as the array type influences the array element's offset
8698         //     Later in this method we change op->gtType to info.compRetNativeType
8699         //     This is not correct when op is a GT_INDEX as the starting offset
8700         //     for the array elements 'elemOffs' is different for an array of
8701         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8702         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8703         //
8704         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8705         {
8706             // Change '*(&X)' to 'X' and see if we can do better
8707             op = op1->gtOp.gtOp1;
8708             goto REDO_RETURN_NODE;
8709         }
8710         op->gtObj.gtClass = NO_CLASS_HANDLE;
8711         op->ChangeOperUnchecked(GT_IND);
8712         op->gtFlags |= GTF_IND_TGTANYWHERE;
8713     }
8714     else if (op->gtOper == GT_CALL)
8715     {
8716         if (op->AsCall()->TreatAsHasRetBufArg(this))
8717         {
8718             // This must be one of those 'special' helpers that don't
8719             // really have a return buffer, but instead use it as a way
8720             // to keep the trees cleaner with fewer address-taken temps.
8721             //
8722             // Well, now we have to materialize the return buffer as
8723             // an address-taken temp. Then we can return the temp.
8724             //
8725             // NOTE: this code assumes that since the call directly
8726             // feeds the return, then the call must be returning the
8727             // same structure/class/type.
8728             //
8729             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8730
8731             // No need to spill anything as we're about to return.
8732             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8733
8734             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8735             // jump directly to a GT_LCL_FLD.
8736             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8737             op->ChangeOper(GT_LCL_FLD);
8738         }
8739         else
8740         {
8741             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8742
8743             // Don't change the gtType of the node just yet, it will get changed later.
8744             return op;
8745         }
8746     }
8747 #if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_ARM64_)
8748     else if ((op->gtOper == GT_HWIntrinsic) && varTypeIsSIMD(op->gtType))
8749     {
8750         // TODO-ARM64-FIXME Implement ARM64 ABI for Short Vectors properly
8751         // assert(op->gtType == info.compRetNativeType)
8752         if (op->gtType != info.compRetNativeType)
8753         {
8754             // Insert a register move to keep target type of SIMD intrinsic intact
8755             op = gtNewScalarHWIntrinsicNode(info.compRetNativeType, op, NI_ARM64_NONE_MOV);
8756         }
8757     }
8758 #endif
8759     else if (op->gtOper == GT_COMMA)
8760     {
8761         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8762     }
8763
8764     op->gtType = info.compRetNativeType;
8765
8766     return op;
8767 }
8768
8769 /*****************************************************************************
8770    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8771    finally-protected try. We find the finally blocks protecting the current
8772    offset (in order) by walking over the complete exception table and
8773    finding enclosing clauses. This assumes that the table is sorted.
8774    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8775
8776    If we are leaving a catch handler, we need to attach the
8777    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8778
8779    After this function, the BBJ_LEAVE block has been converted to a different type.
8780  */
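8780 //
8780 // Rough sketch (hypothetical example): a 'leave' that exits two nested finally-protected
8780 // 'try' regions ends up, in the non-funclet implementation, as a chain like
8780 //     BBJ_CALLFINALLY (inner finally) -> BBJ_ALWAYS (step) ->
8780 //     BBJ_CALLFINALLY (outer finally) -> BBJ_ALWAYS (step) ->
8780 //     BBJ_ALWAYS (finalStep) -> leave target
8780 // with any pending CORINFO_HELP_ENDCATCH calls appended to the appropriate blocks.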
8781
8782 #if !FEATURE_EH_FUNCLETS
8783
8784 void Compiler::impImportLeave(BasicBlock* block)
8785 {
8786 #ifdef DEBUG
8787     if (verbose)
8788     {
8789         printf("\nBefore import CEE_LEAVE:\n");
8790         fgDispBasicBlocks();
8791         fgDispHandlerTab();
8792     }
8793 #endif // DEBUG
8794
8795     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8796     unsigned    blkAddr         = block->bbCodeOffs;
8797     BasicBlock* leaveTarget     = block->bbJumpDest;
8798     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8799
8800     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8801
8802     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8803     verCurrentState.esStackDepth = 0;
8804
8805     assert(block->bbJumpKind == BBJ_LEAVE);
8806     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8807
8808     BasicBlock* step         = DUMMY_INIT(NULL);
8809     unsigned    encFinallies = 0; // Number of enclosing finallies.
8810     GenTree*    endCatches   = NULL;
8811     GenTree*    endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8812
8813     unsigned  XTnum;
8814     EHblkDsc* HBtab;
8815
8816     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8817     {
8818         // Grab the handler offsets
8819
8820         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8821         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8822         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8823         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8824
8825         /* Is this a catch-handler we are CEE_LEAVEing out of?
8826          * If so, we need to call CORINFO_HELP_ENDCATCH.
8827          */
8828
8829         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8830         {
8831             // Can't CEE_LEAVE out of a finally/fault handler
8832             if (HBtab->HasFinallyOrFaultHandler())
8833                 BADCODE("leave out of fault/finally block");
8834
8835             // Create the call to CORINFO_HELP_ENDCATCH
8836             GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8837
8838             // Make a list of all the currently pending endCatches
8839             if (endCatches)
8840                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8841             else
8842                 endCatches = endCatch;
8843
8844 #ifdef DEBUG
8845             if (verbose)
8846             {
8847                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8848                        "CORINFO_HELP_ENDCATCH\n",
8849                        block->bbNum, XTnum);
8850             }
8851 #endif
8852         }
8853         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8854                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8855         {
8856             /* This is a finally-protected try we are jumping out of */
8857
8858             /* If there are any pending endCatches, and we have already
8859                jumped out of a finally-protected try, then the endCatches
8860                have to be put in a block in an outer try for async
8861                exceptions to work correctly.
8862                Else, just append to the original block */
8863
8864             BasicBlock* callBlock;
8865
8866             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8867
8868             if (encFinallies == 0)
8869             {
8870                 assert(step == DUMMY_INIT(NULL));
8871                 callBlock             = block;
8872                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8873
8874                 if (endCatches)
8875                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8876
8877 #ifdef DEBUG
8878                 if (verbose)
8879                 {
8880                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8881                            "block %s\n",
8882                            callBlock->dspToString());
8883                 }
8884 #endif
8885             }
8886             else
8887             {
8888                 assert(step != DUMMY_INIT(NULL));
8889
8890                 /* Calling the finally block */
8891                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8892                 assert(step->bbJumpKind == BBJ_ALWAYS);
8893                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8894                                               // finally in the chain)
8895                 step->bbJumpDest->bbRefs++;
8896
8897                 /* The new block will inherit this block's weight */
8898                 callBlock->setBBWeight(block->bbWeight);
8899                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8900
8901 #ifdef DEBUG
8902                 if (verbose)
8903                 {
8904                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
8905                            callBlock->dspToString());
8906                 }
8907 #endif
8908
8909                 GenTree* lastStmt;
8910
8911                 if (endCatches)
8912                 {
8913                     lastStmt         = gtNewStmt(endCatches);
8914                     endLFin->gtNext  = lastStmt;
8915                     lastStmt->gtPrev = endLFin;
8916                 }
8917                 else
8918                 {
8919                     lastStmt = endLFin;
8920                 }
8921
8922                 // note that this sets BBF_IMPORTED on the block
8923                 impEndTreeList(callBlock, endLFin, lastStmt);
8924             }
8925
8926             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8927             /* The new block will inherit this block's weight */
8928             step->setBBWeight(block->bbWeight);
8929             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8930
8931 #ifdef DEBUG
8932             if (verbose)
8933             {
8934                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
8935                        step->dspToString());
8936             }
8937 #endif
8938
8939             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8940             assert(finallyNesting <= compHndBBtabCount);
8941
8942             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8943             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8944             endLFin               = gtNewStmt(endLFin);
8945             endCatches            = NULL;
8946
8947             encFinallies++;
8948
8949             invalidatePreds = true;
8950         }
8951     }
8952
8953     /* Append any remaining endCatches, if any */
8954
8955     assert(!encFinallies == !endLFin);
8956
8957     if (encFinallies == 0)
8958     {
8959         assert(step == DUMMY_INIT(NULL));
8960         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8961
8962         if (endCatches)
8963             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8964
8965 #ifdef DEBUG
8966         if (verbose)
8967         {
8968             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8969                    "block %s\n",
8970                    block->dspToString());
8971         }
8972 #endif
8973     }
8974     else
8975     {
8976         // If leaveTarget is the start of another try block, we want to make sure that
8977         // we do not insert finalStep into that try block. Hence, we find the enclosing
8978         // try block.
8979         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8980
8981         // Insert a new BB either in the try region indicated by tryIndex or
8982         // the handler region indicated by leaveTarget->bbHndIndex,
8983         // depending on which is the inner region.
8984         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8985         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8986         step->bbJumpDest = finalStep;
8987
8988         /* The new block will inherit this block's weight */
8989         finalStep->setBBWeight(block->bbWeight);
8990         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8991
8992 #ifdef DEBUG
8993         if (verbose)
8994         {
8995             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
8996                    finalStep->dspToString());
8997         }
8998 #endif
8999
9000         GenTree* lastStmt;
9001
9002         if (endCatches)
9003         {
9004             lastStmt         = gtNewStmt(endCatches);
9005             endLFin->gtNext  = lastStmt;
9006             lastStmt->gtPrev = endLFin;
9007         }
9008         else
9009         {
9010             lastStmt = endLFin;
9011         }
9012
9013         impEndTreeList(finalStep, endLFin, lastStmt);
9014
9015         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9016
9017         // Queue up the jump target for importing
9018
9019         impImportBlockPending(leaveTarget);
9020
9021         invalidatePreds = true;
9022     }
9023
9024     if (invalidatePreds && fgComputePredsDone)
9025     {
9026         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9027         fgRemovePreds();
9028     }
9029
9030 #ifdef DEBUG
9031     fgVerifyHandlerTab();
9032
9033     if (verbose)
9034     {
9035         printf("\nAfter import CEE_LEAVE:\n");
9036         fgDispBasicBlocks();
9037         fgDispHandlerTab();
9038     }
9039 #endif // DEBUG
9040 }
9041
9042 #else // FEATURE_EH_FUNCLETS
9043
9044 void Compiler::impImportLeave(BasicBlock* block)
9045 {
9046 #ifdef DEBUG
9047     if (verbose)
9048     {
9049         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
9050         fgDispBasicBlocks();
9051         fgDispHandlerTab();
9052     }
9053 #endif // DEBUG
9054
9055     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9056     unsigned    blkAddr         = block->bbCodeOffs;
9057     BasicBlock* leaveTarget     = block->bbJumpDest;
9058     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
9059
9060     // LEAVE clears the stack: spill any side effects and set the stack depth to 0
9061
9062     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9063     verCurrentState.esStackDepth = 0;
9064
9065     assert(block->bbJumpKind == BBJ_LEAVE);
9066     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
9067
9068     BasicBlock* step = nullptr;
9069
9070     enum StepType
9071     {
9072         // No step type; step == NULL.
9073         ST_None,
9074
9075         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
9076         // That is, is step->bbJumpDest where a finally will return to?
9077         ST_FinallyReturn,
9078
9079         // The step block is a catch return.
9080         ST_Catch,
9081
9082         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
9083         ST_Try
9084     };
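9084     // (Rough intuition: stepType records what kind of block the current 'step' is, so the
9084     //  code below knows when extra step blocks are required; e.g., with call-finally thunks
9084     //  a BBJ_EHCATCHRET is not allowed to target a BBJ_CALLFINALLY directly.)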
9085     StepType stepType = ST_None;
9086
9087     unsigned  XTnum;
9088     EHblkDsc* HBtab;
9089
9090     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9091     {
9092         // Grab the handler offsets
9093
9094         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9095         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9096         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9097         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9098
9099         /* Is this a catch-handler we are CEE_LEAVEing out of?
9100          */
9101
9102         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9103         {
9104             // Can't CEE_LEAVE out of a finally/fault handler
9105             if (HBtab->HasFinallyOrFaultHandler())
9106             {
9107                 BADCODE("leave out of fault/finally block");
9108             }
9109
9110             /* We are jumping out of a catch */
9111
9112             if (step == nullptr)
9113             {
9114                 step             = block;
9115                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
9116                 stepType         = ST_Catch;
9117
9118 #ifdef DEBUG
9119                 if (verbose)
9120                 {
9121                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
9122                            "block\n",
9123                            XTnum, step->bbNum);
9124                 }
9125 #endif
9126             }
9127             else
9128             {
9129                 BasicBlock* exitBlock;
9130
9131                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
9132                  * scope */
9133                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
9134
9135                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9136                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
9137                                               // exit) returns to this block
9138                 step->bbJumpDest->bbRefs++;
9139
9140 #if defined(_TARGET_ARM_)
9141                 if (stepType == ST_FinallyReturn)
9142                 {
9143                     assert(step->bbJumpKind == BBJ_ALWAYS);
9144                     // Mark the target of a finally return
9145                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9146                 }
9147 #endif // defined(_TARGET_ARM_)
9148
9149                 /* The new block will inherit this block's weight */
9150                 exitBlock->setBBWeight(block->bbWeight);
9151                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9152
9153                 /* This exit block is the new step */
9154                 step     = exitBlock;
9155                 stepType = ST_Catch;
9156
9157                 invalidatePreds = true;
9158
9159 #ifdef DEBUG
9160                 if (verbose)
9161                 {
9162                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
9163                            exitBlock->bbNum);
9164                 }
9165 #endif
9166             }
9167         }
9168         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9169                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9170         {
9171             /* We are jumping out of a finally-protected try */
9172
9173             BasicBlock* callBlock;
9174
9175             if (step == nullptr)
9176             {
9177 #if FEATURE_EH_CALLFINALLY_THUNKS
9178
9179                 // Put the call to the finally in the enclosing region.
9180                 unsigned callFinallyTryIndex =
9181                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9182                 unsigned callFinallyHndIndex =
9183                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9184                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
9185
9186                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
9187                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
9188                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
9189                 // next block, and flow optimizations will remove it.
9190                 block->bbJumpKind = BBJ_ALWAYS;
9191                 block->bbJumpDest = callBlock;
9192                 block->bbJumpDest->bbRefs++;
9193
9194                 /* The new block will inherit this block's weight */
9195                 callBlock->setBBWeight(block->bbWeight);
9196                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9197
9198 #ifdef DEBUG
9199                 if (verbose)
9200                 {
9201                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9202                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
9203                            XTnum, block->bbNum, callBlock->bbNum);
9204                 }
9205 #endif
9206
9207 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9208
9209                 callBlock             = block;
9210                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9211
9212 #ifdef DEBUG
9213                 if (verbose)
9214                 {
9215                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9216                            "BBJ_CALLFINALLY block\n",
9217                            XTnum, callBlock->bbNum);
9218                 }
9219 #endif
9220
9221 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9222             }
9223             else
9224             {
9225                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
9226                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
9227                 // a 'finally'), or the step block is the return from a catch.
9228                 //
9229                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
9230                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
9231                 // automatically re-raise the exception, using the return address of the catch (that is, the target
9232                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
9233                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
9234                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
9235                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
9236                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
9237                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
9238                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
9239                 // stack walks.)
9240
9241                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9242
9243 #if FEATURE_EH_CALLFINALLY_THUNKS
9244                 if (step->bbJumpKind == BBJ_EHCATCHRET)
9245                 {
9246                     // Need to create another step block in the 'try' region that will actually branch to the
9247                     // call-to-finally thunk.
9248                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9249                     step->bbJumpDest  = step2;
9250                     step->bbJumpDest->bbRefs++;
9251                     step2->setBBWeight(block->bbWeight);
9252                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9253
9254 #ifdef DEBUG
9255                     if (verbose)
9256                     {
9257                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
9258                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
9259                                XTnum, step->bbNum, step2->bbNum);
9260                     }
9261 #endif
9262
9263                     step = step2;
9264                     assert(stepType == ST_Catch); // Leave it as catch type for now.
9265                 }
9266 #endif // FEATURE_EH_CALLFINALLY_THUNKS
9267
9268 #if FEATURE_EH_CALLFINALLY_THUNKS
9269                 unsigned callFinallyTryIndex =
9270                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9271                 unsigned callFinallyHndIndex =
9272                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9273 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
9274                 unsigned callFinallyTryIndex = XTnum + 1;
9275                 unsigned callFinallyHndIndex = 0; // don't care
9276 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9277
9278                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
9279                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9280                                               // finally in the chain)
9281                 step->bbJumpDest->bbRefs++;
9282
9283 #if defined(_TARGET_ARM_)
9284                 if (stepType == ST_FinallyReturn)
9285                 {
9286                     assert(step->bbJumpKind == BBJ_ALWAYS);
9287                     // Mark the target of a finally return
9288                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9289                 }
9290 #endif // defined(_TARGET_ARM_)
9291
9292                 /* The new block will inherit this block's weight */
9293                 callBlock->setBBWeight(block->bbWeight);
9294                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9295
9296 #ifdef DEBUG
9297                 if (verbose)
9298                 {
9299                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
9300                            "BB%02u\n",
9301                            XTnum, callBlock->bbNum);
9302                 }
9303 #endif
9304             }
9305
9306             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9307             stepType = ST_FinallyReturn;
9308
9309             /* The new block will inherit this block's weight */
9310             step->setBBWeight(block->bbWeight);
9311             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9312
9313 #ifdef DEBUG
9314             if (verbose)
9315             {
9316                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9317                        "block BB%02u\n",
9318                        XTnum, step->bbNum);
9319             }
9320 #endif
9321
9322             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9323
9324             invalidatePreds = true;
9325         }
9326         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9327                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9328         {
9329             // We are jumping out of a catch-protected try.
9330             //
9331             // If we are returning from a call to a finally, then we must have a step block within a try
9332             // that is protected by a catch. This is so that, when unwinding from that finally (e.g., if code within the
9333             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9334             // and invoke the appropriate catch.
9335             //
9336             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9337             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9338             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9339             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9340             // address of the catch return as the new exception address. That is, the re-raised exception appears to
9341             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9342             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, even though it should.
9343             // For example:
9344             //
9345             // try {
9346             //    try {
9347             //       // something here raises ThreadAbortException
9348             //       LEAVE LABEL_1; // no need to stop at LABEL_2
9349             //    } catch (Exception) {
9350             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9351             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9352             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9353             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9354             //       // need to do this transformation if the current EH block is a try/catch that catches
9355             //       // ThreadAbortException (or one of its parents); however, we might not be able to find that
9356             //       // information, so currently we do it for all catch types.
9357             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
9358             //    }
9359             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9360             // } catch (ThreadAbortException) {
9361             // }
9362             // LABEL_1:
9363             //
9364             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
9365             // compiler.
9366
9367             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9368             {
9369                 BasicBlock* catchStep;
9370
9371                 assert(step);
9372
9373                 if (stepType == ST_FinallyReturn)
9374                 {
9375                     assert(step->bbJumpKind == BBJ_ALWAYS);
9376                 }
9377                 else
9378                 {
9379                     assert(stepType == ST_Catch);
9380                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
9381                 }
9382
9383                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9384                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9385                 step->bbJumpDest = catchStep;
9386                 step->bbJumpDest->bbRefs++;
9387
9388 #if defined(_TARGET_ARM_)
9389                 if (stepType == ST_FinallyReturn)
9390                 {
9391                     // Mark the target of a finally return
9392                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9393                 }
9394 #endif // defined(_TARGET_ARM_)
9395
9396                 /* The new block will inherit this block's weight */
9397                 catchStep->setBBWeight(block->bbWeight);
9398                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9399
9400 #ifdef DEBUG
9401                 if (verbose)
9402                 {
9403                     if (stepType == ST_FinallyReturn)
9404                     {
9405                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9406                                "BBJ_ALWAYS block BB%02u\n",
9407                                XTnum, catchStep->bbNum);
9408                     }
9409                     else
9410                     {
9411                         assert(stepType == ST_Catch);
9412                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9413                                "BBJ_ALWAYS block BB%02u\n",
9414                                XTnum, catchStep->bbNum);
9415                     }
9416                 }
9417 #endif // DEBUG
9418
9419                 /* This block is the new step */
9420                 step     = catchStep;
9421                 stepType = ST_Try;
9422
9423                 invalidatePreds = true;
9424             }
9425         }
9426     }
9427
9428     if (step == nullptr)
9429     {
9430         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9431
9432 #ifdef DEBUG
9433         if (verbose)
9434         {
9435             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9436                    "block BB%02u to BBJ_ALWAYS\n",
9437                    block->bbNum);
9438         }
9439 #endif
9440     }
9441     else
9442     {
9443         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9444
9445 #if defined(_TARGET_ARM_)
9446         if (stepType == ST_FinallyReturn)
9447         {
9448             assert(step->bbJumpKind == BBJ_ALWAYS);
9449             // Mark the target of a finally return
9450             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9451         }
9452 #endif // defined(_TARGET_ARM_)
9453
9454 #ifdef DEBUG
9455         if (verbose)
9456         {
9457             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
9458         }
9459 #endif
9460
9461         // Queue up the jump target for importing
9462
9463         impImportBlockPending(leaveTarget);
9464     }
9465
9466     if (invalidatePreds && fgComputePredsDone)
9467     {
9468         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9469         fgRemovePreds();
9470     }
9471
9472 #ifdef DEBUG
9473     fgVerifyHandlerTab();
9474
9475     if (verbose)
9476     {
9477         printf("\nAfter import CEE_LEAVE:\n");
9478         fgDispBasicBlocks();
9479         fgDispHandlerTab();
9480     }
9481 #endif // DEBUG
9482 }
9483
9484 #endif // FEATURE_EH_FUNCLETS
9485
9486 /*****************************************************************************/
9487 // This is called when reimporting a leave block. It resets the JumpKind,
9488 // JumpDest, and bbNext to the original values
9489
9490 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9491 {
9492 #if FEATURE_EH_FUNCLETS
9493     // With EH funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
9494     // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY.  If for some reason we reimport B0,
9495     // it is reset (in this routine) to end with BBJ_LEAVE, and further down, when B0 is actually reimported, we
9496     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
9497     // only predecessor are also considered orphans and attempted to be deleted.
9498     //
9499     //  try  {
9500     //     ....
9501     //     try
9502     //     {
9503     //         ....
9504     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
9505     //     } finally { }
9506     //  } finally { }
9507     //  OUTSIDE:
9508     //
9509     // In the above nested try-finally example, we create a step block (call it Bstep) which in turn branches to a
9510     // block where a finally would branch to (and such a block is marked as a finally target).  Block B1 branches to
9511     // the step block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it
9512     // cannot be removed.  To work around this we will duplicate B0 (call it B0Dup) before resetting it. B0Dup is
9513     // marked as BBJ_CALLFINALLY and only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during
9514     // orphan block deletion B0Dup and B1 will be treated as a pair and handled correctly.
9515     if (block->bbJumpKind == BBJ_CALLFINALLY)
9516     {
9517         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
9518         dupBlock->bbFlags    = block->bbFlags;
9519         dupBlock->bbJumpDest = block->bbJumpDest;
9520         dupBlock->copyEHRegion(block);
9521         dupBlock->bbCatchTyp = block->bbCatchTyp;
9522
9523         // Mark this block as:
9524         //  a) not referenced by any other block, to make sure that it gets deleted
9525         //  b) having zero weight
9526         //  c) already imported, so that it will not be imported again
9527         //  d) internal
9528         //  e) rarely run
9529         dupBlock->bbRefs   = 0;
9530         dupBlock->bbWeight = 0;
9531         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
9532
9533         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
9534         // will be next to each other.
9535         fgInsertBBafter(block, dupBlock);
9536
9537 #ifdef DEBUG
9538         if (verbose)
9539         {
9540             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
9541         }
9542 #endif
9543     }
9544 #endif // FEATURE_EH_FUNCLETS
9545
9546     block->bbJumpKind = BBJ_LEAVE;
9547     fgInitBBLookup();
9548     block->bbJumpDest = fgLookupBB(jmpAddr);
9549
9550     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
9551     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
9552     // reason we don't want to remove the block at this point is that if we call
9553     // fgInitBBLookup() again we will do it wrong, as the BBJ_ALWAYS block won't be
9554     // added and the linked-list length will be different from fgBBcount.
9555 }
9556
9557 /*****************************************************************************/
9558 // Get the first non-prefix opcode. Used for verification of valid combinations
9559 // of prefixes and actual opcodes.
9560
9561 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9562 {
9563     while (codeAddr < codeEndp)
9564     {
9565         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9566         codeAddr += sizeof(__int8);
9567
9568         if (opcode == CEE_PREFIX1)
9569         {
9570             if (codeAddr >= codeEndp)
9571             {
9572                 break;
9573             }
9574             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9575             codeAddr += sizeof(__int8);
9576         }
9577
9578         switch (opcode)
9579         {
9580             case CEE_UNALIGNED:
9581             case CEE_VOLATILE:
9582             case CEE_TAILCALL:
9583             case CEE_CONSTRAINED:
9584             case CEE_READONLY:
9585                 break;
9586             default:
9587                 return opcode;
9588         }
9589
9590         codeAddr += opcodeSizes[opcode];
9591     }
9592
9593     return CEE_ILLEGAL;
9594 }
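
// To illustrate the loop above (a sketch of expected behavior, not taken from the original source):
// for an IL byte sequence such as
//
//     volatile. unaligned. 1 ldind.i4
//
// the prefixes (and the one-byte operand of unaligned.) are skipped via opcodeSizes[], and
// CEE_LDIND_I4 is returned. If the stream ends while only prefixes have been seen, the loop
// falls out and CEE_ILLEGAL is returned.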
9595
9596 /*****************************************************************************/
9597 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
9598
9599 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9600 {
9601     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9602
9603     if (!(
9604             // The opcodes for all the ldind and stind variants happen to be contiguous, except for stind.i.
9605             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9606             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9607             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9608             // the volatile. prefix is also allowed with ldsfld and stsfld
9609             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9610     {
9611         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9612     }
9613 }
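
// For example (an illustrative sketch): "volatile. ldsfld <token>" passes this check because
// volatilePrefix is true for the volatile. case, whereas "unaligned. 4 ldsfld <token>" reaches
// BADCODE, since ldsfld/stsfld are only permitted after the volatile. prefix. Either prefix
// applied to a plain arithmetic opcode such as add is rejected.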
9614
9615 /*****************************************************************************/
9616
9617 #ifdef DEBUG
9618
9619 #undef RETURN // undef contracts RETURN macro
9620
9621 enum controlFlow_t
9622 {
9623     NEXT,
9624     CALL,
9625     RETURN,
9626     THROW,
9627     BRANCH,
9628     COND_BRANCH,
9629     BREAK,
9630     PHI,
9631     META,
9632 };
9633
9634 const static controlFlow_t controlFlow[] = {
9635 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9636 #include "opcode.def"
9637 #undef OPDEF
9638 };
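
// Sketch of how the table above is populated (assuming the usual opcode.def layout): each OPDEF
// row contributes just its control-flow column, so, for example, controlFlow[CEE_ADD] is NEXT,
// controlFlow[CEE_RET] is RETURN, and controlFlow[CEE_BR] is BRANCH. The array is indexed by
// OPCODE and is compiled only under DEBUG.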
9639
9640 #endif // DEBUG
9641
9642 /*****************************************************************************
9643  *  Determine the result type of an arithmetic operation
9644  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
9645  */
9646 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
9647 {
9648     var_types type = TYP_UNDEF;
9649     GenTree*  op1  = *pOp1;
9650     GenTree*  op2  = *pOp2;
9651
9652     // Arithmetic operations are generally only allowed with
9653     // primitive types, but certain operations are allowed
9654     // with byrefs
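    //
    // As a quick reference (a sketch of the cases handled below, not exhaustive):
    //
    //   GT_SUB:  byref - byref        => native int
    //            [native] int - byref => native int   (the managed C++ case noted below)
    //            byref - [native] int => byref
    //
    //   GT_ADD:  byref + [native] int => byref   (byref + byref is not allowed)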
9655
9656     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9657     {
9658         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9659         {
9660             // byref1-byref2 => gives a native int
9661             type = TYP_I_IMPL;
9662         }
9663         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9664         {
9665             // [native] int - byref => gives a native int
9666
9667             //
9668             // The reason is that it is possible, in managed C++,
9669             // to have a tree like this:
9670             //
9671             //              -
9672             //             / \
9673             //            /   \
9674             //           /     \
9675             //          /       \
9676             // const(h) int     addr byref
9677             //
9678             // <BUGNUM> VSW 318822 </BUGNUM>
9679             //
9680             // So here we decide to make the resulting type to be a native int.
9681             CLANG_FORMAT_COMMENT_ANCHOR;
9682
9683 #ifdef _TARGET_64BIT_
9684             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9685             {
9686                 // insert an explicit upcast
9687                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9688             }
9689 #endif // _TARGET_64BIT_
9690
9691             type = TYP_I_IMPL;
9692         }
9693         else
9694         {
9695             // byref - [native] int => gives a byref
9696             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9697
9698 #ifdef _TARGET_64BIT_
9699             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9700             {
9701                 // insert an explicit upcast
9702                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9703             }
9704 #endif // _TARGET_64BIT_
9705
9706             type = TYP_BYREF;
9707         }
9708     }
9709     else if ((oper == GT_ADD) &&
9710              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9711     {
9712         // byref + [native] int => gives a byref
9713         // (or)
9714         // [native] int + byref => gives a byref
9715
9716         // only one can be a byref : byref op byref not allowed
9717         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9718         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9719
9720 #ifdef _TARGET_64BIT_
9721         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9722         {
9723             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9724             {
9725                 // insert an explicit upcast
9726                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9727             }
9728         }
9729         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9730         {
9731             // insert an explicit upcast
9732             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9733         }
9734 #endif // _TARGET_64BIT_
9735
9736         type = TYP_BYREF;
9737     }
9738 #ifdef _TARGET_64BIT_
9739     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9740     {
9741         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9742
9743         // int + long => gives long
9744         // long + int => gives long
9745         // We only see this mix because in the IL the 'long' isn't really an Int64; it's just a native int (IntPtr).
9746
9747         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9748         {
9749             // insert an explicit upcast
9750             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9751         }
9752         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9753         {
9754             // insert an explicit upcast
9755             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9756         }
9757
9758         type = TYP_I_IMPL;
9759     }
9760 #else  // 32-bit TARGET
9761     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9762     {
9763         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9764
9765         // int + long => gives long
9766         // long + int => gives long
9767
9768         type = TYP_LONG;
9769     }
9770 #endif // _TARGET_64BIT_
9771     else
9772     {
9773         // int + int => gives an int
9774         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9775
9776         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9777                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9778
9779         type = genActualType(op1->gtType);
9780
9781 #if FEATURE_X87_DOUBLES
9782
9783         // For x87, since we only have 1 size of registers, prefer double
9784         // For everybody else, be more precise
9785         if (type == TYP_FLOAT)
9786             type = TYP_DOUBLE;
9787
9788 #else // !FEATURE_X87_DOUBLES
9789
9790         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9791         // Otherwise, turn floats into doubles
9792         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9793         {
9794             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9795             type = TYP_DOUBLE;
9796         }
9797
9798 #endif // FEATURE_X87_DOUBLES
9799     }
9800
9801 #if FEATURE_X87_DOUBLES
9802     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9803 #else  // FEATURE_X87_DOUBLES
9804     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9805 #endif // FEATURE_X87_DOUBLES
9806
9807     return type;
9808 }
9809
9810 //------------------------------------------------------------------------
9811 // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
9812 //
9813 // Arguments:
9814 //   op1 - value to cast
9815 //   pResolvedToken - resolved token for type to cast to
9816 //   isCastClass - true if this is a castclass, false if isinst
9817 //
9818 // Return Value:
9819 //   tree representing optimized cast, or null if no optimization possible
9820
9821 GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
9822 {
9823     assert(op1->TypeGet() == TYP_REF);
9824
9825     // Don't optimize for minopts or debug codegen.
9826     if (opts.compDbgCode || opts.MinOpts())
9827     {
9828         return nullptr;
9829     }
9830
9831     // See what we know about the type of the object being cast.
9832     bool                 isExact   = false;
9833     bool                 isNonNull = false;
9834     CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
9835     GenTree*             optResult = nullptr;
9836
9837     if (fromClass != nullptr)
9838     {
9839         CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
9840         JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
9841                 isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
9842                 info.compCompHnd->getClassName(toClass));
9843
9844         // Perhaps we know if the cast will succeed or fail.
9845         TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
9846
9847         if (castResult == TypeCompareState::Must)
9848         {
9849             // Cast will succeed, result is simply op1.
9850             JITDUMP("Cast will succeed, optimizing to simply return input\n");
9851             return op1;
9852         }
9853         else if (castResult == TypeCompareState::MustNot)
9854         {
9855             // See if we can sharpen exactness by looking for final classes
9856             if (!isExact)
9857             {
9858                 DWORD flags     = info.compCompHnd->getClassAttribs(fromClass);
9859                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL |
9860                                   CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
9861                 isExact = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9862             }
9863
9864             // Cast to exact type will fail. Handle case where we have
9865             // an exact type (that is, fromClass is not a subtype)
9866             // and we're not going to throw on failure.
9867             if (isExact && !isCastClass)
9868             {
9869                 JITDUMP("Cast will fail, optimizing to return null\n");
9870                 GenTree* result = gtNewIconNode(0, TYP_REF);
9871
9872                 // If the cast was fed by a box, we can remove that too.
9873                 if (op1->IsBoxedValue())
9874                 {
9875                     JITDUMP("Also removing upstream box\n");
9876                     gtTryRemoveBoxUpstreamEffects(op1);
9877                 }
9878
9879                 return result;
9880             }
9881             else if (isExact)
9882             {
9883                 JITDUMP("Not optimizing failing castclass (yet)\n");
9884             }
9885             else
9886             {
9887                 JITDUMP("Can't optimize since fromClass is inexact\n");
9888             }
9889         }
9890         else
9891         {
9892             JITDUMP("Result of cast unknown, must generate runtime test\n");
9893         }
9894     }
9895     else
9896     {
9897         JITDUMP("\nCan't optimize since fromClass is unknown\n");
9898     }
9899
9900     return nullptr;
9901 }
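
// For illustration (a hypothetical example; MyBase/MyDerived are not from the original source):
// if op1 is known to come from "new MyDerived()" (so fromClass is exact) and the target type is
// MyBase, compareTypesForCast reports Must and the castclass/isinst collapses to op1 itself.
// Conversely, if fromClass is exact (or final) and compareTypesForCast reports MustNot, an isinst
// folds to a null constant, and a box feeding the cast can be removed as well.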
9902
9903 //------------------------------------------------------------------------
9904 // impCastClassOrIsInstToTree: build and import castclass/isinst
9905 //
9906 // Arguments:
9907 //   op1 - value to cast
9908 //   op2 - type handle for type to cast to
9909 //   pResolvedToken - resolved token from the cast operation
9910 //   isCastClass - true if this is castclass, false means isinst
9911 //
9912 // Return Value:
9913 //   Tree representing the cast
9914 //
9915 // Notes:
9916 //   May expand into a series of runtime checks or a helper call.
9917
9918 GenTree* Compiler::impCastClassOrIsInstToTree(GenTree*                op1,
9919                                               GenTree*                op2,
9920                                               CORINFO_RESOLVED_TOKEN* pResolvedToken,
9921                                               bool                    isCastClass)
9922 {
9923     assert(op1->TypeGet() == TYP_REF);
9924
9925     // Optimistically assume the jit should expand this as an inline test
9926     bool shouldExpandInline = true;
9927
9928     // Profitability check.
9929     //
9930     // Don't bother with inline expansion when jit is trying to
9931     // generate code quickly, or the cast is in code that won't run very
9932     // often, or the method already is pretty big.
9933     if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
9934     {
9935         // not worth the code expansion if jitting fast or in a rarely run block
9936         shouldExpandInline = false;
9937     }
9938     else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9939     {
9940         // not worth creating an untracked local variable
9941         shouldExpandInline = false;
9942     }
9943
9944     // Pessimistically assume the jit cannot expand this as an inline test
9945     bool                  canExpandInline = false;
9946     const CorInfoHelpFunc helper          = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9947
9948     // Legality check.
9949     //
9950     // Not all castclass/isinst operations can be inline expanded.
9951     // Check legality only if an inline expansion is desirable.
9952     if (shouldExpandInline)
9953     {
9954         if (isCastClass)
9955         {
9956             // Jit can only inline expand the normal CHKCASTCLASS helper.
9957             canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9958         }
9959         else
9960         {
9961             if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9962             {
9963                 // Check the class attributes.
9964                 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9965
9966                 // If the class is final and is not marshal byref or
9967                 // contextful, the jit can expand the IsInst check inline.
9968                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
9969                 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9970             }
9971         }
9972     }
9973
9974     const bool expandInline = canExpandInline && shouldExpandInline;
9975
9976     if (!expandInline)
9977     {
9978         JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
9979                 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
9980
9981         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9982         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9983         //
9984         op2->gtFlags |= GTF_DONT_CSE;
9985
9986         return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
9987     }
9988
9989     JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
9990
9991     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9992
9993     GenTree* temp;
9994     GenTree* condMT;
9995     //
9996     // expand the methodtable match:
9997     //
9998     //  condMT ==>   GT_NE
9999     //               /    \
10000     //           GT_IND   op2 (typically CNS_INT)
10001     //              |
10002     //           op1Copy
10003     //
10004
10005     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
10006     //
10007     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
10008     //
10009     // op1 is now known to be a non-complex tree
10010     // thus we can use gtClone(op1) from now on
10011     //
10012
10013     GenTree* op2Var = op2;
10014     if (isCastClass)
10015     {
10016         op2Var                                                  = fgInsertCommaFormTemp(&op2);
10017         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
10018     }
10019     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
10020     temp->gtFlags |= GTF_EXCEPT;
10021     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
10022
10023     GenTree* condNull;
10024     //
10025     // expand the null check:
10026     //
10027     //  condNull ==>   GT_EQ
10028     //                 /    \
10029     //             op1Copy CNS_INT
10030     //                      null
10031     //
10032     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
10033
10034     //
10035     // expand the true and false trees for the condMT
10036     //
10037     GenTree* condFalse = gtClone(op1);
10038     GenTree* condTrue;
10039     if (isCastClass)
10040     {
10041         //
10042         // use the special helper that skips the cases checked by our inlined cast
10043         //
10044         const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
10045
10046         condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
10047     }
10048     else
10049     {
10050         condTrue = gtNewIconNode(0, TYP_REF);
10051     }
10052
10053 #define USE_QMARK_TREES
10054
10055 #ifdef USE_QMARK_TREES
10056     GenTree* qmarkMT;
10057     //
10058     // Generate first QMARK - COLON tree
10059     //
10060     //  qmarkMT ==>   GT_QMARK
10061     //                 /     \
10062     //            condMT   GT_COLON
10063     //                      /     \
10064     //                condFalse  condTrue
10065     //
10066     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
10067     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
10068     condMT->gtFlags |= GTF_RELOP_QMARK;
10069
10070     GenTree* qmarkNull;
10071     //
10072     // Generate second QMARK - COLON tree
10073     //
10074     //  qmarkNull ==>  GT_QMARK
10075     //                 /     \
10076     //           condNull  GT_COLON
10077     //                      /     \
10078     //                qmarkMT   op1Copy
10079     //
10080     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
10081     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
10082     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
10083     condNull->gtFlags |= GTF_RELOP_QMARK;
10084
10085     // Make QMark node a top level node by spilling it.
10086     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
10087     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
10088
10089     // TODO: Is it possible op1 has a better type?
10090     lvaSetClass(tmp, pResolvedToken->hClass);
10091     return gtNewLclvNode(tmp, TYP_REF);
10092 #endif
10093 }
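
// Rough shape of the inline expansion built above, in pseudo-code (a sketch, not part of the
// original source); "mt(o)" stands for the method table loaded through the GT_IND:
//
//     tmp = (op1 == null) ? op1
//                         : (mt(op1) != op2) ? (isCastClass ? CHKCASTCLASS_SPECIAL(op2, op1) : null)
//                                            : op1;
//
// That is, a null input passes through untouched, a method-table match returns the input, and only
// the mismatch case falls back to the helper (castclass) or produces null (isinst).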
10094
10095 #ifndef DEBUG
10096 #define assertImp(cond) ((void)0)
10097 #else
10098 #define assertImp(cond)                                                                                                \
10099     do                                                                                                                 \
10100     {                                                                                                                  \
10101         if (!(cond))                                                                                                   \
10102         {                                                                                                              \
10103             const int cchAssertImpBuf = 600;                                                                           \
10104             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
10105             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
10106                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
10107                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
10108                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
10109             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
10110         }                                                                                                              \
10111     } while (0)
10112 #endif // DEBUG
10113
10114 #ifdef _PREFAST_
10115 #pragma warning(push)
10116 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
10117 #endif
10118 /*****************************************************************************
10119  *  Import the instr for the given basic block
10120  */
10121 void Compiler::impImportBlockCode(BasicBlock* block)
10122 {
10123 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
10124
10125 #ifdef DEBUG
10126
10127     if (verbose)
10128     {
10129         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
10130     }
10131 #endif
10132
10133     unsigned  nxtStmtIndex = impInitBlockLineInfo();
10134     IL_OFFSET nxtStmtOffs;
10135
10136     GenTree*                     arrayNodeFrom;
10137     GenTree*                     arrayNodeTo;
10138     GenTree*                     arrayNodeToIndex;
10139     CorInfoHelpFunc              helper;
10140     CorInfoIsAccessAllowedResult accessAllowedResult;
10141     CORINFO_HELPER_DESC          calloutHelper;
10142     const BYTE*                  lastLoadToken = nullptr;
10143
10144     // reject cyclic constraints
10145     if (tiVerificationNeeded)
10146     {
10147         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
10148         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
10149     }
10150
10151     /* Get the tree list started */
10152
10153     impBeginTreeList();
10154
10155     /* Walk the opcodes that comprise the basic block */
10156
10157     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
10158     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
10159
10160     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
10161     IL_OFFSET lastSpillOffs = opcodeOffs;
10162
10163     signed jmpDist;
10164
10165     /* remember the start of the delegate creation sequence (used for verification) */
10166     const BYTE* delegateCreateStart = nullptr;
10167
10168     int  prefixFlags = 0;
10169     bool explicitTailCall, constraintCall, readonlyCall;
10170
10171     typeInfo tiRetVal;
10172
10173     unsigned numArgs = info.compArgsCount;
10174
10175     /* Now process all the opcodes in the block */
10176
10177     var_types callTyp    = TYP_COUNT;
10178     OPCODE    prevOpcode = CEE_ILLEGAL;
10179
10180     if (block->bbCatchTyp)
10181     {
10182         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
10183         {
10184             impCurStmtOffsSet(block->bbCodeOffs);
10185         }
10186
10187         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
10188         // to a temp. This is a trade-off for code simplicity.
10189         impSpillSpecialSideEff();
10190     }
10191
10192     while (codeAddr < codeEndp)
10193     {
10194         bool                   usingReadyToRunHelper = false;
10195         CORINFO_RESOLVED_TOKEN resolvedToken;
10196         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
10197         CORINFO_CALL_INFO      callInfo;
10198         CORINFO_FIELD_INFO     fieldInfo;
10199
10200         tiRetVal = typeInfo(); // Default type info
10201
10202         //---------------------------------------------------------------------
10203
10204         /* We need to restrict the max tree depth as many of the Compiler
10205            functions are recursive. We do this by spilling the stack */
10206
10207         if (verCurrentState.esStackDepth)
10208         {
10209             /* Has it been a while since we last saw a non-empty stack (which
10210                guarantees that the tree depth isn't accumulating)? */
10211
10212             if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
10213             {
10214                 impSpillStackEnsure();
10215                 lastSpillOffs = opcodeOffs;
10216             }
10217         }
10218         else
10219         {
10220             lastSpillOffs   = opcodeOffs;
10221             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
10222         }
10223
10224         /* Compute the current instr offset */
10225
10226         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10227
10228 #ifndef DEBUG
10229         if (opts.compDbgInfo)
10230 #endif
10231         {
10232             if (!compIsForInlining())
10233             {
10234                 nxtStmtOffs =
10235                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
10236
10237                 /* Have we reached the next stmt boundary ? */
10238
10239                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
10240                 {
10241                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
10242
10243                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
10244                     {
10245                         /* We need to provide accurate IP-mapping at this point.
10246                            So spill anything on the stack so that it will form
10247                            gtStmts with the correct stmt offset noted */
10248
10249                         impSpillStackEnsure(true);
10250                     }
10251
10252                     // Has impCurStmtOffs been reported in any tree?
10253
10254                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
10255                     {
10256                         GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
10257                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10258
10259                         assert(impCurStmtOffs == BAD_IL_OFFSET);
10260                     }
10261
10262                     if (impCurStmtOffs == BAD_IL_OFFSET)
10263                     {
10264                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
10265                            If opcodeOffs has gone past nxtStmtIndex, catch up */
10266
10267                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
10268                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
10269                         {
10270                             nxtStmtIndex++;
10271                         }
10272
10273                         /* Go to the new stmt */
10274
10275                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
10276
10277                         /* Update the stmt boundary index */
10278
10279                         nxtStmtIndex++;
10280                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
10281
10282                         /* Are there any more line# entries after this one? */
10283
10284                         if (nxtStmtIndex < info.compStmtOffsetsCount)
10285                         {
10286                             /* Remember where the next line# starts */
10287
10288                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
10289                         }
10290                         else
10291                         {
10292                             /* No more line# entries */
10293
10294                             nxtStmtOffs = BAD_IL_OFFSET;
10295                         }
10296                     }
10297                 }
10298                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
10299                          (verCurrentState.esStackDepth == 0))
10300                 {
10301                     /* At stack-empty locations, we have already added the tree to
10302                        the stmt list with the last offset. We just need to update
10303                        impCurStmtOffs
10304                      */
10305
10306                     impCurStmtOffsSet(opcodeOffs);
10307                 }
10308                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
10309                          impOpcodeIsCallSiteBoundary(prevOpcode))
10310                 {
10311                     /* Make sure we have a type cached */
10312                     assert(callTyp != TYP_COUNT);
10313
10314                     if (callTyp == TYP_VOID)
10315                     {
10316                         impCurStmtOffsSet(opcodeOffs);
10317                     }
10318                     else if (opts.compDbgCode)
10319                     {
10320                         impSpillStackEnsure(true);
10321                         impCurStmtOffsSet(opcodeOffs);
10322                     }
10323                 }
10324                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
10325                 {
10326                     if (opts.compDbgCode)
10327                     {
10328                         impSpillStackEnsure(true);
10329                     }
10330
10331                     impCurStmtOffsSet(opcodeOffs);
10332                 }
10333
10334                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
10335                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
10336             }
10337         }
10338
10339         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
10340         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
10341         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
10342
10343         var_types       lclTyp, ovflType = TYP_UNKNOWN;
10344         GenTree*        op1           = DUMMY_INIT(NULL);
10345         GenTree*        op2           = DUMMY_INIT(NULL);
10346         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
10347         GenTree*        newObjThisPtr = DUMMY_INIT(NULL);
10348         bool            uns           = DUMMY_INIT(false);
10349         bool            isLocal       = false;
10350
10351         /* Get the next opcode and the size of its parameters */
10352
10353         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10354         codeAddr += sizeof(__int8);
10355
10356 #ifdef DEBUG
10357         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10358         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
10359 #endif
10360
10361     DECODE_OPCODE:
10362
10363         // Return if any previous code has caused inline to fail.
10364         if (compDonotInline())
10365         {
10366             return;
10367         }
10368
10369         /* Get the size of additional parameters */
10370
10371         signed int sz = opcodeSizes[opcode];
10372
10373 #ifdef DEBUG
10374         clsHnd  = NO_CLASS_HANDLE;
10375         lclTyp  = TYP_COUNT;
10376         callTyp = TYP_COUNT;
10377
10378         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10379         impCurOpcName = opcodeNames[opcode];
10380
10381         if (verbose && (opcode != CEE_PREFIX1))
10382         {
10383             printf("%s", impCurOpcName);
10384         }
10385
10386         /* Use assertImp() to display the opcode */
10387
10388         op1 = op2 = nullptr;
10389 #endif
10390
10391         /* See what kind of an opcode we have, then */
10392
10393         unsigned mflags   = 0;
10394         unsigned clsFlags = 0;
10395
10396         switch (opcode)
10397         {
10398             unsigned  lclNum;
10399             var_types type;
10400
10401             GenTree*   op3;
10402             genTreeOps oper;
10403             unsigned   size;
10404
10405             int val;
10406
10407             CORINFO_SIG_INFO     sig;
10408             IL_OFFSET            jmpAddr;
10409             bool                 ovfl, unordered, callNode;
10410             bool                 ldstruct;
10411             CORINFO_CLASS_HANDLE tokenType;
10412
10413             union {
10414                 int     intVal;
10415                 float   fltVal;
10416                 __int64 lngVal;
10417                 double  dblVal;
10418             } cval;
10419
10420             case CEE_PREFIX1:
10421                 opcode     = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10422                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10423                 codeAddr += sizeof(__int8);
10424                 goto DECODE_OPCODE;
10425
10426             SPILL_APPEND:
10427
10428                 // We need to call impSpillLclRefs() for a struct type lclVar.
10429                 // This is done for non-block assignments in the handling of stloc.
10430                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10431                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10432                 {
10433                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10434                 }
10435
10436                 /* Append 'op1' to the list of statements */
10437                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10438                 goto DONE_APPEND;
10439
10440             APPEND:
10441
10442                 /* Append 'op1' to the list of statements */
10443
10444                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10445                 goto DONE_APPEND;
10446
10447             DONE_APPEND:
10448
10449 #ifdef DEBUG
10450                 // Remember at which BC offset the tree was finished
10451                 impNoteLastILoffs();
10452 #endif
10453                 break;
10454
10455             case CEE_LDNULL:
10456                 impPushNullObjRefOnStack();
10457                 break;
10458
10459             case CEE_LDC_I4_M1:
10460             case CEE_LDC_I4_0:
10461             case CEE_LDC_I4_1:
10462             case CEE_LDC_I4_2:
10463             case CEE_LDC_I4_3:
10464             case CEE_LDC_I4_4:
10465             case CEE_LDC_I4_5:
10466             case CEE_LDC_I4_6:
10467             case CEE_LDC_I4_7:
10468             case CEE_LDC_I4_8:
10469                 cval.intVal = (opcode - CEE_LDC_I4_0);
10470                 assert(-1 <= cval.intVal && cval.intVal <= 8);
10471                 goto PUSH_I4CON;
10472
10473             case CEE_LDC_I4_S:
10474                 cval.intVal = getI1LittleEndian(codeAddr);
10475                 goto PUSH_I4CON;
10476             case CEE_LDC_I4:
10477                 cval.intVal = getI4LittleEndian(codeAddr);
10478                 goto PUSH_I4CON;
10479             PUSH_I4CON:
10480                 JITDUMP(" %d", cval.intVal);
10481                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10482                 break;
10483
10484             case CEE_LDC_I8:
10485                 cval.lngVal = getI8LittleEndian(codeAddr);
10486                 JITDUMP(" 0x%016llx", cval.lngVal);
10487                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10488                 break;
10489
10490             case CEE_LDC_R8:
10491                 cval.dblVal = getR8LittleEndian(codeAddr);
10492                 JITDUMP(" %#.17g", cval.dblVal);
10493                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10494                 break;
10495
10496             case CEE_LDC_R4:
10497                 cval.dblVal = getR4LittleEndian(codeAddr);
10498                 JITDUMP(" %#.17g", cval.dblVal);
10499                 {
10500                     GenTree* cnsOp = gtNewDconNode(cval.dblVal);
10501 #if !FEATURE_X87_DOUBLES
10502                     // The x87 stack doesn't differentiate between float/double,
10503                     // so R4 is treated as R8 there; everybody else keeps the distinction.
10504                     cnsOp->gtType = TYP_FLOAT;
10505 #endif // FEATURE_X87_DOUBLES
10506                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10507                 }
10508                 break;
10509
10510             case CEE_LDSTR:
10511
10512                 if (compIsForInlining())
10513                 {
10514                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10515                     {
10516                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
10517                         return;
10518                     }
10519                 }
10520
10521                 val = getU4LittleEndian(codeAddr);
10522                 JITDUMP(" %08X", val);
10523                 if (tiVerificationNeeded)
10524                 {
10525                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
10526                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
10527                 }
10528                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
10529
10530                 break;
10531
10532             case CEE_LDARG:
10533                 lclNum = getU2LittleEndian(codeAddr);
10534                 JITDUMP(" %u", lclNum);
10535                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10536                 break;
10537
10538             case CEE_LDARG_S:
10539                 lclNum = getU1LittleEndian(codeAddr);
10540                 JITDUMP(" %u", lclNum);
10541                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10542                 break;
10543
10544             case CEE_LDARG_0:
10545             case CEE_LDARG_1:
10546             case CEE_LDARG_2:
10547             case CEE_LDARG_3:
10548                 lclNum = (opcode - CEE_LDARG_0);
10549                 assert(lclNum >= 0 && lclNum < 4);
10550                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10551                 break;
10552
10553             case CEE_LDLOC:
10554                 lclNum = getU2LittleEndian(codeAddr);
10555                 JITDUMP(" %u", lclNum);
10556                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10557                 break;
10558
10559             case CEE_LDLOC_S:
10560                 lclNum = getU1LittleEndian(codeAddr);
10561                 JITDUMP(" %u", lclNum);
10562                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10563                 break;
10564
10565             case CEE_LDLOC_0:
10566             case CEE_LDLOC_1:
10567             case CEE_LDLOC_2:
10568             case CEE_LDLOC_3:
10569                 lclNum = (opcode - CEE_LDLOC_0);
10570                 assert(lclNum >= 0 && lclNum < 4);
10571                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10572                 break;
10573
10574             case CEE_STARG:
10575                 lclNum = getU2LittleEndian(codeAddr);
10576                 goto STARG;
10577
10578             case CEE_STARG_S:
10579                 lclNum = getU1LittleEndian(codeAddr);
10580             STARG:
10581                 JITDUMP(" %u", lclNum);
10582
10583                 if (tiVerificationNeeded)
10584                 {
10585                     Verify(lclNum < info.compILargsCount, "bad arg num");
10586                 }
10587
10588                 if (compIsForInlining())
10589                 {
10590                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10591                     noway_assert(op1->gtOper == GT_LCL_VAR);
10592                     lclNum = op1->AsLclVar()->gtLclNum;
10593
10594                     goto VAR_ST_VALID;
10595                 }
10596
10597                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10598                 assertImp(lclNum < numArgs);
10599
10600                 if (lclNum == info.compThisArg)
10601                 {
10602                     lclNum = lvaArg0Var;
10603                 }
10604
10605                 // We should have seen this arg write in the prescan
10606                 assert(lvaTable[lclNum].lvHasILStoreOp);
10607
10608                 if (tiVerificationNeeded)
10609                 {
10610                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
10611                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
10612                            "type mismatch");
10613
10614                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10615                     {
10616                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
10617                     }
10618                 }
10619
10620                 goto VAR_ST;
10621
10622             case CEE_STLOC:
10623                 lclNum  = getU2LittleEndian(codeAddr);
10624                 isLocal = true;
10625                 JITDUMP(" %u", lclNum);
10626                 goto LOC_ST;
10627
10628             case CEE_STLOC_S:
10629                 lclNum  = getU1LittleEndian(codeAddr);
10630                 isLocal = true;
10631                 JITDUMP(" %u", lclNum);
10632                 goto LOC_ST;
10633
10634             case CEE_STLOC_0:
10635             case CEE_STLOC_1:
10636             case CEE_STLOC_2:
10637             case CEE_STLOC_3:
10638                 isLocal = true;
10639                 lclNum  = (opcode - CEE_STLOC_0);
10640                 assert(lclNum >= 0 && lclNum < 4);
10641
10642             LOC_ST:
10643                 if (tiVerificationNeeded)
10644                 {
10645                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10646                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
10647                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
10648                            "type mismatch");
10649                 }
10650
10651                 if (compIsForInlining())
10652                 {
10653                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10654
10655                     /* Have we allocated a temp for this local? */
10656
10657                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
10658
10659                     goto _PopValue;
10660                 }
10661
10662                 lclNum += numArgs;
10663
10664             VAR_ST:
10665
10666                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10667                 {
10668                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10669                     BADCODE("Bad IL");
10670                 }
10671
10672             VAR_ST_VALID:
10673
10674                 /* if it is a struct assignment, make certain we don't overflow the buffer */
10675                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10676
10677                 if (lvaTable[lclNum].lvNormalizeOnLoad())
10678                 {
10679                     lclTyp = lvaGetRealType(lclNum);
10680                 }
10681                 else
10682                 {
10683                     lclTyp = lvaGetActualType(lclNum);
10684                 }
10685
10686             _PopValue:
10687                 /* Pop the value being assigned */
10688
10689                 {
10690                     StackEntry se = impPopStack();
10691                     clsHnd        = se.seTypeInfo.GetClassHandle();
10692                     op1           = se.val;
10693                     tiRetVal      = se.seTypeInfo;
10694                 }
10695
10696 #ifdef FEATURE_SIMD
10697                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10698                 {
10699                     assert(op1->TypeGet() == TYP_STRUCT);
10700                     op1->gtType = lclTyp;
10701                 }
10702 #endif // FEATURE_SIMD
10703
10704                 op1 = impImplicitIorI4Cast(op1, lclTyp);
10705
10706 #ifdef _TARGET_64BIT_
10707                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10708                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10709                 {
10710                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10711                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
10712                 }
10713 #endif // _TARGET_64BIT_
10714
10715                 // We had better assign it a value of the correct type
10716                 assertImp(
10717                     genActualType(lclTyp) == genActualType(op1->gtType) ||
10718                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
10719                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10720                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10721                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10722                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10723
10724                 /* If op1 is "&var" then its type is the transient "*" and it can
10725                    be used either as TYP_BYREF or TYP_I_IMPL */
10726
10727                 if (op1->IsVarAddr())
10728                 {
10729                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10730
10731                     /* When "&var" is created, we assume it is a byref. If it is
10732                        being assigned to a TYP_I_IMPL var, change the type to
10733                        prevent unnecessary GC info */
10734
10735                     if (genActualType(lclTyp) == TYP_I_IMPL)
10736                     {
10737                         op1->gtType = TYP_I_IMPL;
10738                     }
10739                 }
10740
10741                 // If this is a local and the local is a ref type, see
10742                 // if we can improve type information based on the
10743                 // value being assigned.
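                // Illustrative C# (hypothetical types): if the only IL store to a local declared
                // as 'object o' is 'o = new Foo()', and the store is reached with an empty stack,
                // lvaUpdateClass below can sharpen the local's known class to Foo, which later
                // phases may exploit (e.g. for devirtualization).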
10744                 if (isLocal && (lclTyp == TYP_REF))
10745                 {
10746                     // We should have seen a stloc in our IL prescan.
10747                     assert(lvaTable[lclNum].lvHasILStoreOp);
10748
10749                     const bool isSingleILStoreLocal =
10750                         !lvaTable[lclNum].lvHasMultipleILStoreOp && !lvaTable[lclNum].lvHasLdAddrOp;
10751
10752                     // Conservative check that there is just one
10753                     // definition that reaches this store.
10754                     const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
10755
10756                     if (isSingleILStoreLocal && hasSingleReachingDef)
10757                     {
10758                         lvaUpdateClass(lclNum, op1, clsHnd);
10759                     }
10760                 }
10761
10762                 /* Filter out simple assignments to itself */
10763
10764                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10765                 {
10766                     if (opts.compDbgCode)
10767                     {
10768                         op1 = gtNewNothingNode();
10769                         goto SPILL_APPEND;
10770                     }
10771                     else
10772                     {
10773                         break;
10774                     }
10775                 }
10776
10777                 /* Create the assignment node */
10778
10779                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10780
10781                 /* If the local is aliased or pinned, we need to spill calls and
10782                    indirections from the stack. */
10783
10784                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
10785                     (verCurrentState.esStackDepth > 0))
10786                 {
10787                     impSpillSideEffects(false,
10788                                         (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
10789                 }
10790
10791                 /* Spill any refs to the local from the stack */
10792
10793                 impSpillLclRefs(lclNum);
10794
10795 #if !FEATURE_X87_DOUBLES
10796                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10797                 // We insert a cast to the dest 'op2' type
10798                 //
10799                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10800                     varTypeIsFloating(op2->gtType))
10801                 {
10802                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10803                 }
10804 #endif // !FEATURE_X87_DOUBLES
10805
10806                 if (varTypeIsStruct(lclTyp))
10807                 {
10808                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10809                 }
10810                 else
10811                 {
10812                     // The code generator generates GC tracking information
10813                     // based on the RHS of the assignment.  Later the LHS (which is
10814                     // a BYREF) gets used and the emitter checks that that variable
10815                     // is being tracked.  It is not (since the RHS was an int and did
10816                     // not need tracking).  To keep this assert happy, we change the RHS
10817                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10818                     {
10819                         op1->gtType = TYP_BYREF;
10820                     }
10821                     op1 = gtNewAssignNode(op2, op1);
10822                 }
10823
10824                 goto SPILL_APPEND;
10825
10826             case CEE_LDLOCA:
10827                 lclNum = getU2LittleEndian(codeAddr);
10828                 goto LDLOCA;
10829
10830             case CEE_LDLOCA_S:
10831                 lclNum = getU1LittleEndian(codeAddr);
10832             LDLOCA:
10833                 JITDUMP(" %u", lclNum);
10834                 if (tiVerificationNeeded)
10835                 {
10836                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10837                     Verify(info.compInitMem, "initLocals not set");
10838                 }
10839
10840                 if (compIsForInlining())
10841                 {
10842                     // Get the local type
10843                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10844
10845                     /* Have we allocated a temp for this local? */
10846
10847                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10848
10849                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10850
10851                     goto _PUSH_ADRVAR;
10852                 }
10853
10854                 lclNum += numArgs;
10855                 assertImp(lclNum < info.compLocalsCount);
10856                 goto ADRVAR;
10857
10858             case CEE_LDARGA:
10859                 lclNum = getU2LittleEndian(codeAddr);
10860                 goto LDARGA;
10861
10862             case CEE_LDARGA_S:
10863                 lclNum = getU1LittleEndian(codeAddr);
10864             LDARGA:
10865                 JITDUMP(" %u", lclNum);
10866                 Verify(lclNum < info.compILargsCount, "bad arg num");
10867
10868                 if (compIsForInlining())
10869                 {
10870                     // In IL, LDARGA(_S) is used to load a managed byref pointer to a struct argument,
10871                     // followed by a ldfld to load a field.
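                    // Illustrative IL for that pattern (hypothetical struct/field names):
                    //     ldarga.s   1                        // byref to the struct argument
                    //     ldfld      int32 SomeStruct::_value // load a field through the byref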
10872
10873                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10874                     if (op1->gtOper != GT_LCL_VAR)
10875                     {
10876                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10877                         return;
10878                     }
10879
10880                     assert(op1->gtOper == GT_LCL_VAR);
10881
10882                     goto _PUSH_ADRVAR;
10883                 }
10884
10885                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10886                 assertImp(lclNum < numArgs);
10887
10888                 if (lclNum == info.compThisArg)
10889                 {
10890                     lclNum = lvaArg0Var;
10891                 }
10892
10893                 goto ADRVAR;
10894
10895             ADRVAR:
10896
10897                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10898
10899             _PUSH_ADRVAR:
10900                 assert(op1->gtOper == GT_LCL_VAR);
10901
10902                 /* Note that this is supposed to create the transient type "*"
10903                    which may be used as a TYP_I_IMPL. However, we catch places
10904                    where it is used as a TYP_I_IMPL and change the node if needed.
10905                    Thus we are pessimistic and may report byrefs in the GC info
10906                    where it was not absolutely needed, but it is safer this way.
10907                  */
10908                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10909
10910                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10911                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10912
10913                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10914                 if (tiVerificationNeeded)
10915                 {
10916                     // Don't allow taking address of uninit this ptr.
10917                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10918                     {
10919                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10920                     }
10921
10922                     if (!tiRetVal.IsByRef())
10923                     {
10924                         tiRetVal.MakeByRef();
10925                     }
10926                     else
10927                     {
10928                         Verify(false, "byref to byref");
10929                     }
10930                 }
10931
10932                 impPushOnStack(op1, tiRetVal);
10933                 break;
10934
10935             case CEE_ARGLIST:
10936
10937                 if (!info.compIsVarArgs)
10938                 {
10939                     BADCODE("arglist in non-vararg method");
10940                 }
10941
10942                 if (tiVerificationNeeded)
10943                 {
10944                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10945                 }
10946                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10947
10948                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10949                    adjusted the arg count because this is like fetching the last param */
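                /* Illustrative use: a C-style vararg method (e.g. one declared with '__arglist'
                   in C#) reaches its extra arguments via the 'arglist' opcode, which we
                   materialize here as the address of the hidden varargs cookie parameter
                   (lvaVarargsHandleArg). */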
10950                 assertImp(0 < numArgs);
10951                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10952                 lclNum = lvaVarargsHandleArg;
10953                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10954                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10955                 impPushOnStack(op1, tiRetVal);
10956                 break;
10957
10958             case CEE_ENDFINALLY:
10959
10960                 if (compIsForInlining())
10961                 {
10962                     assert(!"Shouldn't have exception handlers in the inliner!");
10963                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10964                     return;
10965                 }
10966
10967                 if (verCurrentState.esStackDepth > 0)
10968                 {
10969                     impEvalSideEffects();
10970                 }
10971
10972                 if (info.compXcptnsCount == 0)
10973                 {
10974                     BADCODE("endfinally outside finally");
10975                 }
10976
10977                 assert(verCurrentState.esStackDepth == 0);
10978
10979                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10980                 goto APPEND;
10981
10982             case CEE_ENDFILTER:
10983
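                // An IL filter must end with 'endfilter', which pops a single int32: 0 means
                // continue the search for a handler, while a nonzero value (1) selects the
                // associated handler.  For example, a C# 'catch (E e) when (cond)' clause
                // compiles to such a filter block.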
10984                 if (compIsForInlining())
10985                 {
10986                     assert(!"Shouldn't have exception handlers in the inliner!");
10987                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10988                     return;
10989                 }
10990
10991                 block->bbSetRunRarely(); // filters are rare
10992
10993                 if (info.compXcptnsCount == 0)
10994                 {
10995                     BADCODE("endfilter outside filter");
10996                 }
10997
10998                 if (tiVerificationNeeded)
10999                 {
11000                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
11001                 }
11002
11003                 op1 = impPopStack().val;
11004                 assertImp(op1->gtType == TYP_INT);
11005                 if (!bbInFilterILRange(block))
11006                 {
11007                     BADCODE("EndFilter outside a filter handler");
11008                 }
11009
11010                 /* Mark current bb as end of filter */
11011
11012                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
11013                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
11014
11015                 /* Mark catch handler as successor */
11016
11017                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
11018                 if (verCurrentState.esStackDepth != 0)
11019                 {
11020                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
11021                                                 DEBUGARG(__LINE__));
11022                 }
11023                 goto APPEND;
11024
11025             case CEE_RET:
11026                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
11027             RET:
11028                 if (!impReturnInstruction(block, prefixFlags, opcode))
11029                 {
11030                     return; // abort
11031                 }
11032                 else
11033                 {
11034                     break;
11035                 }
11036
11037             case CEE_JMP:
11038
11039                 assert(!compIsForInlining());
11040
11041                 if (tiVerificationNeeded)
11042                 {
11043                     Verify(false, "Invalid opcode: CEE_JMP");
11044                 }
11045
11046                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
11047                 {
11048                     /* CEE_JMP does not make sense in some "protected" regions. */
11049
11050                     BADCODE("Jmp not allowed in protected region");
11051                 }
11052
11053                 if (verCurrentState.esStackDepth != 0)
11054                 {
11055                     BADCODE("Stack must be empty after CEE_JMPs");
11056                 }
11057
11058                 _impResolveToken(CORINFO_TOKENKIND_Method);
11059
11060                 JITDUMP(" %08X", resolvedToken.token);
11061
11062                 /* The signature of the target has to be identical to ours.
11063                    At least check that argCnt and returnType match */
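                /* Illustrative IL (hypothetical target): 'jmp void C::SameSig(int32, object)'
                   transfers control to C::SameSig reusing the current arguments; note that only
                   the argument count, return type and calling convention are compared below. */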
11064
11065                 eeGetMethodSig(resolvedToken.hMethod, &sig);
11066                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
11067                     sig.retType != info.compMethodInfo->args.retType ||
11068                     sig.callConv != info.compMethodInfo->args.callConv)
11069                 {
11070                     BADCODE("Incompatible target for CEE_JMPs");
11071                 }
11072
11073                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
11074
11075                 /* Mark the basic block as being a JUMP instead of RETURN */
11076
11077                 block->bbFlags |= BBF_HAS_JMP;
11078
11079                 /* Set this flag to make sure register arguments have a location assigned
11080                  * even if we don't use them inside the method */
11081
11082                 compJmpOpUsed = true;
11083
11084                 fgNoStructPromotion = true;
11085
11086                 goto APPEND;
11087
11088             case CEE_LDELEMA:
11089                 assertImp(sz == sizeof(unsigned));
11090
11091                 _impResolveToken(CORINFO_TOKENKIND_Class);
11092
11093                 JITDUMP(" %08X", resolvedToken.token);
11094
11095                 ldelemClsHnd = resolvedToken.hClass;
11096
11097                 if (tiVerificationNeeded)
11098                 {
11099                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11100                     typeInfo tiIndex = impStackTop().seTypeInfo;
11101
11102                     // As per ECMA, the 'index' specified can be either int32 or native int.
11103                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11104
11105                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
11106                     Verify(tiArray.IsNullObjRef() ||
11107                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
11108                            "bad array");
11109
11110                     tiRetVal = arrayElemType;
11111                     tiRetVal.MakeByRef();
11112                     if (prefixFlags & PREFIX_READONLY)
11113                     {
11114                         tiRetVal.SetIsReadonlyByRef();
11115                     }
11116
11117                     // an array interior pointer is always in the heap
11118                     tiRetVal.SetIsPermanentHomeByRef();
11119                 }
11120
11121                 // If it's a value class array we just do a simple address-of
11122                 if (eeIsValueClass(ldelemClsHnd))
11123                 {
11124                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
11125                     if (cit == CORINFO_TYPE_UNDEF)
11126                     {
11127                         lclTyp = TYP_STRUCT;
11128                     }
11129                     else
11130                     {
11131                         lclTyp = JITtype2varType(cit);
11132                     }
11133                     goto ARR_LD_POST_VERIFY;
11134                 }
11135
11136                 // Similarly, if it's a readonly access, we can do a simple address-of
11137                 // without doing a runtime type-check
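                // With 'readonly.' the resulting byref is read-only, so the array-covariance
                // type check can be skipped.  Compilers typically emit it for read-only element
                // access through arrays of a generic type parameter, e.g. (illustrative IL):
                //     readonly.
                //     ldelema    !T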
11138                 if (prefixFlags & PREFIX_READONLY)
11139                 {
11140                     lclTyp = TYP_REF;
11141                     goto ARR_LD_POST_VERIFY;
11142                 }
11143
11144                 // Otherwise we need the full helper function with run-time type check
11145                 op1 = impTokenToHandle(&resolvedToken);
11146                 if (op1 == nullptr)
11147                 { // compDonotInline()
11148                     return;
11149                 }
11150
11151                 args = gtNewArgList(op1);                      // Type
11152                 args = gtNewListNode(impPopStack().val, args); // index
11153                 args = gtNewListNode(impPopStack().val, args); // array
11154                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
11155
11156                 impPushOnStack(op1, tiRetVal);
11157                 break;
11158
11159             // ldelem for reference and value types
11160             case CEE_LDELEM:
11161                 assertImp(sz == sizeof(unsigned));
11162
11163                 _impResolveToken(CORINFO_TOKENKIND_Class);
11164
11165                 JITDUMP(" %08X", resolvedToken.token);
11166
11167                 ldelemClsHnd = resolvedToken.hClass;
11168
11169                 if (tiVerificationNeeded)
11170                 {
11171                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11172                     typeInfo tiIndex = impStackTop().seTypeInfo;
11173
11174                     // As per ECMA, the 'index' specified can be either int32 or native int.
11175                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11176                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
11177
11178                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
11179                            "type of array incompatible with type operand");
11180                     tiRetVal.NormaliseForStack();
11181                 }
11182
11183                 // If it's a reference type or generic variable type
11184                 // then just generate code as though it's a ldelem.ref instruction
11185                 if (!eeIsValueClass(ldelemClsHnd))
11186                 {
11187                     lclTyp = TYP_REF;
11188                     opcode = CEE_LDELEM_REF;
11189                 }
11190                 else
11191                 {
11192                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
11193                     lclTyp             = JITtype2varType(jitTyp);
11194                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
11195                     tiRetVal.NormaliseForStack();
11196                 }
11197                 goto ARR_LD_POST_VERIFY;
11198
11199             case CEE_LDELEM_I1:
11200                 lclTyp = TYP_BYTE;
11201                 goto ARR_LD;
11202             case CEE_LDELEM_I2:
11203                 lclTyp = TYP_SHORT;
11204                 goto ARR_LD;
11205             case CEE_LDELEM_I:
11206                 lclTyp = TYP_I_IMPL;
11207                 goto ARR_LD;
11208
11209             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
11210             // and treating it as TYP_INT avoids other asserts.
11211             case CEE_LDELEM_U4:
11212                 lclTyp = TYP_INT;
11213                 goto ARR_LD;
11214
11215             case CEE_LDELEM_I4:
11216                 lclTyp = TYP_INT;
11217                 goto ARR_LD;
11218             case CEE_LDELEM_I8:
11219                 lclTyp = TYP_LONG;
11220                 goto ARR_LD;
11221             case CEE_LDELEM_REF:
11222                 lclTyp = TYP_REF;
11223                 goto ARR_LD;
11224             case CEE_LDELEM_R4:
11225                 lclTyp = TYP_FLOAT;
11226                 goto ARR_LD;
11227             case CEE_LDELEM_R8:
11228                 lclTyp = TYP_DOUBLE;
11229                 goto ARR_LD;
11230             case CEE_LDELEM_U1:
11231                 lclTyp = TYP_UBYTE;
11232                 goto ARR_LD;
11233             case CEE_LDELEM_U2:
11234                 lclTyp = TYP_USHORT;
11235                 goto ARR_LD;
11236
11237             ARR_LD:
11238
11239                 if (tiVerificationNeeded)
11240                 {
11241                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11242                     typeInfo tiIndex = impStackTop().seTypeInfo;
11243
11244                     // As per ECMA, the 'index' specified can be either int32 or native int.
11245                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11246                     if (tiArray.IsNullObjRef())
11247                     {
11248                         if (lclTyp == TYP_REF)
11249                         { // we will say a deref of a null array yields a null ref
11250                             tiRetVal = typeInfo(TI_NULL);
11251                         }
11252                         else
11253                         {
11254                             tiRetVal = typeInfo(lclTyp);
11255                         }
11256                     }
11257                     else
11258                     {
11259                         tiRetVal             = verGetArrayElemType(tiArray);
11260                         typeInfo arrayElemTi = typeInfo(lclTyp);
11261 #ifdef _TARGET_64BIT_
11262                         if (opcode == CEE_LDELEM_I)
11263                         {
11264                             arrayElemTi = typeInfo::nativeInt();
11265                         }
11266
11267                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
11268                         {
11269                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
11270                         }
11271                         else
11272 #endif // _TARGET_64BIT_
11273                         {
11274                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
11275                         }
11276                     }
11277                     tiRetVal.NormaliseForStack();
11278                 }
11279             ARR_LD_POST_VERIFY:
11280
11281                 /* Pull the index value and array address */
11282                 op2 = impPopStack().val;
11283                 op1 = impPopStack().val;
11284                 assertImp(op1->gtType == TYP_REF);
11285
11286                 /* Check for null pointer - in the inliner case we simply abort */
11287
11288                 if (compIsForInlining())
11289                 {
11290                     if (op1->gtOper == GT_CNS_INT)
11291                     {
11292                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
11293                         return;
11294                     }
11295                 }
11296
11297                 op1 = impCheckForNullPointer(op1);
11298
11299                 /* Mark the block as containing an index expression */
11300
11301                 if (op1->gtOper == GT_LCL_VAR)
11302                 {
11303                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
11304                     {
11305                         block->bbFlags |= BBF_HAS_IDX_LEN;
11306                         optMethodFlags |= OMF_HAS_ARRAYREF;
11307                     }
11308                 }
11309
11310                 /* Create the index node and push it on the stack */
11311
11312                 op1 = gtNewIndexRef(lclTyp, op1, op2);
11313
11314                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
11315
11316                 if ((opcode == CEE_LDELEMA) || ldstruct ||
11317                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
11318                 {
11319                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
11320
11321                     // remember the element size
11322                     if (lclTyp == TYP_REF)
11323                     {
11324                         op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
11325                     }
11326                     else
11327                     {
11328                         // If ldElemClass is precisely a primitive type, use that; otherwise, preserve the struct type.
11329                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
11330                         {
11331                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
11332                         }
11333                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
11334                         if (lclTyp == TYP_STRUCT)
11335                         {
11336                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
11337                             op1->gtIndex.gtIndElemSize = size;
11338                             op1->gtType                = lclTyp;
11339                         }
11340                     }
11341
11342                     if ((opcode == CEE_LDELEMA) || ldstruct)
11343                     {
11344                         // wrap it in a &
11345                         lclTyp = TYP_BYREF;
11346
11347                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
11348                     }
11349                     else
11350                     {
11351                         assert(lclTyp != TYP_STRUCT);
11352                     }
11353                 }
11354
11355                 if (ldstruct)
11356                 {
11357                     // Create an OBJ for the result
11358                     op1 = gtNewObjNode(ldelemClsHnd, op1);
11359                     op1->gtFlags |= GTF_EXCEPT;
11360                 }
11361                 impPushOnStack(op1, tiRetVal);
11362                 break;
11363
11364             // stelem for reference and value types
11365             case CEE_STELEM:
11366
11367                 assertImp(sz == sizeof(unsigned));
11368
11369                 _impResolveToken(CORINFO_TOKENKIND_Class);
11370
11371                 JITDUMP(" %08X", resolvedToken.token);
11372
11373                 stelemClsHnd = resolvedToken.hClass;
11374
11375                 if (tiVerificationNeeded)
11376                 {
11377                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11378                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11379                     typeInfo tiValue = impStackTop().seTypeInfo;
11380
11381                     // As per ECMA, the 'index' specified can be either int32 or native int.
11382                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11383                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11384
11385                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11386                            "type operand incompatible with array element type");
11387                     arrayElem.NormaliseForStack();
11388                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11389                 }
11390
11391                 // If it's a reference type just behave as though it's a stelem.ref instruction
11392                 if (!eeIsValueClass(stelemClsHnd))
11393                 {
11394                     goto STELEM_REF_POST_VERIFY;
11395                 }
11396
11397                 // Otherwise extract the type
11398                 {
11399                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11400                     lclTyp             = JITtype2varType(jitTyp);
11401                     goto ARR_ST_POST_VERIFY;
11402                 }
11403
11404             case CEE_STELEM_REF:
11405
11406                 if (tiVerificationNeeded)
11407                 {
11408                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11409                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11410                     typeInfo tiValue = impStackTop().seTypeInfo;
11411
11412                     // As per ECMA, the 'index' specified can be either int32 or native int.
11413                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11414                     Verify(tiValue.IsObjRef(), "bad value");
11415
11416                     // we only check that it is an object reference; the helper does additional checks
11417                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11418                 }
11419
11420             STELEM_REF_POST_VERIFY:
11421
11422                 arrayNodeTo      = impStackTop(2).val;
11423                 arrayNodeToIndex = impStackTop(1).val;
11424                 arrayNodeFrom    = impStackTop().val;
11425
11426                 //
11427                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11428                 // lot of cases because of covariance, e.g. foo[] can be cast to object[].
11429                 //
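                // Illustrative C# of why the helper is needed:
                //     string[] s = new string[1];
                //     object[] o = s;          // legal: array covariance
                //     o[0] = new object();     // must throw ArrayTypeMismatchException
                // CORINFO_HELP_ARRADDR_ST performs that element-type check before storing.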
11430
11431                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
11432                 // This does not need CORINFO_HELP_ARRADDR_ST
11433                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11434                     arrayNodeTo->gtOper == GT_LCL_VAR &&
11435                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11436                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11437                 {
11438                     JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
11439                     lclTyp = TYP_REF;
11440                     goto ARR_ST_POST_VERIFY;
11441                 }
11442
11443                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11444                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11445                 {
11446                     JITDUMP("\nstelem of null: skipping covariant store check\n");
11447                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11448                     lclTyp = TYP_REF;
11449                     goto ARR_ST_POST_VERIFY;
11450                 }
11451
11452                 /* Call a helper function to do the assignment */
11453                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11454
11455                 goto SPILL_APPEND;
11456
11457             case CEE_STELEM_I1:
11458                 lclTyp = TYP_BYTE;
11459                 goto ARR_ST;
11460             case CEE_STELEM_I2:
11461                 lclTyp = TYP_SHORT;
11462                 goto ARR_ST;
11463             case CEE_STELEM_I:
11464                 lclTyp = TYP_I_IMPL;
11465                 goto ARR_ST;
11466             case CEE_STELEM_I4:
11467                 lclTyp = TYP_INT;
11468                 goto ARR_ST;
11469             case CEE_STELEM_I8:
11470                 lclTyp = TYP_LONG;
11471                 goto ARR_ST;
11472             case CEE_STELEM_R4:
11473                 lclTyp = TYP_FLOAT;
11474                 goto ARR_ST;
11475             case CEE_STELEM_R8:
11476                 lclTyp = TYP_DOUBLE;
11477                 goto ARR_ST;
11478
11479             ARR_ST:
11480
11481                 if (tiVerificationNeeded)
11482                 {
11483                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11484                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11485                     typeInfo tiValue = impStackTop().seTypeInfo;
11486
11487                     // As per ECMA, the 'index' specified can be either int32 or native int.
11488                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11489                     typeInfo arrayElem = typeInfo(lclTyp);
11490 #ifdef _TARGET_64BIT_
11491                     if (opcode == CEE_STELEM_I)
11492                     {
11493                         arrayElem = typeInfo::nativeInt();
11494                     }
11495 #endif // _TARGET_64BIT_
11496                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11497                            "bad array");
11498
11499                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11500                            "bad value");
11501                 }
11502
11503             ARR_ST_POST_VERIFY:
11504                 /* The strict order of evaluation is LHS-operands, RHS-operands,
11505                    range-check, and then assignment. However, codegen currently
11506                    does the range-check before evaluating the RHS-operands. So to
11507                    maintain strict ordering, we spill the stack. */
11508
11509                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11510                 {
11511                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11512                                                    "Strict ordering of exceptions for Array store"));
11513                 }
11514
11515                 /* Pull the new value from the stack */
11516                 op2 = impPopStack().val;
11517
11518                 /* Pull the index value */
11519                 op1 = impPopStack().val;
11520
11521                 /* Pull the array address */
11522                 op3 = impPopStack().val;
11523
11524                 assertImp(op3->gtType == TYP_REF);
11525                 if (op2->IsVarAddr())
11526                 {
11527                     op2->gtType = TYP_I_IMPL;
11528                 }
11529
11530                 op3 = impCheckForNullPointer(op3);
11531
11532                 // Mark the block as containing an index expression
11533
11534                 if (op3->gtOper == GT_LCL_VAR)
11535                 {
11536                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
11537                     {
11538                         block->bbFlags |= BBF_HAS_IDX_LEN;
11539                         optMethodFlags |= OMF_HAS_ARRAYREF;
11540                     }
11541                 }
11542
11543                 /* Create the index node */
11544
11545                 op1 = gtNewIndexRef(lclTyp, op3, op1);
11546
11547                 /* Create the assignment node and append it */
11548
11549                 if (lclTyp == TYP_STRUCT)
11550                 {
11551                     assert(stelemClsHnd != DUMMY_INIT(NULL));
11552
11553                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
11554                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
11555                 }
11556                 if (varTypeIsStruct(op1))
11557                 {
11558                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
11559                 }
11560                 else
11561                 {
11562                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
11563                     op1 = gtNewAssignNode(op1, op2);
11564                 }
11565
11566                 /* Mark the expression as containing an assignment */
11567
11568                 op1->gtFlags |= GTF_ASG;
11569
11570                 goto SPILL_APPEND;
11571
11572             case CEE_ADD:
11573                 oper = GT_ADD;
11574                 goto MATH_OP2;
11575
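            // The *.ovf forms below request an overflow check; for example, C# 'checked(a + b)'
            // compiles to add.ovf (signed) or add.ovf.un (unsigned).  They set 'ovfl' here, and
            // the resulting node gets GTF_OVERFLOW (plus GTF_UNSIGNED for the .un variants).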
11576             case CEE_ADD_OVF:
11577                 uns = false;
11578                 goto ADD_OVF;
11579             case CEE_ADD_OVF_UN:
11580                 uns = true;
11581                 goto ADD_OVF;
11582
11583             ADD_OVF:
11584                 ovfl     = true;
11585                 callNode = false;
11586                 oper     = GT_ADD;
11587                 goto MATH_OP2_FLAGS;
11588
11589             case CEE_SUB:
11590                 oper = GT_SUB;
11591                 goto MATH_OP2;
11592
11593             case CEE_SUB_OVF:
11594                 uns = false;
11595                 goto SUB_OVF;
11596             case CEE_SUB_OVF_UN:
11597                 uns = true;
11598                 goto SUB_OVF;
11599
11600             SUB_OVF:
11601                 ovfl     = true;
11602                 callNode = false;
11603                 oper     = GT_SUB;
11604                 goto MATH_OP2_FLAGS;
11605
11606             case CEE_MUL:
11607                 oper = GT_MUL;
11608                 goto MATH_MAYBE_CALL_NO_OVF;
11609
11610             case CEE_MUL_OVF:
11611                 uns = false;
11612                 goto MUL_OVF;
11613             case CEE_MUL_OVF_UN:
11614                 uns = true;
11615                 goto MUL_OVF;
11616
11617             MUL_OVF:
11618                 ovfl = true;
11619                 oper = GT_MUL;
11620                 goto MATH_MAYBE_CALL_OVF;
11621
11622             // Other binary math operations
11623
11624             case CEE_DIV:
11625                 oper = GT_DIV;
11626                 goto MATH_MAYBE_CALL_NO_OVF;
11627
11628             case CEE_DIV_UN:
11629                 oper = GT_UDIV;
11630                 goto MATH_MAYBE_CALL_NO_OVF;
11631
11632             case CEE_REM:
11633                 oper = GT_MOD;
11634                 goto MATH_MAYBE_CALL_NO_OVF;
11635
11636             case CEE_REM_UN:
11637                 oper = GT_UMOD;
11638                 goto MATH_MAYBE_CALL_NO_OVF;
11639
11640             MATH_MAYBE_CALL_NO_OVF:
11641                 ovfl = false;
11642             MATH_MAYBE_CALL_OVF:
11643                 // The morpher has some complex logic about when to turn differently
11644                 // typed nodes on different platforms into helper calls. We
11645                 // need to either duplicate that logic here, or just
11646                 // pessimistically make all the nodes large enough to become
11647                 // call nodes.  Since call nodes aren't that much larger and
11648                 // these opcodes are infrequent enough, I chose the latter.
11649                 callNode = true;
11650                 goto MATH_OP2_FLAGS;
11651
11652             case CEE_AND:
11653                 oper = GT_AND;
11654                 goto MATH_OP2;
11655             case CEE_OR:
11656                 oper = GT_OR;
11657                 goto MATH_OP2;
11658             case CEE_XOR:
11659                 oper = GT_XOR;
11660                 goto MATH_OP2;
11661
11662             MATH_OP2: // For default values of 'ovfl' and 'callNode'
11663
11664                 ovfl     = false;
11665                 callNode = false;
11666
11667             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11668
11669                 /* Pull two values and push back the result */
11670
11671                 if (tiVerificationNeeded)
11672                 {
11673                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11674                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11675
11676                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11677                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11678                     {
11679                         Verify(tiOp1.IsNumberType(), "not number");
11680                     }
11681                     else
11682                     {
11683                         Verify(tiOp1.IsIntegerType(), "not integer");
11684                     }
11685
11686                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11687
11688                     tiRetVal = tiOp1;
11689
11690 #ifdef _TARGET_64BIT_
11691                     if (tiOp2.IsNativeIntType())
11692                     {
11693                         tiRetVal = tiOp2;
11694                     }
11695 #endif // _TARGET_64BIT_
11696                 }
11697
11698                 op2 = impPopStack().val;
11699                 op1 = impPopStack().val;
11700
11701 #if !CPU_HAS_FP_SUPPORT
11702                 if (varTypeIsFloating(op1->gtType))
11703                 {
11704                     callNode = true;
11705                 }
11706 #endif
11707                 /* Can't do arithmetic with references */
11708                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11709
11710                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11711                 // if it is in the stack)
11712                 impBashVarAddrsToI(op1, op2);
11713
11714                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11715
11716                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11717
11718                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11719
11720                 if (op2->gtOper == GT_CNS_INT)
11721                 {
11722                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11723                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11724
11725                     {
11726                         impPushOnStack(op1, tiRetVal);
11727                         break;
11728                     }
11729                 }
11730
11731 #if !FEATURE_X87_DOUBLES
11732                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11733                 //
11734                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11735                 {
11736                     if (op1->TypeGet() != type)
11737                     {
11738                         // We insert a cast of op1 to 'type'
11739                         op1 = gtNewCastNode(type, op1, type);
11740                     }
11741                     if (op2->TypeGet() != type)
11742                     {
11743                         // We insert a cast of op2 to 'type'
11744                         op2 = gtNewCastNode(type, op2, type);
11745                     }
11746                 }
11747 #endif // !FEATURE_X87_DOUBLES
11748
11749 #if SMALL_TREE_NODES
11750                 if (callNode)
11751                 {
11752                     /* These operators can later be transformed into 'GT_CALL' */
11753
11754                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11755 #ifndef _TARGET_ARM_
11756                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11757                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11758                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11759                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11760 #endif
11761                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11762                     // that we'll need to transform into a general large node, but rather specifically
11763                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11764                     // and a CALL is no longer the largest.
11765                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11766                     // than an "if".
11767                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11768                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11769                 }
11770                 else
11771 #endif // SMALL_TREE_NODES
11772                 {
11773                     op1 = gtNewOperNode(oper, type, op1, op2);
11774                 }
11775
11776                 /* Special case: integer/long division may throw an exception */
11777
11778                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
11779                 {
11780                     op1->gtFlags |= GTF_EXCEPT;
11781                 }
11782
11783                 if (ovfl)
11784                 {
11785                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11786                     if (ovflType != TYP_UNKNOWN)
11787                     {
11788                         op1->gtType = ovflType;
11789                     }
11790                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11791                     if (uns)
11792                     {
11793                         op1->gtFlags |= GTF_UNSIGNED;
11794                     }
11795                 }
11796
11797                 impPushOnStack(op1, tiRetVal);
11798                 break;
11799
11800             case CEE_SHL:
11801                 oper = GT_LSH;
11802                 goto CEE_SH_OP2;
11803
11804             case CEE_SHR:
11805                 oper = GT_RSH;
11806                 goto CEE_SH_OP2;
11807             case CEE_SHR_UN:
11808                 oper = GT_RSZ;
11809                 goto CEE_SH_OP2;
11810
11811             CEE_SH_OP2:
11812                 if (tiVerificationNeeded)
11813                 {
11814                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11815                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11816                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11817                     tiRetVal = tiVal;
11818                 }
11819                 op2 = impPopStack().val;
11820                 op1 = impPopStack().val; // operand to be shifted
11821                 impBashVarAddrsToI(op1, op2);
11822
11823                 type = genActualType(op1->TypeGet());
11824                 op1  = gtNewOperNode(oper, type, op1, op2);
11825
11826                 impPushOnStack(op1, tiRetVal);
11827                 break;
11828
11829             case CEE_NOT:
11830                 if (tiVerificationNeeded)
11831                 {
11832                     tiRetVal = impStackTop().seTypeInfo;
11833                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11834                 }
11835
11836                 op1 = impPopStack().val;
11837                 impBashVarAddrsToI(op1, nullptr);
11838                 type = genActualType(op1->TypeGet());
11839                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11840                 break;
11841
11842             case CEE_CKFINITE:
11843                 if (tiVerificationNeeded)
11844                 {
11845                     tiRetVal = impStackTop().seTypeInfo;
11846                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11847                 }
11848                 op1  = impPopStack().val;
11849                 type = op1->TypeGet();
11850                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11851                 op1->gtFlags |= GTF_EXCEPT;
11852
11853                 impPushOnStack(op1, tiRetVal);
11854                 break;
11855
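            // 'leave' is how IL branches out of a try or catch region; for example, the normal
            // exit of a C# 'try { ... } finally { ... }' compiles to leave, and any enclosed
            // finally blocks run via the callfinally blocks that impImportLeave sets up.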
11856             case CEE_LEAVE:
11857
11858                 val     = getI4LittleEndian(codeAddr); // jump distance
11859                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11860                 goto LEAVE;
11861
11862             case CEE_LEAVE_S:
11863                 val     = getI1LittleEndian(codeAddr); // jump distance
11864                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11865
11866             LEAVE:
11867
11868                 if (compIsForInlining())
11869                 {
11870                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11871                     return;
11872                 }
11873
11874                 JITDUMP(" %04X", jmpAddr);
11875                 if (block->bbJumpKind != BBJ_LEAVE)
11876                 {
11877                     impResetLeaveBlock(block, jmpAddr);
11878                 }
11879
11880                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11881                 impImportLeave(block);
11882                 impNoteBranchOffs();
11883
11884                 break;
11885
11886             case CEE_BR:
11887             case CEE_BR_S:
11888                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11889
11890                 if (compIsForInlining() && jmpDist == 0)
11891                 {
11892                     break; /* NOP */
11893                 }
11894
11895                 impNoteBranchOffs();
11896                 break;
11897
11898             case CEE_BRTRUE:
11899             case CEE_BRTRUE_S:
11900             case CEE_BRFALSE:
11901             case CEE_BRFALSE_S:
11902
11903                 /* Pop the comparand (now there's a neat term) from the stack */
11904                 if (tiVerificationNeeded)
11905                 {
11906                     typeInfo& tiVal = impStackTop().seTypeInfo;
11907                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11908                            "bad value");
11909                 }
11910
11911                 op1  = impPopStack().val;
11912                 type = op1->TypeGet();
11913
11914                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11915                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11916                 {
11917                     block->bbJumpKind = BBJ_NONE;
11918
11919                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11920                     {
11921                         op1 = gtUnusedValNode(op1);
11922                         goto SPILL_APPEND;
11923                     }
11924                     else
11925                     {
11926                         break;
11927                     }
11928                 }
11929
11930                 if (op1->OperIsCompare())
11931                 {
11932                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11933                     {
11934                         // Flip the sense of the compare
11935
11936                         op1 = gtReverseCond(op1);
11937                     }
11938                 }
11939                 else
11940                 {
11941                     /* We'll compare against an equally-sized integer 0 */
11942                     /* For small types, we always compare against int   */
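                    /* For example, 'brtrue' on an object reference is imported as if it were
                       'obj != null': we synthesize a GT_NE (or GT_EQ for brfalse) against a
                       zero constant of the operand's actual type. */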
11943                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11944
11945                     /* Create the comparison operator and try to fold it */
11946
11947                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11948                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11949                 }
11950
11951             // fall through
11952
11953             COND_JUMP:
11954
11955                 /* Fold comparison if we can */
11956
11957                 op1 = gtFoldExpr(op1);
11958
11959                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq' */
11960                 /* Don't make any blocks unreachable in import only mode */
11961
11962                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11963                 {
11964                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11965                        unreachable under compDbgCode */
11966                     assert(!opts.compDbgCode);
11967
11968                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11969                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11970                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11971                                                                          // block for the second time
11972
11973                     block->bbJumpKind = foldedJumpKind;
11974 #ifdef DEBUG
11975                     if (verbose)
11976                     {
11977                         if (op1->gtIntCon.gtIconVal)
11978                         {
11979                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11980                                    block->bbJumpDest->bbNum);
11981                         }
11982                         else
11983                         {
11984                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11985                         }
11986                     }
11987 #endif
11988                     break;
11989                 }
11990
11991                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11992
11993                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11994                    in impImportBlock(block). For correct line numbers, spill stack. */
11995
11996                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11997                 {
11998                     impSpillStackEnsure(true);
11999                 }
12000
12001                 goto SPILL_APPEND;
12002
12003             case CEE_CEQ:
12004                 oper = GT_EQ;
12005                 uns  = false;
12006                 goto CMP_2_OPs;
12007             case CEE_CGT_UN:
12008                 oper = GT_GT;
12009                 uns  = true;
12010                 goto CMP_2_OPs;
12011             case CEE_CGT:
12012                 oper = GT_GT;
12013                 uns  = false;
12014                 goto CMP_2_OPs;
12015             case CEE_CLT_UN:
12016                 oper = GT_LT;
12017                 uns  = true;
12018                 goto CMP_2_OPs;
12019             case CEE_CLT:
12020                 oper = GT_LT;
12021                 uns  = false;
12022                 goto CMP_2_OPs;
12023
12024             CMP_2_OPs:
12025                 if (tiVerificationNeeded)
12026                 {
12027                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12028                     tiRetVal = typeInfo(TI_INT);
12029                 }
12030
12031                 op2 = impPopStack().val;
12032                 op1 = impPopStack().val;
12033
12034 #ifdef _TARGET_64BIT_
12035                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
12036                 {
12037                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12038                 }
12039                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
12040                 {
12041                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12042                 }
12043 #endif // _TARGET_64BIT_
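                      // (On 64-bit targets the casts above widen a 32-bit operand so that both
                      // sides of the compare have the same width.)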
12044
12045                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12046                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12047                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12048
12049                 /* Create the comparison node */
12050
12051                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12052
12053                 /* TODO: setting both flags when only one is appropriate */
12054                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
12055                 {
12056                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
12057                 }
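                      // Per the TODO above, only one of these flags is meaningful for a given
                      // compare: GTF_UNSIGNED for integer operands, GTF_RELOP_NAN_UN (unordered
                      // NaN semantics) for floating-point operands.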
12058
12059                 // Fold result, if possible.
12060                 op1 = gtFoldExpr(op1);
12061
12062                 impPushOnStack(op1, tiRetVal);
12063                 break;
12064
12065             case CEE_BEQ_S:
12066             case CEE_BEQ:
12067                 oper = GT_EQ;
12068                 goto CMP_2_OPs_AND_BR;
12069
12070             case CEE_BGE_S:
12071             case CEE_BGE:
12072                 oper = GT_GE;
12073                 goto CMP_2_OPs_AND_BR;
12074
12075             case CEE_BGE_UN_S:
12076             case CEE_BGE_UN:
12077                 oper = GT_GE;
12078                 goto CMP_2_OPs_AND_BR_UN;
12079
12080             case CEE_BGT_S:
12081             case CEE_BGT:
12082                 oper = GT_GT;
12083                 goto CMP_2_OPs_AND_BR;
12084
12085             case CEE_BGT_UN_S:
12086             case CEE_BGT_UN:
12087                 oper = GT_GT;
12088                 goto CMP_2_OPs_AND_BR_UN;
12089
12090             case CEE_BLE_S:
12091             case CEE_BLE:
12092                 oper = GT_LE;
12093                 goto CMP_2_OPs_AND_BR;
12094
12095             case CEE_BLE_UN_S:
12096             case CEE_BLE_UN:
12097                 oper = GT_LE;
12098                 goto CMP_2_OPs_AND_BR_UN;
12099
12100             case CEE_BLT_S:
12101             case CEE_BLT:
12102                 oper = GT_LT;
12103                 goto CMP_2_OPs_AND_BR;
12104
12105             case CEE_BLT_UN_S:
12106             case CEE_BLT_UN:
12107                 oper = GT_LT;
12108                 goto CMP_2_OPs_AND_BR_UN;
12109
12110             case CEE_BNE_UN_S:
12111             case CEE_BNE_UN:
12112                 oper = GT_NE;
12113                 goto CMP_2_OPs_AND_BR_UN;
12114
12115             CMP_2_OPs_AND_BR_UN:
12116                 uns       = true;
12117                 unordered = true;
12118                 goto CMP_2_OPs_AND_BR_ALL;
12119             CMP_2_OPs_AND_BR:
12120                 uns       = false;
12121                 unordered = false;
12122                 goto CMP_2_OPs_AND_BR_ALL;
12123             CMP_2_OPs_AND_BR_ALL:
12124
12125                 if (tiVerificationNeeded)
12126                 {
12127                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12128                 }
12129
12130                 /* Pull two values */
12131                 op2 = impPopStack().val;
12132                 op1 = impPopStack().val;
12133
12134 #ifdef _TARGET_64BIT_
12135                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
12136                 {
12137                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12138                 }
12139                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
12140                 {
12141                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
12142                 }
12143 #endif // _TARGET_64BIT_
12144
12145                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12146                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12147                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12148
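                      // If the conditional branch would just fall into the next block anyway,
                      // drop the compare entirely but preserve any side effects of both operands.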
12149                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12150                 {
12151                     block->bbJumpKind = BBJ_NONE;
12152
12153                     if (op1->gtFlags & GTF_GLOB_EFFECT)
12154                     {
12155                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12156                                                        "Branch to next Optimization, op1 side effect"));
12157                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12158                     }
12159                     if (op2->gtFlags & GTF_GLOB_EFFECT)
12160                     {
12161                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12162                                                        "Branch to next Optimization, op2 side effect"));
12163                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12164                     }
12165
12166 #ifdef DEBUG
12167                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
12168                     {
12169                         impNoteLastILoffs();
12170                     }
12171 #endif
12172                     break;
12173                 }
12174 #if !FEATURE_X87_DOUBLES
12175                 // We can generate a compare of different-sized floating point op1 and op2;
12176                 // we insert a cast to make the operand types match.
12177                 //
12178                 if (varTypeIsFloating(op1->TypeGet()))
12179                 {
12180                     if (op1->TypeGet() != op2->TypeGet())
12181                     {
12182                         assert(varTypeIsFloating(op2->TypeGet()));
12183
12184                         // say op1=double, op2=float. To avoid loss of precision
12185                         // while comparing, op2 is converted to double and double
12186                         // comparison is done.
12187                         if (op1->TypeGet() == TYP_DOUBLE)
12188                         {
12189                             // We insert a cast of op2 to TYP_DOUBLE
12190                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
12191                         }
12192                         else if (op2->TypeGet() == TYP_DOUBLE)
12193                         {
12194                             // We insert a cast of op1 to TYP_DOUBLE
12195                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
12196                         }
12197                     }
12198                 }
12199 #endif // !FEATURE_X87_DOUBLES
12200
12201                 /* Create and append the operator */
12202
12203                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12204
12205                 if (uns)
12206                 {
12207                     op1->gtFlags |= GTF_UNSIGNED;
12208                 }
12209
12210                 if (unordered)
12211                 {
12212                     op1->gtFlags |= GTF_RELOP_NAN_UN;
12213                 }
12214
12215                 goto COND_JUMP;
12216
12217             case CEE_SWITCH:
12218                 assert(!compIsForInlining());
12219
12220                 if (tiVerificationNeeded)
12221                 {
12222                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
12223                 }
12224                 /* Pop the switch value off the stack */
12225                 op1 = impPopStack().val;
12226                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
12227
12228                 /* We can create a switch node */
12229
12230                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
12231
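                      // The switch operand in the IL stream is a 4-byte case count followed by
                      // that many 4-byte branch offsets; the branch targets themselves were
                      // already handled when the basic blocks were built, so here we only need
                      // to skip past the table.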
12232                 val = (int)getU4LittleEndian(codeAddr);
12233                 codeAddr += 4 + val * 4; // skip over the switch-table
12234
12235                 goto SPILL_APPEND;
12236
12237             /************************** Casting OPCODES ***************************/
12238
12239             case CEE_CONV_OVF_I1:
12240                 lclTyp = TYP_BYTE;
12241                 goto CONV_OVF;
12242             case CEE_CONV_OVF_I2:
12243                 lclTyp = TYP_SHORT;
12244                 goto CONV_OVF;
12245             case CEE_CONV_OVF_I:
12246                 lclTyp = TYP_I_IMPL;
12247                 goto CONV_OVF;
12248             case CEE_CONV_OVF_I4:
12249                 lclTyp = TYP_INT;
12250                 goto CONV_OVF;
12251             case CEE_CONV_OVF_I8:
12252                 lclTyp = TYP_LONG;
12253                 goto CONV_OVF;
12254
12255             case CEE_CONV_OVF_U1:
12256                 lclTyp = TYP_UBYTE;
12257                 goto CONV_OVF;
12258             case CEE_CONV_OVF_U2:
12259                 lclTyp = TYP_USHORT;
12260                 goto CONV_OVF;
12261             case CEE_CONV_OVF_U:
12262                 lclTyp = TYP_U_IMPL;
12263                 goto CONV_OVF;
12264             case CEE_CONV_OVF_U4:
12265                 lclTyp = TYP_UINT;
12266                 goto CONV_OVF;
12267             case CEE_CONV_OVF_U8:
12268                 lclTyp = TYP_ULONG;
12269                 goto CONV_OVF;
12270
12271             case CEE_CONV_OVF_I1_UN:
12272                 lclTyp = TYP_BYTE;
12273                 goto CONV_OVF_UN;
12274             case CEE_CONV_OVF_I2_UN:
12275                 lclTyp = TYP_SHORT;
12276                 goto CONV_OVF_UN;
12277             case CEE_CONV_OVF_I_UN:
12278                 lclTyp = TYP_I_IMPL;
12279                 goto CONV_OVF_UN;
12280             case CEE_CONV_OVF_I4_UN:
12281                 lclTyp = TYP_INT;
12282                 goto CONV_OVF_UN;
12283             case CEE_CONV_OVF_I8_UN:
12284                 lclTyp = TYP_LONG;
12285                 goto CONV_OVF_UN;
12286
12287             case CEE_CONV_OVF_U1_UN:
12288                 lclTyp = TYP_UBYTE;
12289                 goto CONV_OVF_UN;
12290             case CEE_CONV_OVF_U2_UN:
12291                 lclTyp = TYP_USHORT;
12292                 goto CONV_OVF_UN;
12293             case CEE_CONV_OVF_U_UN:
12294                 lclTyp = TYP_U_IMPL;
12295                 goto CONV_OVF_UN;
12296             case CEE_CONV_OVF_U4_UN:
12297                 lclTyp = TYP_UINT;
12298                 goto CONV_OVF_UN;
12299             case CEE_CONV_OVF_U8_UN:
12300                 lclTyp = TYP_ULONG;
12301                 goto CONV_OVF_UN;
12302
12303             CONV_OVF_UN:
12304                 uns = true;
12305                 goto CONV_OVF_COMMON;
12306             CONV_OVF:
12307                 uns = false;
12308                 goto CONV_OVF_COMMON;
12309
12310             CONV_OVF_COMMON:
12311                 ovfl = true;
12312                 goto _CONV;
12313
12314             case CEE_CONV_I1:
12315                 lclTyp = TYP_BYTE;
12316                 goto CONV;
12317             case CEE_CONV_I2:
12318                 lclTyp = TYP_SHORT;
12319                 goto CONV;
12320             case CEE_CONV_I:
12321                 lclTyp = TYP_I_IMPL;
12322                 goto CONV;
12323             case CEE_CONV_I4:
12324                 lclTyp = TYP_INT;
12325                 goto CONV;
12326             case CEE_CONV_I8:
12327                 lclTyp = TYP_LONG;
12328                 goto CONV;
12329
12330             case CEE_CONV_U1:
12331                 lclTyp = TYP_UBYTE;
12332                 goto CONV;
12333             case CEE_CONV_U2:
12334                 lclTyp = TYP_USHORT;
12335                 goto CONV;
12336 #if (REGSIZE_BYTES == 8)
12337             case CEE_CONV_U:
12338                 lclTyp = TYP_U_IMPL;
12339                 goto CONV_UN;
12340 #else
12341             case CEE_CONV_U:
12342                 lclTyp = TYP_U_IMPL;
12343                 goto CONV;
12344 #endif
12345             case CEE_CONV_U4:
12346                 lclTyp = TYP_UINT;
12347                 goto CONV;
12348             case CEE_CONV_U8:
12349                 lclTyp = TYP_ULONG;
12350                 goto CONV_UN;
12351
12352             case CEE_CONV_R4:
12353                 lclTyp = TYP_FLOAT;
12354                 goto CONV;
12355             case CEE_CONV_R8:
12356                 lclTyp = TYP_DOUBLE;
12357                 goto CONV;
12358
12359             case CEE_CONV_R_UN:
12360                 lclTyp = TYP_DOUBLE;
12361                 goto CONV_UN;
12362
12363             CONV_UN:
12364                 uns  = true;
12365                 ovfl = false;
12366                 goto _CONV;
12367
12368             CONV:
12369                 uns  = false;
12370                 ovfl = false;
12371                 goto _CONV;
12372
12373             _CONV:
12374                 // just check that we have a number on the stack
12375                 if (tiVerificationNeeded)
12376                 {
12377                     const typeInfo& tiVal = impStackTop().seTypeInfo;
12378                     Verify(tiVal.IsNumberType(), "bad arg");
12379
12380 #ifdef _TARGET_64BIT_
12381                     bool isNative = false;
12382
12383                     switch (opcode)
12384                     {
12385                         case CEE_CONV_OVF_I:
12386                         case CEE_CONV_OVF_I_UN:
12387                         case CEE_CONV_I:
12388                         case CEE_CONV_OVF_U:
12389                         case CEE_CONV_OVF_U_UN:
12390                         case CEE_CONV_U:
12391                             isNative = true;
12392                         default:
12393                             // leave 'isNative' = false;
12394                             break;
12395                     }
12396                     if (isNative)
12397                     {
12398                         tiRetVal = typeInfo::nativeInt();
12399                     }
12400                     else
12401 #endif // _TARGET_64BIT_
12402                     {
12403                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12404                     }
12405                 }
12406
12407                 // Only conversions from FLOAT or DOUBLE to an integer type,
12408                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls.
12409
12410                 if (varTypeIsFloating(lclTyp))
12411                 {
12412                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12413 #ifdef _TARGET_64BIT_
12414                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12415                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
12416                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12417                                // and generate SSE2 code instead of going through helper calls.
12418                                || (impStackTop().val->TypeGet() == TYP_BYREF)
12419 #endif
12420                         ;
12421                 }
12422                 else
12423                 {
12424                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12425                 }
12426
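                      // 'callNode' selects the large cast node form below, since these casts may
                      // later be morphed into helper calls.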
12427                 // At this point uns, ovfl and callNode are all set
12428
12429                 op1 = impPopStack().val;
12430                 impBashVarAddrsToI(op1);
12431
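                      // If the source of a narrowing cast is "x & constant", the cast and/or the
                      // AND may be redundant: e.g. "x & 0x7F" already fits in a signed byte, so a
                      // following conv.i1 can be dropped, while "x & 0xFF" before conv.i1 makes
                      // the AND itself redundant because the cast sign-extends from the low byte
                      // anyway.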
12432                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12433                 {
12434                     op2 = op1->gtOp.gtOp2;
12435
12436                     if (op2->gtOper == GT_CNS_INT)
12437                     {
12438                         ssize_t ival = op2->gtIntCon.gtIconVal;
12439                         ssize_t mask, umask;
12440
12441                         switch (lclTyp)
12442                         {
12443                             case TYP_BYTE:
12444                             case TYP_UBYTE:
12445                                 mask  = 0x00FF;
12446                                 umask = 0x007F;
12447                                 break;
12448                             case TYP_USHORT:
12449                             case TYP_SHORT:
12450                                 mask  = 0xFFFF;
12451                                 umask = 0x7FFF;
12452                                 break;
12453
12454                             default:
12455                                 assert(!"unexpected type");
12456                                 return;
12457                         }
12458
12459                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12460                         {
12461                             /* Toss the cast, it's a waste of time */
12462
12463                             impPushOnStack(op1, tiRetVal);
12464                             break;
12465                         }
12466                         else if (ival == mask)
12467                         {
12468                             /* Toss the masking, it's a waste of time, since
12469                                we sign-extend from the small value anyway */
12470
12471                             op1 = op1->gtOp.gtOp1;
12472                         }
12473                     }
12474                 }
12475
12476                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
12477                     since the result of a cast to one of the 'small' integer
12478                     types is an integer.
12479                  */
12480
12481                 type = genActualType(lclTyp);
12482
12483 #if SMALL_TREE_NODES
12484                 if (callNode)
12485                 {
12486                     op1 = gtNewCastNodeL(type, op1, lclTyp);
12487                 }
12488                 else
12489 #endif // SMALL_TREE_NODES
12490                 {
12491                     op1 = gtNewCastNode(type, op1, lclTyp);
12492                 }
12493
12494                 if (ovfl)
12495                 {
12496                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12497                 }
12498                 if (uns)
12499                 {
12500                     op1->gtFlags |= GTF_UNSIGNED;
12501                 }
12502                 impPushOnStack(op1, tiRetVal);
12503                 break;
12504
12505             case CEE_NEG:
12506                 if (tiVerificationNeeded)
12507                 {
12508                     tiRetVal = impStackTop().seTypeInfo;
12509                     Verify(tiRetVal.IsNumberType(), "Bad arg");
12510                 }
12511
12512                 op1 = impPopStack().val;
12513                 impBashVarAddrsToI(op1, nullptr);
12514                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12515                 break;
12516
12517             case CEE_POP:
12518             {
12519                 /* Pull the top value from the stack */
12520
12521                 StackEntry se = impPopStack();
12522                 clsHnd        = se.seTypeInfo.GetClassHandle();
12523                 op1           = se.val;
12524
12525                 /* Get hold of the type of the value being duplicated */
12526
12527                 lclTyp = genActualType(op1->gtType);
12528
12529                 /* Does the value have any side effects? */
12530
12531                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
12532                 {
12533                     // Since we are throwing away the value, just normalize
12534                     // it to its address.  This is more efficient.
12535
12536                     if (varTypeIsStruct(op1))
12537                     {
12538 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
12539                         // Non-calls, such as obj or ret_expr, have to go through this.
12540                         // Calls with large struct return value have to go through this.
12541                         // Helper calls with small struct return value also have to go
12542                         // through this since they do not follow Unix calling convention.
12543                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
12544                             op1->AsCall()->gtCallType == CT_HELPER)
12545 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
12546                         {
12547                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
12548                         }
12549                     }
12550
12551                     // If op1 is a non-overflow cast, throw it away since it is useless.
12552                     // Another reason for throwing away the useless cast is in the context of
12553                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
12554                     // The cast gets added as part of importing GT_CALL, which gets in the way
12555                     // of fgMorphCall() on the forms of tail call nodes that we assert.
12556                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
12557                     {
12558                         op1 = op1->gtOp.gtOp1;
12559                     }
12560
12561                     // If 'op1' is an expression, create an assignment node.
12562                     // This helps analyses (like CSE) work correctly.
12563
12564                     if (op1->gtOper != GT_CALL)
12565                     {
12566                         op1 = gtUnusedValNode(op1);
12567                     }
12568
12569                     /* Append the value to the tree list */
12570                     goto SPILL_APPEND;
12571                 }
12572
12573                 /* No side effects - just throw the <BEEP> thing away */
12574             }
12575             break;
12576
12577             case CEE_DUP:
12578             {
12579                 if (tiVerificationNeeded)
12580                 {
12581                     // Dup could start the beginning of a delegate creation sequence, remember that
12582                     delegateCreateStart = codeAddr - 1;
12583                     impStackTop(0);
12584                 }
12585
12586                 // If the expression to dup is simple, just clone it.
12587                 // Otherwise spill it to a temp, and reload the temp
12588                 // twice.
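                      // For example, "dup" of a call result is spilled to a temp (when not
                      // generating debuggable code) so the call executes exactly once, while
                      // "dup" of a local or a constant zero is simply cloned.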
12589                 StackEntry se   = impPopStack();
12590                 GenTree*   tree = se.val;
12591                 tiRetVal        = se.seTypeInfo;
12592                 op1             = tree;
12593
12594                 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
12595                 {
12596                     const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
12597                     impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
12598                     var_types type = genActualType(lvaTable[tmpNum].TypeGet());
12599                     op1            = gtNewLclvNode(tmpNum, type);
12600
12601                     // Propagate type info to the temp from the stack and the original tree
12602                     if (type == TYP_REF)
12603                     {
12604                         lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
12605                     }
12606                 }
12607
12608                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12609                                    nullptr DEBUGARG("DUP instruction"));
12610
12611                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12612                 impPushOnStack(op1, tiRetVal);
12613                 impPushOnStack(op2, tiRetVal);
12614             }
12615             break;
12616
12617             case CEE_STIND_I1:
12618                 lclTyp = TYP_BYTE;
12619                 goto STIND;
12620             case CEE_STIND_I2:
12621                 lclTyp = TYP_SHORT;
12622                 goto STIND;
12623             case CEE_STIND_I4:
12624                 lclTyp = TYP_INT;
12625                 goto STIND;
12626             case CEE_STIND_I8:
12627                 lclTyp = TYP_LONG;
12628                 goto STIND;
12629             case CEE_STIND_I:
12630                 lclTyp = TYP_I_IMPL;
12631                 goto STIND;
12632             case CEE_STIND_REF:
12633                 lclTyp = TYP_REF;
12634                 goto STIND;
12635             case CEE_STIND_R4:
12636                 lclTyp = TYP_FLOAT;
12637                 goto STIND;
12638             case CEE_STIND_R8:
12639                 lclTyp = TYP_DOUBLE;
12640                 goto STIND;
12641             STIND:
12642
12643                 if (tiVerificationNeeded)
12644                 {
12645                     typeInfo instrType(lclTyp);
12646 #ifdef _TARGET_64BIT_
12647                     if (opcode == CEE_STIND_I)
12648                     {
12649                         instrType = typeInfo::nativeInt();
12650                     }
12651 #endif // _TARGET_64BIT_
12652                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12653                 }
12654                 else
12655                 {
12656                     compUnsafeCastUsed = true; // Have to go conservative
12657                 }
12658
12659             STIND_POST_VERIFY:
12660
12661                 op2 = impPopStack().val; // value to store
12662                 op1 = impPopStack().val; // address to store to
12663
12664                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12665                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12666
12667                 impBashVarAddrsToI(op1, op2);
12668
12669                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12670
12671 #ifdef _TARGET_64BIT_
12672                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12673                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12674                 {
12675                     op2->gtType = TYP_I_IMPL;
12676                 }
12677                 else
12678                 {
12679                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12680                     //
12681                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12682                     {
12683                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12684                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12685                     }
12686                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12687                     //
12688                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12689                     {
12690                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12691                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12692                     }
12693                 }
12694 #endif // _TARGET_64BIT_
12695
12696                 if (opcode == CEE_STIND_REF)
12697                 {
12698                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12699                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12700                     lclTyp = genActualType(op2->TypeGet());
12701                 }
12702
12703 // Check target type.
12704 #ifdef DEBUG
12705                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12706                 {
12707                     if (op2->gtType == TYP_BYREF)
12708                     {
12709                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12710                     }
12711                     else if (lclTyp == TYP_BYREF)
12712                     {
12713                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12714                     }
12715                 }
12716                 else
12717                 {
12718                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12719                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12720                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12721                 }
12722 #endif
12723
12724                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12725
12726                 // stind could point anywhere, for example a boxed class static int
12727                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12728
12729                 if (prefixFlags & PREFIX_VOLATILE)
12730                 {
12731                     assert(op1->OperGet() == GT_IND);
12732                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12733                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12734                     op1->gtFlags |= GTF_IND_VOLATILE;
12735                 }
12736
12737                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12738                 {
12739                     assert(op1->OperGet() == GT_IND);
12740                     op1->gtFlags |= GTF_IND_UNALIGNED;
12741                 }
12742
12743                 op1 = gtNewAssignNode(op1, op2);
12744                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12745
12746                 // Spill side-effects AND global-data-accesses
12747                 if (verCurrentState.esStackDepth > 0)
12748                 {
12749                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12750                 }
12751
12752                 goto APPEND;
12753
12754             case CEE_LDIND_I1:
12755                 lclTyp = TYP_BYTE;
12756                 goto LDIND;
12757             case CEE_LDIND_I2:
12758                 lclTyp = TYP_SHORT;
12759                 goto LDIND;
12760             case CEE_LDIND_U4:
12761             case CEE_LDIND_I4:
12762                 lclTyp = TYP_INT;
12763                 goto LDIND;
12764             case CEE_LDIND_I8:
12765                 lclTyp = TYP_LONG;
12766                 goto LDIND;
12767             case CEE_LDIND_REF:
12768                 lclTyp = TYP_REF;
12769                 goto LDIND;
12770             case CEE_LDIND_I:
12771                 lclTyp = TYP_I_IMPL;
12772                 goto LDIND;
12773             case CEE_LDIND_R4:
12774                 lclTyp = TYP_FLOAT;
12775                 goto LDIND;
12776             case CEE_LDIND_R8:
12777                 lclTyp = TYP_DOUBLE;
12778                 goto LDIND;
12779             case CEE_LDIND_U1:
12780                 lclTyp = TYP_UBYTE;
12781                 goto LDIND;
12782             case CEE_LDIND_U2:
12783                 lclTyp = TYP_USHORT;
12784                 goto LDIND;
12785             LDIND:
12786
12787                 if (tiVerificationNeeded)
12788                 {
12789                     typeInfo lclTiType(lclTyp);
12790 #ifdef _TARGET_64BIT_
12791                     if (opcode == CEE_LDIND_I)
12792                     {
12793                         lclTiType = typeInfo::nativeInt();
12794                     }
12795 #endif // _TARGET_64BIT_
12796                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12797                     tiRetVal.NormaliseForStack();
12798                 }
12799                 else
12800                 {
12801                     compUnsafeCastUsed = true; // Have to go conservative
12802                 }
12803
12804             LDIND_POST_VERIFY:
12805
12806                 op1 = impPopStack().val; // address to load from
12807                 impBashVarAddrsToI(op1);
12808
12809 #ifdef _TARGET_64BIT_
12810                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12811                 //
12812                 if (genActualType(op1->gtType) == TYP_INT)
12813                 {
12814                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12815                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12816                 }
12817 #endif
12818
12819                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12820
12821                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12822
12823                 // ldind could point anywhere, for example a boxed class static int
12824                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12825
12826                 if (prefixFlags & PREFIX_VOLATILE)
12827                 {
12828                     assert(op1->OperGet() == GT_IND);
12829                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12830                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12831                     op1->gtFlags |= GTF_IND_VOLATILE;
12832                 }
12833
12834                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12835                 {
12836                     assert(op1->OperGet() == GT_IND);
12837                     op1->gtFlags |= GTF_IND_UNALIGNED;
12838                 }
12839
12840                 impPushOnStack(op1, tiRetVal);
12841
12842                 break;
12843
12844             case CEE_UNALIGNED:
12845
12846                 assert(sz == 1);
12847                 val = getU1LittleEndian(codeAddr);
12848                 ++codeAddr;
12849                 JITDUMP(" %u", val);
12850                 if ((val != 1) && (val != 2) && (val != 4))
12851                 {
12852                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12853                 }
12854
12855                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12856                 prefixFlags |= PREFIX_UNALIGNED;
12857
12858                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12859
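                  // PREFIX: a prefix opcode has been consumed and recorded in prefixFlags;
                  // decode the instruction that follows in the same iteration so the prefix
                  // applies to it.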
12860             PREFIX:
12861                 opcode     = (OPCODE)getU1LittleEndian(codeAddr);
12862                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12863                 codeAddr += sizeof(__int8);
12864                 goto DECODE_OPCODE;
12865
12866             case CEE_VOLATILE:
12867
12868                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12869                 prefixFlags |= PREFIX_VOLATILE;
12870
12871                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12872
12873                 assert(sz == 0);
12874                 goto PREFIX;
12875
12876             case CEE_LDFTN:
12877             {
12878                 // Need to do a lookup here so that we perform an access check
12879                 // and do a NOWAY if protections are violated
12880                 _impResolveToken(CORINFO_TOKENKIND_Method);
12881
12882                 JITDUMP(" %08X", resolvedToken.token);
12883
12884                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12885                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12886                               &callInfo);
12887
12888                 // This check really only applies to intrinsic Array.Address methods
12889                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12890                 {
12891                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12892                 }
12893
12894                 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
12895                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12896
12897                 if (tiVerificationNeeded)
12898                 {
12899                     // LDFTN could start the beginning of a delegate creation sequence, remember that
12900                     delegateCreateStart = codeAddr - 2;
12901
12902                     // check any constraints on the callee's class and type parameters
12903                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12904                                    "method has unsatisfied class constraints");
12905                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12906                                                                                 resolvedToken.hMethod),
12907                                    "method has unsatisfied method constraints");
12908
12909                     mflags = callInfo.verMethodFlags;
12910                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12911                 }
12912
12913             DO_LDFTN:
12914                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12915                 if (compDonotInline())
12916                 {
12917                     return;
12918                 }
12919
12920                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12921                 impPushOnStack(op1, typeInfo(heapToken));
12922
12923                 break;
12924             }
12925
12926             case CEE_LDVIRTFTN:
12927             {
12928                 /* Get the method token */
12929
12930                 _impResolveToken(CORINFO_TOKENKIND_Method);
12931
12932                 JITDUMP(" %08X", resolvedToken.token);
12933
12934                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12935                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12936                                                     CORINFO_CALLINFO_CALLVIRT)),
12937                               &callInfo);
12938
12939                 // This check really only applies to intrinsic Array.Address methods
12940                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12941                 {
12942                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12943                 }
12944
12945                 mflags = callInfo.methodFlags;
12946
12947                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12948
12949                 if (compIsForInlining())
12950                 {
12951                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12952                     {
12953                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12954                         return;
12955                     }
12956                 }
12957
12958                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12959
12960                 if (tiVerificationNeeded)
12961                 {
12962
12963                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12964                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12965
12966                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12967                     typeInfo declType =
12968                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12969
12970                     typeInfo arg = impStackTop().seTypeInfo;
12971                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12972                            "bad ldvirtftn");
12973
12974                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12975                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12976                     {
12977                         instanceClassHnd = arg.GetClassHandleForObjRef();
12978                     }
12979
12980                     // check any constraints on the method's class and type parameters
12981                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12982                                    "method has unsatisfied class constraints");
12983                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12984                                                                                 resolvedToken.hMethod),
12985                                    "method has unsatisfied method constraints");
12986
12987                     if (mflags & CORINFO_FLG_PROTECTED)
12988                     {
12989                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12990                                "Accessing protected method through wrong type.");
12991                     }
12992                 }
12993
12994                 /* Get the object-ref */
12995                 op1 = impPopStack().val;
12996                 assertImp(op1->gtType == TYP_REF);
12997
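                      // If the target does not actually require a runtime ldvirtftn lookup
                      // (e.g. the method is final, static, or not virtual), the object reference
                      // is only needed for its side effects; keep those and take the plain LDFTN
                      // path to load the function pointer directly.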
12998                 if (opts.IsReadyToRun())
12999                 {
13000                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
13001                     {
13002                         if (op1->gtFlags & GTF_SIDE_EFFECT)
13003                         {
13004                             op1 = gtUnusedValNode(op1);
13005                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13006                         }
13007                         goto DO_LDFTN;
13008                     }
13009                 }
13010                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13011                 {
13012                     if (op1->gtFlags & GTF_SIDE_EFFECT)
13013                     {
13014                         op1 = gtUnusedValNode(op1);
13015                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13016                     }
13017                     goto DO_LDFTN;
13018                 }
13019
13020                 GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
13021                 if (compDonotInline())
13022                 {
13023                     return;
13024                 }
13025
13026                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13027                 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
13028                 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
13029                 impPushOnStack(fptr, typeInfo(heapToken));
13030
13031                 break;
13032             }
13033
13034             case CEE_CONSTRAINED:
13035
13036                 assertImp(sz == sizeof(unsigned));
13037                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
13038                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
13039                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
13040
13041                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
13042                 prefixFlags |= PREFIX_CONSTRAINED;
13043
13044                 {
13045                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13046                     if (actualOpcode != CEE_CALLVIRT)
13047                     {
13048                         BADCODE("constrained. has to be followed by callvirt");
13049                     }
13050                 }
13051
13052                 goto PREFIX;
13053
13054             case CEE_READONLY:
13055                 JITDUMP(" readonly.");
13056
13057                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
13058                 prefixFlags |= PREFIX_READONLY;
13059
13060                 {
13061                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13062                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
13063                     {
13064                         BADCODE("readonly. has to be followed by ldelema or call");
13065                     }
13066                 }
13067
13068                 assert(sz == 0);
13069                 goto PREFIX;
13070
13071             case CEE_TAILCALL:
13072                 JITDUMP(" tail.");
13073
13074                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
13075                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13076
13077                 {
13078                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13079                     if (!impOpcodeIsCallOpcode(actualOpcode))
13080                     {
13081                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
13082                     }
13083                 }
13084                 assert(sz == 0);
13085                 goto PREFIX;
13086
13087             case CEE_NEWOBJ:
13088
13089                 /* Since we will implicitly insert newObjThisPtr at the start of the
13090                    argument list, spill any GTF_ORDER_SIDEEFF */
13091                 impSpillSpecialSideEff();
13092
13093                 /* NEWOBJ does not respond to TAIL */
13094                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
13095
13096                 /* NEWOBJ does not respond to CONSTRAINED */
13097                 prefixFlags &= ~PREFIX_CONSTRAINED;
13098
13099                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
13100
13101                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13102                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
13103                               &callInfo);
13104
13105                 if (compIsForInlining())
13106                 {
13107                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13108                     {
13109                         // Check to see if this call violates the boundary.
13110                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
13111                         return;
13112                     }
13113                 }
13114
13115                 mflags = callInfo.methodFlags;
13116
13117                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
13118                 {
13119                     BADCODE("newobj on static or abstract method");
13120                 }
13121
13122                 // Insert the security callout before any actual code is generated
13123                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13124
13125                 // There are three different cases for new:
13126                 //      1) Object is an array (arrays are treated specially by the EE)
13127                 //      2) Object is some other variable-sized object (e.g. String)
13128                 //         (in cases 1 and 2 the object size depends on the arguments)
13129                 //      3) Class size can be determined beforehand (normal case)
13130                 // In the first case we need to call a NEWOBJ helper (multinewarray),
13131                 // in the second case we call the constructor with a '0' this pointer,
13132                 // and in the third case we alloc the memory, then call the constructor.
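                      // (For example, System.String is a variable-sized non-array object, so it
                      // takes the second path; see the CORINFO_FLG_VAROBJSIZE case below.)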
13133
13134                 clsFlags = callInfo.classFlags;
13135                 if (clsFlags & CORINFO_FLG_ARRAY)
13136                 {
13137                     if (tiVerificationNeeded)
13138                     {
13139                         CORINFO_CLASS_HANDLE elemTypeHnd;
13140                         INDEBUG(CorInfoType corType =)
13141                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13142                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
13143                         Verify(elemTypeHnd == nullptr ||
13144                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13145                                "newarr of byref-like objects");
13146                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
13147                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
13148                                       &callInfo DEBUGARG(info.compFullName));
13149                     }
13150                     // Arrays need to call the NEWOBJ helper.
13151                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
13152
13153                     impImportNewObjArray(&resolvedToken, &callInfo);
13154                     if (compDonotInline())
13155                     {
13156                         return;
13157                     }
13158
13159                     callTyp = TYP_REF;
13160                     break;
13161                 }
13162                 // At present this can only be String
13163                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
13164                 {
13165                     if (IsTargetAbi(CORINFO_CORERT_ABI))
13166                     {
13167                         // The dummy argument does not exist in CoreRT
13168                         newObjThisPtr = nullptr;
13169                     }
13170                     else
13171                     {
13172                         // This is the case for variable-sized objects that are not
13173                         // arrays.  In this case, call the constructor with a null 'this'
13174                         // pointer
13175                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
13176                     }
13177
13178                     /* Remember that this basic block contains 'new' of an object */
13179                     block->bbFlags |= BBF_HAS_NEWOBJ;
13180                     optMethodFlags |= OMF_HAS_NEWOBJ;
13181                 }
13182                 else
13183                 {
13184                     // This is the normal case where the size of the object is
13185                     // fixed.  Allocate the memory and call the constructor.
13186
13187                     // Note: We cannot add a peep to avoid use of temp here
13188                     // because we don't have enough interference info to detect when
13189                     // the sources and destination interfere, for example: s = new S(ref);
13190
13191                     // TODO: Find the correct place to introduce a general
13192                     // reverse copy prop for struct return values from newobj or
13193                     // any function returning structs.
13194
13195                     /* get a temporary for the new object */
13196                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
13197                     if (compDonotInline())
13198                     {
13199                         // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
13200                         assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
13201                         return;
13202                     }
13203
13204                     // In the value class case we only need clsHnd for size calcs.
13205                     //
13206                     // The lookup of the code pointer will be handled by CALL in this case
13207                     if (clsFlags & CORINFO_FLG_VALUECLASS)
13208                     {
13209                         if (compIsForInlining())
13210                         {
13211                             // If value class has GC fields, inform the inliner. It may choose to
13212                             // bail out on the inline.
13213                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13214                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
13215                             {
13216                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
13217                                 if (compInlineResult->IsFailure())
13218                                 {
13219                                     return;
13220                                 }
13221
13222                                 // Do further notification in the case where the call site is rare;
13223                                 // some policies do not track the relative hotness of call sites for
13224                                 // "always" inline cases.
13225                                 if (impInlineInfo->iciBlock->isRunRarely())
13226                                 {
13227                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
13228                                     if (compInlineResult->IsFailure())
13229                                     {
13230                                         return;
13231                                     }
13232                                 }
13233                             }
13234                         }
13235
13236                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
13237                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
13238
13239                         if (impIsPrimitive(jitTyp))
13240                         {
13241                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
13242                         }
13243                         else
13244                         {
13245                             // The local variable itself is the allocated space.
13246                             // Here we need the unsafe value class check, since the address of the struct is taken
13247                             // for further use and is potentially exploitable.
13248                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
13249                         }
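                              // Zero-initialize the temp explicitly when inlining, or when
                              // fgStructTempNeedsExplicitZeroInit requests it for this block.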
13250                         if (compIsForInlining() || fgStructTempNeedsExplicitZeroInit(lvaTable + lclNum, block))
13251                         {
13252                             // Append a tree to zero-out the temp
13253                             newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
13254
13255                             newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
13256                                                            gtNewIconNode(0), // Value
13257                                                            size,             // Size
13258                                                            false,            // isVolatile
13259                                                            false);           // not copyBlock
13260                             impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
13261                         }
13262
13263                         // Obtain the address of the temp
13264                         newObjThisPtr =
13265                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
13266                     }
13267                     else
13268                     {
13269 #ifdef FEATURE_READYTORUN_COMPILER
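                              // Under ReadyToRun, first try to allocate via the R2R 'new' helper;
                              // if no helper tree is created, fall back to the normal allocation path below.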
13270                         if (opts.IsReadyToRun())
13271                         {
13272                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
13273                             usingReadyToRunHelper = (op1 != nullptr);
13274                         }
13275
13276                         if (!usingReadyToRunHelper)
13277 #endif
13278                         {
13279                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
13280                             if (op1 == nullptr)
13281                             { // compDonotInline()
13282                                 return;
13283                             }
13284
13285                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13286                             // and the newfast call with a single call to a dynamic R2R cell that will:
13287                             //      1) Load the context
13288                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
13289                             //      stub
13290                             //      3) Allocate and return the new object
13291                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13292
13293                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
13294                                                     resolvedToken.hClass, TYP_REF, op1);
13295                         }
13296
13297                         // Remember that this basic block contains 'new' of an object
13298                         block->bbFlags |= BBF_HAS_NEWOBJ;
13299                         optMethodFlags |= OMF_HAS_NEWOBJ;
13300
13301                         // Append the assignment to the temp/local. We don't need to spill
13302                         // at all, as we are just calling an EE-JIT helper which can only
13303                         // cause an (async) OutOfMemoryException.
13304
13305                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
13306                         // to a temp. Note that the pattern "temp = allocObj" is required
13307                         // by the ObjectAllocator phase to be able to identify GT_ALLOCOBJ nodes
13308                         // without an exhaustive walk over all expressions.
13309
13310                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
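                              // Record the exact class of the newly allocated object on the temp.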
13311                         lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
13312
13313                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
13314                     }
13315                 }
13316                 goto CALL;
13317
13318             case CEE_CALLI:
13319
13320                 /* The CONSTRAINED prefix does not apply to CALLI */
13321                 prefixFlags &= ~PREFIX_CONSTRAINED;
13322
13323                 if (compIsForInlining())
13324                 {
13325                     // CALLI doesn't have a method handle, so assume the worst.
13326                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13327                     {
13328                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
13329                         return;
13330                     }
13331                 }
13332
13333             // fall through
13334
13335             case CEE_CALLVIRT:
13336             case CEE_CALL:
13337
13338                 // We can't call getCallInfo on the token from a CALLI, but we need it in
13339                 // many other places.  We unfortunately embed that knowledge here.
13340                 if (opcode != CEE_CALLI)
13341                 {
13342                     _impResolveToken(CORINFO_TOKENKIND_Method);
13343
13344                     eeGetCallInfo(&resolvedToken,
13345                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
13346                                   // this is how impImportCall invokes getCallInfo
13347                                   addVerifyFlag(
13348                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
13349                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
13350                                                                        : CORINFO_CALLINFO_NONE)),
13351                                   &callInfo);
13352                 }
13353                 else
13354                 {
13355                     // Suppress uninitialized use warning.
13356                     memset(&resolvedToken, 0, sizeof(resolvedToken));
13357                     memset(&callInfo, 0, sizeof(callInfo));
13358
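                          // The CALLI operand is a signature token rather than a method token;
                          // read it directly from the IL stream.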
13359                     resolvedToken.token = getU4LittleEndian(codeAddr);
13360                 }
13361
13362             CALL: // memberRef should be set.
13363                 // newObjThisPtr should be set for CEE_NEWOBJ
13364
13365                 JITDUMP(" %08X", resolvedToken.token);
13366                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13367
13368                 bool newBBcreatedForTailcallStress;
13369
13370                 newBBcreatedForTailcallStress = false;
13371
13372                 if (compIsForInlining())
13373                 {
13374                     if (compDonotInline())
13375                     {
13376                         return;
13377                     }
13378                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13379                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13380                 }
13381                 else
13382                 {
13383                     if (compTailCallStress())
13384                     {
13385                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13386                         // Tail call stress only recognizes call+ret patterns and forces them to be
13387                         // explicit tail prefixed calls.  Also, under tail call stress fgMakeBasicBlocks()
13388                         // doesn't import the 'ret' opcode following the call into the basic block containing
13389                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
13390                         // already checks that there is an opcode following the call, and hence it is
13391                         // safe here to read the next opcode without a bounds check.
13392                         newBBcreatedForTailcallStress =
13393                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
13394                                                              // make it jump to RET.
13395                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
13396
13397                         if (newBBcreatedForTailcallStress &&
13398                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
13399                             verCheckTailCallConstraint(opcode, &resolvedToken,
13400                                                        constraintCall ? &constrainedResolvedToken : nullptr,
13401                                                        true) // Is it legal to do tailcall?
13402                             )
13403                         {
13404                             // Stress the tailcall.
13405                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13406                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13407                         }
13408                     }
13409                 }
13410
13411                 // This is split up to avoid goto flow warnings.
13412                 bool isRecursive;
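                      // The call is recursive only when compiling the root method (not an inlinee)
                      // and the callee is the method currently being compiled.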
13413                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13414
13415                 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
13416                 // and hence will not be considered for implicit tail calling.
13417                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13418                 {
13419                     if (compIsForInlining())
13420                     {
13421 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13422                         // Are we inlining at an implicit tail call site? If so, we can flag
13423                         // implicit tail call sites in the inline body. These call sites
13424                         // often end up in non BBJ_RETURN blocks, so only flag them when
13425                         // we're able to handle shared returns.
13426                         if (impInlineInfo->iciCall->IsImplicitTailCall())
13427                         {
13428                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13429                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13430                         }
13431 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13432                     }
13433                     else
13434                     {
13435                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13436                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13437                     }
13438                 }
13439
13440                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13441                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13442                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
13443
13444                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13445                 {
13446                     // All calls and delegates need a security callout.
13447                     // For delegates, this is the call to the delegate constructor, not the access check on the
13448                     // LD(virt)FTN.
13449                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13450
13451 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
13452
13453                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
13454                 // and the field it is reading; thus it is now unverifiable to not immediately precede the call with
13455                 // ldtoken <field token>, and we now check accessibility
13456                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
13457                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
13458                 {
13459                     if (prevOpcode != CEE_LDTOKEN)
13460                     {
13461                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
13462                     }
13463                     else
13464                     {
13465                         assert(lastLoadToken != NULL);
13466                         // Now that we know we have a token, verify that it is accessible for loading
13467                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
13468                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
13469                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
13470                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13471                     }
13472                 }
13473
13474 #endif // DevDiv 410397
13475                 }
13476
13477                 if (tiVerificationNeeded)
13478                 {
13479                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13480                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13481                                   &callInfo DEBUGARG(info.compFullName));
13482                 }
13483
13484                 // Insert delegate callout here.
13485                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
13486                 {
13487 #ifdef DEBUG
13488                     // We should do this only if verification is enabled
13489                     // If verification is disabled, delegateCreateStart will not be initialized correctly
13490                     if (tiVerificationNeeded)
13491                     {
13492                         mdMemberRef delegateMethodRef = mdMemberRefNil;
13493                         // We should get here only for well formed delegate creation.
13494                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
13495                     }
13496 #endif
13497                 }
13498
13499                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13500                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13501                 if (compDonotInline())
13502                 {
13503                     // We do not check failures after lvaGrabTemp. That is covered by the CoreCLR_13272 issue.
13504                     assert((callTyp == TYP_UNDEF) ||
13505                            (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
13506                     return;
13507                 }
13508
13509                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
13510                                                                        // have created a new BB after the "call"
13511                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
13512                 {
13513                     assert(!compIsForInlining());
13514                     goto RET;
13515                 }
13516
13517                 break;
13518
13519             case CEE_LDFLD:
13520             case CEE_LDSFLD:
13521             case CEE_LDFLDA:
13522             case CEE_LDSFLDA:
13523             {
13524
13525                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
13526                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
13527
13528                 /* Get the CP_Fieldref index */
13529                 assertImp(sz == sizeof(unsigned));
13530
13531                 _impResolveToken(CORINFO_TOKENKIND_Field);
13532
13533                 JITDUMP(" %08X", resolvedToken.token);
13534
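                      // Request the field's address for ldflda/ldsflda; otherwise request a normal get.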
13535                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
13536
13537                 GenTree*             obj     = nullptr;
13538                 typeInfo*            tiObj   = nullptr;
13539                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
13540
13541                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
13542                 {
13543                     tiObj         = &impStackTop().seTypeInfo;
13544                     StackEntry se = impPopStack();
13545                     objType       = se.seTypeInfo.GetClassHandle();
13546                     obj           = se.val;
13547
13548                     if (impIsThis(obj))
13549                     {
13550                         aflags |= CORINFO_ACCESS_THIS;
13551
13552                         // An optimization for Contextful classes:
13553                         // we unwrap the proxy when we have a 'this reference'
13554
13555                         if (info.compUnwrapContextful)
13556                         {
13557                             aflags |= CORINFO_ACCESS_UNWRAP;
13558                         }
13559                     }
13560                 }
13561
13562                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13563
13564                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13565                 // handle
13566                 CorInfoType ciType = fieldInfo.fieldType;
13567                 clsHnd             = fieldInfo.structType;
13568
13569                 lclTyp = JITtype2varType(ciType);
13570
13571 #ifdef _TARGET_AMD64
13572                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
13573 #endif // _TARGET_AMD64
13574
13575                 if (compIsForInlining())
13576                 {
13577                     switch (fieldInfo.fieldAccessor)
13578                     {
13579                         case CORINFO_FIELD_INSTANCE_HELPER:
13580                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13581                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13582                         case CORINFO_FIELD_STATIC_TLS:
13583
13584                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
13585                             return;
13586
13587                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13588                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13589                             /* We may be able to inline the field accessors in specific instantiations of generic
13590                              * methods */
13591                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
13592                             return;
13593
13594                         default:
13595                             break;
13596                     }
13597
13598                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
13599                         clsHnd)
13600                     {
13601                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
13602                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
13603                         {
13604                             // Loading a static valuetype field usually will cause a JitHelper to be called
13605                             // for the static base. This will bloat the code.
13606                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
13607
13608                             if (compInlineResult->IsFailure())
13609                             {
13610                                 return;
13611                             }
13612                         }
13613                     }
13614                 }
13615
13616                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
13617                 if (isLoadAddress)
13618                 {
13619                     tiRetVal.MakeByRef();
13620                 }
13621                 else
13622                 {
13623                     tiRetVal.NormaliseForStack();
13624                 }
13625
13626                 // Perform this check always to ensure that we get field access exceptions even with
13627                 // SkipVerification.
13628                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13629
13630                 if (tiVerificationNeeded)
13631                 {
13632                     // You can also pass the unboxed struct to  LDFLD
13633                     BOOL bAllowPlainValueTypeAsThis = FALSE;
13634                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13635                     {
13636                         bAllowPlainValueTypeAsThis = TRUE;
13637                     }
13638
13639                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13640
13641                     // If we're doing this on a heap object or from a 'safe' byref
13642                     // then the result is a safe byref too
13643                     if (isLoadAddress) // load address
13644                     {
13645                         if (fieldInfo.fieldFlags &
13646                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have a permanent home
13647                         {
13648                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13649                             {
13650                                 tiRetVal.SetIsPermanentHomeByRef();
13651                             }
13652                         }
13653                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13654                         {
13655                             // ldflda of a byref is safe if done on a GC object or on a
13656                             // safe byref
13657                             tiRetVal.SetIsPermanentHomeByRef();
13658                         }
13659                     }
13660                 }
13661                 else
13662                 {
13663                     // tiVerificationNeeded is false.
13664                     // Raise InvalidProgramException if static load accesses non-static field
13665                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13666                     {
13667                         BADCODE("static access on an instance field");
13668                     }
13669                 }
13670
13671                 // We are using ldfld/a on a static field. We allow it, but need to evaluate any side effects of obj.
13672                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13673                 {
13674                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13675                     {
13676                         obj = gtUnusedValNode(obj);
13677                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13678                     }
13679                     obj = nullptr;
13680                 }
13681
13682                 /* Preserve 'small' int types */
13683                 if (!varTypeIsSmall(lclTyp))
13684                 {
13685                     lclTyp = genActualType(lclTyp);
13686                 }
13687
13688                 bool usesHelper = false;
13689
13690                 switch (fieldInfo.fieldAccessor)
13691                 {
13692                     case CORINFO_FIELD_INSTANCE:
13693 #ifdef FEATURE_READYTORUN_COMPILER
13694                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13695 #endif
13696                     {
13697                         bool nullcheckNeeded = false;
13698
13699                         obj = impCheckForNullPointer(obj);
13700
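                              // For ldflda on a byref that could be null, request an explicit null check
                              // from gtNewFieldRef below.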
13701                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13702                         {
13703                             nullcheckNeeded = true;
13704                         }
13705
13706                         // If the object is a struct, what we really want is
13707                         // for the field to operate on the address of the struct.
13708                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13709                         {
13710                             assert(opcode == CEE_LDFLD && objType != nullptr);
13711
13712                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13713                         }
13714
13715                         /* Create the data member node */
13716                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13717
13718 #ifdef FEATURE_READYTORUN_COMPILER
13719                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13720                         {
13721                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13722                         }
13723 #endif
13724
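                              // Propagate the global-effect flags from the object expression to the new field node.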
13725                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13726
13727                         if (fgAddrCouldBeNull(obj))
13728                         {
13729                             op1->gtFlags |= GTF_EXCEPT;
13730                         }
13731
13732                         // If gtFldObj is a BYREF then our target is a value class and
13733                         // it could point anywhere, for example a boxed class static int
13734                         if (obj->gtType == TYP_BYREF)
13735                         {
13736                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13737                         }
13738
13739                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13740                         if (StructHasOverlappingFields(typeFlags))
13741                         {
13742                             op1->gtField.gtFldMayOverlap = true;
13743                         }
13744
13745                         // Wrap it in an address-of operator if necessary
13746                         if (isLoadAddress)
13747                         {
13748                             op1 = gtNewOperNode(GT_ADDR,
13749                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13750                         }
13751                         else
13752                         {
13753                             if (compIsForInlining() &&
13754                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13755                                                                                    impInlineInfo->inlArgInfo))
13756                             {
13757                                 impInlineInfo->thisDereferencedFirst = true;
13758                             }
13759                         }
13760                     }
13761                     break;
13762
13763                     case CORINFO_FIELD_STATIC_TLS:
13764 #ifdef _TARGET_X86_
13765                         // Legacy TLS access is implemented as an intrinsic on x86 only
13766
13767                         /* Create the data member node */
13768                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13769                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13770
13771                         if (isLoadAddress)
13772                         {
13773                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13774                         }
13775                         break;
13776 #else
13777                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13778
13779                         __fallthrough;
13780 #endif
13781
13782                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13783                     case CORINFO_FIELD_INSTANCE_HELPER:
13784                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13785                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13786                                                clsHnd, nullptr);
13787                         usesHelper = true;
13788                         break;
13789
13790                     case CORINFO_FIELD_STATIC_ADDRESS:
13791                         // Replace static read-only fields with a constant if possible
13792                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13793                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13794                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13795                         {
13796                             CorInfoInitClassResult initClassResult =
13797                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13798                                                             impTokenLookupContextHandle);
13799
13800                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13801                             {
13802                                 void** pFldAddr = nullptr;
13803                                 void*  fldAddr =
13804                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13805
13806                                 // We should always be able to access this static's address directly
13807                                 assert(pFldAddr == nullptr);
13808
13809                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13810                                 goto FIELD_DONE;
13811                             }
13812                         }
13813
13814                         __fallthrough;
13815
13816                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13817                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13818                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13819                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13820                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13821                                                          lclTyp);
13822                         break;
13823
13824                     case CORINFO_FIELD_INTRINSIC_ZERO:
13825                     {
13826                         assert(aflags & CORINFO_ACCESS_GET);
13827                         op1 = gtNewIconNode(0, lclTyp);
13828                         goto FIELD_DONE;
13829                     }
13830                     break;
13831
13832                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13833                     {
13834                         assert(aflags & CORINFO_ACCESS_GET);
13835
13836                         LPVOID         pValue;
13837                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13838                         op1                = gtNewStringLiteralNode(iat, pValue);
13839                         goto FIELD_DONE;
13840                     }
13841                     break;
13842
13843                     case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
13844                     {
13845                         assert(aflags & CORINFO_ACCESS_GET);
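                              // This intrinsic field folds to a constant describing the target's endianness:
                              // 0 on big-endian targets, 1 otherwise.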
13846 #if BIGENDIAN
13847                         op1 = gtNewIconNode(0, lclTyp);
13848 #else
13849                         op1                     = gtNewIconNode(1, lclTyp);
13850 #endif
13851                         goto FIELD_DONE;
13852                     }
13853                     break;
13854
13855                     default:
13856                         assert(!"Unexpected fieldAccessor");
13857                 }
13858
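                      // For loads (as opposed to address computations), honor the 'volatile.' and 'unaligned.' prefixes.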
13859                 if (!isLoadAddress)
13860                 {
13861
13862                     if (prefixFlags & PREFIX_VOLATILE)
13863                     {
13864                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13865                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13866
13867                         if (!usesHelper)
13868                         {
13869                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13870                                    (op1->OperGet() == GT_OBJ));
13871                             op1->gtFlags |= GTF_IND_VOLATILE;
13872                         }
13873                     }
13874
13875                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13876                     {
13877                         if (!usesHelper)
13878                         {
13879                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13880                                    (op1->OperGet() == GT_OBJ));
13881                             op1->gtFlags |= GTF_IND_UNALIGNED;
13882                         }
13883                     }
13884                 }
13885
13886                 /* Check if the class needs explicit initialization */
13887
13888                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13889                 {
13890                     GenTree* helperNode = impInitClass(&resolvedToken);
13891                     if (compDonotInline())
13892                     {
13893                         return;
13894                     }
13895                     if (helperNode != nullptr)
13896                     {
13897                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13898                     }
13899                 }
13900
13901             FIELD_DONE:
13902                 impPushOnStack(op1, tiRetVal);
13903             }
13904             break;
13905
13906             case CEE_STFLD:
13907             case CEE_STSFLD:
13908             {
13909
13910                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13911
13912                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13913
13914                 /* Get the CP_Fieldref index */
13915
13916                 assertImp(sz == sizeof(unsigned));
13917
13918                 _impResolveToken(CORINFO_TOKENKIND_Field);
13919
13920                 JITDUMP(" %08X", resolvedToken.token);
13921
13922                 int       aflags = CORINFO_ACCESS_SET;
13923                 GenTree*  obj    = nullptr;
13924                 typeInfo* tiObj  = nullptr;
13925                 typeInfo  tiVal;
13926
13927                 /* Pull the value from the stack */
13928                 StackEntry se = impPopStack();
13929                 op2           = se.val;
13930                 tiVal         = se.seTypeInfo;
13931                 clsHnd        = tiVal.GetClassHandle();
13932
13933                 if (opcode == CEE_STFLD)
13934                 {
13935                     tiObj = &impStackTop().seTypeInfo;
13936                     obj   = impPopStack().val;
13937
13938                     if (impIsThis(obj))
13939                     {
13940                         aflags |= CORINFO_ACCESS_THIS;
13941
13942                         // An optimization for Contextful classes:
13943                         // we unwrap the proxy when we have a 'this reference'
13944
13945                         if (info.compUnwrapContextful)
13946                         {
13947                             aflags |= CORINFO_ACCESS_UNWRAP;
13948                         }
13949                     }
13950                 }
13951
13952                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13953
13954                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13955                 // handle
13956                 CorInfoType ciType = fieldInfo.fieldType;
13957                 fieldClsHnd        = fieldInfo.structType;
13958
13959                 lclTyp = JITtype2varType(ciType);
13960
13961                 if (compIsForInlining())
13962                 {
13963                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or
13964                      * a per-inst static? */
13965
13966                     switch (fieldInfo.fieldAccessor)
13967                     {
13968                         case CORINFO_FIELD_INSTANCE_HELPER:
13969                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13970                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13971                         case CORINFO_FIELD_STATIC_TLS:
13972
13973                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13974                             return;
13975
13976                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13977                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13978                             /* We may be able to inline the field accessors in specific instantiations of generic
13979                              * methods */
13980                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13981                             return;
13982
13983                         default:
13984                             break;
13985                     }
13986                 }
13987
13988                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13989
13990                 if (tiVerificationNeeded)
13991                 {
13992                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13993                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13994                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13995                 }
13996                 else
13997                 {
13998                     // tiVerificationNeeded is false.
13999                     // Raise InvalidProgramException if static store accesses non-static field
14000                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14001                     {
14002                         BADCODE("static access on an instance field");
14003                     }
14004                 }
14005
14006                 // We are using stfld on a static field.
14007                 // We allow it, but need to evaluate any side effects of obj
14008                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14009                 {
14010                     if (obj->gtFlags & GTF_SIDE_EFFECT)
14011                     {
14012                         obj = gtUnusedValNode(obj);
14013                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14014                     }
14015                     obj = nullptr;
14016                 }
14017
14018                 /* Preserve 'small' int types */
14019                 if (!varTypeIsSmall(lclTyp))
14020                 {
14021                     lclTyp = genActualType(lclTyp);
14022                 }
14023
14024                 switch (fieldInfo.fieldAccessor)
14025                 {
14026                     case CORINFO_FIELD_INSTANCE:
14027 #ifdef FEATURE_READYTORUN_COMPILER
14028                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
14029 #endif
14030                     {
14031                         obj = impCheckForNullPointer(obj);
14032
14033                         /* Create the data member node */
14034                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14035                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14036                         if (StructHasOverlappingFields(typeFlags))
14037                         {
14038                             op1->gtField.gtFldMayOverlap = true;
14039                         }
14040
14041 #ifdef FEATURE_READYTORUN_COMPILER
14042                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14043                         {
14044                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14045                         }
14046 #endif
14047
14048                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14049
14050                         if (fgAddrCouldBeNull(obj))
14051                         {
14052                             op1->gtFlags |= GTF_EXCEPT;
14053                         }
14054
14055                         // If gtFldObj is a BYREF then our target is a value class and
14056                         // it could point anywhere, for example a boxed class static int
14057                         if (obj->gtType == TYP_BYREF)
14058                         {
14059                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
14060                         }
14061
14062                         if (compIsForInlining() &&
14063                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
14064                         {
14065                             impInlineInfo->thisDereferencedFirst = true;
14066                         }
14067                     }
14068                     break;
14069
14070                     case CORINFO_FIELD_STATIC_TLS:
14071 #ifdef _TARGET_X86_
14072                         // Legacy TLS access is implemented as an intrinsic on x86 only
14073
14074                         /* Create the data member node */
14075                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14076                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14077
14078                         break;
14079 #else
14080                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14081
14082                         __fallthrough;
14083 #endif
14084
14085                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
14086                     case CORINFO_FIELD_INSTANCE_HELPER:
14087                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14088                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14089                                                clsHnd, op2);
14090                         goto SPILL_APPEND;
14091
14092                     case CORINFO_FIELD_STATIC_ADDRESS:
14093                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14094                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14095                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14096                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14097                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14098                                                          lclTyp);
14099                         break;
14100
14101                     default:
14102                         assert(!"Unexpected fieldAccessor");
14103                 }
14104
14105                 // Create the member assignment, unless we have a struct.
14106                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
14107                 bool deferStructAssign = varTypeIsStruct(lclTyp);
14108
14109                 if (!deferStructAssign)
14110                 {
14111                     if (prefixFlags & PREFIX_VOLATILE)
14112                     {
14113                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14114                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
14115                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14116                         op1->gtFlags |= GTF_IND_VOLATILE;
14117                     }
14118                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14119                     {
14120                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14121                         op1->gtFlags |= GTF_IND_UNALIGNED;
14122                     }
14123
14124                     /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
14125                        trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during
14126                        importation and reads from the union as if it were a long during code generation. Though this
14127                        can potentially read garbage, one can get lucky and have this work correctly.
14128
14129                        This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
14130                        /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
14131                        dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
14132                        it works correctly always.
14133
14134                        Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
14135                        for V4.0.
14136                     */
14137                     CLANG_FORMAT_COMMENT_ANCHOR;
14138
14139 #ifndef _TARGET_64BIT_
14140                     // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
14141                     // generated for ARM as well as x86, so the following IR will be accepted:
14142                     //     *  STMT      void
14143                     //         |  /--*  CNS_INT   int    2
14144                     //         \--*  ASG       long
14145                     //            \--*  CLS_VAR   long
14146
14147                     if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
14148                         varTypeIsLong(op1->TypeGet()))
14149                     {
14150                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
14151                     }
14152 #endif
14153
14154 #ifdef _TARGET_64BIT_
14155                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
14156                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
14157                     {
14158                         op2->gtType = TYP_I_IMPL;
14159                     }
14160                     else
14161                     {
14162                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
14163                         //
14164                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
14165                         {
14166                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
14167                         }
14168                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
14169                         //
14170                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
14171                         {
14172                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
14173                         }
14174                     }
14175 #endif
14176
14177 #if !FEATURE_X87_DOUBLES
14178                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
14179                     // We insert a cast to the dest 'op1' type
14180                     //
14181                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
14182                         varTypeIsFloating(op2->gtType))
14183                     {
14184                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
14185                     }
14186 #endif // !FEATURE_X87_DOUBLES
14187
14188                     op1 = gtNewAssignNode(op1, op2);
14189
14190                     /* Mark the expression as containing an assignment */
14191
14192                     op1->gtFlags |= GTF_ASG;
14193                 }
14194
14195                 /* Check if the class needs explicit initialization */
14196
14197                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14198                 {
14199                     GenTree* helperNode = impInitClass(&resolvedToken);
14200                     if (compDonotInline())
14201                     {
14202                         return;
14203                     }
14204                     if (helperNode != nullptr)
14205                     {
14206                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14207                     }
14208                 }
14209
14210                 /* stfld can interfere with value classes (consider the sequence
14211                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
14212                    spill all value class references from the stack. */
14213
14214                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
14215                 {
14216                     assert(tiObj);
14217
14218                     if (impIsValueType(tiObj))
14219                     {
14220                         impSpillEvalStack();
14221                     }
14222                     else
14223                     {
14224                         impSpillValueClasses();
14225                     }
14226                 }
14227
14228                 /* Spill any refs to the same member from the stack */
14229
14230                 impSpillLclRefs((ssize_t)resolvedToken.hField);
14231
14232                 /* stsfld also interferes with indirect accesses (for aliased
14233                    statics) and calls. But we don't need to spill other statics
14234                    as we have explicitly spilled this particular static field. */
14235
14236                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
14237
14238                 if (deferStructAssign)
14239                 {
14240                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
14241                 }
14242             }
14243                 goto APPEND;
14244
14245             case CEE_NEWARR:
14246             {
14247
14248                 /* Get the class type index operand */
14249
14250                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
14251
14252                 JITDUMP(" %08X", resolvedToken.token);
14253
14254                 if (!opts.IsReadyToRun())
14255                 {
14256                     // Need to restore array classes before creating array objects on the heap
14257                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14258                     if (op1 == nullptr)
14259                     { // compDonotInline()
14260                         return;
14261                     }
14262                 }
14263
14264                 if (tiVerificationNeeded)
14265                 {
14266                     // As per ECMA, the 'numElems' operand can be either an int32 or a native int.
14267                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
14268
14269                     CORINFO_CLASS_HANDLE elemTypeHnd;
14270                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
14271                     Verify(elemTypeHnd == nullptr ||
14272                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
14273                            "array of byref-like type");
14274                 }
14275
14276                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14277
14278                 accessAllowedResult =
14279                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14280                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14281
14282                 /* Form the arglist: array class handle, size */
14283                 op2 = impPopStack().val;
14284                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14285
14286 #ifdef FEATURE_READYTORUN_COMPILER
14287                 if (opts.IsReadyToRun())
14288                 {
14289                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
14290                                                     gtNewArgList(op2));
14291                     usingReadyToRunHelper = (op1 != nullptr);
14292
14293                     if (!usingReadyToRunHelper)
14294                     {
14295                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14296                         // and the newarr call with a single call to a dynamic R2R cell that will:
14297                         //      1) Load the context
14298                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14299                         //      3) Allocate the new array
14300                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14301
14302                         // Need to restore array classes before creating array objects on the heap
14303                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14304                         if (op1 == nullptr)
14305                         { // compDonotInline()
14306                             return;
14307                         }
14308                     }
14309                 }
14310
14311                 if (!usingReadyToRunHelper)
14312 #endif
14313                 {
14314                     args = gtNewArgList(op1, op2);
14315
14316                     /* Create a call to 'new' */
14317
14318                     // Note that this only works for shared generic code because the same helper is used for all
14319                     // reference array types
14320                     op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
14321                 }
14322
14323                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
14324
14325                 /* Remember that this basic block contains a 'new' of a single-dimension (SD) array */
14326
14327                 block->bbFlags |= BBF_HAS_NEWARRAY;
14328                 optMethodFlags |= OMF_HAS_NEWARRAY;
14329
14330                 /* Push the result of the call on the stack */
14331
14332                 impPushOnStack(op1, tiRetVal);
14333
14334                 callTyp = TYP_REF;
14335             }
14336             break;
14337
14338             case CEE_LOCALLOC:
14339                 if (tiVerificationNeeded)
14340                 {
14341                     Verify(false, "bad opcode");
14342                 }
14343
14344                 // We don't allow locallocs inside handlers
14345                 if (block->hasHndIndex())
14346                 {
14347                     BADCODE("Localloc can't be inside handler");
14348                 }
14349
14350                 setNeedsGSSecurityCookie();
14351
14352                 // Get the size to allocate
14353
14354                 op2 = impPopStack().val;
14355                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14356
14357                 if (verCurrentState.esStackDepth != 0)
14358                 {
14359                     BADCODE("Localloc can only be used when the stack is empty");
14360                 }
14361
14362                 // If the localloc is not in a loop and its size is a small constant,
14363                 // create a new local var of TYP_BLK and return its address.
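                      // For example, a fixed-size C# "stackalloc byte[16]" whose size folds to a small
                      // constant, in a block that is not the target of a backward jump (a cheap
                      // "not in a loop" check), can instead become the address of a TYP_BLK temp,
                      // avoiding a runtime GT_LCLHEAP allocation.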
14364                 {
14365                     bool convertedToLocal = false;
14366
14367                     // Need to aggressively fold here, as even fixed-size locallocs
14368                     // will have casts in the way.
14369                     op2 = gtFoldExpr(op2);
14370
14371                     if (op2->IsIntegralConst())
14372                     {
14373                         const ssize_t allocSize = op2->AsIntCon()->IconValue();
14374
14375                         if (allocSize == 0)
14376                         {
14377                             // Result is nullptr
14378                             JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
14379                             op1              = gtNewIconNode(0, TYP_I_IMPL);
14380                             convertedToLocal = true;
14381                         }
14382                         else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
14383                         {
14384                             // Get the size threshold for local conversion
14385                             ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
14386
14387 #ifdef DEBUG
14388                             // Optionally allow this to be modified
14389                             maxSize = JitConfig.JitStackAllocToLocalSize();
14390 #endif // DEBUG
14391
14392                             if (allocSize <= maxSize)
14393                             {
14394                                 const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
14395                                 JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
14396                                         stackallocAsLocal);
14397                                 lvaTable[stackallocAsLocal].lvType           = TYP_BLK;
14398                                 lvaTable[stackallocAsLocal].lvExactSize      = (unsigned)allocSize;
14399                                 lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
14400                                 op1                      = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
14401                                 op1                      = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
14402                                 convertedToLocal         = true;
14403                                 compGSReorderStackLayout = true;
14404                             }
14405                         }
14406                     }
14407
14408                     if (!convertedToLocal)
14409                     {
14410                         // Bail out if inlining and the localloc was not converted.
14411                         //
14412                         // Note we might consider allowing the inline, if the call
14413                         // site is not in a loop.
14414                         if (compIsForInlining())
14415                         {
14416                             InlineObservation obs = op2->IsIntegralConst()
14417                                                         ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
14418                                                         : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
14419                             compInlineResult->NoteFatal(obs);
14420                             return;
14421                         }
14422
14423                         op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14424                         // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14425                         op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14426
14427                         /* The FP register may not be back to the original value at the end
14428                            of the method, even if the frame size is 0, as localloc may
14429                            have modified it. So we will HAVE to reset it */
14430                         compLocallocUsed = true;
14431                     }
14432                     else
14433                     {
14434                         compLocallocOptimized = true;
14435                     }
14436                 }
14437
14438                 impPushOnStack(op1, tiRetVal);
14439                 break;
14440
14441             case CEE_ISINST:
14442             {
14443                 /* Get the type token */
14444                 assertImp(sz == sizeof(unsigned));
14445
14446                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14447
14448                 JITDUMP(" %08X", resolvedToken.token);
14449
14450                 if (!opts.IsReadyToRun())
14451                 {
14452                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14453                     if (op2 == nullptr)
14454                     { // compDonotInline()
14455                         return;
14456                     }
14457                 }
14458
14459                 if (tiVerificationNeeded)
14460                 {
14461                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14462                     // Even if this is a value class, we know it is boxed.
14463                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14464                 }
14465                 accessAllowedResult =
14466                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14467                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14468
14469                 op1 = impPopStack().val;
14470
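                      // impOptimizeCastClassOrIsInst may fold the type test away entirely when the
                      // result is provable at jit time (for instance from the operand's exact type);
                      // in that case no runtime check is emitted.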
14471                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
14472
14473                 if (optTree != nullptr)
14474                 {
14475                     impPushOnStack(optTree, tiRetVal);
14476                 }
14477                 else
14478                 {
14479
14480 #ifdef FEATURE_READYTORUN_COMPILER
14481                     if (opts.IsReadyToRun())
14482                     {
14483                         GenTreeCall* opLookup =
14484                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14485                                                       gtNewArgList(op1));
14486                         usingReadyToRunHelper = (opLookup != nullptr);
14487                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
14488
14489                         if (!usingReadyToRunHelper)
14490                         {
14491                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14492                             // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14493                             //      1) Load the context
14494                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
14495                             //      stub
14496                             //      3) Perform the 'is instance' check on the input object
14497                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14498
14499                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14500                             if (op2 == nullptr)
14501                             { // compDonotInline()
14502                                 return;
14503                             }
14504                         }
14505                     }
14506
14507                     if (!usingReadyToRunHelper)
14508 #endif
14509                     {
14510                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
14511                     }
14512                     if (compDonotInline())
14513                     {
14514                         return;
14515                     }
14516
14517                     impPushOnStack(op1, tiRetVal);
14518                 }
14519                 break;
14520             }
14521
14522             case CEE_REFANYVAL:
14523
14524                 // get the class handle and make a ICON node out of it
14525
14526                 _impResolveToken(CORINFO_TOKENKIND_Class);
14527
14528                 JITDUMP(" %08X", resolvedToken.token);
14529
14530                 op2 = impTokenToHandle(&resolvedToken);
14531                 if (op2 == nullptr)
14532                 { // compDonotInline()
14533                     return;
14534                 }
14535
14536                 if (tiVerificationNeeded)
14537                 {
14538                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14539                            "need refany");
14540                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
14541                 }
14542
14543                 op1 = impPopStack().val;
14544                 // make certain it is normalized;
14545                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14546
14547                 // Call helper GETREFANY(classHandle, op1);
14548                 args = gtNewArgList(op2, op1);
14549                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
14550
14551                 impPushOnStack(op1, tiRetVal);
14552                 break;
14553
14554             case CEE_REFANYTYPE:
14555
14556                 if (tiVerificationNeeded)
14557                 {
14558                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14559                            "need refany");
14560                 }
14561
14562                 op1 = impPopStack().val;
14563
14564                 // make certain it is normalized;
14565                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14566
14567                 if (op1->gtOper == GT_OBJ)
14568                 {
14569                     // Get the address of the refany
14570                     op1 = op1->gtOp.gtOp1;
14571
14572                     // Fetch the type from the correct slot
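                          // A refany (CORINFO_RefAny / TypedReference) is a {data pointer, type handle}
                          // pair, so the type handle lives at offsetof(CORINFO_RefAny, type).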
14573                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14574                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
14575                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
14576                 }
14577                 else
14578                 {
14579                     assertImp(op1->gtOper == GT_MKREFANY);
14580
14581                     // The pointer may have side-effects
14582                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
14583                     {
14584                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14585 #ifdef DEBUG
14586                         impNoteLastILoffs();
14587 #endif
14588                     }
14589
14590                     // We already have the class handle
14591                     op1 = op1->gtOp.gtOp2;
14592                 }
14593
14594                 // convert native TypeHandle to RuntimeTypeHandle
14595                 {
14596                     GenTreeArgList* helperArgs = gtNewArgList(op1);
14597
14598                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, helperArgs);
14599
14600                     // The handle struct is returned in a register
14601                     op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14602
14603                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
14604                 }
14605
14606                 impPushOnStack(op1, tiRetVal);
14607                 break;
14608
14609             case CEE_LDTOKEN:
14610             {
14611                 /* Get the Class index */
14612                 assertImp(sz == sizeof(unsigned));
14613                 lastLoadToken = codeAddr;
14614                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
14615
14616                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
14617
14618                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14619                 if (op1 == nullptr)
14620                 { // compDonotInline()
14621                     return;
14622                 }
14623
14624                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
14625                 assert(resolvedToken.hClass != nullptr);
14626
14627                 if (resolvedToken.hMethod != nullptr)
14628                 {
14629                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
14630                 }
14631                 else if (resolvedToken.hField != nullptr)
14632                 {
14633                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
14634                 }
14635
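                      // Depending on whether the token resolved to a method, a field, or (by default) a
                      // type, a different helper converts the native handle into the corresponding
                      // managed runtime-handle struct.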
14636                 GenTreeArgList* helperArgs = gtNewArgList(op1);
14637
14638                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
14639
14640                 // The handle struct is returned in a register
14641                 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14642
14643                 tiRetVal = verMakeTypeInfo(tokenType);
14644                 impPushOnStack(op1, tiRetVal);
14645             }
14646             break;
14647
14648             case CEE_UNBOX:
14649             case CEE_UNBOX_ANY:
14650             {
14651                 /* Get the Class index */
14652                 assertImp(sz == sizeof(unsigned));
14653
14654                 _impResolveToken(CORINFO_TOKENKIND_Class);
14655
14656                 JITDUMP(" %08X", resolvedToken.token);
14657
14658                 BOOL runtimeLookup;
14659                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
14660                 if (op2 == nullptr)
14661                 {
14662                     assert(compDonotInline());
14663                     return;
14664                 }
14665
14666                 // Run this always so we can get access exceptions even with SkipVerification.
14667                 accessAllowedResult =
14668                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14669                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14670
14671                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
14672                 {
14673                     if (tiVerificationNeeded)
14674                     {
14675                         typeInfo tiUnbox = impStackTop().seTypeInfo;
14676                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
14677                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14678                         tiRetVal.NormaliseForStack();
14679                     }
14680                     JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
14681                     op1 = impPopStack().val;
14682                     goto CASTCLASS;
14683                 }
14684
14685                 /* Pop the object and create the unbox helper call */
14686                 /* You might think that for UNBOX_ANY we need to push a different */
14687                 /* (non-byref) type, but here we're making the tiRetVal that is used */
14688                 /* for the intermediate pointer which we then transfer onto the OBJ */
14689                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
14690                 if (tiVerificationNeeded)
14691                 {
14692                     typeInfo tiUnbox = impStackTop().seTypeInfo;
14693                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
14694
14695                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14696                     Verify(tiRetVal.IsValueClass(), "not value class");
14697                     tiRetVal.MakeByRef();
14698
14699                     // We always come from an objref, so this is safe byref
14700                     tiRetVal.SetIsPermanentHomeByRef();
14701                     tiRetVal.SetIsReadonlyByRef();
14702                 }
14703
14704                 op1 = impPopStack().val;
14705                 assertImp(op1->gtType == TYP_REF);
14706
14707                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
14708                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
14709
14710                 // Check legality and profitability of inline expansion for unboxing.
14711                 const bool canExpandInline    = (helper == CORINFO_HELP_UNBOX);
14712                 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
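                      // Expansion is skipped for rarely-run blocks and for debuggable/MinOpts code,
                      // where smaller code and faster jitting matter more than the inline fast path.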
14713
14714                 if (canExpandInline && shouldExpandInline)
14715                 {
14716                     JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
14717                     // we are doing normal unboxing
14718                     // inline the common case of the unbox helper
14719                     // UNBOX(exp) morphs into
14720                     // clone = pop(exp);
14721                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14722                     // push(clone + TARGET_POINTER_SIZE)
14723                     //
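                          // In other words, the fast path compares the object's method table pointer
                          // against the expected type handle inline; only on a mismatch is the unbox
                          // helper called to perform the full (possibly throwing) check.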
14724                     GenTree* cloneOperand;
14725                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14726                                        nullptr DEBUGARG("inline UNBOX clone1"));
14727                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14728
14729                     GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14730
14731                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14732                                        nullptr DEBUGARG("inline UNBOX clone2"));
14733                     op2 = impTokenToHandle(&resolvedToken);
14734                     if (op2 == nullptr)
14735                     { // compDonotInline()
14736                         return;
14737                     }
14738                     args = gtNewArgList(op2, op1);
14739                     op1  = gtNewHelperCallNode(helper, TYP_VOID, args);
14740
14741                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14742                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14743                     condBox->gtFlags |= GTF_RELOP_QMARK;
14744
14745                     // QMARK nodes cannot reside on the evaluation stack. Because there
14746                     // may be other trees on the evaluation stack that side-effect the
14747                     // sources of the UNBOX operation we must spill the stack.
14748
14749                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14750
14751                     // Create the address-expression to reference past the object header
14752                     // to the beginning of the value-type. Today this means adjusting
14753                     // past the base of the object's vtable field, which is pointer sized.
14754
14755                     op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
14756                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14757                 }
14758                 else
14759                 {
14760                     JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
14761                             canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
14762
14763                     // Don't optimize, just call the helper and be done with it
14764                     args = gtNewArgList(op2, op1);
14765                     op1 =
14766                         gtNewHelperCallNode(helper,
14767                                             (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
14768                 }
14769
14770                 assert(((helper == CORINFO_HELP_UNBOX) && (op1->gtType == TYP_BYREF)) || // Unbox helper returns a byref.
14771                        ((helper == CORINFO_HELP_UNBOX_NULLABLE) &&
14772                            varTypeIsStruct(op1)) // UnboxNullable helper returns a struct.
14773                        );
14774
14775                 /*
14776                   ----------------------------------------------------------------------
14777                   | \ helper  |                         |                              |
14778                   |   \       |                         |                              |
14779                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14780                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14781                   | opcode  \ |                         |                              |
14782                   |---------------------------------------------------------------------
14783                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14784                   |           |                         | push the BYREF to this local |
14785                   |---------------------------------------------------------------------
14786                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14787                   |           | the BYREF               | For Linux when the           |
14788                   |           |                         |  struct is returned in two   |
14789                   |           |                         |  registers create a temp     |
14790                   |           |                         |  whose address is passed to  |
14791                   |           |                         |  the unbox_nullable helper.  |
14792                   |---------------------------------------------------------------------
14793                 */
14794
14795                 if (opcode == CEE_UNBOX)
14796                 {
14797                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14798                     {
14799                         // Unbox nullable helper returns a struct type.
14800                         // We need to spill it to a temp so that we can take its address.
14801                         // Here we need an unsafe value cls check, since the address of the struct is taken to be
14802                         // used further along and could potentially be exploited.
14803
14804                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14805                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14806
14807                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14808                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14809                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14810
14811                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14812                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14813                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14814                     }
14815
14816                     assert(op1->gtType == TYP_BYREF);
14817                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14818                 }
14819                 else
14820                 {
14821                     assert(opcode == CEE_UNBOX_ANY);
14822
14823                     if (helper == CORINFO_HELP_UNBOX)
14824                     {
14825                         // Normal unbox helper returns a TYP_BYREF.
14826                         impPushOnStack(op1, tiRetVal);
14827                         oper = GT_OBJ;
14828                         goto OBJ;
14829                     }
14830
14831                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14832
14833 #if FEATURE_MULTIREG_RET
14834
14835                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14836                     {
14837                         // Unbox nullable helper returns a TYP_STRUCT.
14838                         // For the multi-reg case we need to spill it to a temp so that
14839                         // we can pass the address to the unbox_nullable jit helper.
14840
14841                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14842                         lvaTable[tmp].lvIsMultiRegArg = true;
14843                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14844
14845                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14846                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14847                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14848
14849                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14850                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14851                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14852
14853                         // In this case the return value of the unbox helper is TYP_BYREF.
14854                         // Make sure the right type is placed on the operand type stack.
14855                         impPushOnStack(op1, tiRetVal);
14856
14857                         // Load the struct.
14858                         oper = GT_OBJ;
14859
14860                         assert(op1->gtType == TYP_BYREF);
14861                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14862
14863                         goto OBJ;
14864                     }
14865                     else
14866
14867 #endif // FEATURE_MULTIREG_RET
14868
14869                     {
14870                         // If non register passable struct we have it materialized in the RetBuf.
14871                         assert(op1->gtType == TYP_STRUCT);
14872                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14873                         assert(tiRetVal.IsValueClass());
14874                     }
14875                 }
14876
14877                 impPushOnStack(op1, tiRetVal);
14878             }
14879             break;
14880
14881             case CEE_BOX:
14882             {
14883                 /* Get the Class index */
14884                 assertImp(sz == sizeof(unsigned));
14885
14886                 _impResolveToken(CORINFO_TOKENKIND_Box);
14887
14888                 JITDUMP(" %08X", resolvedToken.token);
14889
14890                 if (tiVerificationNeeded)
14891                 {
14892                     typeInfo tiActual = impStackTop().seTypeInfo;
14893                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14894
14895                     Verify(verIsBoxable(tiBox), "boxable type expected");
14896
14897                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14898                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14899                            "boxed type has unsatisfied class constraints");
14900
14901                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14902
14903                     // Observation: the following code introduces a boxed value class on the stack, but,
14904                     // according to the ECMA spec, one would simply expect: tiRetVal =
14905                     // typeInfo(TI_REF,impGetObjectClass());
14906
14907                     // Push the result back on the stack,
14908                     // even if clsHnd is a value class we want the TI_REF
14909                     // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14910                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14911                 }
14912
14913                 accessAllowedResult =
14914                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14915                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14916
14917                 // Note BOX can be used on things that are not value classes, in which
14918                 // case we get a NOP.  However the verifier's view of the type on the
14919                 // stack changes (in generic code a 'T' becomes a 'boxed T')
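                      // For instance, boxing a 'T' instantiated over a reference type leaves the object
                      // reference untouched at run time; only the verifier-visible type changes.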
14920                 if (!eeIsValueClass(resolvedToken.hClass))
14921                 {
14922                     JITDUMP("\n Importing BOX(refClass) as NOP\n");
14923                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14924                     break;
14925                 }
14926
14927                 // Look ahead for unbox.any
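                      // A 'box T' immediately followed by 'unbox.any T' on an equivalent type (e.g. the
                      // IL a C# "(T)(object)val" pattern produces in shared generic code) cancels out:
                      // the value stays on the stack and both instructions are elided.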
14928                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14929                 {
14930                     CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14931
14932                     impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14933
14934                     // See if the resolved tokens describe types that are equal.
14935                     const TypeCompareState compare =
14936                         info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
14937
14938                     // If so, box/unbox.any is a nop.
14939                     if (compare == TypeCompareState::Must)
14940                     {
14941                         JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
14942                         // Skip the next unbox.any instruction
14943                         sz += sizeof(mdToken) + 1;
14944                         break;
14945                     }
14946                 }
14947
14948                 impImportAndPushBox(&resolvedToken);
14949                 if (compDonotInline())
14950                 {
14951                     return;
14952                 }
14953             }
14954             break;
14955
14956             case CEE_SIZEOF:
14957
14958                 /* Get the Class index */
14959                 assertImp(sz == sizeof(unsigned));
14960
14961                 _impResolveToken(CORINFO_TOKENKIND_Class);
14962
14963                 JITDUMP(" %08X", resolvedToken.token);
14964
14965                 if (tiVerificationNeeded)
14966                 {
14967                     tiRetVal = typeInfo(TI_INT);
14968                 }
14969
14970                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14971                 impPushOnStack(op1, tiRetVal);
14972                 break;
14973
14974             case CEE_CASTCLASS:
14975
14976                 /* Get the Class index */
14977
14978                 assertImp(sz == sizeof(unsigned));
14979
14980                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14981
14982                 JITDUMP(" %08X", resolvedToken.token);
14983
14984                 if (!opts.IsReadyToRun())
14985                 {
14986                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14987                     if (op2 == nullptr)
14988                     { // compDonotInline()
14989                         return;
14990                     }
14991                 }
14992
14993                 if (tiVerificationNeeded)
14994                 {
14995                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14996                     // box it
14997                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14998                 }
14999
15000                 accessAllowedResult =
15001                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15002                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15003
15004                 op1 = impPopStack().val;
15005
15006             /* Pop the object and create the 'checked cast' helper call */
15007
15008             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
15009             // and op2 to contain code that creates the type handle corresponding to typeRef
15010             CASTCLASS:
15011             {
15012                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
15013
15014                 if (optTree != nullptr)
15015                 {
15016                     impPushOnStack(optTree, tiRetVal);
15017                 }
15018                 else
15019                 {
15020
15021 #ifdef FEATURE_READYTORUN_COMPILER
15022                     if (opts.IsReadyToRun())
15023                     {
15024                         GenTreeCall* opLookup =
15025                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
15026                                                       gtNewArgList(op1));
15027                         usingReadyToRunHelper = (opLookup != nullptr);
15028                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
15029
15030                         if (!usingReadyToRunHelper)
15031                         {
15032                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
15033                             // and the chkcastany call with a single call to a dynamic R2R cell that will:
15034                             //      1) Load the context
15035                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
15036                             //      stub
15037                             //      3) Check the object on the stack for the type-cast
15038                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15039
15040                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15041                             if (op2 == nullptr)
15042                             { // compDonotInline()
15043                                 return;
15044                             }
15045                         }
15046                     }
15047
15048                     if (!usingReadyToRunHelper)
15049 #endif
15050                     {
15051                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
15052                     }
15053                     if (compDonotInline())
15054                     {
15055                         return;
15056                     }
15057
15058                     /* Push the result back on the stack */
15059                     impPushOnStack(op1, tiRetVal);
15060                 }
15061             }
15062             break;
15063
15064             case CEE_THROW:
15065
15066                 if (compIsForInlining())
15067                 {
15068                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15069                     // TODO: Will this be too strict, given that we will inline many basic blocks?
15070                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15071
15072                     /* Do we have just the exception on the stack? */
15073
15074                     if (verCurrentState.esStackDepth != 1)
15075                     {
15076                         /* if not, just don't inline the method */
15077
15078                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
15079                         return;
15080                     }
15081                 }
15082
15083                 if (tiVerificationNeeded)
15084                 {
15085                     tiRetVal = impStackTop().seTypeInfo;
15086                     Verify(tiRetVal.IsObjRef(), "object ref expected");
15087                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15088                     {
15089                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
15090                     }
15091                 }
15092
15093                 block->bbSetRunRarely(); // any block with a throw is rare
15094                 /* Pop the exception object and create the 'throw' helper call */
15095
15096                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
15097
15098             EVAL_APPEND:
15099                 if (verCurrentState.esStackDepth > 0)
15100                 {
15101                     impEvalSideEffects();
15102                 }
15103
15104                 assert(verCurrentState.esStackDepth == 0);
15105
15106                 goto APPEND;
15107
15108             case CEE_RETHROW:
15109
15110                 assert(!compIsForInlining());
15111
15112                 if (info.compXcptnsCount == 0)
15113                 {
15114                     BADCODE("rethrow outside catch");
15115                 }
15116
15117                 if (tiVerificationNeeded)
15118                 {
15119                     Verify(block->hasHndIndex(), "rethrow outside catch");
15120                     if (block->hasHndIndex())
15121                     {
15122                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
15123                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
15124                         if (HBtab->HasFilter())
15125                         {
15126                             // we better be in the handler clause part, not the filter part
15127                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
15128                                    "rethrow in filter");
15129                         }
15130                     }
15131                 }
15132
15133                 /* Create the 'rethrow' helper call */
15134
15135                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
15136
15137                 goto EVAL_APPEND;
15138
15139             case CEE_INITOBJ:
15140
15141                 assertImp(sz == sizeof(unsigned));
15142
15143                 _impResolveToken(CORINFO_TOKENKIND_Class);
15144
15145                 JITDUMP(" %08X", resolvedToken.token);
15146
15147                 if (tiVerificationNeeded)
15148                 {
15149                     typeInfo tiTo    = impStackTop().seTypeInfo;
15150                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15151
15152                     Verify(tiTo.IsByRef(), "byref expected");
15153                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15154
15155                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15156                            "type operand incompatible with type of address");
15157                 }
15158
15159                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
15160                 op2  = gtNewIconNode(0);                                     // Value
15161                 op1  = impPopStack().val;                                    // Dest
15162                 op1  = gtNewBlockVal(op1, size);
15163                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15164                 goto SPILL_APPEND;
15165
15166             case CEE_INITBLK:
15167
15168                 if (tiVerificationNeeded)
15169                 {
15170                     Verify(false, "bad opcode");
15171                 }
15172
15173                 op3 = impPopStack().val; // Size
15174                 op2 = impPopStack().val; // Value
15175                 op1 = impPopStack().val; // Dest
15176
15177                 if (op3->IsCnsIntOrI())
15178                 {
15179                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15180                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15181                 }
15182                 else
15183                 {
15184                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15185                     size = 0;
15186                 }
15187                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15188
15189                 goto SPILL_APPEND;
15190
15191             case CEE_CPBLK:
15192
15193                 if (tiVerificationNeeded)
15194                 {
15195                     Verify(false, "bad opcode");
15196                 }
15197                 op3 = impPopStack().val; // Size
15198                 op2 = impPopStack().val; // Src
15199                 op1 = impPopStack().val; // Dest
15200
15201                 if (op3->IsCnsIntOrI())
15202                 {
15203                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15204                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15205                 }
15206                 else
15207                 {
15208                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15209                     size = 0;
15210                 }
15211                 if (op2->OperGet() == GT_ADDR)
15212                 {
15213                     op2 = op2->gtOp.gtOp1;
15214                 }
15215                 else
15216                 {
15217                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
15218                 }
15219
15220                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
15221                 goto SPILL_APPEND;
15222
15223             case CEE_CPOBJ:
15224
15225                 assertImp(sz == sizeof(unsigned));
15226
15227                 _impResolveToken(CORINFO_TOKENKIND_Class);
15228
15229                 JITDUMP(" %08X", resolvedToken.token);
15230
15231                 if (tiVerificationNeeded)
15232                 {
15233                     typeInfo tiFrom  = impStackTop().seTypeInfo;
15234                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
15235                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15236
15237                     Verify(tiFrom.IsByRef(), "expected byref source");
15238                     Verify(tiTo.IsByRef(), "expected byref destination");
15239
15240                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
15241                            "type of source address incompatible with type operand");
15242                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15243                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15244                            "type operand incompatible with type of destination address");
15245                 }
15246
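                      // For a non-value class, cpobj simply copies the object reference itself: load it
                      // through the source address and fall into the STIND_REF store path below.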
15247                 if (!eeIsValueClass(resolvedToken.hClass))
15248                 {
15249                     op1 = impPopStack().val; // address to load from
15250
15251                     impBashVarAddrsToI(op1);
15252
15253                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
15254
15255                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
15256                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
15257
15258                     impPushOnStack(op1, typeInfo());
15259                     opcode = CEE_STIND_REF;
15260                     lclTyp = TYP_REF;
15261                     goto STIND_POST_VERIFY;
15262                 }
15263
15264                 op2 = impPopStack().val; // Src
15265                 op1 = impPopStack().val; // Dest
15266                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
15267                 goto SPILL_APPEND;
15268
15269             case CEE_STOBJ:
15270             {
15271                 assertImp(sz == sizeof(unsigned));
15272
15273                 _impResolveToken(CORINFO_TOKENKIND_Class);
15274
15275                 JITDUMP(" %08X", resolvedToken.token);
15276
15277                 if (eeIsValueClass(resolvedToken.hClass))
15278                 {
15279                     lclTyp = TYP_STRUCT;
15280                 }
15281                 else
15282                 {
15283                     lclTyp = TYP_REF;
15284                 }
15285
15286                 if (tiVerificationNeeded)
15287                 {
15288
15289                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
15290
15291                     // Make sure we have a good looking byref
15292                     Verify(tiPtr.IsByRef(), "pointer not byref");
15293                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
15294                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
15295                     {
15296                         compUnsafeCastUsed = true;
15297                     }
15298
15299                     typeInfo ptrVal = DereferenceByRef(tiPtr);
15300                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
15301
15302                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
15303                     {
15304                         Verify(false, "type of value incompatible with type operand");
15305                         compUnsafeCastUsed = true;
15306                     }
15307
15308                     if (!tiCompatibleWith(argVal, ptrVal, false))
15309                     {
15310                         Verify(false, "type operand incompatible with type of address");
15311                         compUnsafeCastUsed = true;
15312                     }
15313                 }
15314                 else
15315                 {
15316                     compUnsafeCastUsed = true;
15317                 }
15318
15319                 if (lclTyp == TYP_REF)
15320                 {
15321                     opcode = CEE_STIND_REF;
15322                     goto STIND_POST_VERIFY;
15323                 }
15324
15325                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15326                 if (impIsPrimitive(jitTyp))
15327                 {
15328                     lclTyp = JITtype2varType(jitTyp);
15329                     goto STIND_POST_VERIFY;
15330                 }
15331
15332                 op2 = impPopStack().val; // Value
15333                 op1 = impPopStack().val; // Ptr
15334
15335                 assertImp(varTypeIsStruct(op2));
15336
15337                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15338
15339                 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
15340                 {
15341                     op1->gtFlags |= GTF_BLK_UNALIGNED;
15342                 }
15343                 goto SPILL_APPEND;
15344             }
15345
15346             case CEE_MKREFANY:
15347
15348                 assert(!compIsForInlining());
15349
15350                 // Being lazy here. Refanys are tricky in terms of gc tracking.
15351                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
15352
15353                 JITDUMP("disabling struct promotion because of mkrefany\n");
15354                 fgNoStructPromotion = true;
15355
15356                 oper = GT_MKREFANY;
15357                 assertImp(sz == sizeof(unsigned));
15358
15359                 _impResolveToken(CORINFO_TOKENKIND_Class);
15360
15361                 JITDUMP(" %08X", resolvedToken.token);
15362
15363                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15364                 if (op2 == nullptr)
15365                 { // compDonotInline()
15366                     return;
15367                 }
15368
15369                 if (tiVerificationNeeded)
15370                 {
15371                     typeInfo tiPtr   = impStackTop().seTypeInfo;
15372                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15373
15374                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
15375                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
15376                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
15377                 }
15378
15379                 accessAllowedResult =
15380                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15381                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15382
15383                 op1 = impPopStack().val;
15384
15385                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
15386                 // But JIT32 allowed it, so we continue to allow it.
15387                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
15388
15389                 // MKREFANY returns a struct.  op2 is the class token.
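                      // (The resulting struct is decomposed later, when it is stored to its destination,
                      // into separate stores of the data pointer and the type handle.)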
15390                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
15391
15392                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
15393                 break;
15394
15395             case CEE_LDOBJ:
15396             {
15397                 oper = GT_OBJ;
15398                 assertImp(sz == sizeof(unsigned));
15399
15400                 _impResolveToken(CORINFO_TOKENKIND_Class);
15401
15402                 JITDUMP(" %08X", resolvedToken.token);
15403
15404             OBJ:
15405
15406                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15407
15408                 if (tiVerificationNeeded)
15409                 {
15410                     typeInfo tiPtr = impStackTop().seTypeInfo;
15411
15412                     // Make sure we have a byref
15413                     if (!tiPtr.IsByRef())
15414                     {
15415                         Verify(false, "pointer not byref");
15416                         compUnsafeCastUsed = true;
15417                     }
15418                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
15419
15420                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
15421                     {
15422                         Verify(false, "type of address incompatible with type operand");
15423                         compUnsafeCastUsed = true;
15424                     }
15425                     tiRetVal.NormaliseForStack();
15426                 }
15427                 else
15428                 {
15429                     compUnsafeCastUsed = true;
15430                 }
15431
15432                 if (eeIsValueClass(resolvedToken.hClass))
15433                 {
15434                     lclTyp = TYP_STRUCT;
15435                 }
15436                 else
15437                 {
15438                     lclTyp = TYP_REF;
15439                     opcode = CEE_LDIND_REF;
15440                     goto LDIND_POST_VERIFY;
15441                 }
15442
15443                 op1 = impPopStack().val;
15444
15445                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
15446
15447                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15448                 if (impIsPrimitive(jitTyp))
15449                 {
15450                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
15451
15452                     // Could point anywhere, for example a boxed class static int
15453                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15454                     assertImp(varTypeIsArithmetic(op1->gtType));
15455                 }
15456                 else
15457                 {
15458                     // OBJ returns a struct
15459                     // and an inline argument which is the class token of the loaded obj
15460                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
15461                 }
15462                 op1->gtFlags |= GTF_EXCEPT;
15463
15464                 if (prefixFlags & PREFIX_UNALIGNED)
15465                 {
15466                     op1->gtFlags |= GTF_IND_UNALIGNED;
15467                 }
15468
15469                 impPushOnStack(op1, tiRetVal);
15470                 break;
15471             }
15472
15473             case CEE_LDLEN:
15474                 if (tiVerificationNeeded)
15475                 {
15476                     typeInfo tiArray = impStackTop().seTypeInfo;
15477                     Verify(verIsSDArray(tiArray), "bad array");
15478                     tiRetVal = typeInfo(TI_INT);
15479                 }
15480
15481                 op1 = impPopStack().val;
15482                 if (!opts.MinOpts() && !opts.compDbgCode)
15483                 {
15484                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
15485                     GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
15486
15487                     /* Mark the block as containing a length expression */
15488
15489                     if (op1->gtOper == GT_LCL_VAR)
15490                     {
15491                         block->bbFlags |= BBF_HAS_IDX_LEN;
15492                     }
15493
15494                     op1 = arrLen;
15495                 }
15496                 else
15497                 {
15498                     /* Create the expression "*(array_addr + ArrLenOffs)" */
15499                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15500                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
15501                     op1 = gtNewIndir(TYP_INT, op1);
15502                     op1->gtFlags |= GTF_IND_ARR_LEN;
15503                 }
15504
15505                 /* Push the result back on the stack */
15506                 impPushOnStack(op1, tiRetVal);
15507                 break;
15508
15509             case CEE_BREAK:
15510                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
15511                 goto SPILL_APPEND;
15512
15513             case CEE_NOP:
15514                 if (opts.compDbgCode)
15515                 {
15516                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
15517                     goto SPILL_APPEND;
15518                 }
15519                 break;
15520
15521             /******************************** NYI *******************************/
15522
15523             case 0xCC:
15524                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
15525
15526             case CEE_ILLEGAL:
15527             case CEE_MACRO_END:
15528
15529             default:
15530                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
15531         }
15532
15533         codeAddr += sz;
15534         prevOpcode = opcode;
15535
15536         prefixFlags = 0;
15537     }
15538
15539     return;
15540 #undef _impResolveToken
15541 }
15542 #ifdef _PREFAST_
15543 #pragma warning(pop)
15544 #endif
15545
15546 // Push a local/argument tree on the operand stack
15547 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
15548 {
15549     tiRetVal.NormaliseForStack();
15550
15551     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
15552     {
15553         tiRetVal.SetUninitialisedObjRef();
15554     }
15555
15556     impPushOnStack(op, tiRetVal);
15557 }
15558
15559 // Load a local/argument on the operand stack
15560 // lclNum is an index into lvaTable, *NOT* the arg/lcl index in the IL
15561 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
15562 {
15563     var_types lclTyp;
15564
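    // Normalize-on-load locals keep their exact (small) type so that the load itself widens the value;
    // all other locals are pushed using their widened (actual) stack type.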
15565     if (lvaTable[lclNum].lvNormalizeOnLoad())
15566     {
15567         lclTyp = lvaGetRealType(lclNum);
15568     }
15569     else
15570     {
15571         lclTyp = lvaGetActualType(lclNum);
15572     }
15573
15574     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
15575 }
15576
15577 // Load an argument on the operand stack
15578 // Shared by the various CEE_LDARG opcodes
15579 // ilArgNum is the argument index as specified in IL.
15580 // It will be mapped to the correct lvaTable index
15581 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
15582 {
15583     Verify(ilArgNum < info.compILargsCount, "bad arg num");
15584
15585     if (compIsForInlining())
15586     {
15587         if (ilArgNum >= info.compArgsCount)
15588         {
15589             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
15590             return;
15591         }
15592
15593         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
15594                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
15595     }
15596     else
15597     {
15598         if (ilArgNum >= info.compArgsCount)
15599         {
15600             BADCODE("Bad IL");
15601         }
15602
15603         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
15604
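        // Loads of the 'this' argument always go through lvaArg0Var, which may be a copy of the
        // incoming 'this' pointer rather than the argument itself.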
15605         if (lclNum == info.compThisArg)
15606         {
15607             lclNum = lvaArg0Var;
15608         }
15609
15610         impLoadVar(lclNum, offset);
15611     }
15612 }
15613
15614 // Load a local on the operand stack
15615 // Shared by the various CEE_LDLOC opcodes
15616 // ilLclNum is the local index as specified in IL.
15617 // It will be mapped to the correct lvaTable index
15618 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
15619 {
15620     if (tiVerificationNeeded)
15621     {
15622         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
15623         Verify(info.compInitMem, "initLocals not set");
15624     }
15625
15626     if (compIsForInlining())
15627     {
15628         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15629         {
15630             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
15631             return;
15632         }
15633
15634         // Get the local type
15635         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
15636
15637         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
15638
15639         /* Have we allocated a temp for this local? */
15640
15641         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
15642
15643         // All vars of inlined methods should be !lvNormalizeOnLoad()
15644
15645         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
15646         lclTyp = genActualType(lclTyp);
15647
15648         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
15649     }
15650     else
15651     {
15652         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15653         {
15654             BADCODE("Bad IL");
15655         }
15656
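        // In lvaTable the locals come right after the arguments, so the IL local index is offset by the
        // argument count.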
15657         unsigned lclNum = info.compArgsCount + ilLclNum;
15658
15659         impLoadVar(lclNum, offset);
15660     }
15661 }
15662
15663 #ifdef _TARGET_ARM_
15664 /**************************************************************************************
15665  *
15666  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
15667  *  dst struct, because struct promotion will turn it into a float/double variable while
15668  *  the rhs will be an int/long variable. We do not generate code for assigning an int into
15669  *  a float, but there is nothing that would prevent such a tree from being created. The tree
15670  *  would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
15671  *
15672  *  tmpNum - the lcl dst variable num that is a struct.
15673  *  src    - the src tree assigned to the dest; a struct, or an int in the varargs call case.
15674  *  hClass - the type handle for the struct variable.
15675  *
15676  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
15677  *        however, we could do a codegen of transferring from int to float registers
15678  *        (transfer, not a cast.)
15679  *
15680  */
15681 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
15682 {
15683     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15684     {
15685         int       hfaSlots = GetHfaCount(hClass);
15686         var_types hfaType  = GetHfaType(hClass);
15687
15688         // For varargs calls we morph the method's return type to be "int" at import time, irrespective of its
15689         // original struct/float type, because the ABI specifies the return in integer registers.
15690         // We don't want struct promotion to turn an expression like this:
15691         //   lclFld_int = callvar_int()   into   lclFld_float = callvar_int();
15692         // That would assign an int to a float without a cast. Prevent the promotion.
15693         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15694             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15695         {
15696             // Make sure this struct type stays as struct so we can receive the call in a struct.
15697             lvaTable[tmpNum].lvIsMultiRegRet = true;
15698         }
15699     }
15700 }
15701 #endif // _TARGET_ARM_
15702
15703 #if FEATURE_MULTIREG_RET
15704 GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
15705 {
15706     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
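    // Spill the multi-reg value into the new temp; the caller then refers to the single local instead.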
15707     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
15708     GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
15709
15710     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15711     ret->gtFlags |= GTF_DONT_CSE;
15712
15713     assert(IsMultiRegReturnedType(hClass));
15714
15715     // Mark the var so that fields are not promoted and stay together.
15716     lvaTable[tmpNum].lvIsMultiRegRet = true;
15717
15718     return ret;
15719 }
15720 #endif // FEATURE_MULTIREG_RET
15721
15722 // Do the import for a return instruction.
15723 // Returns false if inlining was aborted.
15724 // opcode can be CEE_RET, or a call opcode in the case of a tail.call.
15725 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15726 {
15727     if (tiVerificationNeeded)
15728     {
15729         verVerifyThisPtrInitialised();
15730
15731         unsigned expectedStack = 0;
15732         if (info.compRetType != TYP_VOID)
15733         {
15734             typeInfo tiVal = impStackTop().seTypeInfo;
15735             typeInfo tiDeclared =
15736                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15737
15738             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15739
15740             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15741             expectedStack = 1;
15742         }
15743         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15744     }
15745
15746 #ifdef DEBUG
15747     // If we are importing an inlinee and have GC ref locals we always
15748     // need to have a spill temp for the return value.  This temp
15749     // should have been set up in advance, over in fgFindBasicBlocks.
15750     if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
15751     {
15752         assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
15753     }
15754 #endif // DEBUG
15755
15756     GenTree*             op2       = nullptr;
15757     GenTree*             op1       = nullptr;
15758     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15759
15760     if (info.compRetType != TYP_VOID)
15761     {
15762         StackEntry se = impPopStack();
15763         retClsHnd     = se.seTypeInfo.GetClassHandle();
15764         op2           = se.val;
15765
15766         if (!compIsForInlining())
15767         {
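            // Coerce the return value to the declared return type; implicit int32 <-> native int and
            // float <-> double conversions are inserted here when needed.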
15768             impBashVarAddrsToI(op2);
15769             op2 = impImplicitIorI4Cast(op2, info.compRetType);
15770             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15771             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15772                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15773                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15774                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15775                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15776
15777 #ifdef DEBUG
15778             if (opts.compGcChecks && info.compRetType == TYP_REF)
15779             {
15780                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
15781                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15782                 // one-return BB.
15783
15784                 assert(op2->gtType == TYP_REF);
15785
15786                 // confirm that the argument is a GC pointer (for debugging (GC stress))
15787                 GenTreeArgList* args = gtNewArgList(op2);
15788                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
15789
15790                 if (verbose)
15791                 {
15792                     printf("\ncompGcChecks tree:\n");
15793                     gtDispTree(op2);
15794                 }
15795             }
15796 #endif
15797         }
15798         else
15799         {
15800             // inlinee's stack should be empty now.
15801             assert(verCurrentState.esStackDepth == 0);
15802
15803 #ifdef DEBUG
15804             if (verbose)
15805             {
15806                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15807                 gtDispTree(op2);
15808             }
15809 #endif
15810
15811             // Make sure the type matches the original call.
15812
15813             var_types returnType       = genActualType(op2->gtType);
15814             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15815             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15816             {
15817                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15818             }
15819
15820             if (returnType != originalCallType)
15821             {
15822                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15823                 return false;
15824             }
15825
15826             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15827             // expression. At this point, retExpr could already be set if there are multiple
15828             // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
15829             // the other blocks already set it. If there is only a single return block,
15830             // retExpr shouldn't be set. However, this is not true if we reimport a block
15831             // with a return. In that case, retExpr will be set, then the block will be
15832             // reimported, but retExpr won't get cleared as part of setting the block to
15833             // be reimported. The reimported retExpr value should be the same, so even if
15834             // we don't unconditionally overwrite it, it shouldn't matter.
15835             if (info.compRetNativeType != TYP_STRUCT)
15836             {
15837                 // compRetNativeType is not TYP_STRUCT.
15838                 // This implies it could be either a scalar type or SIMD vector type or
15839                 // a struct type that can be normalized to a scalar type.
15840
15841                 if (varTypeIsStruct(info.compRetType))
15842                 {
15843                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15844                     // adjust the type away from struct to integral,
15845                     // with no normalizing
15846                     op2 = impFixupStructReturnType(op2, retClsHnd);
15847                 }
15848                 else
15849                 {
15850                     // Do we have to normalize?
15851                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15852                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15853                         fgCastNeeded(op2, fncRealRetType))
15854                     {
15855                         // Small-typed return values are normalized by the callee
15856                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15857                     }
15858                 }
15859
15860                 if (fgNeedReturnSpillTemp())
15861                 {
15862                     assert(info.compRetNativeType != TYP_VOID &&
15863                            (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
15864
15865                     // If this method returns a ref type, track the actual types seen
15866                     // in the returns.
15867                     if (info.compRetType == TYP_REF)
15868                     {
15869                         bool                 isExact      = false;
15870                         bool                 isNonNull    = false;
15871                         CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
15872
15873                         if (impInlineInfo->retExpr == nullptr)
15874                         {
15875                             // This is the first return, so best known type is the type
15876                             // of this return value.
15877                             impInlineInfo->retExprClassHnd        = returnClsHnd;
15878                             impInlineInfo->retExprClassHndIsExact = isExact;
15879                         }
15880                         else if (impInlineInfo->retExprClassHnd != returnClsHnd)
15881                         {
15882                             // This return site type differs from earlier seen sites,
15883                             // so reset the info and we'll fall back to using the method's
15884                             // declared return type for the return spill temp.
15885                             impInlineInfo->retExprClassHnd        = nullptr;
15886                             impInlineInfo->retExprClassHndIsExact = false;
15887                         }
15888                     }
15889
15890                     // This is a bit of a workaround...
15891                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15892                     // not a struct (for example, the struct is composed of exactly one int, and the native
15893                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15894                     // fgNeedReturnSpillTemp() == true, and is the index of a local var that is set
15895                     // fgNeedReturnSpillTemp() == true, and lvaInlineeReturnSpillTemp is the index of a local var set
15896                     // a call, then we have a problem. The situation is like this (from a failed test case):
15897                     //
15898                     // inliner:
15899                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15900                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15901                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15902                     //
15903                     // inlinee:
15904                     //      ...
15905                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15906                     //      ret
15907                     //      ...
15908                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15909                     //      object&, class System.Func`1<!!0>)
15910                     //      ret
15911                     //
15912                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15913                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15914                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15915                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15916                     //
15917                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15918                     // native return type, which is what it will be set to eventually. We generate the
15919                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15920                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15921
15922                     bool restoreType = false;
15923                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15924                     {
15925                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15926                         op2->gtType = info.compRetNativeType;
15927                         restoreType = true;
15928                     }
15929
15930                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15931                                      (unsigned)CHECK_SPILL_ALL);
15932
15933                     GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15934
15935                     if (restoreType)
15936                     {
15937                         op2->gtType = TYP_STRUCT; // restore it to what it was
15938                     }
15939
15940                     op2 = tmpOp2;
15941
15942 #ifdef DEBUG
15943                     if (impInlineInfo->retExpr)
15944                     {
15945                         // Some other block(s) have seen the CEE_RET first.
15946                         // They had better have spilled to the same temp.
15947                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15948                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15949                     }
15950 #endif
15951                 }
15952
15953 #ifdef DEBUG
15954                 if (verbose)
15955                 {
15956                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15957                     gtDispTree(op2);
15958                 }
15959 #endif
15960
15961                 // Report the return expression
15962                 impInlineInfo->retExpr = op2;
15963             }
15964             else
15965             {
15966                 // compRetNativeType is TYP_STRUCT.
15967                 // This implies that the struct is returned via a RetBuf arg or as a multi-reg struct return.
15968
15969                 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
15970
15971                 // Assign the inlinee return into a spill temp.
15972                 // spill temp only exists if there are multiple return points
15973                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15974                 {
15975                     // In this case we have to insert multiple struct copies to the temp
15976                     // and the retExpr is just the temp.
15977                     assert(info.compRetNativeType != TYP_VOID);
15978                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
15979
15980                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15981                                      (unsigned)CHECK_SPILL_ALL);
15982                 }
15983
15984 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15985 #if defined(_TARGET_ARM_)
15986                 // TODO-ARM64-NYI: HFA
15987                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15988                 // next ifdefs could be refactored into a single method with the ifdef inside.
15989                 if (IsHfa(retClsHnd))
15990                 {
15991 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15992 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15993                 ReturnTypeDesc retTypeDesc;
15994                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15995                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15996
15997                 if (retRegCount != 0)
15998                 {
15999                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
16000                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
16001                     // max allowed.)
16002                     assert(retRegCount == MAX_RET_REG_COUNT);
16003                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
16004                     CLANG_FORMAT_COMMENT_ANCHOR;
16005 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16006
16007                     if (fgNeedReturnSpillTemp())
16008                     {
16009                         if (!impInlineInfo->retExpr)
16010                         {
16011 #if defined(_TARGET_ARM_)
16012                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
16013 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16014                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16015                             impInlineInfo->retExpr =
16016                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16017 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16018                         }
16019                     }
16020                     else
16021                     {
16022                         impInlineInfo->retExpr = op2;
16023                     }
16024                 }
16025                 else
16026 #elif defined(_TARGET_ARM64_)
16027                 ReturnTypeDesc retTypeDesc;
16028                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16029                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16030
16031                 if (retRegCount != 0)
16032                 {
16033                     assert(!iciCall->HasRetBufArg());
16034                     assert(retRegCount >= 2);
16035                     if (fgNeedReturnSpillTemp())
16036                     {
16037                         if (!impInlineInfo->retExpr)
16038                         {
16039                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16040                             impInlineInfo->retExpr =
16041                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16042                         }
16043                     }
16044                     else
16045                     {
16046                         impInlineInfo->retExpr = op2;
16047                     }
16048                 }
16049                 else
16050 #endif // defined(_TARGET_ARM64_)
16051                 {
16052                     assert(iciCall->HasRetBufArg());
16053                     GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
16054                     // spill temp only exists if there are multiple return points
16055                     if (fgNeedReturnSpillTemp())
16056                     {
16057                         // If this is the first return we have seen, set the retExpr
16058                         if (!impInlineInfo->retExpr)
16059                         {
16060                             impInlineInfo->retExpr =
16061                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
16062                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
16063                         }
16064                     }
16065                     else
16066                     {
16067                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16068                     }
16069                 }
16070             }
16071         }
16072     }
16073
16074     if (compIsForInlining())
16075     {
16076         return true;
16077     }
16078
16079     if (info.compRetType == TYP_VOID)
16080     {
16081         // return void
16082         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16083     }
16084     else if (info.compRetBuffArg != BAD_VAR_NUM)
16085     {
16086         // Assign value to return buff (first param)
16087         GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
16088
16089         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16090         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16091
16092         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
16093         CLANG_FORMAT_COMMENT_ANCHOR;
16094
16095 #if defined(_TARGET_AMD64_)
16096
16097         // The x64 (System V and Win64) calling conventions require the implicit return buffer
16098         // to be returned explicitly (in RAX).
16099         // Change the return type to be BYREF.
16100         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16101 #else  // !defined(_TARGET_AMD64_)
16102         // On non-AMD64 targets the profiler hook requires us to return the implicit RetBuf explicitly.
16103         // In that case the return value of the function is changed to BYREF.
16104         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
16105         if (compIsProfilerHookNeeded())
16106         {
16107             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16108         }
16109         else
16110         {
16111             // return void
16112             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16113         }
16114 #endif // !defined(_TARGET_AMD64_)
16115     }
16116     else if (varTypeIsStruct(info.compRetType))
16117     {
16118 #if !FEATURE_MULTIREG_RET
16119         // For both ARM architectures the HFA native types are maintained as structs.
16120         // Also on System V AMD64 the multireg struct returns are left as structs.
16121         noway_assert(info.compRetNativeType != TYP_STRUCT);
16122 #endif
16123         op2 = impFixupStructReturnType(op2, retClsHnd);
16124         // return op2
16125         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
16126     }
16127     else
16128     {
16129         // return op2
16130         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
16131     }
16132
16133     // We must have imported a tailcall and jumped to RET
16134     if (prefixFlags & PREFIX_TAILCALL)
16135     {
16136 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
16137         // Jit64 compat:
16138         // This cannot be asserted on Amd64 since we permit the following IL pattern:
16139         //      tail.call
16140         //      pop
16141         //      ret
16142         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
16143 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
16144
16145         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
16146
16147         // impImportCall() would have already appended TYP_VOID calls
16148         if (info.compRetType == TYP_VOID)
16149         {
16150             return true;
16151         }
16152     }
16153
16154     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16155 #ifdef DEBUG
16156     // Remember at which BC offset the tree was finished
16157     impNoteLastILoffs();
16158 #endif
16159     return true;
16160 }
16161
16162 /*****************************************************************************
16163  *  Mark the block as unimported.
16164  *  Note that the caller is responsible for calling impImportBlockPending(),
16165  *  with the appropriate stack-state
16166  */
16167
16168 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
16169 {
16170 #ifdef DEBUG
16171     if (verbose && (block->bbFlags & BBF_IMPORTED))
16172     {
16173         printf("\nBB%02u will be reimported\n", block->bbNum);
16174     }
16175 #endif
16176
16177     block->bbFlags &= ~BBF_IMPORTED;
16178 }
16179
16180 /*****************************************************************************
16181  *  Mark the successors of the given block as unimported.
16182  *  Note that the caller is responsible for calling impImportBlockPending()
16183  *  for all the successors, with the appropriate stack-state.
16184  */
16185
16186 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
16187 {
16188     const unsigned numSuccs = block->NumSucc();
16189     for (unsigned i = 0; i < numSuccs; i++)
16190     {
16191         impReimportMarkBlock(block->GetSucc(i));
16192     }
16193 }
16194
16195 /*****************************************************************************
16196  *
16197  *  Filter wrapper to handle only the passed-in exception code
16198  *  (all other exceptions continue the search).
16199  */
16200
16201 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
16202 {
16203     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
16204     {
16205         return EXCEPTION_EXECUTE_HANDLER;
16206     }
16207
16208     return EXCEPTION_CONTINUE_SEARCH;
16209 }
16210
16211 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
16212 {
16213     assert(block->hasTryIndex());
16214     assert(!compIsForInlining());
16215
16216     unsigned  tryIndex = block->getTryIndex();
16217     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
16218
16219     if (isTryStart)
16220     {
16221         assert(block->bbFlags & BBF_TRY_BEG);
16222
16223         // The Stack must be empty
16224         //
16225         if (block->bbStkDepth != 0)
16226         {
16227             BADCODE("Evaluation stack must be empty on entry into a try block");
16228         }
16229     }
16230
16231     // Save the stack contents, we'll need to restore it later
16232     //
16233     SavedStack blockState;
16234     impSaveStackState(&blockState, false);
16235
16236     while (HBtab != nullptr)
16237     {
16238         if (isTryStart)
16239         {
16240             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
16241             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
16242             //
16243             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
16244             {
16245                 // We trigger an invalid program exception here unless we have a try/fault region.
16246                 //
16247                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
16248                 {
16249                     BADCODE(
16250                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
16251                 }
16252                 else
16253                 {
16254                     // Allow a try/fault region to proceed.
16255                     assert(HBtab->HasFaultHandler());
16256                 }
16257             }
16258
16259             /* Recursively process the handler block */
16260             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
16261
16262             //  Construct the proper verification stack state
16263             //   either empty or one that contains just
16264             //   the Exception Object that we are dealing with
16265             //
16266             verCurrentState.esStackDepth = 0;
16267
16268             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
16269             {
16270                 CORINFO_CLASS_HANDLE clsHnd;
16271
16272                 if (HBtab->HasFilter())
16273                 {
16274                     clsHnd = impGetObjectClass();
16275                 }
16276                 else
16277                 {
16278                     CORINFO_RESOLVED_TOKEN resolvedToken;
16279
16280                     resolvedToken.tokenContext = impTokenLookupContextHandle;
16281                     resolvedToken.tokenScope   = info.compScopeHnd;
16282                     resolvedToken.token        = HBtab->ebdTyp;
16283                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
16284                     info.compCompHnd->resolveToken(&resolvedToken);
16285
16286                     clsHnd = resolvedToken.hClass;
16287                 }
16288
16289                 // push the catch arg on the stack, spilling to a temp if necessary
16290                 // Note: can update HBtab->ebdHndBeg!
16291                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
16292             }
16293
16294             // Queue up the handler for importing
16295             //
16296             impImportBlockPending(hndBegBB);
16297
16298             if (HBtab->HasFilter())
16299             {
16300                 /* @VERIFICATION : Ideally the end of filter state should get
16301                    propagated to the catch handler; this is an incompleteness,
16302                    but is not a security/compliance issue, since the only
16303                    interesting state is the 'thisInit' state.
16304                    */
16305
16306                 verCurrentState.esStackDepth = 0;
16307
16308                 BasicBlock* filterBB = HBtab->ebdFilter;
16309
16310                 // push the catch arg on the stack, spilling to a temp if necessary
16311                 // Note: can update HBtab->ebdFilter!
16312                 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
16313                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
16314
16315                 impImportBlockPending(filterBB);
16316             }
16317         }
16318         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
16319         {
16320             /* Recursively process the handler block */
16321
16322             verCurrentState.esStackDepth = 0;
16323
16324             // Queue up the fault handler for importing
16325             //
16326             impImportBlockPending(HBtab->ebdHndBeg);
16327         }
16328
16329         // Now process our enclosing try index (if any)
16330         //
16331         tryIndex = HBtab->ebdEnclosingTryIndex;
16332         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
16333         {
16334             HBtab = nullptr;
16335         }
16336         else
16337         {
16338             HBtab = ehGetDsc(tryIndex);
16339         }
16340     }
16341
16342     // Restore the stack contents
16343     impRestoreStackState(&blockState);
16344 }
16345
16346 //***************************************************************
16347 // Import the instructions for the given basic block.  Perform
16348 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
16349 // time, or whose verification pre-state is changed.
16350
16351 #ifdef _PREFAST_
16352 #pragma warning(push)
16353 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
16354 #endif
16355 void Compiler::impImportBlock(BasicBlock* block)
16356 {
16357     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
16358     // handle them specially. In particular, there is no IL to import for them, but we do need
16359     // to mark them as imported and put their successors on the pending import list.
16360     if (block->bbFlags & BBF_INTERNAL)
16361     {
16362         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
16363         block->bbFlags |= BBF_IMPORTED;
16364
16365         const unsigned numSuccs = block->NumSucc();
16366         for (unsigned i = 0; i < numSuccs; i++)
16367         {
16368             impImportBlockPending(block->GetSucc(i));
16369         }
16370
16371         return;
16372     }
16373
16374     bool markImport;
16375
16376     assert(block);
16377
16378     /* Make the block globally available */
16379
16380     compCurBB = block;
16381
16382 #ifdef DEBUG
16383     /* Initialize the debug variables */
16384     impCurOpcName = "unknown";
16385     impCurOpcOffs = block->bbCodeOffs;
16386 #endif
16387
16388     /* Set the current stack state to the merged result */
16389     verResetCurrentState(block, &verCurrentState);
16390
16391     /* Now walk the code and import the IL into GenTrees */
16392
16393     struct FilterVerificationExceptionsParam
16394     {
16395         Compiler*   pThis;
16396         BasicBlock* block;
16397     };
16398     FilterVerificationExceptionsParam param;
16399
16400     param.pThis = this;
16401     param.block = block;
16402
16403     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
16404     {
16405         /* @VERIFICATION : For now, the only state propagation from try
16406            to its handler is "thisInit" state (stack is empty at start of try).
16407            In general, for state that we track in verification, we need to
16408            model the possibility that an exception might happen at any IL
16409            instruction, so we really need to merge all states that obtain
16410            between IL instructions in a try block into the start states of
16411            all handlers.
16412
16413            However we do not allow the 'this' pointer to be uninitialized when
16414            entering most kinds of try regions (only try/fault are allowed to have
16415            an uninitialized this pointer on entry to the try)
16416
16417            Fortunately, the stack is thrown away when an exception
16418            leads to a handler, so we don't have to worry about that.
16419            We DO, however, have to worry about the "thisInit" state.
16420            But only for the try/fault case.
16421
16422            The only allowed transition is from TIS_Uninit to TIS_Init.
16423
16424            So for a try/fault region for the fault handler block
16425            we will merge the start state of the try begin
16426            and the post-state of each block that is part of this try region
16427         */
16428
16429         // merge the start state of the try begin
16430         //
16431         if (pParam->block->bbFlags & BBF_TRY_BEG)
16432         {
16433             pParam->pThis->impVerifyEHBlock(pParam->block, true);
16434         }
16435
16436         pParam->pThis->impImportBlockCode(pParam->block);
16437
16438         // As discussed above:
16439         // merge the post-state of each block that is part of this try region
16440         //
16441         if (pParam->block->hasTryIndex())
16442         {
16443             pParam->pThis->impVerifyEHBlock(pParam->block, false);
16444         }
16445     }
16446     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
16447     {
16448         verHandleVerificationFailure(block DEBUGARG(false));
16449     }
16450     PAL_ENDTRY
16451
16452     if (compDonotInline())
16453     {
16454         return;
16455     }
16456
16457     assert(!compDonotInline());
16458
16459     markImport = false;
16460
16461 SPILLSTACK:
16462
16463     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
16464     bool        reimportSpillClique = false;
16465     BasicBlock* tgtBlock            = nullptr;
16466
16467     /* If the stack is non-empty, we might have to spill its contents */
16468
16469     if (verCurrentState.esStackDepth != 0)
16470     {
16471         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
16472                                   // on the stack, its lifetime is hard to determine, simply
16473                                   // don't reuse such temps.
16474
16475         GenTree* addStmt = nullptr;
16476
16477         /* Do the successors of 'block' have any other predecessors ?
16478            We do not want to do some of the optimizations related to multiRef
16479            if we can reimport blocks */
16480
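        // If blocks can be reimported, start with all bits set so every successor looks multiply-referenced
        // and the multiRef-dependent optimizations are skipped.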
16481         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
16482
16483         switch (block->bbJumpKind)
16484         {
16485             case BBJ_COND:
16486
16487                 /* Temporarily remove the 'jtrue' from the end of the tree list */
16488
16489                 assert(impTreeLast);
16490                 assert(impTreeLast->gtOper == GT_STMT);
16491                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
16492
16493                 addStmt     = impTreeLast;
16494                 impTreeLast = impTreeLast->gtPrev;
16495
16496                 /* Note if the next block has more than one ancestor */
16497
16498                 multRef |= block->bbNext->bbRefs;
16499
16500                 /* Does the next block have temps assigned? */
16501
16502                 baseTmp  = block->bbNext->bbStkTempsIn;
16503                 tgtBlock = block->bbNext;
16504
16505                 if (baseTmp != NO_BASE_TMP)
16506                 {
16507                     break;
16508                 }
16509
16510                 /* Try the target of the jump then */
16511
16512                 multRef |= block->bbJumpDest->bbRefs;
16513                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16514                 tgtBlock = block->bbJumpDest;
16515                 break;
16516
16517             case BBJ_ALWAYS:
16518                 multRef |= block->bbJumpDest->bbRefs;
16519                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16520                 tgtBlock = block->bbJumpDest;
16521                 break;
16522
16523             case BBJ_NONE:
16524                 multRef |= block->bbNext->bbRefs;
16525                 baseTmp  = block->bbNext->bbStkTempsIn;
16526                 tgtBlock = block->bbNext;
16527                 break;
16528
16529             case BBJ_SWITCH:
16530
16531                 BasicBlock** jmpTab;
16532                 unsigned     jmpCnt;
16533
16534                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
16535
16536                 assert(impTreeLast);
16537                 assert(impTreeLast->gtOper == GT_STMT);
16538                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
16539
16540                 addStmt     = impTreeLast;
16541                 impTreeLast = impTreeLast->gtPrev;
16542
16543                 jmpCnt = block->bbJumpSwt->bbsCount;
16544                 jmpTab = block->bbJumpSwt->bbsDstTab;
16545
16546                 do
16547                 {
16548                     tgtBlock = (*jmpTab);
16549
16550                     multRef |= tgtBlock->bbRefs;
16551
16552                     // Thanks to spill cliques, we should have assigned all or none
16553                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
16554                     baseTmp = tgtBlock->bbStkTempsIn;
16555                     if (multRef > 1)
16556                     {
16557                         break;
16558                     }
16559                 } while (++jmpTab, --jmpCnt);
16560
16561                 break;
16562
16563             case BBJ_CALLFINALLY:
16564             case BBJ_EHCATCHRET:
16565             case BBJ_RETURN:
16566             case BBJ_EHFINALLYRET:
16567             case BBJ_EHFILTERRET:
16568             case BBJ_THROW:
16569                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
16570                 break;
16571
16572             default:
16573                 noway_assert(!"Unexpected bbJumpKind");
16574                 break;
16575         }
16576
16577         assert(multRef >= 1);
16578
16579         /* Do we have a base temp number? */
16580
16581         bool newTemps = (baseTmp == NO_BASE_TMP);
16582
16583         if (newTemps)
16584         {
16585             /* Grab enough temps for the whole stack */
16586             baseTmp = impGetSpillTmpBase(block);
16587         }
16588
16589         /* Spill all stack entries into temps */
16590         unsigned level, tempNum;
16591
16592         JITDUMP("\nSpilling stack entries into temps\n");
16593         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
16594         {
16595             GenTree* tree = verCurrentState.esStack[level].val;
16596
16597             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
16598                the other. This should merge to a byref in unverifiable code.
16599                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
16600                successor would be imported assuming there was a TYP_I_IMPL on
16601                the stack. Thus the value would not get GC-tracked. Hence,
16602                change the temp to TYP_BYREF and reimport the successors.
16603                Note: We should only allow this in unverifiable code.
16604             */
16605             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
16606             {
16607                 lvaTable[tempNum].lvType = TYP_BYREF;
16608                 impReimportMarkSuccessors(block);
16609                 markImport = true;
16610             }
16611
16612 #ifdef _TARGET_64BIT_
16613             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
16614             {
16615                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
16616                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
16617                 {
16618                     // Merge the current state into the entry state of block;
16619                     // the call to verMergeEntryStates must have changed
16620                     // the entry state of the block by merging the int local var
16621                     // and the native-int stack entry.
16622                     bool changed = false;
16623                     if (verMergeEntryStates(tgtBlock, &changed))
16624                     {
16625                         impRetypeEntryStateTemps(tgtBlock);
16626                         impReimportBlockPending(tgtBlock);
16627                         assert(changed);
16628                     }
16629                     else
16630                     {
16631                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
16632                         break;
16633                     }
16634                 }
16635
16636                 // Some other block in the spill clique set this to "int", but now we have "native int".
16637                 // Change the type and go back to re-import any blocks that used the wrong type.
16638                 lvaTable[tempNum].lvType = TYP_I_IMPL;
16639                 reimportSpillClique      = true;
16640             }
16641             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
16642             {
16643                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
16644                 // Insert a sign-extension to "native int" so we match the clique.
16645                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
16646             }
16647
16648             // Consider the case where one branch left a 'byref' on the stack and the other leaves
16649             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
16650             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
16651             // behavior instead of asserting and then generating bad code (where we save/restore the
16652             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
16653             // imported already, we need to change the type of the local and reimport the spill clique.
16654             // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
16655             // the 'byref' size.
16656             if (!tiVerificationNeeded)
16657             {
16658                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
16659                 {
16660                     // Some other block in the spill clique set this to "int", but now we have "byref".
16661                     // Change the type and go back to re-import any blocks that used the wrong type.
16662                     lvaTable[tempNum].lvType = TYP_BYREF;
16663                     reimportSpillClique      = true;
16664                 }
16665                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
16666                 {
16667                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
16668                     // Insert a sign-extension to "native int" so we match the clique size.
16669                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
16670                 }
16671             }
16672 #endif // _TARGET_64BIT_
16673
16674 #if FEATURE_X87_DOUBLES
16675             // X87 stack doesn't differentiate between float/double
16676             // so promoting is no big deal.
16677             // For everybody else keep it as float until we have a collision and then promote
16678             // Just like for x64's TYP_INT<->TYP_I_IMPL
16679
16680             if (multRef > 1 && tree->gtType == TYP_FLOAT)
16681             {
16682                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16683             }
16684
16685 #else // !FEATURE_X87_DOUBLES
16686
16687             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
16688             {
16689                 // Some other block in the spill clique set this to "float", but now we have "double".
16690                 // Change the type and go back to re-import any blocks that used the wrong type.
16691                 lvaTable[tempNum].lvType = TYP_DOUBLE;
16692                 reimportSpillClique      = true;
16693             }
16694             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
16695             {
16696                 // Spill clique has decided this should be "double", but this block only pushes a "float".
16697                 // Insert a cast to "double" so we match the clique.
16698                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16699             }
16700
16701 #endif // FEATURE_X87_DOUBLES
16702
16703             /* If addStmt has a reference to tempNum (can only happen if we
16704                are spilling to the temps already used by a previous block),
16705                we need to spill addStmt */
16706
16707             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
16708             {
16709                 GenTree* addTree = addStmt->gtStmt.gtStmtExpr;
16710
16711                 if (addTree->gtOper == GT_JTRUE)
16712                 {
16713                     GenTree* relOp = addTree->gtOp.gtOp1;
16714                     assert(relOp->OperIsCompare());
16715
16716                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
16717
16718                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
16719                     {
16720                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
16721                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
16722                         type              = genActualType(lvaTable[temp].TypeGet());
16723                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
16724                     }
16725
16726                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
16727                     {
16728                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
16729                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
16730                         type              = genActualType(lvaTable[temp].TypeGet());
16731                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
16732                     }
16733                 }
16734                 else
16735                 {
16736                     assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
16737
16738                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
16739                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
16740                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
16741                 }
16742             }
16743
16744             /* Spill the stack entry, and replace with the temp */
16745
16746             if (!impSpillStackEntry(level, tempNum
16747 #ifdef DEBUG
16748                                     ,
16749                                     true, "Spill Stack Entry"
16750 #endif
16751                                     ))
16752             {
16753                 if (markImport)
16754                 {
16755                     BADCODE("bad stack state");
16756                 }
16757
16758                 // Oops. Something went wrong when spilling. Bad code.
16759                 verHandleVerificationFailure(block DEBUGARG(true));
16760
16761                 goto SPILLSTACK;
16762             }
16763         }
16764
16765         /* Put back the 'jtrue'/'switch' if we removed it earlier */
16766
16767         if (addStmt)
16768         {
16769             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16770         }
16771     }
16772
16773     // Some of the append/spill logic works on compCurBB
16774
16775     assert(compCurBB == block);
16776
16777     /* Save the tree list in the block */
16778     impEndTreeList(block);
16779
16780     // impEndTreeList sets BBF_IMPORTED on the block
16781     // We do *NOT* want to set it later than this because
16782     // impReimportSpillClique might clear it if this block is both a
16783     // predecessor and successor in the current spill clique
16784     assert(block->bbFlags & BBF_IMPORTED);
16785
16786     // If we had an int/native int or float/double collision, we need to re-import
16787     if (reimportSpillClique)
16788     {
16789         // This will re-import all the successors of block (as well as each of their predecessors)
16790         impReimportSpillClique(block);
16791
16792         // For blocks that haven't been imported yet, we still need to mark them as pending import.
16793         const unsigned numSuccs = block->NumSucc();
16794         for (unsigned i = 0; i < numSuccs; i++)
16795         {
16796             BasicBlock* succ = block->GetSucc(i);
16797             if ((succ->bbFlags & BBF_IMPORTED) == 0)
16798             {
16799                 impImportBlockPending(succ);
16800             }
16801         }
16802     }
16803     else // the normal case
16804     {
16805         // otherwise just import the successors of block
16806
16807         /* Does this block jump to any other blocks? */
16808         const unsigned numSuccs = block->NumSucc();
16809         for (unsigned i = 0; i < numSuccs; i++)
16810         {
16811             impImportBlockPending(block->GetSucc(i));
16812         }
16813     }
16814 }
16815 #ifdef _PREFAST_
16816 #pragma warning(pop)
16817 #endif
16818
16819 /*****************************************************************************/
16820 //
16821 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16822 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
16823 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16824 // (its "pre-state").
16825
16826 void Compiler::impImportBlockPending(BasicBlock* block)
16827 {
16828 #ifdef DEBUG
16829     if (verbose)
16830     {
16831         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16832     }
16833 #endif
16834
16835     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16836     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16837     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16838
16839     // If the block has not been imported, add to pending set.
16840     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16841
16842     // Initialize bbEntryState just the first time we try to add this block to the pending list
16843     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set
16844     // We use NULL to indicate the 'common' state to avoid memory allocation
16845     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16846         (impGetPendingBlockMember(block) == 0))
16847     {
16848         verInitBBEntryState(block, &verCurrentState);
16849         assert(block->bbStkDepth == 0);
16850         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16851         assert(addToPending);
16852         assert(impGetPendingBlockMember(block) == 0);
16853     }
16854     else
16855     {
16856         // The stack should have the same height on entry to the block from all its predecessors.
16857         if (block->bbStkDepth != verCurrentState.esStackDepth)
16858         {
16859 #ifdef DEBUG
16860             char buffer[400];
16861             sprintf_s(buffer, sizeof(buffer),
16862                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16863                       "Previous depth was %d, current depth is %d",
16864                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16865                       verCurrentState.esStackDepth);
16866             buffer[400 - 1] = 0;
16867             NO_WAY(buffer);
16868 #else
16869             NO_WAY("Block entered with different stack depths");
16870 #endif
16871         }
16872
16873         // Additionally, if we need to verify, merge the verification state.
16874         if (tiVerificationNeeded)
16875         {
16876             // Merge the current state into the entry state of block; if this does not change the entry state
16877             // by merging, do not add the block to the pending-list.
16878             bool changed = false;
16879             if (!verMergeEntryStates(block, &changed))
16880             {
16881                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16882                 addToPending = true; // We will pop it off, and check the flag set above.
16883             }
16884             else if (changed)
16885             {
16886                 addToPending = true;
16887
16888                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16889             }
16890         }
16891
16892         if (!addToPending)
16893         {
16894             return;
16895         }
16896
16897         if (block->bbStkDepth > 0)
16898         {
16899             // We need to fix the types of any spill temps that might have changed:
16900             //   int->native int, float->double, int->byref, etc.
16901             impRetypeEntryStateTemps(block);
16902         }
16903
16904         // OK, we must add to the pending list, if it's not already in it.
16905         if (impGetPendingBlockMember(block) != 0)
16906         {
16907             return;
16908         }
16909     }
16910
16911     // Get an entry to add to the pending list
16912
16913     PendingDsc* dsc;
16914
16915     if (impPendingFree)
16916     {
16917         // We can reuse one of the freed up dscs.
16918         dsc            = impPendingFree;
16919         impPendingFree = dsc->pdNext;
16920     }
16921     else
16922     {
16923         // We have to create a new dsc
16924         dsc = new (this, CMK_Unknown) PendingDsc;
16925     }
16926
16927     dsc->pdBB                 = block;
16928     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16929     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16930
16931     // Save the stack trees for later
16932
16933     if (verCurrentState.esStackDepth)
16934     {
16935         impSaveStackState(&dsc->pdSavedStack, false);
16936     }
16937
16938     // Add the entry to the pending list
16939
16940     dsc->pdNext    = impPendingList;
16941     impPendingList = dsc;
16942     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16943
16944     // Various assertions require us to now consider the block as not imported (at least for
16945     // the final time...)
16946     block->bbFlags &= ~BBF_IMPORTED;
16947
16948 #ifdef DEBUG
16949     if (verbose && 0)
16950     {
16951         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16952     }
16953 #endif
16954 }
16955
16956 /*****************************************************************************/
16957 //
16958 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16959 // necessary (and ensures that it is a member of the set of BBs on the list, by setting its byte in
16960 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16961
16962 void Compiler::impReimportBlockPending(BasicBlock* block)
16963 {
16964     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16965
16966     assert(block->bbFlags & BBF_IMPORTED);
16967
16968     // OK, we must add to the pending list, if it's not already in it.
16969     if (impGetPendingBlockMember(block) != 0)
16970     {
16971         return;
16972     }
16973
16974     // Get an entry to add to the pending list
16975
16976     PendingDsc* dsc;
16977
16978     if (impPendingFree)
16979     {
16980         // We can reuse one of the freed up dscs.
16981         dsc            = impPendingFree;
16982         impPendingFree = dsc->pdNext;
16983     }
16984     else
16985     {
16986         // We have to create a new dsc
16987         dsc = new (this, CMK_ImpStack) PendingDsc;
16988     }
16989
16990     dsc->pdBB = block;
16991
16992     if (block->bbEntryState)
16993     {
16994         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16995         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16996         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16997     }
16998     else
16999     {
17000         dsc->pdThisPtrInit        = TIS_Bottom;
17001         dsc->pdSavedStack.ssDepth = 0;
17002         dsc->pdSavedStack.ssTrees = nullptr;
17003     }
17004
17005     // Add the entry to the pending list
17006
17007     dsc->pdNext    = impPendingList;
17008     impPendingList = dsc;
17009     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17010
17011     // Various assertions require us to now consider the block as not imported (at least for
17012     // the final time...)
17013     block->bbFlags &= ~BBF_IMPORTED;
17014
17015 #ifdef DEBUG
17016     if (verbose && 0)
17017     {
17018         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
17019     }
17020 #endif
17021 }
17022
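// Allocate a BlockListNode for the spill-clique walk, reusing a node from the
// compiler's free list when one is available.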
17023 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
17024 {
17025     if (comp->impBlockListNodeFreeList == nullptr)
17026     {
17027         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
17028     }
17029     else
17030     {
17031         BlockListNode* res             = comp->impBlockListNodeFreeList;
17032         comp->impBlockListNodeFreeList = res->m_next;
17033         return res;
17034     }
17035 }
17036
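// Return a BlockListNode to the free list so it can be reused by a later allocation.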
17037 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
17038 {
17039     node->m_next             = impBlockListNodeFreeList;
17040     impBlockListNodeFreeList = node;
17041 }
17042
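//------------------------------------------------------------------------
// impWalkSpillCliqueFromPred: walk the spill clique containing "block",
// treating it as a predecessor, and invoke "callback" on every member found.
//
// Arguments:
//    block    -- block to start the walk from
//    callback -- visitor called once per member, with the direction
//                (SpillCliquePred or SpillCliqueSucc) in which it was reached
//
// Notes:
//    Alternately expands the successors of known predecessors and the
//    predecessors of known successors until no new members are found.
//    Uses cheap preds, so it must run before the full pred lists are built.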
17043 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
17044 {
17045     bool toDo = true;
17046
17047     noway_assert(!fgComputePredsDone);
17048     if (!fgCheapPredsValid)
17049     {
17050         fgComputeCheapPreds();
17051     }
17052
17053     BlockListNode* succCliqueToDo = nullptr;
17054     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
17055     while (toDo)
17056     {
17057         toDo = false;
17058         // Look at the successors of every member of the predecessor to-do list.
17059         while (predCliqueToDo != nullptr)
17060         {
17061             BlockListNode* node = predCliqueToDo;
17062             predCliqueToDo      = node->m_next;
17063             BasicBlock* blk     = node->m_blk;
17064             FreeBlockListNode(node);
17065
17066             const unsigned numSuccs = blk->NumSucc();
17067             for (unsigned succNum = 0; succNum < numSuccs; succNum++)
17068             {
17069                 BasicBlock* succ = blk->GetSucc(succNum);
17070                 // If it's not already in the clique, add it, and also add it
17071                 // as a member of the successor "toDo" set.
17072                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
17073                 {
17074                     callback->Visit(SpillCliqueSucc, succ);
17075                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
17076                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
17077                     toDo           = true;
17078                 }
17079             }
17080         }
17081         // Look at the predecessors of every member of the successor to-do list.
17082         while (succCliqueToDo != nullptr)
17083         {
17084             BlockListNode* node = succCliqueToDo;
17085             succCliqueToDo      = node->m_next;
17086             BasicBlock* blk     = node->m_blk;
17087             FreeBlockListNode(node);
17088
17089             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
17090             {
17091                 BasicBlock* predBlock = pred->block;
17092                 // If it's not already in the clique, add it, and also add it
17093                 // as a member of the predecessor "toDo" set.
17094                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
17095                 {
17096                     callback->Visit(SpillCliquePred, predBlock);
17097                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
17098                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
17099                     toDo           = true;
17100                 }
17101             }
17102         }
17103     }
17104
17105     // If this fails, it means we didn't walk the spill clique properly and somehow managed
17106     // to miss walking back to include the predecessor we started from.
17107     // The most likely cause: missing or out-of-date bbPreds
17108     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
17109 }
17110
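//------------------------------------------------------------------------
// SetSpillTempsBase::Visit: record the chosen base spill temp on a member of
// the spill clique: bbStkTempsIn for successors, bbStkTempsOut for predecessors.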
17111 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17112 {
17113     if (predOrSucc == SpillCliqueSucc)
17114     {
17115         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
17116         blk->bbStkTempsIn = m_baseTmp;
17117     }
17118     else
17119     {
17120         assert(predOrSucc == SpillCliquePred);
17121         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
17122         blk->bbStkTempsOut = m_baseTmp;
17123     }
17124 }
17125
17126 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17127 {
17128     // For Preds we could be a little smarter and just find the existing store
17129     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
17130     // just re-import the whole block (just like we do for successors)
17131
17132     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
17133     {
17134         // If we haven't imported this block and we're not going to (because it isn't on
17135         // the pending list) then just ignore it for now.
17136
17137         // This block has either never been imported (EntryState == NULL) or it failed
17138         // verification. Neither state requires us to force it to be imported now.
17139         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
17140         return;
17141     }
17142
17143     // For successors we have a valid verCurrentState, so just mark them for reimport
17144     // the 'normal' way
17145     // Unlike predecessors, we *DO* need to reimport the current block because the
17146     // initial import had the wrong entry state types.
17147     // Similarly, blocks that are currently on the pending list still need to call
17148     // impImportBlockPending to fix up their entry state.
17149     if (predOrSucc == SpillCliqueSucc)
17150     {
17151         m_pComp->impReimportMarkBlock(blk);
17152
17153         // Set the current stack state to that of the blk->bbEntryState
17154         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
17155         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
17156
17157         m_pComp->impImportBlockPending(blk);
17158     }
17159     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
17160     {
17161         // As described above, we are only visiting predecessors so they can
17162         // add the appropriate casts. Since we have already done that for the current
17163         // block, it does not need to be reimported.
17164         // Nor do we need to reimport blocks that are still pending, but not yet
17165         // imported.
17166         //
17167         // For predecessors, we have no state to seed the EntryState, so we just have
17168         // to assume the existing one is correct.
17169         // If the block is also a successor, it will get the EntryState properly
17170         // updated when it is visited as a successor in the above "if" block.
17171         assert(predOrSucc == SpillCliquePred);
17172         m_pComp->impReimportBlockPending(blk);
17173     }
17174 }
17175
17176 // Re-type the incoming lclVar nodes to match the varDsc.
17177 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
17178 {
17179     if (blk->bbEntryState != nullptr)
17180     {
17181         EntryState* es = blk->bbEntryState;
17182         for (unsigned level = 0; level < es->esStackDepth; level++)
17183         {
17184             GenTree* tree = es->esStack[level].val;
17185             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
17186             {
17187                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
17188                 noway_assert(lclNum < lvaCount);
17189                 LclVarDsc* varDsc              = lvaTable + lclNum;
17190                 es->esStack[level].val->gtType = varDsc->TypeGet();
17191             }
17192         }
17193     }
17194 }
17195
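//------------------------------------------------------------------------
// impGetSpillTmpBase: get the base temp number used to spill the stack on
// exit from "block", assigning one to the whole spill clique if necessary.
//
// Notes:
//    If no base has been chosen yet, grabs enough temps for the current stack
//    depth and propagates that base to every member of block's spill clique.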
17196 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
17197 {
17198     if (block->bbStkTempsOut != NO_BASE_TMP)
17199     {
17200         return block->bbStkTempsOut;
17201     }
17202
17203 #ifdef DEBUG
17204     if (verbose)
17205     {
17206         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
17207     }
17208 #endif // DEBUG
17209
17210     // Otherwise, choose one, and propagate to all members of the spill clique.
17211     // Grab enough temps for the whole stack.
17212     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
17213     SetSpillTempsBase callback(baseTmp);
17214
17215     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
17216     // to one spill clique, and similarly can only be the successor to one spill clique
17217     impWalkSpillCliqueFromPred(block, &callback);
17218
17219     return baseTmp;
17220 }
17221
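//------------------------------------------------------------------------
// impReimportSpillClique: schedule the members of "block"'s spill clique for
// re-import after a spill temp changed type (e.g. int vs. native int).
//
// Notes:
//    Resets the clique membership sets (already populated by the walk that
//    assigned the spill temps) and re-walks the clique so that each member is
//    either re-imported or re-added to the pending list.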
17222 void Compiler::impReimportSpillClique(BasicBlock* block)
17223 {
17224 #ifdef DEBUG
17225     if (verbose)
17226     {
17227         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
17228     }
17229 #endif // DEBUG
17230
17231     // If we get here, it is because this block is already part of a spill clique
17232     // and one predecessor had an outgoing live stack slot of type int, and this
17233     // block has an outgoing live stack slot of type native int.
17234     // We need to reset these before traversal because they have already been set
17235     // by the previous walk to determine all the members of the spill clique.
17236     impInlineRoot()->impSpillCliquePredMembers.Reset();
17237     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
17238
17239     ReimportSpillClique callback(this);
17240
17241     impWalkSpillCliqueFromPred(block, &callback);
17242 }
17243
17244 // Set the pre-state of "block" (which should not have a pre-state allocated) to
17245 // a copy of "srcState", cloning tree pointers as required.
17246 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
17247 {
17248     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
17249     {
17250         block->bbEntryState = nullptr;
17251         return;
17252     }
17253
17254     block->bbEntryState = (EntryState*)compGetMem(sizeof(EntryState));
17255
17256     // block->bbEntryState.esRefcount = 1;
17257
17258     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
17259     block->bbEntryState->thisInitialized = TIS_Bottom;
17260
17261     if (srcState->esStackDepth > 0)
17262     {
17263         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
17264         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
17265
17266         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
17267         for (unsigned level = 0; level < srcState->esStackDepth; level++)
17268         {
17269             GenTree* tree                           = srcState->esStack[level].val;
17270             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
17271         }
17272     }
17273
17274     if (verTrackObjCtorInitState)
17275     {
17276         verSetThisInit(block, srcState->thisInitialized);
17277     }
17278
17279     return;
17280 }
17281
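// Record the 'this'-initialization state "tis" in the entry state of "block",
// allocating the entry state if it has not been allocated yet.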
17282 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
17283 {
17284     assert(tis != TIS_Bottom); // Precondition.
17285     if (block->bbEntryState == nullptr)
17286     {
17287         block->bbEntryState = new (this, CMK_Unknown) EntryState();
17288     }
17289
17290     block->bbEntryState->thisInitialized = tis;
17291 }
17292
17293 /*
17294  * Resets the current state to the state at the start of the basic block
17295  */
17296 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
17297 {
17298
17299     if (block->bbEntryState == nullptr)
17300     {
17301         destState->esStackDepth    = 0;
17302         destState->thisInitialized = TIS_Bottom;
17303         return;
17304     }
17305
17306     destState->esStackDepth = block->bbEntryState->esStackDepth;
17307
17308     if (destState->esStackDepth > 0)
17309     {
17310         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
17311
17312         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
17313     }
17314
17315     destState->thisInitialized = block->bbThisOnEntry();
17316
17317     return;
17318 }
17319
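// Entry-state accessors for a block; a null bbEntryState represents the
// 'common' state: empty stack and 'this' at TIS_Bottom.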
17320 ThisInitState BasicBlock::bbThisOnEntry()
17321 {
17322     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
17323 }
17324
17325 unsigned BasicBlock::bbStackDepthOnEntry()
17326 {
17327     return (bbEntryState ? bbEntryState->esStackDepth : 0);
17328 }
17329
17330 void BasicBlock::bbSetStack(void* stackBuffer)
17331 {
17332     assert(bbEntryState);
17333     assert(stackBuffer);
17334     bbEntryState->esStack = (StackEntry*)stackBuffer;
17335 }
17336
17337 StackEntry* BasicBlock::bbStackOnEntry()
17338 {
17339     assert(bbEntryState);
17340     return bbEntryState->esStack;
17341 }
17342
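// Initialize the importer's current verification state for the start of the
// method: empty stack, and 'this' tracked as uninitialized when verifying the
// constructor of a reference type.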
17343 void Compiler::verInitCurrentState()
17344 {
17345     verTrackObjCtorInitState        = FALSE;
17346     verCurrentState.thisInitialized = TIS_Bottom;
17347
17348     if (tiVerificationNeeded)
17349     {
17350         // Track this ptr initialization
17351         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
17352         {
17353             verTrackObjCtorInitState        = TRUE;
17354             verCurrentState.thisInitialized = TIS_Uninit;
17355         }
17356     }
17357
17358     // initialize stack info
17359
17360     verCurrentState.esStackDepth = 0;
17361     assert(verCurrentState.esStack != nullptr);
17362
17363     // copy current state to entry state of first BB
17364     verInitBBEntryState(fgFirstBB, &verCurrentState);
17365 }
17366
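// Return the root compiler instance of the inlining tree (the compiler for the
// method actually being jitted), or "this" when no inlining is in progress.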
17367 Compiler* Compiler::impInlineRoot()
17368 {
17369     if (impInlineInfo == nullptr)
17370     {
17371         return this;
17372     }
17373     else
17374     {
17375         return impInlineInfo->InlineRoot;
17376     }
17377 }
17378
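// Query whether "blk" is currently recorded as a member of the spill clique
// being walked, in the given direction; membership is tracked on the inline root.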
17379 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
17380 {
17381     if (predOrSucc == SpillCliquePred)
17382     {
17383         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
17384     }
17385     else
17386     {
17387         assert(predOrSucc == SpillCliqueSucc);
17388         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
17389     }
17390 }
17391
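// Record "blk"'s membership value "val" for the spill clique being walked, in
// the given direction; membership is tracked on the inline root.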
17392 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
17393 {
17394     if (predOrSucc == SpillCliquePred)
17395     {
17396         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
17397     }
17398     else
17399     {
17400         assert(predOrSucc == SpillCliqueSucc);
17401         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
17402     }
17403 }
17404
17405 /*****************************************************************************
17406  *
17407  *  Convert the instrs ("import") into our internal format (trees). The
17408  *  basic flowgraph has already been constructed and is passed in.
17409  */
17410
17411 void Compiler::impImport(BasicBlock* method)
17412 {
17413 #ifdef DEBUG
17414     if (verbose)
17415     {
17416         printf("*************** In impImport() for %s\n", info.compFullName);
17417     }
17418 #endif
17419
17420     /* Allocate the stack contents */
17421
17422     if (info.compMaxStack <= _countof(impSmallStack))
17423     {
17424         /* Use local variable, don't waste time allocating on the heap */
17425
17426         impStkSize              = _countof(impSmallStack);
17427         verCurrentState.esStack = impSmallStack;
17428     }
17429     else
17430     {
17431         impStkSize              = info.compMaxStack;
17432         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17433     }
17434
17435     // initialize the entry state at start of method
17436     verInitCurrentState();
17437
17438     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
17439     Compiler* inlineRoot = impInlineRoot();
17440     if (this == inlineRoot) // These are only used on the root of the inlining tree.
17441     {
17442         // We have initialized these previously, but to size 0.  Make them larger.
17443         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
17444         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
17445         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
17446     }
17447     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
17448     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
17449     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
17450     impBlockListNodeFreeList = nullptr;
17451
17452 #ifdef DEBUG
17453     impLastILoffsStmt   = nullptr;
17454     impNestedStackSpill = false;
17455 #endif
17456     impBoxTemp = BAD_VAR_NUM;
17457
17458     impPendingList = impPendingFree = nullptr;
17459
17460     /* Add the entry-point to the worker-list */
17461
17462     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
17463     // from EH normalization.
17464     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
17465     // out.
17466     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
17467     {
17468         // Treat these as imported.
17469         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
17470         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
17471         method->bbFlags |= BBF_IMPORTED;
17472     }
17473
17474     impImportBlockPending(method);
17475
17476     /* Import blocks in the worker-list until there are no more */
17477
17478     while (impPendingList)
17479     {
17480         /* Remove the entry at the front of the list */
17481
17482         PendingDsc* dsc = impPendingList;
17483         impPendingList  = impPendingList->pdNext;
17484         impSetPendingBlockMember(dsc->pdBB, 0);
17485
17486         /* Restore the stack state */
17487
17488         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
17489         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
17490         if (verCurrentState.esStackDepth)
17491         {
17492             impRestoreStackState(&dsc->pdSavedStack);
17493         }
17494
17495         /* Add the entry to the free list for reuse */
17496
17497         dsc->pdNext    = impPendingFree;
17498         impPendingFree = dsc;
17499
17500         /* Now import the block */
17501
17502         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
17503         {
17504
17505 #ifdef _TARGET_64BIT_
17506             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
17507             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
17508             // method for further explanation on why we raise this exception instead of making the jitted
17509             // code throw the verification exception during execution.
17510             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
17511             {
17512                 BADCODE("Basic block marked as not verifiable");
17513             }
17514             else
17515 #endif // _TARGET_64BIT_
17516             {
17517                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
17518                 impEndTreeList(dsc->pdBB);
17519             }
17520         }
17521         else
17522         {
17523             impImportBlock(dsc->pdBB);
17524
17525             if (compDonotInline())
17526             {
17527                 return;
17528             }
17529             if (compIsForImportOnly() && !tiVerificationNeeded)
17530             {
17531                 return;
17532             }
17533         }
17534     }
17535
17536 #ifdef DEBUG
17537     if (verbose && info.compXcptnsCount)
17538     {
17539         printf("\nAfter impImport() added block for try,catch,finally");
17540         fgDispBasicBlocks();
17541         printf("\n");
17542     }
17543
17544     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
17545     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
17546     {
17547         block->bbFlags &= ~BBF_VISITED;
17548     }
17549 #endif
17550
17551     assert(!compIsForInlining() || !tiVerificationNeeded);
17552 }
17553
17554 // Checks if a typeinfo (usually stored in the type stack) is a struct.
17555 // The invariant here is that if it's not a ref or a method and has a class handle,
17556 // it's a valuetype.
17557 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
17558 {
17559     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
17560     {
17561         return true;
17562     }
17563     else
17564     {
17565         return false;
17566     }
17567 }
17568
17569 /*****************************************************************************
17570  *  Check to see if the tree is the address of a local or
17571  *  the address of a field in a local.
17572  *
17573  *  *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
17574  *
17575  */
17576
17577 BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
17578 {
17579     if (tree->gtOper != GT_ADDR)
17580     {
17581         return FALSE;
17582     }
17583
17584     GenTree* op = tree->gtOp.gtOp1;
17585     while (op->gtOper == GT_FIELD)
17586     {
17587         op = op->gtField.gtFldObj;
17588         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
17589         {
17590             op = op->gtOp.gtOp1;
17591         }
17592         else
17593         {
17594             return false;
17595         }
17596     }
17597
17598     if (op->gtOper == GT_LCL_VAR)
17599     {
17600         *lclVarTreeOut = op;
17601         return TRUE;
17602     }
17603     else
17604     {
17605         return FALSE;
17606     }
17607 }
17608
17609 //------------------------------------------------------------------------
17610 // impMakeDiscretionaryInlineObservations: make observations that help
17611 // determine the profitability of a discretionary inline
17612 //
17613 // Arguments:
17614 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
17615 //    inlineResult -- InlineResult accumulating information about this inline
17616 //
17617 // Notes:
17618 //    If inlining or prejitting the root, this method also makes
17619 //    various observations about the method that factor into inline
17620 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
17621
17622 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
17623 {
17624     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
17625            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
17626            );
17627
17628     // If we're really inlining, we should just have one result in play.
17629     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
17630
17631     // If this is a "forceinline" method, the JIT probably shouldn't have gone
17632     // to the trouble of estimating the native code size. Even if it did, it
17633     // shouldn't be relying on the result of this method.
17634     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
17635
17636     // Note if the caller contains NEWOBJ or NEWARR.
17637     Compiler* rootCompiler = impInlineRoot();
17638
17639     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
17640     {
17641         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
17642     }
17643
17644     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
17645     {
17646         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
17647     }
17648
17649     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
17650     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
17651
17652     if (isSpecialMethod)
17653     {
17654         if (calleeIsStatic)
17655         {
17656             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
17657         }
17658         else
17659         {
17660             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
17661         }
17662     }
17663     else if (!calleeIsStatic)
17664     {
17665         // Callee is an instance method.
17666         //
17667         // Check if the callee has the same 'this' as the root.
17668         if (pInlineInfo != nullptr)
17669         {
17670             GenTree* thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
17671             assert(thisArg);
17672             bool isSameThis = impIsThis(thisArg);
17673             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
17674         }
17675     }
17676
17677     // Note if the callee's class is a promotable struct
17678     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
17679     {
17680         lvaStructPromotionInfo structPromotionInfo;
17681         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
17682         if (structPromotionInfo.canPromote)
17683         {
17684             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
17685         }
17686     }
17687
17688 #ifdef FEATURE_SIMD
17689
17690     // Note if this method has SIMD args or a SIMD return value
17691     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
17692     {
17693         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
17694     }
17695
17696 #endif // FEATURE_SIMD
17697
17698     // Roughly classify callsite frequency.
17699     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
17700
17701     // If this is a prejit root, or a maximally hot block...
17702     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
17703     {
17704         frequency = InlineCallsiteFrequency::HOT;
17705     }
17706     // No training data.  Look for loop-like things.
17707     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
17708     // However, give it to things nearby.
17709     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
17710              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
17711     {
17712         frequency = InlineCallsiteFrequency::LOOP;
17713     }
17714     else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
17715     {
17716         frequency = InlineCallsiteFrequency::WARM;
17717     }
17718     // Now modify the multiplier based on where we're called from.
17719     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
17720     {
17721         frequency = InlineCallsiteFrequency::RARE;
17722     }
17723     else
17724     {
17725         frequency = InlineCallsiteFrequency::BORING;
17726     }
17727
17728     // Also capture the block weight of the call site.  In the prejit
17729     // root case, assume there's some hot call site for this method.
17730     unsigned weight = 0;
17731
17732     if (pInlineInfo != nullptr)
17733     {
17734         weight = pInlineInfo->iciBlock->bbWeight;
17735     }
17736     else
17737     {
17738         weight = BB_MAX_WEIGHT;
17739     }
17740
17741     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
17742     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
17743 }
17744
17745 /*****************************************************************************
17746  This method makes a STATIC inlining decision based on the IL code.
17747  It should not make any inlining decision based on the context.
17748  If forceInline is true, then the inlining decision should not depend on
17749  performance heuristics (code size, etc.).
17750  */
17751
17752 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
17753                               CORINFO_METHOD_INFO*  methInfo,
17754                               bool                  forceInline,
17755                               InlineResult*         inlineResult)
17756 {
17757     unsigned codeSize = methInfo->ILCodeSize;
17758
17759     // We shouldn't have made up our minds yet...
17760     assert(!inlineResult->IsDecided());
17761
17762     if (methInfo->EHcount)
17763     {
17764         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
17765         return;
17766     }
17767
17768     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
17769     {
17770         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17771         return;
17772     }
17773
17774     // For now we don't inline varargs (import code can't handle it)
17775
17776     if (methInfo->args.isVarArg())
17777     {
17778         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17779         return;
17780     }
17781
17782     // Reject if it has too many locals.
17783     // This is currently an implementation limit due to fixed-size arrays in the
17784     // inline info, rather than a performance heuristic.
17785
17786     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17787
17788     if (methInfo->locals.numArgs > MAX_INL_LCLS)
17789     {
17790         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17791         return;
17792     }
17793
17794     // Make sure there aren't too many arguments.
17795     // This is currently an implementation limit due to fixed-size arrays in the
17796     // inline info, rather than a performance heuristic.
17797
17798     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17799
17800     if (methInfo->args.numArgs > MAX_INL_ARGS)
17801     {
17802         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17803         return;
17804     }
17805
17806     // Note force inline state
17807
17808     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17809
17810     // Note IL code size
17811
17812     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17813
17814     if (inlineResult->IsFailure())
17815     {
17816         return;
17817     }
17818
17819     // Make sure maxstack is not too big
17820
17821     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17822
17823     if (inlineResult->IsFailure())
17824     {
17825         return;
17826     }
17827 }
17828
17829 /*****************************************************************************
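 *  impCheckCanInline: run the callee and callsite inline checks under an error
 *  trap and, if they pass, return an InlineCandidateInfo describing the callee
 *  via ppInlineCandidateInfo.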
17830  */
17831
17832 void Compiler::impCheckCanInline(GenTree*               call,
17833                                  CORINFO_METHOD_HANDLE  fncHandle,
17834                                  unsigned               methAttr,
17835                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17836                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17837                                  InlineResult*          inlineResult)
17838 {
17839     // Either EE or JIT might throw exceptions below.
17840     // If that happens, just don't inline the method.
17841
17842     struct Param
17843     {
17844         Compiler*              pThis;
17845         GenTree*               call;
17846         CORINFO_METHOD_HANDLE  fncHandle;
17847         unsigned               methAttr;
17848         CORINFO_CONTEXT_HANDLE exactContextHnd;
17849         InlineResult*          result;
17850         InlineCandidateInfo**  ppInlineCandidateInfo;
17851     } param;
17852     memset(&param, 0, sizeof(param));
17853
17854     param.pThis                 = this;
17855     param.call                  = call;
17856     param.fncHandle             = fncHandle;
17857     param.methAttr              = methAttr;
17858     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17859     param.result                = inlineResult;
17860     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17861
17862     bool success = eeRunWithErrorTrap<Param>(
17863         [](Param* pParam) {
17864             DWORD                  dwRestrictions = 0;
17865             CorInfoInitClassResult initClassResult;
17866
17867 #ifdef DEBUG
17868             const char* methodName;
17869             const char* className;
17870             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17871
17872             if (JitConfig.JitNoInline())
17873             {
17874                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17875                 goto _exit;
17876             }
17877 #endif
17878
17879             /* Try to get the code address/size for the method */
17880
17881             CORINFO_METHOD_INFO methInfo;
17882             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17883             {
17884                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17885                 goto _exit;
17886             }
17887
17888             bool forceInline;
17889             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17890
17891             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17892
17893             if (pParam->result->IsFailure())
17894             {
17895                 assert(pParam->result->IsNever());
17896                 goto _exit;
17897             }
17898
17899             // Speculatively check if initClass() can be done.
17900             // If it can be done, we will try to inline the method. If inlining
17901             // succeeds, then we will do the non-speculative initClass() and commit it.
17902             // If this speculative call to initClass() fails, there is no point
17903             // trying to inline this method.
17904             initClassResult =
17905                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17906                                                            pParam->exactContextHnd /* context */,
17907                                                            TRUE /* speculative */);
17908
17909             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17910             {
17911                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17912                 goto _exit;
17913             }
17914
17915             // Give the EE the final say in whether to inline or not.
17916             // This should be last since, for verifiable code, this can be expensive
17917
17918             /* VM Inline check also ensures that the method is verifiable if needed */
17919             CorInfoInline vmResult;
17920             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17921                                                                   &dwRestrictions);
17922
17923             if (vmResult == INLINE_FAIL)
17924             {
17925                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17926             }
17927             else if (vmResult == INLINE_NEVER)
17928             {
17929                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17930             }
17931
17932             if (pParam->result->IsFailure())
17933             {
17934                 // Make sure not to report this one.  It was already reported by the VM.
17935                 pParam->result->SetReported();
17936                 goto _exit;
17937             }
17938
17939             // check for unsupported inlining restrictions
17940             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17941
17942             if (dwRestrictions & INLINE_SAME_THIS)
17943             {
17944                 GenTree* thisArg = pParam->call->gtCall.gtCallObjp;
17945                 assert(thisArg);
17946
17947                 if (!pParam->pThis->impIsThis(thisArg))
17948                 {
17949                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17950                     goto _exit;
17951                 }
17952             }
17953
17954             /* Get the method properties */
17955
17956             CORINFO_CLASS_HANDLE clsHandle;
17957             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17958             unsigned clsAttr;
17959             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17960
17961             /* Get the return type */
17962
17963             var_types fncRetType;
17964             fncRetType = pParam->call->TypeGet();
17965
17966 #ifdef DEBUG
17967             var_types fncRealRetType;
17968             fncRealRetType = JITtype2varType(methInfo.args.retType);
17969
17970             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17971                    // <BUGNUM> VSW 288602 </BUGNUM>
17972                    // In case of IJW, we allow to assign a native pointer to a BYREF.
17973                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17974                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17975 #endif
17976
17977             //
17978             // Allocate an InlineCandidateInfo structure
17979             //
17980             InlineCandidateInfo* pInfo;
17981             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17982
17983             pInfo->dwRestrictions  = dwRestrictions;
17984             pInfo->methInfo        = methInfo;
17985             pInfo->methAttr        = pParam->methAttr;
17986             pInfo->clsHandle       = clsHandle;
17987             pInfo->clsAttr         = clsAttr;
17988             pInfo->fncRetType      = fncRetType;
17989             pInfo->exactContextHnd = pParam->exactContextHnd;
17990             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17991             pInfo->initClassResult = initClassResult;
17992
17993             *(pParam->ppInlineCandidateInfo) = pInfo;
17994
17995         _exit:;
17996         },
17997         &param);
17998     if (!success)
17999     {
18000         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
18001     }
18002 }
18003
18004 //------------------------------------------------------------------------
18005 // impInlineRecordArgInfo: record information about an inline candidate argument
18006 //
18007 // Arguments:
18008 //   pInlineInfo - inline info for the inline candidate
18009 //   curArgVal - tree for the caller actual argument value
18010 //   argNum - logical index of this argument
18011 //   inlineResult - result of ongoing inline evaluation
18012 //
18013 // Notes:
18014 //
18015 //   Checks for various inline blocking conditions and makes notes in
18016 //   the inline info arg table about the properties of the actual. These
18017 //   properties are used later by impFetchArg to determine how best to
18018 //   pass the argument into the inlinee.
18019
18020 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
18021                                       GenTree*      curArgVal,
18022                                       unsigned      argNum,
18023                                       InlineResult* inlineResult)
18024 {
18025     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
18026
18027     if (curArgVal->gtOper == GT_MKREFANY)
18028     {
18029         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
18030         return;
18031     }
18032
18033     inlCurArgInfo->argNode = curArgVal;
18034
18035     GenTree* lclVarTree;
18036     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
18037     {
18038         inlCurArgInfo->argIsByRefToStructLocal = true;
18039 #ifdef FEATURE_SIMD
18040         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
18041         {
18042             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
18043         }
18044 #endif // FEATURE_SIMD
18045     }
18046
18047     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
18048     {
18049         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
18050         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
18051     }
18052
18053     if (curArgVal->gtOper == GT_LCL_VAR)
18054     {
18055         inlCurArgInfo->argIsLclVar = true;
18056
18057         /* Remember the "original" argument number */
18058         curArgVal->gtLclVar.gtLclILoffs = argNum;
18059     }
18060
18061     if ((curArgVal->OperKind() & GTK_CONST) ||
18062         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
18063     {
18064         inlCurArgInfo->argIsInvariant = true;
18065         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
18066         {
18067             // Abort inlining at this call site
18068             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
18069             return;
18070         }
18071     }
18072
18073     // If the arg is a local that is address-taken, we can't safely
18074     // directly substitute it into the inlinee.
18075     //
18076     // Previously we'd accomplish this by setting "argHasLdargaOp" but
18077     // that has a stronger meaning: that the arg value can change in
18078     // the method body. Using that flag prevents type propagation,
18079     // which is safe in this case.
18080     //
18081     // Instead mark the arg as having a caller local ref.
18082     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
18083     {
18084         inlCurArgInfo->argHasCallerLocalRef = true;
18085     }
18086
18087 #ifdef DEBUG
18088     if (verbose)
18089     {
18090         if (inlCurArgInfo->argIsThis)
18091         {
18092             printf("thisArg:");
18093         }
18094         else
18095         {
18096             printf("\nArgument #%u:", argNum);
18097         }
18098         if (inlCurArgInfo->argIsLclVar)
18099         {
18100             printf(" is a local var");
18101         }
18102         if (inlCurArgInfo->argIsInvariant)
18103         {
18104             printf(" is a constant");
18105         }
18106         if (inlCurArgInfo->argHasGlobRef)
18107         {
18108             printf(" has global refs");
18109         }
18110         if (inlCurArgInfo->argHasCallerLocalRef)
18111         {
18112             printf(" has caller local ref");
18113         }
18114         if (inlCurArgInfo->argHasSideEff)
18115         {
18116             printf(" has side effects");
18117         }
18118         if (inlCurArgInfo->argHasLdargaOp)
18119         {
18120             printf(" has ldarga effect");
18121         }
18122         if (inlCurArgInfo->argHasStargOp)
18123         {
18124             printf(" has starg effect");
18125         }
18126         if (inlCurArgInfo->argIsByRefToStructLocal)
18127         {
18128             printf(" is byref to a struct local");
18129         }
18130
18131         printf("\n");
18132         gtDispTree(curArgVal);
18133         printf("\n");
18134     }
18135 #endif
18136 }
18137
18138 //------------------------------------------------------------------------
18139 // impInlineInitVars: setup inline information for inlinee args and locals
18140 //
18141 // Arguments:
18142 //    pInlineInfo - inline info for the inline candidate
18143 //
18144 // Notes:
18145 //    This method primarily adds caller-supplied info to the inlArgInfo
18146 //    and sets up the lclVarInfo table.
18147 //
18148 //    For args, the inlArgInfo records properties of the actual argument
18149 //    including the tree node that produces the arg value. This node is
18150 //    usually the tree node present at the call, but may also differ in
18151 //    various ways:
18152 //    - when the call arg is a GT_RET_EXPR, we search back through the ret
18153 //      expr chain for the actual node. Note this will either be the original
18154 //      call (which will be a failed inline by this point), or the return
18155 //      expression from some set of inlines.
18156 //    - when argument type casting is needed the necessary casts are added
18157 //      around the argument node.
18158 //    - if an argument can be simplified by folding then the node here is the
18159 //      folded value.
18160 //
18161 //   The method may make observations that lead to marking this candidate as
18162 //   a failed inline. If this happens, the initialization is abandoned immediately
18163 //   to try and reduce the jit time cost for a failed inline.
18164
18165 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
18166 {
18167     assert(!compIsForInlining());
18168
18169     GenTree*             call         = pInlineInfo->iciCall;
18170     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
18171     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
18172     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
18173     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
18174     InlineResult*        inlineResult = pInlineInfo->inlineResult;
18175
18176     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
18177
18178     /* init the argument struct */
18179
18180     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
18181
18182     /* Get hold of the 'this' pointer and the argument list proper */
18183
18184     GenTree* thisArg = call->gtCall.gtCallObjp;
18185     GenTree* argList = call->gtCall.gtCallArgs;
18186     unsigned argCnt  = 0; // Count of the arguments
18187
18188     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
18189
18190     if (thisArg)
18191     {
18192         inlArgInfo[0].argIsThis = true;
18193         GenTree* actualThisArg  = thisArg->gtRetExprVal();
18194         impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, inlineResult);
18195
18196         if (inlineResult->IsFailure())
18197         {
18198             return;
18199         }
18200
18201         /* Increment the argument count */
18202         argCnt++;
18203     }
18204
18205     /* Record some information about each of the arguments */
18206     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
18207
18208 #if USER_ARGS_COME_LAST
18209     unsigned typeCtxtArg = thisArg ? 1 : 0;
18210 #else  // USER_ARGS_COME_LAST
18211     unsigned typeCtxtArg = methInfo->args.totalILArgs();
18212 #endif // USER_ARGS_COME_LAST
18213
18214     for (GenTree* argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
18215     {
18216         if (argTmp == argList && hasRetBuffArg)
18217         {
18218             continue;
18219         }
18220
18221         // Ignore the type context argument
18222         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
18223         {
18224             pInlineInfo->typeContextArg = typeCtxtArg;
18225             typeCtxtArg                 = 0xFFFFFFFF;
18226             continue;
18227         }
18228
18229         assert(argTmp->gtOper == GT_LIST);
18230         GenTree* arg       = argTmp->gtOp.gtOp1;
18231         GenTree* actualArg = arg->gtRetExprVal();
18232         impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
18233
18234         if (inlineResult->IsFailure())
18235         {
18236             return;
18237         }
18238
18239         /* Increment the argument count */
18240         argCnt++;
18241     }
18242
18243     /* Make sure we got the arg number right */
18244     assert(argCnt == methInfo->args.totalILArgs());
18245
18246 #ifdef FEATURE_SIMD
18247     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
18248 #endif // FEATURE_SIMD
18249
18250     /* We have typeless opcodes, get type information from the signature */
18251
18252     if (thisArg)
18253     {
18254         var_types sigType;
18255
18256         if (clsAttr & CORINFO_FLG_VALUECLASS)
18257         {
18258             sigType = TYP_BYREF;
18259         }
18260         else
18261         {
18262             sigType = TYP_REF;
18263         }
18264
18265         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
18266         lclVarInfo[0].lclHasLdlocaOp = false;
18267
18268 #ifdef FEATURE_SIMD
18269         // We always want to check isSIMDorHWSIMDClass, since we want to set foundSIMDType (to increase
18270         // the inlining multiplier) for anything in that assembly.
18271         // But we only need to normalize it if it is a TYP_STRUCT
18272         // (which we need to do even if we have already set foundSIMDType).
18273         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
18274         {
18275             if (sigType == TYP_STRUCT)
18276             {
18277                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
18278             }
18279             foundSIMDType = true;
18280         }
18281 #endif // FEATURE_SIMD
18282         lclVarInfo[0].lclTypeInfo = sigType;
18283
18284         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
18285                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
18286                 (clsAttr & CORINFO_FLG_VALUECLASS)));
18287
18288         if (genActualType(thisArg->gtType) != genActualType(sigType))
18289         {
18290             if (sigType == TYP_REF)
18291             {
18292                 /* The argument cannot be bashed into a ref (see bug 750871) */
18293                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
18294                 return;
18295             }
18296
18297             /* This can only happen with byrefs <-> ints/shorts */
18298
18299             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
18300             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
18301
18302             if (sigType == TYP_BYREF)
18303             {
18304                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18305             }
18306             else if (thisArg->gtType == TYP_BYREF)
18307             {
18308                 assert(sigType == TYP_I_IMPL);
18309
18310                 /* If possible change the BYREF to an int */
18311                 if (thisArg->IsVarAddr())
18312                 {
18313                     thisArg->gtType              = TYP_I_IMPL;
18314                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18315                 }
18316                 else
18317                 {
18318                     /* Arguments 'int <- byref' cannot be bashed */
18319                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18320                     return;
18321                 }
18322             }
18323         }
18324     }
18325
18326     /* Init the types of the arguments and make sure the types
18327      * from the trees match the types in the signature */
18328
18329     CORINFO_ARG_LIST_HANDLE argLst;
18330     argLst = methInfo->args.args;
18331
18332     unsigned i;
18333     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
18334     {
18335         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
18336
18337         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
18338
18339 #ifdef FEATURE_SIMD
18340         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
18341         {
18342             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
18343             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
18344             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
18345             foundSIMDType = true;
18346             if (sigType == TYP_STRUCT)
18347             {
18348                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
18349                 sigType              = structType;
18350             }
18351         }
18352 #endif // FEATURE_SIMD
18353
18354         lclVarInfo[i].lclTypeInfo    = sigType;
18355         lclVarInfo[i].lclHasLdlocaOp = false;
18356
18357         /* Does the tree type match the signature type? */
18358
18359         GenTree* inlArgNode = inlArgInfo[i].argNode;
18360
18361         if (sigType != inlArgNode->gtType)
18362         {
18363             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
18364                but in bad IL cases with caller-callee signature mismatches we can see other types.
18365                Intentionally reject such mismatched cases (rather than asserting) so the jit is
18366                more robust when encountering bad IL. */
18367
18368             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
18369                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
18370                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
18371
18372             if (!isPlausibleTypeMatch)
18373             {
18374                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
18375                 return;
18376             }
18377
18378             /* Is it a narrowing or widening cast?
18379              * Widening casts are ok since the value computed is already
18380              * normalized to an int (on the IL stack) */
18381
18382             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
18383             {
18384                 if (sigType == TYP_BYREF)
18385                 {
18386                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18387                 }
18388                 else if (inlArgNode->gtType == TYP_BYREF)
18389                 {
18390                     assert(varTypeIsIntOrI(sigType));
18391
18392                     /* If possible bash the BYREF to an int */
18393                     if (inlArgNode->IsVarAddr())
18394                     {
18395                         inlArgNode->gtType           = TYP_I_IMPL;
18396                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18397                     }
18398                     else
18399                     {
18400                         /* Arguments 'int <- byref' cannot be changed */
18401                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18402                         return;
18403                     }
18404                 }
18405                 else if (genTypeSize(sigType) < EA_PTRSIZE)
18406                 {
18407                     /* Narrowing cast */
18408
18409                     if (inlArgNode->gtOper == GT_LCL_VAR &&
18410                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
18411                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
18412                     {
18413                         /* We don't need to insert a cast here as the variable
18414                            was assigned a normalized value of the right type */
18415
18416                         continue;
18417                     }
18418
18419                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
18420
18421                     inlArgInfo[i].argIsLclVar = false;
18422
18423                     /* Try to fold the node in case we have constant arguments */
18424
18425                     if (inlArgInfo[i].argIsInvariant)
18426                     {
18427                         inlArgNode            = gtFoldExprConst(inlArgNode);
18428                         inlArgInfo[i].argNode = inlArgNode;
18429                         assert(inlArgNode->OperIsConst());
18430                     }
18431                 }
18432 #ifdef _TARGET_64BIT_
18433                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
18434                 {
18435                     // This should only happen for int -> native int widening
18436                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
18437
18438                     inlArgInfo[i].argIsLclVar = false;
18439
18440                     /* Try to fold the node in case we have constant arguments */
18441
18442                     if (inlArgInfo[i].argIsInvariant)
18443                     {
18444                         inlArgNode            = gtFoldExprConst(inlArgNode);
18445                         inlArgInfo[i].argNode = inlArgNode;
18446                         assert(inlArgNode->OperIsConst());
18447                     }
18448                 }
18449 #endif // _TARGET_64BIT_
18450             }
18451         }
18452     }
18453
18454     /* Init the types of the local variables */
18455
18456     CORINFO_ARG_LIST_HANDLE localsSig;
18457     localsSig = methInfo->locals.args;
18458
18459     for (i = 0; i < methInfo->locals.numArgs; i++)
18460     {
18461         bool      isPinned;
18462         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
18463
18464         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
18465         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
18466         lclVarInfo[i + argCnt].lclTypeInfo    = type;
18467
18468         if (varTypeIsGC(type))
18469         {
18470             pInlineInfo->numberOfGcRefLocals++;
18471         }
18472
18473         if (isPinned)
18474         {
18475             // Pinned locals may cause inlines to fail.
18476             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
18477             if (inlineResult->IsFailure())
18478             {
18479                 return;
18480             }
18481         }
18482
18483         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
18484
18485         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
18486         // out on the inline.
18487         if (type == TYP_STRUCT)
18488         {
18489             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
18490             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
18491             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
18492             {
18493                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
18494                 if (inlineResult->IsFailure())
18495                 {
18496                     return;
18497                 }
18498
18499                 // Do further notification in the case where the call site is rare; some policies do
18500                 // not track the relative hotness of call sites for "always" inline cases.
18501                 if (pInlineInfo->iciBlock->isRunRarely())
18502                 {
18503                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
18504                     if (inlineResult->IsFailure())
18505                     {
18506
18507                         return;
18508                     }
18509                 }
18510             }
18511         }
18512
18513         localsSig = info.compCompHnd->getArgNext(localsSig);
18514
18515 #ifdef FEATURE_SIMD
18516         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
18517         {
18518             foundSIMDType = true;
18519             if (featureSIMD && type == TYP_STRUCT)
18520             {
18521                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
18522                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
18523             }
18524         }
18525 #endif // FEATURE_SIMD
18526     }
18527
18528 #ifdef FEATURE_SIMD
18529     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
18530     {
18531         foundSIMDType = true;
18532     }
18533     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
18534 #endif // FEATURE_SIMD
18535 }
18536
18537 //------------------------------------------------------------------------
18538 // impInlineFetchLocal: get a local var that represents an inlinee local
18539 //
18540 // Arguments:
18541 //    lclNum -- number of the inlinee local
18542 //    reason -- debug string describing purpose of the local var
18543 //
18544 // Returns:
18545 //    Number of the local to use
18546 //
18547 // Notes:
18548 //    This method is invoked only for locals actually used in the
18549 //    inlinee body.
18550 //
18551 //    Allocates a new temp if necessary, and copies key properties
18552 //    over from the inlinee local var info.
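      //
      //    A minimal usage sketch (illustrative; names are assumed, not quoted from
      //    the import paths that call this):
      //
      //        unsigned tmpNum = impInlineFetchLocal(ilLclNum DEBUGARG("inlinee local first use"));
      //        GenTree* value  = gtNewLclvNode(tmpNum, lvaGetActualType(tmpNum));
      //
      //    The first use of an inlinee local grabs a fresh long-lifetime temp and
      //    caches it in impInlineInfo->lclTmpNum[]; later uses get the cached temp.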
18553
18554 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
18555 {
18556     assert(compIsForInlining());
18557
18558     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
18559
18560     if (tmpNum == BAD_VAR_NUM)
18561     {
18562         const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
18563         const var_types      lclTyp       = inlineeLocal.lclTypeInfo;
18564
18565         // The lifetime of this local might span multiple BBs.
18566         // So it is a long lifetime local.
18567         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
18568
18569         // Copy over key info
18570         lvaTable[tmpNum].lvType                 = lclTyp;
18571         lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
18572         lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
18573         lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
18574         lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
18575
18576         // Copy over class handle for ref types. Note this may be a
18577         // shared type -- someday perhaps we can get the exact
18578         // signature and pass in a more precise type.
18579         if (lclTyp == TYP_REF)
18580         {
18581             lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
18582         }
18583
18584         if (inlineeLocal.lclVerTypeInfo.IsStruct())
18585         {
18586             if (varTypeIsStruct(lclTyp))
18587             {
18588                 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18589             }
18590             else
18591             {
18592                 // This is a wrapped primitive.  Make sure the verstate knows that
18593                 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
18594             }
18595         }
18596
18597 #ifdef DEBUG
18598         // Sanity check that we're properly prepared for gc ref locals.
18599         if (varTypeIsGC(lclTyp))
18600         {
18601             // Since there are gc locals we should have seen them earlier
18602             // and if there was a return value, set up the spill temp.
18603             assert(impInlineInfo->HasGcRefLocals());
18604             assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
18605         }
18606         else
18607         {
18608             // Make sure all pinned locals count as gc refs.
18609             assert(!inlineeLocal.lclIsPinned);
18610         }
18611 #endif // DEBUG
18612     }
18613
18614     return tmpNum;
18615 }
18616
18617 //------------------------------------------------------------------------
18618 // impInlineFetchArg: return tree node for argument value in an inlinee
18619 //
18620 // Arguments:
18621 //    lclNum -- argument number in inlinee IL
18622 //    inlArgInfo -- argument info for inlinee
18623 //    lclVarInfo -- var info for inlinee
18624 //
18625 // Returns:
18626 //    Tree for the argument's value. Often an inlinee-scoped temp
18627 //    GT_LCL_VAR but can be other tree kinds, if the argument
18628 //    expression from the caller can be directly substituted into the
18629 //    inlinee body.
18630 //
18631 // Notes:
18632 //    Must be used only for arguments -- use impInlineFetchLocal for
18633 //    inlinee locals.
18634 //
18635 //    Direct substitution is performed when the formal argument cannot
18636 //    change value in the inlinee body (no starg or ldarga), and the
18637 //    actual argument expression's value cannot be changed if it is
18638 //    substituted it into the inlinee body.
18639 //
18640 //    Even if an inlinee-scoped temp is returned here, it may later be
18641 //    "bashed" to a caller-supplied tree when arguments are actually
18642 //    passed (see fgInlinePrependStatements). Bashing can happen if
18643 //    the argument ends up being single use and other conditions are
18644 //    met. So the contents of the tree returned here may not end up
18645 //    being the ones ultimately used for the argument.
18646 //
18647 //    This method will side effect inlArgInfo. It should only be called
18648 //    for actual uses of the argument in the inlinee.
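      //
      //    For example (illustrative), for a call site Callee(5, x, x + y) where the
      //    inlinee never takes the address of, or stores to, its parameters:
      //    - argument 0 is invariant, so each use in the inlinee body gets a clone
      //      of the constant 5;
      //    - argument 1 is an unaliased caller local, so uses refer to the caller's
      //      lclVar for x directly (retyped if necessary);
      //    - argument 2 is a complex expression, so it is evaluated into an inlinee
      //      temp and uses reference that temp.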
18649
18650 GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
18651 {
18652     // Cache the relevant arg and lcl info for this argument.
18653     // We will modify argInfo but not lclVarInfo.
18654     InlArgInfo&          argInfo          = inlArgInfo[lclNum];
18655     const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
18656     const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
18657     const var_types      lclTyp           = lclInfo.lclTypeInfo;
18658     GenTree*             op1              = nullptr;
18659
18660     if (argInfo.argIsInvariant && !argCanBeModified)
18661     {
18662         // Directly substitute constants or addresses of locals
18663         //
18664         // Clone the constant. Note that we cannot directly use
18665         // argNode in the trees even if !argInfo.argIsUsed as this
18666         // would introduce aliasing between inlArgInfo[].argNode and
18667         // impInlineExpr. Then gtFoldExpr() could change it, causing
18668         // further references to the argument working off of the
18669         // bashed copy.
18670         op1 = gtCloneExpr(argInfo.argNode);
18671         PREFIX_ASSUME(op1 != nullptr);
18672         argInfo.argTmpNum = BAD_VAR_NUM;
18673     }
18674     else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
18675     {
18676         // Directly substitute unaliased caller locals for args that cannot be modified
18677         //
18678         // Use the caller-supplied node if this is the first use.
18679         op1               = argInfo.argNode;
18680         argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
18681
18682         // Use an equivalent copy if this is the second or subsequent
18683         // use, or if we need to retype.
18684         //
18685         // Note argument type mismatches that prevent inlining should
18686         // have been caught in impInlineInitVars.
18687         if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
18688         {
18689             assert(op1->gtOper == GT_LCL_VAR);
18690             assert(lclNum == op1->gtLclVar.gtLclILoffs);
18691
18692             var_types newTyp = lclTyp;
18693
18694             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
18695             {
18696                 newTyp = genActualType(lclTyp);
18697             }
18698
18699             // Create a new lcl var node - remember the argument lclNum
18700             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
18701         }
18702     }
18703     else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
18704     {
18705         /* Argument is a by-ref address to a struct, a normed struct, or its field.
18706            In these cases, don't spill the byref to a local, simply clone the tree and use it.
18707            This way we will increase the chance for this byref to be optimized away by
18708            a subsequent "dereference" operation.
18709
18710            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
18711            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
18712            For example, if the caller is:
18713                 ldloca.s   V_1  // V_1 is a local struct
18714                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
18715            and the callee being inlined has:
18716                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
18717                     ldarga.s   ptrToInts
18718                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
18719            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
18720            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
18721         */
18722         assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
18723         op1 = gtCloneExpr(argInfo.argNode);
18724     }
18725     else
18726     {
18727         /* Argument is a complex expression - it must be evaluated into a temp */
18728
18729         if (argInfo.argHasTmp)
18730         {
18731             assert(argInfo.argIsUsed);
18732             assert(argInfo.argTmpNum < lvaCount);
18733
18734             /* Create a new lcl var node - remember the argument lclNum */
18735             op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
18736
18737             /* This is the second or later use of this argument,
18738             so we have to use the temp (instead of the actual arg) */
18739             argInfo.argBashTmpNode = nullptr;
18740         }
18741         else
18742         {
18743             /* First time use */
18744             assert(!argInfo.argIsUsed);
18745
18746             /* Reserve a temp for the expression.
18747             * Use a large size node as we may change it later */
18748
18749             const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
18750
18751             lvaTable[tmpNum].lvType = lclTyp;
18752
18753             // For ref types, determine the type of the temp.
18754             if (lclTyp == TYP_REF)
18755             {
18756                 if (!argCanBeModified)
18757                 {
18758                     // If the arg can't be modified in the method
18759                     // body, use the type of the value, if
18760                     // known. Otherwise, use the declared type.
18761                     lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18762                 }
18763                 else
18764                 {
18765                     // Arg might be modified, use the declared type of
18766                     // the argument.
18767                     lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18768                 }
18769             }
18770
18771             assert(lvaTable[tmpNum].lvAddrExposed == 0);
18772             if (argInfo.argHasLdargaOp)
18773             {
18774                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
18775             }
18776
18777             if (lclInfo.lclVerTypeInfo.IsStruct())
18778             {
18779                 if (varTypeIsStruct(lclTyp))
18780                 {
18781                     lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18782                 }
18783                 else
18784                 {
18785                     // This is a wrapped primitive.  Make sure the verstate knows that
18786                     lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
18787                 }
18788             }
18789
18790             argInfo.argHasTmp = true;
18791             argInfo.argTmpNum = tmpNum;
18792
18793             // If we require strict exception order, then arguments must
18794             // be evaluated in sequence before the body of the inlined method.
18795             // So we need to evaluate them to a temp.
18796             // Also, if arguments have global or local references, we need to
18797             // evaluate them to a temp before the inlined body as the
18798             // inlined body may be modifying the global ref.
18799             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
18800             // if it is a struct, because it requires some additional handling.
18801
18802             if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
18803                 !argInfo.argHasCallerLocalRef)
18804             {
18805                 /* Get a *LARGE* LCL_VAR node */
18806                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
18807
18808                 /* Record op1 as the very first use of this argument.
18809                 If there are no further uses of the arg, we may be
18810                 able to use the actual arg node instead of the temp.
18811                 If we do see any further uses, we will clear this. */
18812                 argInfo.argBashTmpNode = op1;
18813             }
18814             else
18815             {
18816                 /* Get a small LCL_VAR node */
18817                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
18818                 /* No bashing of this argument */
18819                 argInfo.argBashTmpNode = nullptr;
18820             }
18821         }
18822     }
18823
18824     // Mark this argument as used.
18825     argInfo.argIsUsed = true;
18826
18827     return op1;
18828 }
18829
18830 /******************************************************************************
18831  Is this the original "this" argument to the call being inlined?
18832
18833  Note that we do not inline methods with "starg 0", and so we do not need to
18834  worry about it.
18835 */
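      // For example (illustrative): field-access import code can use this, together
      // with impInlineIsGuaranteedThisDerefBeforeAnySideEffects below, to decide
      // whether an explicit null check on the inlinee's 'this' can be omitted.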
18836
18837 BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
18838 {
18839     assert(compIsForInlining());
18840     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
18841 }
18842
18843 //-----------------------------------------------------------------------------
18844 // This function checks if a dereference in the inlinee can guarantee that
18845 // the "this" is non-NULL.
18846 // If we haven't hit a branch or a side effect, and we are dereferencing
18847 // from 'this' to access a field or to make a GTF_CALL_NULLCHECK call,
18848 // then we can avoid a separate null pointer check.
18849 //
18850 // "additionalTreesToBeEvaluatedBefore"
18851 // is the set of pending trees that have not yet been added to the statement list,
18852 // and which have been removed from verCurrentState.esStack[]
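      //
      // For example (illustrative): when inlining an instance method whose body starts
      // with
      //
      //     return this.someField + Helper();
      //
      // the load of this.someField dereferences 'this' before any branch or other side
      // effect, so the caller does not need an explicit null check for the inlined
      // 'this' argument.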
18853
18854 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree*    additionalTreesToBeEvaluatedBefore,
18855                                                                   GenTree*    variableBeingDereferenced,
18856                                                                   InlArgInfo* inlArgInfo)
18857 {
18858     assert(compIsForInlining());
18859     assert(opts.OptEnabled(CLFLG_INLINING));
18860
18861     BasicBlock* block = compCurBB;
18862
18863     GenTree* stmt;
18864     GenTree* expr;
18865
18866     if (block != fgFirstBB)
18867     {
18868         return FALSE;
18869     }
18870
18871     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
18872     {
18873         return FALSE;
18874     }
18875
18876     if (additionalTreesToBeEvaluatedBefore &&
18877         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
18878     {
18879         return FALSE;
18880     }
18881
18882     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
18883     {
18884         expr = stmt->gtStmt.gtStmtExpr;
18885
18886         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
18887         {
18888             return FALSE;
18889         }
18890     }
18891
18892     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
18893     {
18894         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
18895         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
18896         {
18897             return FALSE;
18898         }
18899     }
18900
18901     return TRUE;
18902 }
18903
18904 //------------------------------------------------------------------------
18905 // impMarkInlineCandidate: determine if this call can be subsequently inlined
18906 //
18907 // Arguments:
18908 //    callNode -- call under scrutiny
18909 //    exactContextHnd -- context handle for inlining
18910 //    exactContextNeedsRuntimeLookup -- true if context required runtime lookup
18911 //    callInfo -- call info from VM
18912 //
18913 // Notes:
18914 //    If callNode is an inline candidate, this method sets the flag
18915 //    GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
18916 //    filled in the associated InlineCandidateInfo.
18917 //
18918 //    If callNode is not an inline candidate, and the reason is
18919 //    something that is inherent to the method being called, the
18920 //    method may be marked as "noinline" to short-circuit any
18921 //    future assessments of calls to this method.
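      //
      //    Only calls marked here are considered by the later inlining phase, e.g.
      //    (sketch, not a quote of that code):
      //
      //        if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
      //        {
      //            // fgInline/fgMorphCallInline will attempt the actual substitution,
      //            // using the InlineCandidateInfo recorded on the call node.
      //        }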
18922
18923 void Compiler::impMarkInlineCandidate(GenTree*               callNode,
18924                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
18925                                       bool                   exactContextNeedsRuntimeLookup,
18926                                       CORINFO_CALL_INFO*     callInfo)
18927 {
18928     // Let the strategy know there's another call
18929     impInlineRoot()->m_inlineStrategy->NoteCall();
18930
18931     if (!opts.OptEnabled(CLFLG_INLINING))
18932     {
18933         /* XXX Mon 8/18/2008
18934          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
18935          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
18936          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
18937          * figure out why we did not set MAXOPT for this compile.
18938          */
18939         assert(!compIsForInlining());
18940         return;
18941     }
18942
18943     if (compIsForImportOnly())
18944     {
18945         // Don't bother creating the inline candidate during verification.
18946         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18947         // that leads to the creation of multiple instances of Compiler.
18948         return;
18949     }
18950
18951     GenTreeCall* call = callNode->AsCall();
18952     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18953
18954     // Don't inline if not optimizing root method
18955     if (opts.compDbgCode)
18956     {
18957         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18958         return;
18959     }
18960
18961     // Don't inline if inlining into root method is disabled.
18962     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18963     {
18964         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18965         return;
18966     }
18967
18968     // Inlining candidate determination needs to honor only IL tail prefix.
18969     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18970     if (call->IsTailPrefixedCall())
18971     {
18972         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18973         return;
18974     }
18975
18976     // Tail recursion elimination takes precedence over inlining.
18977     // TODO: We may want to do some of the additional checks from fgMorphCall
18978     // here to reduce the chance we don't inline a call that won't be optimized
18979     // as a fast tail call or turned into a loop.
18980     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18981     {
18982         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18983         return;
18984     }
18985
18986     if (call->IsVirtual())
18987     {
18988         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18989         return;
18990     }
18991
18992     /* Ignore helper calls */
18993
18994     if (call->gtCallType == CT_HELPER)
18995     {
18996         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18997         return;
18998     }
18999
19000     /* Ignore indirect calls */
19001     if (call->gtCallType == CT_INDIRECT)
19002     {
19003         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
19004         return;
19005     }
19006
19007     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
19008      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
19009      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
19010
19011     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
19012     unsigned              methAttr;
19013
19014     // Reuse method flags from the original callInfo if possible
19015     if (fncHandle == callInfo->hMethod)
19016     {
19017         methAttr = callInfo->methodFlags;
19018     }
19019     else
19020     {
19021         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
19022     }
19023
19024 #ifdef DEBUG
19025     if (compStressCompile(STRESS_FORCE_INLINE, 0))
19026     {
19027         methAttr |= CORINFO_FLG_FORCEINLINE;
19028     }
19029 #endif
19030
19031     // Check for COMPlus_AggressiveInlining
19032     if (compDoAggressiveInlining)
19033     {
19034         methAttr |= CORINFO_FLG_FORCEINLINE;
19035     }
19036
19037     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
19038     {
19039         /* Don't bother inline blocks that are in the filter region */
19040         if (bbInCatchHandlerILRange(compCurBB))
19041         {
19042 #ifdef DEBUG
19043             if (verbose)
19044             {
19045                 printf("\nWill not inline blocks that are in the catch handler region\n");
19046             }
19047
19048 #endif
19049
19050             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
19051             return;
19052         }
19053
19054         if (bbInFilterILRange(compCurBB))
19055         {
19056 #ifdef DEBUG
19057             if (verbose)
19058             {
19059                 printf("\nWill not inline blocks that are in the filter region\n");
19060             }
19061 #endif
19062
19063             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
19064             return;
19065         }
19066     }
19067
19068     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
19069
19070     if (opts.compNeedSecurityCheck)
19071     {
19072         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
19073         return;
19074     }
19075
19076     /* Check if we tried to inline this method before */
19077
19078     if (methAttr & CORINFO_FLG_DONT_INLINE)
19079     {
19080         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
19081         return;
19082     }
19083
19084     /* Cannot inline synchronized methods */
19085
19086     if (methAttr & CORINFO_FLG_SYNCH)
19087     {
19088         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
19089         return;
19090     }
19091
19092     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
19093
19094     if (methAttr & CORINFO_FLG_SECURITYCHECK)
19095     {
19096         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
19097         return;
19098     }
19099
19100     InlineCandidateInfo* inlineCandidateInfo = nullptr;
19101     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
19102
19103     if (inlineResult.IsFailure())
19104     {
19105         return;
19106     }
19107
19108     // The old value should be NULL
19109     assert(call->gtInlineCandidateInfo == nullptr);
19110
19111     // The new value should not be NULL.
19112     assert(inlineCandidateInfo != nullptr);
19113     inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
19114
19115     call->gtInlineCandidateInfo = inlineCandidateInfo;
19116
19117     // Mark the call node as inline candidate.
19118     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
19119
19120     // Let the strategy know there's another candidate.
19121     impInlineRoot()->m_inlineStrategy->NoteCandidate();
19122
19123     // Since we're not actually inlining yet, and this call site is
19124     // still just an inline candidate, there's nothing to report.
19125     inlineResult.SetReported();
19126 }
19127
19128 /******************************************************************************/
19129 // Returns true if the given intrinsic will be implemented by target-specific
19130 // instructions
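      // A usage sketch (illustrative): callers use this to decide whether a recognized
      // math method becomes a GT_INTRINSIC node expanded to a machine instruction or
      // remains a call to the System.Math implementation:
      //
      //     if (IsTargetIntrinsic(intrinsicId))
      //     {
      //         // e.g. CORINFO_INTRINSIC_Sqrt on xarch maps to an SSE2 sqrt instruction
      //     }
      //     else
      //     {
      //         // treated as a user call; see IsIntrinsicImplementedByUserCall below
      //     }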
19131
19132 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
19133 {
19134 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
19135     switch (intrinsicId)
19136     {
19137         // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
19138         // instructions to directly compute round/ceiling/floor.
19139         //
19140         // TODO: Because the x86 backend only targets SSE for floating-point code,
19141         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
19142         //       implemented those intrinsics as x87 instructions). If this poses
19143         //       a CQ problem, it may be necessary to change the implementation of
19144         //       the helper calls to decrease call overhead or switch back to the
19145         //       x87 instructions. This is tracked by #7097.
19146         case CORINFO_INTRINSIC_Sqrt:
19147         case CORINFO_INTRINSIC_Abs:
19148             return true;
19149
19150         case CORINFO_INTRINSIC_Round:
19151         case CORINFO_INTRINSIC_Ceiling:
19152         case CORINFO_INTRINSIC_Floor:
19153             return compSupports(InstructionSet_SSE41);
19154
19155         default:
19156             return false;
19157     }
19158 #elif defined(_TARGET_ARM64_)
19159     switch (intrinsicId)
19160     {
19161         case CORINFO_INTRINSIC_Sqrt:
19162         case CORINFO_INTRINSIC_Abs:
19163         case CORINFO_INTRINSIC_Round:
19164         case CORINFO_INTRINSIC_Floor:
19165         case CORINFO_INTRINSIC_Ceiling:
19166             return true;
19167
19168         default:
19169             return false;
19170     }
19171 #elif defined(_TARGET_ARM_)
19172     switch (intrinsicId)
19173     {
19174         case CORINFO_INTRINSIC_Sqrt:
19175         case CORINFO_INTRINSIC_Abs:
19176         case CORINFO_INTRINSIC_Round:
19177             return true;
19178
19179         default:
19180             return false;
19181     }
19182 #elif defined(_TARGET_X86_)
19183     switch (intrinsicId)
19184     {
19185         case CORINFO_INTRINSIC_Sin:
19186         case CORINFO_INTRINSIC_Cos:
19187         case CORINFO_INTRINSIC_Sqrt:
19188         case CORINFO_INTRINSIC_Abs:
19189         case CORINFO_INTRINSIC_Round:
19190             return true;
19191
19192         default:
19193             return false;
19194     }
19195 #else
19196     // TODO: This portion of logic is not implemented for other architectures.
19197     // The reason for returning true is that on all other architectures the only
19198     // intrinsics enabled are target intrinsics.
19199     return true;
19200 #endif //_TARGET_AMD64_
19201 }
19202
19203 /******************************************************************************/
19204 // Returns true if the given intrinsic will be implemented by calling System.Math
19205 // methods.
19206
19207 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
19208 {
19209     // Currently, if a math intrinsic is not implemented by target-specific
19210     // instructions, it will be implemented by a System.Math call. In the
19211     // future, if we turn to implementing some of them with helper calls,
19212     // this predicate needs to be revisited.
19213     return !IsTargetIntrinsic(intrinsicId);
19214 }
19215
19216 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
19217 {
19218     switch (intrinsicId)
19219     {
19220         case CORINFO_INTRINSIC_Sin:
19221         case CORINFO_INTRINSIC_Cbrt:
19222         case CORINFO_INTRINSIC_Sqrt:
19223         case CORINFO_INTRINSIC_Abs:
19224         case CORINFO_INTRINSIC_Cos:
19225         case CORINFO_INTRINSIC_Round:
19226         case CORINFO_INTRINSIC_Cosh:
19227         case CORINFO_INTRINSIC_Sinh:
19228         case CORINFO_INTRINSIC_Tan:
19229         case CORINFO_INTRINSIC_Tanh:
19230         case CORINFO_INTRINSIC_Asin:
19231         case CORINFO_INTRINSIC_Asinh:
19232         case CORINFO_INTRINSIC_Acos:
19233         case CORINFO_INTRINSIC_Acosh:
19234         case CORINFO_INTRINSIC_Atan:
19235         case CORINFO_INTRINSIC_Atan2:
19236         case CORINFO_INTRINSIC_Atanh:
19237         case CORINFO_INTRINSIC_Log10:
19238         case CORINFO_INTRINSIC_Pow:
19239         case CORINFO_INTRINSIC_Exp:
19240         case CORINFO_INTRINSIC_Ceiling:
19241         case CORINFO_INTRINSIC_Floor:
19242             return true;
19243         default:
19244             return false;
19245     }
19246 }
19247
19248 bool Compiler::IsMathIntrinsic(GenTree* tree)
19249 {
19250     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
19251 }
19252
19253 //------------------------------------------------------------------------
19254 // impDevirtualizeCall: Attempt to change a virtual vtable call into a
19255 //   normal call
19256 //
19257 // Arguments:
19258 //     call -- the call node to examine/modify
19259 //     method   -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
19260 //     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
19261 //     contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
19262 //     exactContextHnd -- [OUT] updated context handle iff call devirtualized
19263 //
19264 // Notes:
19265 //     Virtual calls in IL will always "invoke" the base class method.
19266 //
19267 //     This transformation looks for evidence that the type of 'this'
19268 //     in the call is exactly known, is a final class or would invoke
19269 //     a final method, and if that and other safety checks pan out,
19270 //     modifies the call and the call info to create a direct call.
19271 //
19272 //     This transformation is initially done in the importer and not
19273 //     in some subsequent optimization pass because we want it to be
19274 //     upstream of inline candidate identification.
19275 //
19276 //     However, later phases may supply improved type information that
19277 //     can enable further devirtualization. We currently reinvoke this
19278 //     code after inlining, if the return value of the inlined call is
19279 //     the 'this obj' of a subsequent virtual call.
19280 //
19281 //     If devirtualization succeeds and the call's this object is the
19282 //     result of a box, the jit will ask the EE for the unboxed entry
19283 //     point. If this exists, the jit will see if it can rework the box
19284 //     to instead make a local copy. If that is doable, the call is
19285 //     updated to invoke the unboxed entry on the local copy.
19286 //
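      //     For example (illustrative):
      //
      //         sealed class Derived : Base { public override int M() => 1; }
      //         ...
      //         Base b = new Derived();
      //         int r = b.M();           // IL: callvirt Base::M
      //
      //     When the importer can see that 'b' is exactly (or at least finally) of
      //     type Derived, the callvirt is rewritten as a direct call to Derived::M,
      //     with an explicit null check added if 'b' is not known to be non-null.
      //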
19287 void Compiler::impDevirtualizeCall(GenTreeCall*            call,
19288                                    CORINFO_METHOD_HANDLE*  method,
19289                                    unsigned*               methodFlags,
19290                                    CORINFO_CONTEXT_HANDLE* contextHandle,
19291                                    CORINFO_CONTEXT_HANDLE* exactContextHandle)
19292 {
19293     assert(call != nullptr);
19294     assert(method != nullptr);
19295     assert(methodFlags != nullptr);
19296     assert(contextHandle != nullptr);
19297
19298     // This should be a virtual vtable or virtual stub call.
19299     assert(call->IsVirtual());
19300
19301     // Bail if not optimizing
19302     if (opts.MinOpts())
19303     {
19304         return;
19305     }
19306
19307     // Bail if debuggable codegen
19308     if (opts.compDbgCode)
19309     {
19310         return;
19311     }
19312
19313 #if defined(DEBUG)
19314     // Bail if devirt is disabled.
19315     if (JitConfig.JitEnableDevirtualization() == 0)
19316     {
19317         return;
19318     }
19319
19320     const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
19321 #endif // DEBUG
19322
19323     // Fetch information about the virtual method we're calling.
19324     CORINFO_METHOD_HANDLE baseMethod        = *method;
19325     unsigned              baseMethodAttribs = *methodFlags;
19326
19327     if (baseMethodAttribs == 0)
19328     {
19329         // For late devirt we may not have method attributes, so fetch them.
19330         baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19331     }
19332     else
19333     {
19334 #if defined(DEBUG)
19335         // Validate that callInfo has up to date method flags
19336         const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19337
19338         // All the base method attributes should agree, save that
19339         // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
19340         // because of concurrent jitting activity.
19341         //
19342         // Note we don't look at this particular flag bit below, and
19343         // later on (if we do try and inline) we will rediscover why
19344         // the method can't be inlined, so there's no danger here in
19345         // seeing this particular flag bit in different states between
19346         // the cached and fresh values.
19347         if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
19348         {
19349             assert(!"mismatched method attributes");
19350         }
19351 #endif // DEBUG
19352     }
19353
19354     // In R2R mode, we might see virtual stub calls to
19355     // non-virtuals. For instance, cases where the non-virtual method
19356     // is in a different assembly but is called via CALLVIRT. For
19357     // version resilience we must allow for the fact that the method
19358     // might become virtual in some update.
19359     //
19360     // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
19361     // regular call+nullcheck upstream, so we won't reach this
19362     // point.
19363     if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
19364     {
19365         assert(call->IsVirtualStub());
19366         assert(opts.IsReadyToRun());
19367         JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
19368         return;
19369     }
19370
19371     // See what we know about the type of 'this' in the call.
19372     GenTree*             thisObj       = call->gtCallObjp->gtEffectiveVal(false);
19373     GenTree*             actualThisObj = nullptr;
19374     bool                 isExact       = false;
19375     bool                 objIsNonNull  = false;
19376     CORINFO_CLASS_HANDLE objClass      = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
19377
19378     // See if we have special knowledge that can get us a type or a better type.
19379     if ((objClass == nullptr) || !isExact)
19380     {
19381         actualThisObj = thisObj;
19382
19383         // Walk back through any return expression placeholders
19384         while (actualThisObj->OperGet() == GT_RET_EXPR)
19385         {
19386             actualThisObj = actualThisObj->gtRetExpr.gtInlineCandidate;
19387         }
19388
19389         // See if we landed on a call to a special intrinsic method
19390         if (actualThisObj->IsCall())
19391         {
19392             GenTreeCall* thisObjCall = actualThisObj->AsCall();
19393             if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
19394             {
19395                 assert(thisObjCall->gtCallType == CT_USER_FUNC);
19396                 CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
19397                 CORINFO_CLASS_HANDLE  specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
19398                 if (specialObjClass != nullptr)
19399                 {
19400                     objClass     = specialObjClass;
19401                     isExact      = true;
19402                     objIsNonNull = true;
19403                 }
19404             }
19405         }
19406     }
19407
19408     // Bail if we know nothing.
19409     if (objClass == nullptr)
19410     {
19411         JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
19412         return;
19413     }
19414
19415     // Fetch information about the class that introduced the virtual method.
19416     CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
19417     const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
19418
19419 #if !defined(FEATURE_CORECLR)
19420     // If base class is not beforefieldinit then devirtualizing may
19421     // cause us to miss a base class init trigger. Spec says we don't
19422     // need a trigger for ref class callvirts but desktop seems to
19423     // have one anyway. So defer.
19424     if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
19425     {
19426         JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
19427         return;
19428     }
19429 #endif // FEATURE_CORECLR
19430
19431     // Is the call an interface call?
19432     const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
19433
19434     // If the objClass is sealed (final), then we may be able to devirtualize.
19435     const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
19436     const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
19437
19438 #if defined(DEBUG)
19439     const char* callKind       = isInterface ? "interface" : "virtual";
19440     const char* objClassNote   = "[?]";
19441     const char* objClassName   = "?objClass";
19442     const char* baseClassName  = "?baseClass";
19443     const char* baseMethodName = "?baseMethod";
19444
19445     if (verbose || doPrint)
19446     {
19447         objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
19448         objClassName   = info.compCompHnd->getClassName(objClass);
19449         baseClassName  = info.compCompHnd->getClassName(baseClass);
19450         baseMethodName = eeGetMethodName(baseMethod, nullptr);
19451
19452         if (verbose)
19453         {
19454             printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
19455                    "    class for 'this' is %s%s (attrib %08x)\n"
19456                    "    base method is %s::%s\n",
19457                    callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
19458         }
19459     }
19460 #endif // defined(DEBUG)
19461
19462     // Bail if obj class is an interface.
19463     // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
19464     //   IL_021d:  ldloc.0
19465     //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
19466     if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
19467     {
19468         JITDUMP("--- obj class is interface, sorry\n");
19469         return;
19470     }
19471
19472     if (isInterface)
19473     {
19474         assert(call->IsVirtualStub());
19475         JITDUMP("--- base class is interface\n");
19476     }
19477
19478     // Fetch the method that would be called based on the declared type of 'this'
19479     CORINFO_CONTEXT_HANDLE ownerType     = *contextHandle;
19480     CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
19481
19482     // If we failed to get a handle, we can't devirtualize.  This can
19483     // happen when prejitting, if the devirtualization crosses
19484     // servicing bubble boundaries.
19485     if (derivedMethod == nullptr)
19486     {
19487         JITDUMP("--- no derived method, sorry\n");
19488         return;
19489     }
19490
19491     // Fetch method attributes to see if method is marked final.
19492     const DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
19493     const bool  derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
19494
19495 #if defined(DEBUG)
19496     const char* derivedClassName  = "?derivedClass";
19497     const char* derivedMethodName = "?derivedMethod";
19498
19499     const char* note = "speculative";
19500     if (isExact)
19501     {
19502         note = "exact";
19503     }
19504     else if (objClassIsFinal)
19505     {
19506         note = "final class";
19507     }
19508     else if (derivedMethodIsFinal)
19509     {
19510         note = "final method";
19511     }
19512
19513     if (verbose || doPrint)
19514     {
19515         derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
19516         if (verbose)
19517         {
19518             printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
19519             gtDispTree(call);
19520         }
19521     }
19522 #endif // defined(DEBUG)
19523
19524     if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
19525     {
19526         // Type is not exact, and neither the class nor the method is final.
19527         //
19528         // We could speculatively devirtualize, but there's no
19529         // reason to believe the derived method is the one that
19530         // is likely to be invoked.
19531         //
19532         // If there's currently no further overriding (that is, at
19533         // the time of jitting, objClass has no subclasses that
19534         // override this method), then perhaps we'd be willing to
19535         // make a bet...?
19536         JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
19537         return;
19538     }
19539
19540     // For interface calls we must have an exact type or final class.
19541     if (isInterface && !isExact && !objClassIsFinal)
19542     {
19543         JITDUMP("    Class not final or exact for interface, no devirtualization\n");
19544         return;
19545     }
19546
19547     JITDUMP("    %s; can devirtualize\n", note);
19548
19549     // Make the updates: strip the virtual dispatch flags and call the derived method directly.
19550     call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
19551     call->gtFlags &= ~GTF_CALL_VIRT_STUB;
19552     call->gtCallMethHnd = derivedMethod;
19553     call->gtCallType    = CT_USER_FUNC;
19554
19555     // Virtual calls include an implicit null check, which we may
19556     // now need to make explicit.
19557     if (!objIsNonNull)
19558     {
19559         call->gtFlags |= GTF_CALL_NULLCHECK;
19560     }
19561
19562     // Clear the inline candidate info (may be non-null since
19563     // it's a union field used for other things by virtual
19564     // stubs)
19565     call->gtInlineCandidateInfo = nullptr;
19566
19567 #if defined(DEBUG)
19568     if (verbose)
19569     {
19570         printf("... after devirt...\n");
19571         gtDispTree(call);
19572     }
19573
19574     if (doPrint)
19575     {
19576         printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
19577                baseMethodName, derivedClassName, derivedMethodName, note);
19578     }
19579 #endif // defined(DEBUG)
19580
19581     // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
19582     if (thisObj->IsBoxedValue())
19583     {
19584         JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
19585
19586         // Note that for some shared methods the unboxed entry point requires an extra
19587         // parameter; we defer the optimization in that case.
19588         bool                  requiresInstMethodTableArg = false;
19589         CORINFO_METHOD_HANDLE unboxedEntryMethod =
19590             info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
19591
19592         if (unboxedEntryMethod != nullptr)
19593         {
19594             // Since the call is the only consumer of the box, and the callee only receives an
19595             // interior pointer into it, we know the box can't escape.
19596             //
19597             // So, revise the box to simply create a local copy, use the address of that copy
19598             // as the this pointer, and update the entry point to the unboxed entry.
19599             //
19600             // Ideally, we then inline the boxed method and, if it turns out not to modify
19601             // the copy, we can undo the copy too.
19602             if (requiresInstMethodTableArg)
19603             {
19604                 // We can likely handle this case by grabbing the argument passed to
19605                 // the newobj in the box. But defer for now.
19606                 JITDUMP("Found unboxed entry point, but it needs method table arg, deferring\n");
19607             }
19608             else
19609             {
19610                 JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
19611                 GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
19612
19613                 if (localCopyThis != nullptr)
19614                 {
19615                     JITDUMP("Success! invoking unboxed entry point on local copy\n");
19616                     call->gtCallObjp    = localCopyThis;
19617                     call->gtCallMethHnd = unboxedEntryMethod;
19618                     derivedMethod       = unboxedEntryMethod;
19619                 }
19620                 else
19621                 {
19622                     JITDUMP("Sorry, failed to undo the box\n");
19623                 }
19624             }
19625         }
19626         else
19627         {
19628             // Many of the low-level methods on value classes won't have unboxed entries,
19629             // as they need access to the type of the object.
19630             //
19631             // Note this may be a cue for us to stack allocate the boxed object, since
19632             // we probably know that these objects don't escape.
19633             JITDUMP("Sorry, failed to find unboxed entry point\n");
19634         }
19635     }
19636
19637     // Fetch the class that introduced the derived method.
19638     //
19639     // Note this may not equal objClass, if there is a
19640     // final method that objClass inherits.
19641     CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
19642
19643     // Need to update call info too. This is fragile
19644     // but hopefully the derived method conforms to
19645     // the base in most other ways.
19646     *method        = derivedMethod;
19647     *methodFlags   = derivedMethodAttribs;
19648     *contextHandle = MAKE_METHODCONTEXT(derivedMethod);
19649
19650     // Update context handle.
19651     if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
19652     {
19653         *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
19654     }
19655
19656 #ifdef FEATURE_READYTORUN_COMPILER
19657     if (opts.IsReadyToRun())
19658     {
19659         // For R2R, getCallInfo triggers bookkeeping on the zap
19660         // side so we need to call it here.
19661         //
19662         // First, cons up a suitable resolved token.
19663         CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
19664
19665         derivedResolvedToken.tokenScope   = info.compScopeHnd;
19666         derivedResolvedToken.tokenContext = *contextHandle;
19667         derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
19668         derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
19669         derivedResolvedToken.hClass       = derivedClass;
19670         derivedResolvedToken.hMethod      = derivedMethod;
19671
19672         // Look up the new call info.
19673         CORINFO_CALL_INFO derivedCallInfo;
19674         eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
19675
19676         // Update the call.
19677         call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
19678         call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
19679         call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
19680     }
19681 #endif // FEATURE_READYTORUN_COMPILER
19682 }
19683
19684 //------------------------------------------------------------------------
19685 // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
19686 //   to an intrinsic returns an exact type
19687 //
19688 // Arguments:
19689 //     methodHnd -- handle for the special intrinsic method
19690 //
19691 // Returns:
19692 //     Exact class handle returned by the intrinsic call, if known.
19693 //     Nullptr if not known, or not likely to lead to beneficial optimization.
19694
19695 CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
19696 {
19697     JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
19698
19699     CORINFO_CLASS_HANDLE result = nullptr;
19700
19701     // See what intrinsic we have...
19702     const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
19703     switch (ni)
19704     {
19705         case NI_System_Collections_Generic_EqualityComparer_get_Default:
19706         {
19707             // Expect one class generic parameter; figure out which it is.
19708             CORINFO_SIG_INFO sig;
19709             info.compCompHnd->getMethodSig(methodHnd, &sig);
19710             assert(sig.sigInst.classInstCount == 1);
19711             CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
19712             assert(typeHnd != nullptr);
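                  // For example, for a call to EqualityComparer<int>.Default the class
                  // instantiation holds the handle for System.Int32.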
19713
19714             // The lookup can be incorrect when we have __Canon, as it won't appear
19715             // to implement any interface types.
19716             //
19717             // And if we do not have a final type, devirt & inlining is
19718             // unlikely to result in much simplification.
19719             //
19720             // We can use CORINFO_FLG_FINAL to screen out both of these cases.
19721             const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
19722             const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
19723
19724             if (isFinalType)
19725             {
19726                 result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
19727                 JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
19728                         result != nullptr ? eeGetClassName(result) : "unknown");
19729             }
19730             else
19731             {
19732                 JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
19733             }
19734
19735             break;
19736         }
19737
19738         default:
19739         {
19740             JITDUMP("This special intrinsic is not handled, sorry...\n");
19741             break;
19742         }
19743     }
19744
19745     return result;
19746 }
19747
19748 //------------------------------------------------------------------------
19749 // impAllocateToken: allocate a CORINFO_RESOLVED_TOKEN in jit-allocated memory and initialize it.
19750 //
19751 // Arguments:
19752 //    token - init value for the allocated token.
19753 //
19754 // Return Value:
19755 //    pointer to the token in jit-allocated memory.
19756 CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
19757 {
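          // compGetMem allocates from the JIT's arena, so the copied token stays valid
          // for the remainder of this compilation.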
19758     CORINFO_RESOLVED_TOKEN* memory = (CORINFO_RESOLVED_TOKEN*)compGetMem(sizeof(token));
19759     *memory                        = token;
19760     return memory;
19761 }
19762
19763 //------------------------------------------------------------------------
19764 // SpillRetExprHelper: iterates through a call's argument trees and spills GT_RET_EXPR nodes to local variables.
19765 //
19766 class SpillRetExprHelper
19767 {
19768 public:
19769     SpillRetExprHelper(Compiler* comp) : comp(comp)
19770     {
19771     }
19772
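          // StoreRetExprResultsInArgs: walk the call's argument trees (including the 'this'
          //    argument) and spill any GT_RET_EXPR nodes found there into new local variables.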
19773     void StoreRetExprResultsInArgs(GenTreeCall* call)
19774     {
19775         GenTree* args = call->gtCallArgs;
19776         if (args != nullptr)
19777         {
19778             comp->fgWalkTreePre(&args, SpillRetExprVisitor, this);
19779         }
19780         GenTree* thisArg = call->gtCallObjp;
19781         if (thisArg != nullptr)
19782         {
19783             comp->fgWalkTreePre(&thisArg, SpillRetExprVisitor, this);
19784         }
19785     }
19786
19787 private:
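          // SpillRetExprVisitor: pre-order tree walk callback. Subtrees without GTF_CALL set are
          //    skipped, since they cannot contain a GT_RET_EXPR; each GT_RET_EXPR found is spilled
          //    to a local via StoreRetExprAsLocalVar.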
19788     static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
19789     {
19790         assert((pTree != nullptr) && (*pTree != nullptr));
19791         GenTree* tree = *pTree;
19792         if ((tree->gtFlags & GTF_CALL) == 0)
19793         {
19794             // Subtrees containing a ret_expr are marked with GTF_CALL, so this subtree can be skipped.
19795             return Compiler::WALK_SKIP_SUBTREES;
19796         }
19797         if (tree->OperGet() == GT_RET_EXPR)
19798         {
19799             SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
19800             walker->StoreRetExprAsLocalVar(pTree);
19801         }
19802         return Compiler::WALK_CONTINUE;
19803     }
19804
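          // StoreRetExprAsLocalVar: assign the GT_RET_EXPR to a freshly grabbed temp and replace
          //    the original use with a reference to that temp.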
19805     void StoreRetExprAsLocalVar(GenTree** pRetExpr)
19806     {
19807         GenTree* retExpr = *pRetExpr;
19808         assert(retExpr->OperGet() == GT_RET_EXPR);
19809         JITDUMP("Store return expression %u as a local var.\n", retExpr->gtTreeID);
19810         unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
19811         comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
19812         *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
19813     }
19814
19815 private:
19816     Compiler* comp;
19817 };
19818
19819 //------------------------------------------------------------------------
19820 // addFatPointerCandidate: mark the call and the method as having a fat pointer candidate.
19821 //                         Spill ret_expr nodes in the call's arguments, because they can't be cloned.
19822 //
19823 // Arguments:
19824 //    call - fat calli candidate
19825 //
19826 void Compiler::addFatPointerCandidate(GenTreeCall* call)
19827 {
19828     setMethodHasFatPointer();
19829     call->SetFatPointerCandidate();
19830     SpillRetExprHelper helper(this);
19831     helper.StoreRetExprResultsInArgs(call);
19832 }