1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
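// Illustrative usage (a hedged sketch, not part of the original source): these macros
// back the import-time verification checks. A typical pattern in a void verifier
// helper might look like the following; the helper name, condition, and messages are
// hypothetical:
//
//     void Compiler::verCheckSomething(const typeInfo& ti)
//     {
//         // Record a verification failure (and possibly raise), but keep going:
//         Verify(ti.IsObjRef(), "object ref expected");
//
//         // Record a verification failure and bail out of the current helper:
//         VerifyOrReturn(!ti.IsDead(), "unexpected dead value");
//     }
//
// VerifyOrReturnSpeculative is used by bool-returning helpers that may be called
// speculatively; in the speculative case a failed check simply returns false without
// raising an exception.
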
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 inline void Compiler::impPushNullObjRefOnStack()
149 {
150     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
151 }
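
// Illustrative usage (a hedged sketch, not part of the original source): importing a
// simple IL opcode builds a tree and pushes it together with a matching typeInfo.
// Importing "ldnull", for instance, goes through the helper above, which amounts to:
//
//     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
//
// The typeInfo argument matters mostly when verification is enabled; the asserts in
// impPushOnStack check that it is consistent with the tree's type.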
152
153 // This method gets called when we run into unverifiable code
154 // (and we are verifying the method)
155
156 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
157                                                           DEBUGARG(unsigned line))
158 {
159     // Remember that the code is not verifiable
160     // Note that the method may yet pass canSkipMethodVerification(),
161     // and so the presence of unverifiable code may not be an issue.
162     tiIsVerifiableCode = FALSE;
163
164 #ifdef DEBUG
165     const char* tail = strrchr(file, '\\');
166     if (tail)
167     {
168         file = tail + 1;
169     }
170
171     if (JitConfig.JitBreakOnUnsafeCode())
172     {
173         assert(!"Unsafe code detected");
174     }
175 #endif
176
177     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
178             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
179
180     if (verNeedsVerification() || compIsForImportOnly())
181     {
182         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
183                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
184         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
185     }
186 }
187
188 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
189                                                                     DEBUGARG(unsigned line))
190 {
191     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
192             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
193
194 #ifdef DEBUG
195     //    BreakIfDebuggerPresent();
196     if (getBreakOnBadCode())
197     {
198         assert(!"Typechecking error");
199     }
200 #endif
201
202     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
203     UNREACHABLE();
204 }
205
206 // Helper function that tells us whether the IL instruction at the given address
207 // consumes an address from the top of the stack. We use it to avoid marking
208 // locals as address-taken (lvAddrTaken) when it is not necessary.
209 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
210 {
211     assert(!compIsForInlining());
212
213     OPCODE opcode;
214
215     opcode = (OPCODE)getU1LittleEndian(codeAddr);
216
217     switch (opcode)
218     {
219         // case CEE_LDFLDA: We're taking this one out because if you have a sequence
220         // like
221         //
222         //          ldloca.0
223         //          ldflda whatever
224         //
225         // on a primitive-like struct, you end up after morphing with the address of a local
226         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
227         // for structs that contain other structs, which isn't a case we handle very
228         // well right now for other reasons.
229
230         case CEE_LDFLD:
231         {
232             // We won't collapse small fields. This is probably not the right place for this
233             // check, but we're only using the function for this purpose, and it is easy to factor
234             // out if we need to do so.
235
236             CORINFO_RESOLVED_TOKEN resolvedToken;
237             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
238
239             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField));
240
241             // Preserve 'small' int types
242             if (!varTypeIsSmall(lclTyp))
243             {
244                 lclTyp = genActualType(lclTyp);
245             }
246
247             if (varTypeIsSmall(lclTyp))
248             {
249                 return false;
250             }
251
252             return true;
253         }
254         default:
255             break;
256     }
257
258     return false;
259 }
260
261 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
262 {
263     pResolvedToken->tokenContext = impTokenLookupContextHandle;
264     pResolvedToken->tokenScope   = info.compScopeHnd;
265     pResolvedToken->token        = getU4LittleEndian(addr);
266     pResolvedToken->tokenType    = kind;
267
268     if (!tiVerificationNeeded)
269     {
270         info.compCompHnd->resolveToken(pResolvedToken);
271     }
272     else
273     {
274         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
275     }
276 }
277
278 /*****************************************************************************
279  *
280  *  Pop one tree from the stack.
281  */
282
283 StackEntry Compiler::impPopStack()
284 {
285     if (verCurrentState.esStackDepth == 0)
286     {
287         BADCODE("stack underflow");
288     }
289
290 #ifdef DEBUG
291 #if VERBOSE_VERIFY
292     if (VERBOSE && tiVerificationNeeded)
293     {
294         JITDUMP("\n");
295         printf(TI_DUMP_PADDING);
296         printf("About to pop from the stack: ");
297         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
298         ti.Dump();
299     }
300 #endif // VERBOSE_VERIFY
301 #endif // DEBUG
302
303     return verCurrentState.esStack[--verCurrentState.esStackDepth];
304 }
305
306 /*****************************************************************************
307  *
308  *  Peek at the n'th (0-based) tree from the top of the stack.
309  */
310
311 StackEntry& Compiler::impStackTop(unsigned n)
312 {
313     if (verCurrentState.esStackDepth <= n)
314     {
315         BADCODE("stack underflow");
316     }
317
318     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
319 }
320
321 unsigned Compiler::impStackHeight()
322 {
323     return verCurrentState.esStackDepth;
324 }
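
// Illustrative usage (a hedged sketch, not part of the original source): a binary
// operator is imported by popping its two operands and pushing the result. Ignoring
// the overflow checks and type normalization done by the real importer, the shape is:
//
//     GenTree* op2 = impPopStack().val;
//     GenTree* op1 = impPopStack().val;
//     impPushOnStack(gtNewOperNode(GT_ADD, genActualType(op1->TypeGet()), op1, op2), typeInfo());
//
// impStackTop(n) is used when an operand must be inspected (or spilled) without
// changing the evaluation-stack depth.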
325
326 /*****************************************************************************
327  *  Some of the trees are spilled specially. While unspilling them, or
328  *  making a copy, these need special handling. The following function
329  *  checks for the operators that are possible after spilling.
330  */
331
332 #ifdef DEBUG // only used in asserts
333 static bool impValidSpilledStackEntry(GenTree* tree)
334 {
335     if (tree->gtOper == GT_LCL_VAR)
336     {
337         return true;
338     }
339
340     if (tree->OperIsConst())
341     {
342         return true;
343     }
344
345     return false;
346 }
347 #endif
348
349 /*****************************************************************************
350  *
351  *  The following logic is used to save/restore stack contents.
352  *  If 'copy' is true, then we make a copy of the trees on the stack. These
353  *  have to all be cloneable/spilled values.
354  */
355
356 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
357 {
358     savePtr->ssDepth = verCurrentState.esStackDepth;
359
360     if (verCurrentState.esStackDepth)
361     {
362         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
363         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
364
365         if (copy)
366         {
367             StackEntry* table = savePtr->ssTrees;
368
369             /* Make a fresh copy of all the stack entries */
370
371             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
372             {
373                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
374                 GenTree* tree     = verCurrentState.esStack[level].val;
375
376                 assert(impValidSpilledStackEntry(tree));
377
378                 switch (tree->gtOper)
379                 {
380                     case GT_CNS_INT:
381                     case GT_CNS_LNG:
382                     case GT_CNS_DBL:
383                     case GT_CNS_STR:
384                     case GT_LCL_VAR:
385                         table->val = gtCloneExpr(tree);
386                         break;
387
388                     default:
389                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
390                         break;
391                 }
392             }
393         }
394         else
395         {
396             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
397         }
398     }
399 }
400
401 void Compiler::impRestoreStackState(SavedStack* savePtr)
402 {
403     verCurrentState.esStackDepth = savePtr->ssDepth;
404
405     if (verCurrentState.esStackDepth)
406     {
407         memcpy(verCurrentState.esStack, savePtr->ssTrees,
408                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
409     }
410 }
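
// Illustrative usage (a hedged sketch, not part of the original source): callers that
// need the same stack contents more than once bracket the work with a save/restore
// pair. With 'copy' set to true each entry is cloned, so every entry must satisfy
// impValidSpilledStackEntry (a constant or a local):
//
//     SavedStack savedStack;
//     impSaveStackState(&savedStack, true);
//     // ... import something that consumes the stack ...
//     impRestoreStackState(&savedStack);
//
// With 'copy' set to false only the depth and the raw entries are snapshotted, so the
// trees themselves must not be consumed in between.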
411
412 /*****************************************************************************
413  *
414  *  Get the tree list started for a new basic block.
415  */
416 inline void Compiler::impBeginTreeList()
417 {
418     assert(impTreeList == nullptr && impTreeLast == nullptr);
419
420     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
421 }
422
423 /*****************************************************************************
424  *
425  *  Store the given start and end stmt in the given basic block. This is
426  *  mostly called by impEndTreeList(BasicBlock *block). It is called
427  *  directly only for handling CEE_LEAVEs out of finally-protected try regions.
428  */
429
430 inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
431 {
432     assert(firstStmt->gtOper == GT_STMT);
433     assert(lastStmt->gtOper == GT_STMT);
434
435     /* Make the list circular, so that we can easily walk it backwards */
436
437     firstStmt->gtPrev = lastStmt;
438
439     /* Store the tree list in the basic block */
440
441     block->bbTreeList = firstStmt;
442
443     /* The block should not already be marked as imported */
444     assert((block->bbFlags & BBF_IMPORTED) == 0);
445
446     block->bbFlags |= BBF_IMPORTED;
447 }
448
449 /*****************************************************************************
450  *
451  *  Store the current tree list in the given basic block.
452  */
453
454 inline void Compiler::impEndTreeList(BasicBlock* block)
455 {
456     assert(impTreeList->gtOper == GT_BEG_STMTS);
457
458     GenTree* firstTree = impTreeList->gtNext;
459
460     if (!firstTree)
461     {
462         /* The block should not already be marked as imported */
463         assert((block->bbFlags & BBF_IMPORTED) == 0);
464
465         // Empty block. Just mark it as imported
466         block->bbFlags |= BBF_IMPORTED;
467     }
468     else
469     {
470         // Ignore the GT_BEG_STMTS
471         assert(firstTree->gtPrev == impTreeList);
472
473         impEndTreeList(block, firstTree, impTreeLast);
474     }
475
476 #ifdef DEBUG
477     if (impLastILoffsStmt != nullptr)
478     {
479         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
480         impLastILoffsStmt                          = nullptr;
481     }
482
483     impTreeList = impTreeLast = nullptr;
484 #endif
485 }
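
// Illustrative flow (a hedged sketch, not part of the original source): for each basic
// block the importer starts a fresh statement list, appends the statements produced by
// importing the block's IL, and then hands the list to the block:
//
//     impBeginTreeList();                                   // impTreeList/impTreeLast = GT_BEG_STMTS
//     impAppendTree(tree1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
//     impAppendTree(tree2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
//     impEndTreeList(block);                                // stores the list and sets BBF_IMPORTED
//
// An empty list is legal; impEndTreeList then just marks the block as imported.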
486
487 /*****************************************************************************
488  *
489  *  Check that storing the given tree doesn't mess up the semantic order. Note
490  *  that this has only limited value as we can only check [0..chkLevel).
491  */
492
493 inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
494 {
495 #ifndef DEBUG
496     return;
497 #else
498     assert(stmt->gtOper == GT_STMT);
499
500     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
501     {
502         chkLevel = verCurrentState.esStackDepth;
503     }
504
505     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
506     {
507         return;
508     }
509
510     GenTree* tree = stmt->gtStmt.gtStmtExpr;
511
512     // Calls can only be appended if there are no trees with GTF_GLOB_EFFECT on the stack
513
514     if (tree->gtFlags & GTF_CALL)
515     {
516         for (unsigned level = 0; level < chkLevel; level++)
517         {
518             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
519         }
520     }
521
522     if (tree->gtOper == GT_ASG)
523     {
524         // For an assignment to a local variable, all references of that
525         // variable have to be spilled. If it is aliased, all calls and
526         // indirect accesses have to be spilled
527
528         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
529         {
530             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
531             for (unsigned level = 0; level < chkLevel; level++)
532             {
533                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
534                 assert(!lvaTable[lclNum].lvAddrExposed ||
535                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
536             }
537         }
538
539         // If the access may be to global memory, all side effects have to be spilled.
540
541         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
542         {
543             for (unsigned level = 0; level < chkLevel; level++)
544             {
545                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
546             }
547         }
548     }
549 #endif
550 }
551
552 /*****************************************************************************
553  *
554  *  Append the given GT_STMT node to the current block's tree list.
555  *  [0..chkLevel) is the portion of the stack which we will check for
556  *    interference with stmt and spill if needed.
557  */
558
559 inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
560 {
561     assert(stmt->gtOper == GT_STMT);
562     noway_assert(impTreeLast != nullptr);
563
564     /* If the statement being appended has any side-effects, check the stack
565        to see if anything needs to be spilled to preserve correct ordering. */
566
567     GenTree* expr  = stmt->gtStmt.gtStmtExpr;
568     unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
569
570     // Assignments to (unaliased) locals don't count as a side-effect as
571     // we handle them specially using impSpillLclRefs(). Temp locals should
572     // be fine too.
573
574     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
575         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
576     {
577         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
578         assert(flags == (op2Flags | GTF_ASG));
579         flags = op2Flags;
580     }
581
582     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
583     {
584         chkLevel = verCurrentState.esStackDepth;
585     }
586
587     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
588     {
589         assert(chkLevel <= verCurrentState.esStackDepth);
590
591         if (flags)
592         {
593             // If there is a call, we have to spill global refs
594             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
595
596             if (expr->gtOper == GT_ASG)
597             {
598                 GenTree* lhs = expr->gtGetOp1();
599                 // If we are assigning to a global ref, we have to spill global refs on stack.
600                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
601                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
602                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
603                 if (!expr->OperIsBlkOp())
604                 {
605                     // If we are assigning to a global ref, we have to spill global refs on stack
606                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
607                     {
608                         spillGlobEffects = true;
609                     }
610                 }
611                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
612                          ((lhs->OperGet() == GT_LCL_VAR) &&
613                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
614                 {
615                     spillGlobEffects = true;
616                 }
617             }
618
619             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
620         }
621         else
622         {
623             impSpillSpecialSideEff();
624         }
625     }
626
627     impAppendStmtCheck(stmt, chkLevel);
628
629     /* Point 'prev' at the previous node, so that we can walk backwards */
630
631     stmt->gtPrev = impTreeLast;
632
633     /* Append the expression statement to the list */
634
635     impTreeLast->gtNext = stmt;
636     impTreeLast         = stmt;
637
638 #ifdef FEATURE_SIMD
639     impMarkContiguousSIMDFieldAssignments(stmt);
640 #endif
641
642     /* Once impCurStmtOffs has been set on an appended statement, we are ready to
643        report the following offsets, so reset impCurStmtOffs */
644
645     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
646     {
647         impCurStmtOffsSet(BAD_IL_OFFSET);
648     }
649
650 #ifdef DEBUG
651     if (impLastILoffsStmt == nullptr)
652     {
653         impLastILoffsStmt = stmt;
654     }
655
656     if (verbose)
657     {
658         printf("\n\n");
659         gtDispTree(stmt);
660     }
661 #endif
662 }
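
// Illustrative example (a hedged sketch, not part of the original source) of the
// ordering problem the spill logic above guards against. Suppose the IL leaves a read
// of a static field pending on the stack and the next statement stores to that field:
//
//     stack:  [ read of Foo::s ]            // stack entry carries GTF_GLOB_REF
//     append: Foo::s = x                    // assignment with a global destination
//
// Appending the assignment without spilling would let the pending read observe the new
// value, changing the IL-visible evaluation order. impSpillSideEffects therefore spills
// the conflicting stack entries into temps before the statement is appended.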
663
664 /*****************************************************************************
665  *
666  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
667  */
668
669 inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
670 {
671     assert(stmt->gtOper == GT_STMT);
672     assert(stmtBefore->gtOper == GT_STMT);
673
674     GenTree* stmtPrev  = stmtBefore->gtPrev;
675     stmt->gtPrev       = stmtPrev;
676     stmt->gtNext       = stmtBefore;
677     stmtPrev->gtNext   = stmt;
678     stmtBefore->gtPrev = stmt;
679 }
680
681 /*****************************************************************************
682  *
683  *  Append the given expression tree to the current block's tree list.
684  *  Return the newly created statement.
685  */
686
687 GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
688 {
689     assert(tree);
690
691     /* Allocate an 'expression statement' node */
692
693     GenTree* expr = gtNewStmt(tree, offset);
694
695     /* Append the statement to the current block's stmt list */
696
697     impAppendStmt(expr, chkLevel);
698
699     return expr;
700 }
701
702 /*****************************************************************************
703  *
704  *  Insert the given expression tree before GT_STMT "stmtBefore"
705  */
706
707 void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
708 {
709     assert(stmtBefore->gtOper == GT_STMT);
710
711     /* Allocate an 'expression statement' node */
712
713     GenTree* expr = gtNewStmt(tree, offset);
714
715     /* Insert the statement before "stmtBefore" */
716
717     impInsertStmtBefore(expr, stmtBefore);
718 }
719
720 /*****************************************************************************
721  *
722  *  Append an assignment of the given value to a temp to the current tree list.
723  *  curLevel is the stack level for which the spill to the temp is being done.
724  */
725
726 void Compiler::impAssignTempGen(unsigned    tmp,
727                                 GenTree*    val,
728                                 unsigned    curLevel,
729                                 GenTree**   pAfterStmt, /* = NULL */
730                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
731                                 BasicBlock* block       /* = NULL */
732                                 )
733 {
734     GenTree* asg = gtNewTempAssign(tmp, val);
735
736     if (!asg->IsNothingNode())
737     {
738         if (pAfterStmt)
739         {
740             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
741             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
742         }
743         else
744         {
745             impAppendTree(asg, curLevel, impCurStmtOffs);
746         }
747     }
748 }
749
750 /*****************************************************************************
751  * same as above, but handle the valueclass case too
752  */
753
754 void Compiler::impAssignTempGen(unsigned             tmpNum,
755                                 GenTree*             val,
756                                 CORINFO_CLASS_HANDLE structType,
757                                 unsigned             curLevel,
758                                 GenTree**            pAfterStmt, /* = NULL */
759                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
760                                 BasicBlock*          block       /* = NULL */
761                                 )
762 {
763     GenTree* asg;
764
765     if (varTypeIsStruct(val))
766     {
767         assert(tmpNum < lvaCount);
768         assert(structType != NO_CLASS_HANDLE);
769
770         // If the method is non-verifiable, the assert may not hold; at least
771         // ignore it when verification is turned on, since any block that tries
772         // to use the temp would have failed verification.
773         var_types varType = lvaTable[tmpNum].lvType;
774         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
775         lvaSetStruct(tmpNum, structType, false);
776
777         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
778         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
779         // that has been passed in for the value being assigned to the temp, in which case we
780         // need to set 'val' to that same type.
781         // Note also that if we always normalized the types of any node that might be a struct
782         // type, this would not be necessary - but that requires additional JIT/EE interface
783         // calls that may not actually be required - e.g. if we only access a field of a struct.
784
785         val->gtType = lvaTable[tmpNum].lvType;
786
787         GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
788         asg          = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, ilOffset, block);
789     }
790     else
791     {
792         asg = gtNewTempAssign(tmpNum, val);
793     }
794
795     if (!asg->IsNothingNode())
796     {
797         if (pAfterStmt)
798         {
799             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
800             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
801         }
802         else
803         {
804             impAppendTree(asg, curLevel, impCurStmtOffs);
805         }
806     }
807 }
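
// Illustrative usage (a hedged sketch, not part of the original source): the common
// "spill to a fresh temp" pattern grabs a temp, assigns the value to it, and then uses
// the temp in place of the original tree (impGetStructAddr below does exactly this):
//
//     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("example temp"));
//     impAssignTempGen(tmpNum, val, structHnd, curLevel);   // appends the assignment
//     GenTree* use = gtNewLclvNode(tmpNum, genActualType(lvaTable[tmpNum].TypeGet()));
//
// For struct values the overload above also calls lvaSetStruct, so the temp gets the
// proper struct (or SIMD) type before the assignment is built.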
808
809 /*****************************************************************************
810  *
811  *  Pop the given number of values from the stack and return a list node with
812  *  their values.
813  *  The 'prefixTree' argument may optionally contain an argument
814  *  list that is prepended to the list returned from this function.
815  *
816  *  The notion of prepended is a bit misleading in that the list is backwards
817  *  from the way I would expect: The first element popped is at the end of
818  *  the returned list, and prefixTree is 'before' that, meaning closer to
819  *  the end of the list.  To get to prefixTree, you have to walk to the
820  *  end of the list.
821  *
822  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
823  *  such we reverse its meaning such that returnValue has a reversed
824  *  prefixTree at the head of the list.
825  */
826
827 GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
828 {
829     assert(sig == nullptr || count == sig->numArgs);
830
831     CORINFO_CLASS_HANDLE structType;
832     GenTreeArgList*      treeList;
833
834     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
835     {
836         treeList = nullptr;
837     }
838     else
839     { // ARG_ORDER_L2R
840         treeList = prefixTree;
841     }
842
843     while (count--)
844     {
845         StackEntry se   = impPopStack();
846         typeInfo   ti   = se.seTypeInfo;
847         GenTree*   temp = se.val;
848
849         if (varTypeIsStruct(temp))
850         {
851             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
852             assert(ti.IsType(TI_STRUCT));
853             structType = ti.GetClassHandleForValueClass();
854 #ifdef DEBUG
855             if (verbose)
856             {
857                 printf("Calling impNormStructVal on:\n");
858                 gtDispTree(temp);
859             }
860 #endif
861             temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
862 #ifdef DEBUG
863             if (verbose)
864             {
865                 printf("resulting tree:\n");
866                 gtDispTree(temp);
867             }
868 #endif
869         }
870
871         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
872         treeList = gtNewListNode(temp, treeList);
873     }
874
875     if (sig != nullptr)
876     {
877         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
878             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
879         {
880             // Make sure that all valuetypes (including enums) that we push are loaded.
881             // This is to guarantee that if a GC is triggered from the prestub of this method,
882             // all valuetypes in the method signature are already loaded.
883             // We need to be able to find the size of the valuetypes, but we cannot
884             // do a class-load from within GC.
885             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
886         }
887
888         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
889         CORINFO_CLASS_HANDLE    argClass;
890         CORINFO_CLASS_HANDLE    argRealClass;
891         GenTreeArgList*         args;
892
893         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
894         {
895             PREFIX_ASSUME(args != nullptr);
896
897             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
898
899             // insert implied casts (from float to double or double to float)
900
901             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
902             {
903                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), false, TYP_DOUBLE);
904             }
905             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
906             {
907                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), false, TYP_FLOAT);
908             }
909
910             // insert any widening or narrowing casts for backwards compatibility
911
912             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
913
914             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
915                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
916             {
917                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett
918                 // JIT, but it stopped working in Whidbey when we started passing simple valuetypes as their
919                 // underlying primitive types.
920                 // We will try to adjust for this case here to avoid breaking customers code (see VSW 485789 for
921                 // details).
922                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
923                 {
924                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
925                 }
926
927                 // Make sure that all valuetypes (including enums) that we push are loaded.
928                 // This is to guarantee that if a GC is triggered from the prestub of this method,
929                 // all valuetypes in the method signature are already loaded.
930                 // We need to be able to find the size of the valuetypes, but we cannot
931                 // do a class-load from within GC.
932                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
933             }
934
935             argLst = info.compCompHnd->getArgNext(argLst);
936         }
937     }
938
939     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
940     {
941         // Prepend the prefixTree
942
943         // Simple in-place reversal to place treeList
944         // at the end of a reversed prefixTree
945         while (prefixTree != nullptr)
946         {
947             GenTreeArgList* next = prefixTree->Rest();
948             prefixTree->Rest()   = treeList;
949             treeList             = prefixTree;
950             prefixTree           = next;
951         }
952     }
953     return treeList;
954 }
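
// Illustrative example (a hedged sketch, not part of the original source) of the R2L
// prefixTree handling above. With three popped arguments a1 (popped first), a2, a3 and
// a prefixTree of [p1 -> p2]:
//
//     treeList after the pop loop:   a3 -> a2 -> a1
//     after the in-place reversal:   p2 -> p1 -> a3 -> a2 -> a1
//
// i.e. the prefix ends up (reversed) at the head of the returned list, as described in
// the function header comment.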
955
956 /*****************************************************************************
957  *
958  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
959  *  The first "skipReverseCount" items are not reversed.
960  */
961
962 GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
963
964 {
965     assert(skipReverseCount <= count);
966
967     GenTreeArgList* list = impPopList(count, sig);
968
969     // reverse the list
970     if (list == nullptr || skipReverseCount == count)
971     {
972         return list;
973     }
974
975     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
976     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
977
978     if (skipReverseCount == 0)
979     {
980         ptr = list;
981     }
982     else
983     {
984         lastSkipNode = list;
985         // Get to the first node that needs to be reversed
986         for (unsigned i = 0; i < skipReverseCount - 1; i++)
987         {
988             lastSkipNode = lastSkipNode->Rest();
989         }
990
991         PREFIX_ASSUME(lastSkipNode != nullptr);
992         ptr = lastSkipNode->Rest();
993     }
994
995     GenTreeArgList* reversedList = nullptr;
996
997     do
998     {
999         GenTreeArgList* tmp = ptr->Rest();
1000         ptr->Rest()         = reversedList;
1001         reversedList        = ptr;
1002         ptr                 = tmp;
1003     } while (ptr != nullptr);
1004
1005     if (skipReverseCount)
1006     {
1007         lastSkipNode->Rest() = reversedList;
1008         return list;
1009     }
1010     else
1011     {
1012         return reversedList;
1013     }
1014 }
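
// Illustrative example (a hedged sketch, not part of the original source): with
// count == 4 and skipReverseCount == 1, the first node of the list produced by
// impPopList stays put and only the tail is reversed:
//
//     impPopList result:    n1 -> n2 -> n3 -> n4
//     impPopRevList result: n1 -> n4 -> n3 -> n2
//
// With skipReverseCount == 0 the whole list is reversed instead.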
1015
1016 //------------------------------------------------------------------------
1017 // impAssignStruct: Assign (copy) the structure from 'src' to 'dest'.
1018 //
1019 // Arguments:
1020 //    dest         - destination of the assignment
1021 //    src          - source of the assignment
1022 //    structHnd    - handle representing the struct type
1023 //    curLevel     - stack level for which a spill may be being done
1024 //    pAfterStmt   - statement to insert any additional statements after
1025 //    ilOffset     - il offset for new statements
1026 //    block        - block to insert any additional statements in
1027 //
1028 // Return Value:
1029 //    The tree that should be appended to the statement list that represents the assignment.
1030 //
1031 // Notes:
1032 //    Temp assignments may be appended to impTreeList if spilling is necessary.
1033
1034 GenTree* Compiler::impAssignStruct(GenTree*             dest,
1035                                    GenTree*             src,
1036                                    CORINFO_CLASS_HANDLE structHnd,
1037                                    unsigned             curLevel,
1038                                    GenTree**            pAfterStmt, /* = nullptr */
1039                                    IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
1040                                    BasicBlock*          block       /* = nullptr */
1041                                    )
1042 {
1043     assert(varTypeIsStruct(dest));
1044
1045     if (ilOffset == BAD_IL_OFFSET)
1046     {
1047         ilOffset = impCurStmtOffs;
1048     }
1049
1050     while (dest->gtOper == GT_COMMA)
1051     {
1052         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1053
1054         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1055         if (pAfterStmt)
1056         {
1057             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, ilOffset));
1058         }
1059         else
1060         {
1061             impAppendTree(dest->gtOp.gtOp1, curLevel, ilOffset); // do the side effect
1062         }
1063
1064         // set dest to the second thing
1065         dest = dest->gtOp.gtOp2;
1066     }
1067
1068     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1069            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1070
1071     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1072         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1073     {
1074         // Make this a NOP
1075         return gtNewNothingNode();
1076     }
1077
1078     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1079     // or re-creating a Blk node if it is.
1080     GenTree* destAddr;
1081
1082     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1083     {
1084         destAddr = dest->gtOp.gtOp1;
1085     }
1086     else
1087     {
1088         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1089     }
1090
1091     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, ilOffset, block));
1092 }
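
// Illustrative example (a hedged sketch, not part of the original source) of the
// GT_COMMA handling above. If the destination is COMMA(sideEffect, lclVar), the side
// effect is appended (or inserted after *pAfterStmt) as its own statement, and the
// assignment is then built against the lclVar:
//
//     dest: GT_COMMA(<side effect>, GT_LCL_VAR V02)
//       =>  appended statement:  <side effect>
//       =>  returned tree:       V02 = src      (built via impAssignStructPtr)
//
// so the comma's side effect is still evaluated before the store, preserving IL order.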
1093
1094 //------------------------------------------------------------------------
1095 // impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'.
1096 //
1097 // Arguments:
1098 //    destAddr     - address of the destination of the assignment
1099 //    src          - source of the assignment
1100 //    structHnd    - handle representing the struct type
1101 //    curLevel     - stack level for which a spill may be being done
1102 //    pAfterStmt   - statement to insert any additional statements after
1103 //    ilOffset     - il offset for new statements
1104 //    block        - block to insert any additional statements in
1105 //
1106 // Return Value:
1107 //    The tree that should be appended to the statement list that represents the assignment.
1108 //
1109 // Notes:
1110 //    Temp assignments may be appended to impTreeList if spilling is necessary.
1111
1112 GenTree* Compiler::impAssignStructPtr(GenTree*             destAddr,
1113                                       GenTree*             src,
1114                                       CORINFO_CLASS_HANDLE structHnd,
1115                                       unsigned             curLevel,
1116                                       GenTree**            pAfterStmt, /* = NULL */
1117                                       IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
1118                                       BasicBlock*          block       /* = NULL */
1119                                       )
1120 {
1121     var_types destType;
1122     GenTree*  dest      = nullptr;
1123     unsigned  destFlags = 0;
1124
1125     if (ilOffset == BAD_IL_OFFSET)
1126     {
1127         ilOffset = impCurStmtOffs;
1128     }
1129
1130 #if defined(UNIX_AMD64_ABI)
1131     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1132     // TODO-ARM-BUG: Does ARM need this?
1133     // TODO-ARM64-BUG: Does ARM64 need this?
1134     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1135            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1136            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1137            (src->TypeGet() != TYP_STRUCT &&
1138             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1139 #else  // !defined(UNIX_AMD64_ABI)
1140     assert(varTypeIsStruct(src));
1141
1142     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1143            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1144            src->gtOper == GT_COMMA ||
1145            (src->TypeGet() != TYP_STRUCT &&
1146             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1147 #endif // !defined(UNIX_AMD64_ABI)
1148     if (destAddr->OperGet() == GT_ADDR)
1149     {
1150         GenTree* destNode = destAddr->gtGetOp1();
1151         // If the actual destination is a local, or already a block node, or is a node that
1152         // will be morphed, don't insert an OBJ(ADDR).
1153         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk() ||
1154             ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet())))
1155         {
1156             dest = destNode;
1157         }
1158         destType = destNode->TypeGet();
1159     }
1160     else
1161     {
1162         destType = src->TypeGet();
1163     }
1164
1165     var_types asgType = src->TypeGet();
1166
1167     if (src->gtOper == GT_CALL)
1168     {
1169         if (src->AsCall()->TreatAsHasRetBufArg(this))
1170         {
1171             // Case of call returning a struct via hidden retbuf arg
1172
1173             // insert the return value buffer into the argument list as first byref parameter
1174             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1175
1176             // now returns void, not a struct
1177             src->gtType = TYP_VOID;
1178
1179             // return the morphed call node
1180             return src;
1181         }
1182         else
1183         {
1184             // Case of call returning a struct in one or more registers.
1185
1186             var_types returnType = (var_types)src->gtCall.gtReturnType;
1187
1188             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1189             src->gtType = genActualType(returnType);
1190
1191             // First we try to change this to "LclVar/LclFld = call"
1192             //
1193             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1194             {
1195                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1196                 // That is, the IR will be of the form lclVar = call for multi-reg return
1197                 //
1198                 GenTree* lcl = destAddr->gtOp.gtOp1;
1199                 if (src->AsCall()->HasMultiRegRetVal())
1200                 {
1201                     // Mark the struct LclVar as used in a MultiReg return context
1202                     //  which currently makes it non promotable.
1203                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1204                     // handle multireg returns.
1205                     lcl->gtFlags |= GTF_DONT_CSE;
1206                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1207                 }
1208                 else // The call result is not a multireg return
1209                 {
1210                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1211                     lcl->ChangeOper(GT_LCL_FLD);
1212                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1213                     lcl->gtType = src->gtType;
1214                     asgType     = src->gtType;
1215                 }
1216
1217                 dest = lcl;
1218
1219 #if defined(_TARGET_ARM_)
1220                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1221                 // but that method has not been updated to include ARM.
1222                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1223                 lcl->gtFlags |= GTF_DONT_CSE;
1224 #elif defined(UNIX_AMD64_ABI)
1225                 // Not allowed for FEATURE_CORECLR which is the only SKU available for System V OSs.
1226                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1227
1228                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1229                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1230                 // handle multireg returns.
1231                 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1232                 // non-multireg returns.
1233                 lcl->gtFlags |= GTF_DONT_CSE;
1234                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1235 #endif
1236             }
1237             else // we don't have a GT_ADDR of a GT_LCL_VAR
1238             {
1239                 // !!! The destination could be on stack. !!!
1240                 // This flag will let us choose the correct write barrier.
1241                 asgType   = returnType;
1242                 destFlags = GTF_IND_TGTANYWHERE;
1243             }
1244         }
1245     }
1246     else if (src->gtOper == GT_RET_EXPR)
1247     {
1248         GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1249         noway_assert(call->gtOper == GT_CALL);
1250
1251         if (call->HasRetBufArg())
1252         {
1253             // insert the return value buffer into the argument list as first byref parameter
1254             call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1255
1256             // now returns void, not a struct
1257             src->gtType  = TYP_VOID;
1258             call->gtType = TYP_VOID;
1259
1260             // We already have appended the write to 'dest' GT_CALL's args
1261             // So now we just return an empty node (pruning the GT_RET_EXPR)
1262             return src;
1263         }
1264         else
1265         {
1266             // Case of inline method returning a struct in one or more registers.
1267             //
1268             var_types returnType = (var_types)call->gtReturnType;
1269
1270             // We won't need a return buffer
1271             asgType      = returnType;
1272             src->gtType  = genActualType(returnType);
1273             call->gtType = src->gtType;
1274
1275             // If we've changed the type, and it no longer matches a local destination,
1276             // we must use an indirection.
1277             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1278             {
1279                 dest = nullptr;
1280             }
1281
1282             // !!! The destination could be on stack. !!!
1283             // This flag will let us choose the correct write barrier.
1284             destFlags = GTF_IND_TGTANYWHERE;
1285         }
1286     }
1287     else if (src->OperIsBlk())
1288     {
1289         asgType = impNormStructType(structHnd);
1290         if (src->gtOper == GT_OBJ)
1291         {
1292             assert(src->gtObj.gtClass == structHnd);
1293         }
1294     }
1295     else if (src->gtOper == GT_INDEX)
1296     {
1297         asgType = impNormStructType(structHnd);
1298         assert(src->gtIndex.gtStructElemClass == structHnd);
1299     }
1300     else if (src->gtOper == GT_MKREFANY)
1301     {
1302         // Since we are assigning the result of a GT_MKREFANY,
1303         // "destAddr" must point to a refany.
1304
1305         GenTree* destAddrClone;
1306         destAddr =
1307             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1308
1309         assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0);
1310         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1311         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1312         GenTree*       ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1313         GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
1314         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1315         GenTree* typeSlot =
1316             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1317
1318         // append the assign of the pointer value
1319         GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1320         if (pAfterStmt)
1321         {
1322             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, ilOffset));
1323         }
1324         else
1325         {
1326             impAppendTree(asg, curLevel, ilOffset);
1327         }
1328
1329         // return the assign of the type value, to be appended
1330         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1331     }
1332     else if (src->gtOper == GT_COMMA)
1333     {
1334         // The second thing is the struct or its address.
1335         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1336         if (pAfterStmt)
1337         {
1338             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, ilOffset));
1339         }
1340         else
1341         {
1342             impAppendTree(src->gtOp.gtOp1, curLevel, ilOffset); // do the side effect
1343         }
1344
1345         // Evaluate the second thing using recursion.
1346         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, ilOffset, block);
1347     }
1348     else if (src->IsLocal())
1349     {
1350         asgType = src->TypeGet();
1351     }
1352     else if (asgType == TYP_STRUCT)
1353     {
1354         asgType     = impNormStructType(structHnd);
1355         src->gtType = asgType;
1356     }
1357     if (dest == nullptr)
1358     {
1359         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1360         // if this is a known struct type.
1361         if (asgType == TYP_STRUCT)
1362         {
1363             dest = gtNewObjNode(structHnd, destAddr);
1364             gtSetObjGcInfo(dest->AsObj());
1365             // Although an obj as a call argument was always assumed to be a globRef
1366             // (which is itself overly conservative), that is not true of the operands
1367             // of a block assignment.
1368             dest->gtFlags &= ~GTF_GLOB_REF;
1369             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1370         }
1371         else if (varTypeIsStruct(asgType))
1372         {
1373             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1374         }
1375         else
1376         {
1377             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1378         }
1379     }
1380     else
1381     {
1382         dest->gtType = asgType;
1383     }
1384
1385     dest->gtFlags |= destFlags;
1386     destFlags = dest->gtFlags;
1387
1388     // return an assignment node, to be appended
1389     GenTree* asgNode = gtNewAssignNode(dest, src);
1390     gtBlockOpInit(asgNode, dest, src, false);
1391
1392     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1393     // of assignments.
1394     if ((destFlags & GTF_DONT_CSE) == 0)
1395     {
1396         dest->gtFlags &= ~(GTF_DONT_CSE);
1397     }
1398     return asgNode;
1399 }
1400
1401 /*****************************************************************************
1402    Given a struct value and the class handle for that struct, return
1403    the expression for the address of that struct value.
1404
1405    willDeref - whether the caller guarantees to dereference the returned pointer.
1406 */
1407
1408 GenTree* Compiler::impGetStructAddr(GenTree*             structVal,
1409                                     CORINFO_CLASS_HANDLE structHnd,
1410                                     unsigned             curLevel,
1411                                     bool                 willDeref)
1412 {
1413     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1414
1415     var_types type = structVal->TypeGet();
1416
1417     genTreeOps oper = structVal->gtOper;
1418
1419     if (oper == GT_OBJ && willDeref)
1420     {
1421         assert(structVal->gtObj.gtClass == structHnd);
1422         return (structVal->gtObj.Addr());
1423     }
1424     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
1425              structVal->OperIsSimdHWIntrinsic())
1426     {
1427         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1428
1429         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1430
1431         // The 'return value' is now the temp itself
1432
1433         type          = genActualType(lvaTable[tmpNum].TypeGet());
1434         GenTree* temp = gtNewLclvNode(tmpNum, type);
1435         temp          = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1436         return temp;
1437     }
1438     else if (oper == GT_COMMA)
1439     {
1440         assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1441
1442         GenTree* oldTreeLast  = impTreeLast;
1443         structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1444         structVal->gtType     = TYP_BYREF;
1445
1446         if (oldTreeLast != impTreeLast)
1447         {
1448             // Some temp assignment statement was placed on the statement list
1449             // for Op2, but that would be out of order with op1, so we need to
1450             // spill op1 onto the statement list after whatever was last
1451             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1452             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1453             structVal->gtOp.gtOp1 = gtNewNothingNode();
1454         }
1455
1456         return (structVal);
1457     }
1458
1459     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1460 }
1461
1462 //------------------------------------------------------------------------
1463 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1464 //                    and optionally determine the GC layout of the struct.
1465 //
1466 // Arguments:
1467 //    structHnd       - The class handle for the struct type of interest.
1468 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1469 //                      into which the gcLayout will be written.
1470 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1471 //                      which will be set to the number of GC fields in the struct.
1472 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1473 //                      type, set to the SIMD base type
1474 //
1475 // Return Value:
1476 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1477 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1478 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1479 //
1480 // Assumptions:
1481 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1482 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1483 //
1484 // Notes:
1485 //    Normalizing the type involves examining the struct type to determine if it should
1486 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1487 //    for full enregistration, e.g. TYP_SIMD16.
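//    For example, a recognized SIMD struct such as System.Numerics.Vector4 normalizes to
//    TYP_SIMD16 when FEATURE_SIMD is enabled, while a struct that may contain GC pointers
//    always remains TYP_STRUCT.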
1488
1489 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1490                                       BYTE*                gcLayout,
1491                                       unsigned*            pNumGCVars,
1492                                       var_types*           pSimdBaseType)
1493 {
1494     assert(structHnd != NO_CLASS_HANDLE);
1495
1496     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1497     var_types   structType  = TYP_STRUCT;
1498
1499     // On CoreCLR the check for GC pointers is a "may contain" check, to account for the
1500     // special ByRef-like structs such as Span<T>. The "CONTAINS_STACK_PTR" flag is the relevant
1501     // bit: when it is set, the struct contains a ByRef that could be either a GC pointer or a
1502     // native pointer.
1503     const bool mayContainGCPtrs =
1504         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1505
1506 #ifdef FEATURE_SIMD
1507     // Check to see if this is a SIMD type.
1508     if (featureSIMD && !mayContainGCPtrs)
1509     {
1510         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1511
1512         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1513         {
1514             unsigned int sizeBytes;
1515             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1516             if (simdBaseType != TYP_UNKNOWN)
1517             {
1518                 assert(sizeBytes == originalSize);
1519                 structType = getSIMDTypeForSize(sizeBytes);
1520                 if (pSimdBaseType != nullptr)
1521                 {
1522                     *pSimdBaseType = simdBaseType;
1523                 }
1524                 // Also indicate that we use floating point registers.
1525                 compFloatingPointUsed = true;
1526             }
1527         }
1528     }
1529 #endif // FEATURE_SIMD
1530
1531     // Fetch GC layout info if requested
1532     if (gcLayout != nullptr)
1533     {
1534         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1535
1536         // Verify that the quick test up above via the class attributes gave a
1537         // safe view of the type's GCness.
1538         //
1539         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1540         // does not report any gc fields.
1541
1542         assert(mayContainGCPtrs || (numGCVars == 0));
1543
1544         if (pNumGCVars != nullptr)
1545         {
1546             *pNumGCVars = numGCVars;
1547         }
1548     }
1549     else
1550     {
1551         // Can't safely ask for number of GC pointers without also
1552         // asking for layout.
1553         assert(pNumGCVars == nullptr);
1554     }
1555
1556     return structType;
1557 }
1558
1559 //****************************************************************************
1560 //  Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
1561 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1562 //
1563 GenTree* Compiler::impNormStructVal(GenTree*             structVal,
1564                                     CORINFO_CLASS_HANDLE structHnd,
1565                                     unsigned             curLevel,
1566                                     bool                 forceNormalization /*=false*/)
1567 {
1568     assert(forceNormalization || varTypeIsStruct(structVal));
1569     assert(structHnd != NO_CLASS_HANDLE);
1570     var_types structType = structVal->TypeGet();
1571     bool      makeTemp   = false;
1572     if (structType == TYP_STRUCT)
1573     {
1574         structType = impNormStructType(structHnd);
1575     }
1576     bool                 alreadyNormalized = false;
1577     GenTreeLclVarCommon* structLcl         = nullptr;
1578
1579     genTreeOps oper = structVal->OperGet();
1580     switch (oper)
1581     {
1582         // GT_RETURN and GT_MKREFANY don't capture the handle.
1583         case GT_RETURN:
1584             break;
1585         case GT_MKREFANY:
1586             alreadyNormalized = true;
1587             break;
1588
1589         case GT_CALL:
1590             structVal->gtCall.gtRetClsHnd = structHnd;
1591             makeTemp                      = true;
1592             break;
1593
1594         case GT_RET_EXPR:
1595             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1596             makeTemp                         = true;
1597             break;
1598
1599         case GT_ARGPLACE:
1600             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1601             break;
1602
1603         case GT_INDEX:
1604             // This will be transformed to an OBJ later.
1605             alreadyNormalized                    = true;
1606             structVal->gtIndex.gtStructElemClass = structHnd;
1607             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1608             break;
1609
1610         case GT_FIELD:
1611             // Wrap it in a GT_OBJ.
1612             structVal->gtType = structType;
1613             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1614             break;
1615
1616         case GT_LCL_VAR:
1617         case GT_LCL_FLD:
1618             structLcl = structVal->AsLclVarCommon();
1619             // Wrap it in a GT_OBJ.
1620             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1621             __fallthrough;
1622
1623         case GT_OBJ:
1624         case GT_BLK:
1625         case GT_DYN_BLK:
1626         case GT_ASG:
1627             // These should already have the appropriate type.
1628             assert(structVal->gtType == structType);
1629             alreadyNormalized = true;
1630             break;
1631
1632         case GT_IND:
1633             assert(structVal->gtType == structType);
1634             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1635             alreadyNormalized = true;
1636             break;
1637
1638 #ifdef FEATURE_SIMD
1639         case GT_SIMD:
1640             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1641             break;
1642 #endif // FEATURE_SIMD
1643 #ifdef FEATURE_HW_INTRINSICS
1644         case GT_HWIntrinsic:
1645             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1646             break;
1647 #endif
1648
1649         case GT_COMMA:
1650         {
1651             // The second operand could be a block node, a GT_FIELD, a GT_SIMD, or a GT_COMMA node.
1652             GenTree* blockNode = structVal->gtOp.gtOp2;
1653             assert(blockNode->gtType == structType);
1654
1655             // Is this GT_COMMA(op1, GT_COMMA())?
1656             GenTree* parent = structVal;
1657             if (blockNode->OperGet() == GT_COMMA)
1658             {
1659                 // Find the last node in the comma chain.
1660                 do
1661                 {
1662                     assert(blockNode->gtType == structType);
1663                     parent    = blockNode;
1664                     blockNode = blockNode->gtOp.gtOp2;
1665                 } while (blockNode->OperGet() == GT_COMMA);
1666             }
1667
1668             if (blockNode->OperGet() == GT_FIELD)
1669             {
1670                 // If we have a GT_FIELD then wrap it in a GT_OBJ.
1671                 blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
1672             }
1673
1674 #ifdef FEATURE_SIMD
1675             if (blockNode->OperIsSIMDorSimdHWintrinsic())
1676             {
1677                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1678                 alreadyNormalized  = true;
1679             }
1680             else
1681 #endif
1682             {
1683                 noway_assert(blockNode->OperIsBlk());
1684
1685                 // Sink the GT_COMMA below the blockNode addr.
1686                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1687                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1688                 //
1689                 // In the case of a chained GT_COMMA, we sink the last
1690                 // GT_COMMA below the blockNode addr.
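                // For example, COMMA(stmt, OBJ(addr)) becomes OBJ(COMMA(stmt, addr)), so the
                // struct-typed block node ends up as the root and the side effect is evaluated
                // as part of its address operand.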
1691                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1692                 assert(blockNodeAddr->gtType == TYP_BYREF);
1693                 GenTree* commaNode    = parent;
1694                 commaNode->gtType     = TYP_BYREF;
1695                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1696                 blockNode->gtOp.gtOp1 = commaNode;
1697                 if (parent == structVal)
1698                 {
1699                     structVal = blockNode;
1700                 }
1701                 alreadyNormalized = true;
1702             }
1703         }
1704         break;
1705
1706         default:
1707             noway_assert(!"Unexpected node in impNormStructVal()");
1708             break;
1709     }
1710     structVal->gtType  = structType;
1711     GenTree* structObj = structVal;
1712
1713     if (!alreadyNormalized || forceNormalization)
1714     {
1715         if (makeTemp)
1716         {
1717             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1718
1719             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1720
1721             // The structVal is now the temp itself
1722
1723             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1724             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1725             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1726         }
1727         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1728         {
1729             // Wrap it in a GT_OBJ
1730             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1731         }
1732     }
1733
1734     if (structLcl != nullptr)
1735     {
1736         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1737         // so we don't set GTF_EXCEPT here.
1738         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1739         {
1740             structObj->gtFlags &= ~GTF_GLOB_REF;
1741         }
1742     }
1743     else
1744     {
1745         // In general an OBJ is an indirection and could raise an exception.
1746         structObj->gtFlags |= GTF_EXCEPT;
1747     }
1748     return (structObj);
1749 }
1750
1751 /******************************************************************************/
1752 // Given a type token, generate code that will evaluate to the correct
1753 // handle representation of that token (type handle, field handle, or method handle)
1754 //
1755 // For most cases, the handle is determined at compile-time, and the code
1756 // generated is simply an embedded handle.
1757 //
1758 // Run-time lookup is required if the enclosing method is shared between instantiations
1759 // and the token refers to formal type parameters whose instantiation is not known
1760 // at compile-time.
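// (A typical example is typeof(T) in a method shared across instantiations, where the identity
// of T must be obtained from the runtime generic context.)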
1761 //
1762 GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1763                                     BOOL*                   pRuntimeLookup /* = NULL */,
1764                                     BOOL                    mustRestoreHandle /* = FALSE */,
1765                                     BOOL                    importParent /* = FALSE */)
1766 {
1767     assert(!fgGlobalMorph);
1768
1769     CORINFO_GENERICHANDLE_RESULT embedInfo;
1770     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1771
1772     if (pRuntimeLookup)
1773     {
1774         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1775     }
1776
1777     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1778     {
1779         switch (embedInfo.handleType)
1780         {
1781             case CORINFO_HANDLETYPE_CLASS:
1782                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1783                 break;
1784
1785             case CORINFO_HANDLETYPE_METHOD:
1786                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1787                 break;
1788
1789             case CORINFO_HANDLETYPE_FIELD:
1790                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1791                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1792                 break;
1793
1794             default:
1795                 break;
1796         }
1797     }
1798
1799     // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
1800     GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1801                                       embedInfo.compileTimeHandle);
1802
1803     // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
1804     if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
1805     {
1806         result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
1807     }
1808
1809     return result;
1810 }
1811
1812 GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1813                                    CORINFO_LOOKUP*         pLookup,
1814                                    unsigned                handleFlags,
1815                                    void*                   compileTimeHandle)
1816 {
1817     if (!pLookup->lookupKind.needsRuntimeLookup)
1818     {
1819         // No runtime lookup is required.
1820         // Access is either a direct reference or a memory-indirect reference through a fixed address.
1821
1822         CORINFO_GENERIC_HANDLE handle       = nullptr;
1823         void*                  pIndirection = nullptr;
1824         assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
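        // IAT_VALUE means the handle value is known directly at compile time; IAT_PVALUE means
        // it must be loaded through one level of indirection from a fixed address.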
1825
1826         if (pLookup->constLookup.accessType == IAT_VALUE)
1827         {
1828             handle = pLookup->constLookup.handle;
1829         }
1830         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1831         {
1832             pIndirection = pLookup->constLookup.addr;
1833         }
1834         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1835     }
1836     else if (compIsForInlining())
1837     {
1838         // Don't import runtime lookups when inlining
1839         // Inlining has to be aborted in such a case
1840         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1841         return nullptr;
1842     }
1843     else
1844     {
1845         // Need to use dictionary-based access which depends on the typeContext
1846         // which is only available at runtime, not at compile-time.
1847
1848         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1849     }
1850 }
1851
1852 #ifdef FEATURE_READYTORUN_COMPILER
1853 GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1854                                              unsigned              handleFlags,
1855                                              void*                 compileTimeHandle)
1856 {
1857     CORINFO_GENERIC_HANDLE handle       = nullptr;
1858     void*                  pIndirection = nullptr;
1859     assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);
1860
1861     if (pLookup->accessType == IAT_VALUE)
1862     {
1863         handle = pLookup->handle;
1864     }
1865     else if (pLookup->accessType == IAT_PVALUE)
1866     {
1867         pIndirection = pLookup->addr;
1868     }
1869     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1870 }
1871
1872 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1873     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1874     CorInfoHelpFunc         helper,
1875     var_types               type,
1876     GenTreeArgList*         args /* =NULL*/,
1877     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1878 {
1879     CORINFO_CONST_LOOKUP lookup;
1880     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1881     {
1882         return nullptr;
1883     }
1884
1885     GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
1886
1887     op1->setEntryPoint(lookup);
1888
1889     return op1;
1890 }
1891 #endif
1892
1893 GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1894 {
1895     GenTree* op1 = nullptr;
1896
1897     switch (pCallInfo->kind)
1898     {
1899         case CORINFO_CALL:
1900             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1901
1902 #ifdef FEATURE_READYTORUN_COMPILER
1903             if (opts.IsReadyToRun())
1904             {
1905                 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1906             }
1907             else
1908             {
1909                 op1->gtFptrVal.gtEntryPoint.addr       = nullptr;
1910                 op1->gtFptrVal.gtEntryPoint.accessType = IAT_VALUE;
1911             }
1912 #endif
1913             break;
1914
1915         case CORINFO_CALL_CODE_POINTER:
1916             if (compIsForInlining())
1917             {
1918                 // Don't import runtime lookups when inlining
1919                 // Inlining has to be aborted in such a case
1920                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1921                 return nullptr;
1922             }
1923
1924             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1925             break;
1926
1927         default:
1928             noway_assert(!"unknown call kind");
1929             break;
1930     }
1931
1932     return op1;
1933 }
1934
1935 //------------------------------------------------------------------------
1936 // getRuntimeContextTree: find pointer to context for runtime lookup.
1937 //
1938 // Arguments:
1939 //    kind - lookup kind.
1940 //
1941 // Return Value:
1942 //    Return GenTree pointer to generic shared context.
1943 //
1944 // Notes:
1945 //    Reports a use of the generic context (incrementing lvaGenericsContextUseCount).
1946
1947 GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1948 {
1949     GenTree* ctxTree = nullptr;
1950
1951     // Collectible types require that, for shared generic code, if we use the generic context parameter
1952     // we report it. (This is a conservative approach; in some cases, particularly when the context
1953     // parameter is 'this', we could detect that the eager reporting logic is not needed.)
1954     lvaGenericsContextUseCount++;
1955
1956     if (kind == CORINFO_LOOKUP_THISOBJ)
1957     {
1958         // this Object
1959         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1960
1961         // Vtable pointer of this object
1962         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1963         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1964         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1965     }
1966     else
1967     {
1968         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1969
1970         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1971     }
1972     return ctxTree;
1973 }
1974
1975 /*****************************************************************************/
1976 /* Import a dictionary lookup to access a handle in code shared between
1977    generic instantiations.
1978    The lookup depends on the typeContext which is only available at
1979    runtime, and not at compile-time.
1980    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1981    The cases are:
1982
1983    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1984       instantiation-specific handle, and the tokens to lookup the handle.
1985    2. pLookup->indirections != CORINFO_USEHELPER :
1986       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1987           to get the handle.
1988       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1989           If it is non-NULL, it is the handle required. Else, call a helper
1990           to lookup the handle.
1991  */
1992
1993 GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1994                                           CORINFO_LOOKUP*         pLookup,
1995                                           void*                   compileTimeHandle)
1996 {
1997
1998     // This method can only be called from the importer instance of the Compiler.
1999     // In other words, it cannot be called by the Compiler instance created for the inlinee.
2000     assert(!compIsForInlining());
2001
2002     GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
2003
2004     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
2005     // It's available only via the run-time helper function
2006     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
2007     {
2008 #ifdef FEATURE_READYTORUN_COMPILER
2009         if (opts.IsReadyToRun())
2010         {
2011             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
2012                                              gtNewArgList(ctxTree), &pLookup->lookupKind);
2013         }
2014 #endif
2015         GenTree* argNode =
2016             gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2017         GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2018
2019         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2020     }
2021
2022     // Slot pointer
2023     GenTree* slotPtrTree = ctxTree;
2024
2025     if (pRuntimeLookup->testForNull)
2026     {
2027         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2028                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2029     }
2030
2031     GenTree* indOffTree = nullptr;
2032
2033     // Apply the repeated indirections
2034     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2035     {
2036         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2037         {
2038             indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2039                                       nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
2040         }
2041
2042         if (i != 0)
2043         {
2044             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2045             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2046             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2047         }
2048
2049         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2050         {
2051             slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
2052         }
2053
2054         if (pRuntimeLookup->offsets[i] != 0)
2055         {
2056             slotPtrTree =
2057                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2058         }
2059     }
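    // At this point slotPtrTree computes the address of the dictionary slot: the context pointer
    // plus offsets[0], then for each further indirection a dereference followed by the next offset
    // (plus the extra indirect offset where indirectFirstOffset/indirectSecondOffset apply).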
2060
2061     // No null test required
2062     if (!pRuntimeLookup->testForNull)
2063     {
2064         if (pRuntimeLookup->indirections == 0)
2065         {
2066             return slotPtrTree;
2067         }
2068
2069         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2070         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2071
2072         if (!pRuntimeLookup->testForFixup)
2073         {
2074             return slotPtrTree;
2075         }
2076
2077         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2078
2079         unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2080         impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2081
2082         GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2083         // downcast the pointer to a TYP_INT on 64-bit targets
2084         slot = impImplicitIorI4Cast(slot, TYP_INT);
2085         // Use a GT_AND to check for the lowest bit and indirect if it is set
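        // In effect: if ((slot & 1) != 0) { slot = *(slot - 1); } -- a set low bit marks a
        // fixup that must be resolved by an extra indirection.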
2086         GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2087         GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2088
2089         // slot = GT_IND(slot - 1)
2090         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2091         GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2092         GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2093         indir->gtFlags |= GTF_IND_NONFAULTING;
2094         indir->gtFlags |= GTF_IND_INVARIANT;
2095
2096         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2097         GenTree* asg   = gtNewAssignNode(slot, indir);
2098         GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2099         GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2100         impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2101
2102         return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2103     }
2104
2105     assert(pRuntimeLookup->indirections != 0);
2106
2107     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2108
2109     // Extract the handle
2110     GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2111     handle->gtFlags |= GTF_IND_NONFAULTING;
2112
2113     GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2114                                        nullptr DEBUGARG("impRuntimeLookup typehandle"));
2115
2116     // Call to helper
2117     GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2118
2119     GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2120     GenTree*        helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2121
2122     // Check for null and possibly call helper
2123     GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2124
2125     GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2126                                                        gtNewNothingNode(), // do nothing if nonnull
2127                                                        helperCall);
2128
2129     GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
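    // This implements case 2b from the function header comment: the result is the dereferenced
    // handle if it is non-null, otherwise the value returned by the helper call.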
2130
2131     unsigned tmp;
2132     if (handleCopy->IsLocal())
2133     {
2134         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2135     }
2136     else
2137     {
2138         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2139     }
2140
2141     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2142     return gtNewLclvNode(tmp, TYP_I_IMPL);
2143 }
2144
2145 /******************************************************************************
2146  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2147  *  If tnum != BAD_VAR_NUM, the temp var used to replace the tree is tnum;
2148  *     else, grab a new temp.
2149  *  For structs (which can be pushed on the stack using obj, etc.),
2150  *  special handling is needed.
2151  */
2152
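// RecursiveGuard is a small RAII helper, used under DEBUG, that guards against recursive stack
// spills: Init() asserts that the flag is not already set, optionally sets it, and the destructor
// clears it again.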
2153 struct RecursiveGuard
2154 {
2155 public:
2156     RecursiveGuard()
2157     {
2158         m_pAddress = nullptr;
2159     }
2160
2161     ~RecursiveGuard()
2162     {
2163         if (m_pAddress)
2164         {
2165             *m_pAddress = false;
2166         }
2167     }
2168
2169     void Init(bool* pAddress, bool bInitialize)
2170     {
2171         assert(pAddress && *pAddress == false && "Recursive guard violation");
2172         m_pAddress = pAddress;
2173
2174         if (bInitialize)
2175         {
2176             *m_pAddress = true;
2177         }
2178     }
2179
2180 protected:
2181     bool* m_pAddress;
2182 };
2183
2184 bool Compiler::impSpillStackEntry(unsigned level,
2185                                   unsigned tnum
2186 #ifdef DEBUG
2187                                   ,
2188                                   bool        bAssertOnRecursion,
2189                                   const char* reason
2190 #endif
2191                                   )
2192 {
2193
2194 #ifdef DEBUG
2195     RecursiveGuard guard;
2196     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2197 #endif
2198
2199     GenTree* tree = verCurrentState.esStack[level].val;
2200
2201     /* Allocate a temp if we haven't been asked to use a particular one */
2202
2203     if (tiVerificationNeeded)
2204     {
2205         // Ignore bad temp requests (they will happen with bad code and will be
2206         // caught when importing the destblock)
2207         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2208         {
2209             return false;
2210         }
2211     }
2212     else
2213     {
2214         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2215         {
2216             return false;
2217         }
2218     }
2219
2220     bool isNewTemp = false;
2221
2222     if (tnum == BAD_VAR_NUM)
2223     {
2224         tnum      = lvaGrabTemp(true DEBUGARG(reason));
2225         isNewTemp = true;
2226     }
2227     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2228     {
2229         // If verification is needed and tnum's type is incompatible with
2230         // the type on the stack, we grab a new temp. This is safe since
2231         // we will throw a verification exception in the dest block.
2232
2233         var_types valTyp = tree->TypeGet();
2234         var_types dstTyp = lvaTable[tnum].TypeGet();
2235
2236         // If the two types are different, we return. This will only happen with bad code and will
2237         // be caught when importing the destblock. We still allow int/byref and float/double differences.
2238         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2239             !(
2240 #ifndef _TARGET_64BIT_
2241                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2242 #endif // !_TARGET_64BIT_
2243                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2244         {
2245             if (verNeedsVerification())
2246             {
2247                 return false;
2248             }
2249         }
2250     }
2251
2252     /* Assign the spilled entry to the temp */
2253     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2254
2255     // If temp is newly introduced and a ref type, grab what type info we can.
2256     if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2257     {
2258         assert(lvaTable[tnum].lvSingleDef == 0);
2259         lvaTable[tnum].lvSingleDef = 1;
2260         JITDUMP("Marked V%02u as a single def temp\n", tnum);
2261         CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2262         lvaSetClass(tnum, tree, stkHnd);
2263
2264         // If we're assigning a GT_RET_EXPR, note the temp over on the call,
2265         // so the inliner can use it in case it needs a return spill temp.
2266         if (tree->OperGet() == GT_RET_EXPR)
2267         {
2268             JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum);
2269             GenTree*             call = tree->gtRetExpr.gtInlineCandidate;
2270             InlineCandidateInfo* ici  = call->gtCall.gtInlineCandidateInfo;
2271             ici->preexistingSpillTemp = tnum;
2272         }
2273     }
2274
2275     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2276     var_types type                     = genActualType(lvaTable[tnum].TypeGet());
2277     GenTree*  temp                     = gtNewLclvNode(tnum, type);
2278     verCurrentState.esStack[level].val = temp;
2279
2280     return true;
2281 }
2282
2283 /*****************************************************************************
2284  *
2285  *  Ensure that the stack has only spilled values
2286  */
2287
2288 void Compiler::impSpillStackEnsure(bool spillLeaves)
2289 {
2290     assert(!spillLeaves || opts.compDbgCode);
2291
2292     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2293     {
2294         GenTree* tree = verCurrentState.esStack[level].val;
2295
2296         if (!spillLeaves && tree->OperIsLeaf())
2297         {
2298             continue;
2299         }
2300
2301         // Temps introduced by the importer itself don't need to be spilled
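        // (These are the locals numbered at or above info.compLocalsCount, as checked below.)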
2302
2303         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2304
2305         if (isTempLcl)
2306         {
2307             continue;
2308         }
2309
2310         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2311     }
2312 }
2313
2314 void Compiler::impSpillEvalStack()
2315 {
2316     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2317     {
2318         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2319     }
2320 }
2321
2322 /*****************************************************************************
2323  *
2324  *  If the stack contains any trees with side effects in them, assign those
2325  *  trees to temps and append the assignments to the statement list.
2326  *  On return the stack is guaranteed to be empty.
2327  */
2328
2329 inline void Compiler::impEvalSideEffects()
2330 {
2331     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2332     verCurrentState.esStackDepth = 0;
2333 }
2334
2335 /*****************************************************************************
2336  *
2337  *  If the stack contains any trees with side effects in them, assign those
2338  *  trees to temps and replace them on the stack with refs to their temps.
2339  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2340  */
2341
2342 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2343 {
2344     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2345
2346     /* Before we make any appends to the tree list we must spill the
2347      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2348
2349     impSpillSpecialSideEff();
2350
2351     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2352     {
2353         chkLevel = verCurrentState.esStackDepth;
2354     }
2355
2356     assert(chkLevel <= verCurrentState.esStackDepth);
2357
2358     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2359
2360     for (unsigned i = 0; i < chkLevel; i++)
2361     {
2362         GenTree* tree = verCurrentState.esStack[i].val;
2363
2364         GenTree* lclVarTree;
2365
2366         if ((tree->gtFlags & spillFlags) != 0 ||
2367             (spillGlobEffects &&                        // Only consider the following when  spillGlobEffects == TRUE
2368              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2369              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2370                                            // lvAddrTaken flag.
2371         {
2372             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2373         }
2374     }
2375 }
2376
2377 /*****************************************************************************
2378  *
2379  *  If the stack contains any trees with special side effects in them, assign
2380  *  those trees to temps and replace them on the stack with refs to their temps.
2381  */
2382
2383 inline void Compiler::impSpillSpecialSideEff()
2384 {
2385     // Only exception objects need to be carefully handled
2386
2387     if (!compCurBB->bbCatchTyp)
2388     {
2389         return;
2390     }
2391
2392     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2393     {
2394         GenTree* tree = verCurrentState.esStack[level].val;
2395         // Make sure that if we have an exception object in the subtree we spill that stack entry.
2396         if (gtHasCatchArg(tree))
2397         {
2398             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2399         }
2400     }
2401 }
2402
2403 /*****************************************************************************
2404  *
2405  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2406  */
2407
2408 void Compiler::impSpillValueClasses()
2409 {
2410     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2411     {
2412         GenTree* tree = verCurrentState.esStack[level].val;
2413
2414         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2415         {
2416             // Tree walk was aborted, which means that we found a
2417             // value class on the stack.  Need to spill that
2418             // stack entry.
2419
2420             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2421         }
2422     }
2423 }
2424
2425 /*****************************************************************************
2426  *
2427  *  Callback that checks if a tree node is TYP_STRUCT
2428  */
2429
2430 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
2431 {
2432     fgWalkResult walkResult = WALK_CONTINUE;
2433
2434     if ((*pTree)->gtType == TYP_STRUCT)
2435     {
2436         // Abort the walk and indicate that we found a value class
2437
2438         walkResult = WALK_ABORT;
2439     }
2440
2441     return walkResult;
2442 }
2443
2444 /*****************************************************************************
2445  *
2446  *  If the stack contains any trees with references to local #lclNum, assign
2447  *  those trees to temps and replace them on the stack with refs to
2448  *  their temps.
2449  */
2450
2451 void Compiler::impSpillLclRefs(ssize_t lclNum)
2452 {
2453     /* Before we make any appends to the tree list we must spill the
2454      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2455
2456     impSpillSpecialSideEff();
2457
2458     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2459     {
2460         GenTree* tree = verCurrentState.esStack[level].val;
2461
2462         /* If the tree may throw an exception, and the block has a handler,
2463            then we need to spill assignments to the local if the local is
2464            live on entry to the handler.
2465            Just spill 'em all without considering the liveness */
2466
2467         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2468
2469         /* Skip the tree if it doesn't have an affected reference,
2470            unless xcptnCaught */
2471
2472         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2473         {
2474             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2475         }
2476     }
2477 }
2478
2479 /*****************************************************************************
2480  *
2481  *  Push catch arg onto the stack.
2482  *  If there are jumps to the beginning of the handler, insert a basic block
2483  *  and spill the catch arg to a temp. Update the handler block if necessary.
2484  *
2485  *  Returns the basic block of the actual handler.
2486  */
2487
2488 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2489 {
2490     // Do not inject the basic block twice on reimport. This should be
2491     // hit only under JIT stress. See if the block is the one we injected.
2492     // Note that EH canonicalization can inject internal blocks here. We might
2493     // be able to re-use such a block (but we don't, right now).
2494     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2495         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2496     {
2497         GenTree* tree = hndBlk->bbTreeList;
2498
2499         if (tree != nullptr && tree->gtOper == GT_STMT)
2500         {
2501             tree = tree->gtStmt.gtStmtExpr;
2502             assert(tree != nullptr);
2503
2504             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2505                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2506             {
2507                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2508
2509                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2510
2511                 return hndBlk->bbNext;
2512             }
2513         }
2514
2515         // If we get here, it must have been some other kind of internal block. It's possible that
2516         // someone prepended something to our injected block, but that's unlikely.
2517     }
2518
2519     /* Push the exception address value on the stack */
2520     GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2521
2522     /* Mark the node as having a side-effect - i.e. cannot be
2523      * moved around since it is tied to a fixed location (EAX) */
2524     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2525
2526 #if defined(JIT32_GCENCODER)
2527     const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2528 #else
2529     const bool forceInsertNewBlock                                     = compStressCompile(STRESS_CATCH_ARG, 5);
2530 #endif // defined(JIT32_GCENCODER)
2531
2532     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2533     if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2534     {
2535         if (hndBlk->bbRefs == 1)
2536         {
2537             hndBlk->bbRefs++;
2538         }
2539
2540         /* Create extra basic block for the spill */
2541         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2542         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2543         newBlk->setBBWeight(hndBlk->bbWeight);
2544         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2545
2546         /* Account for the new link we are about to create */
2547         hndBlk->bbRefs++;
2548
2549         /* Spill into a temp */
2550         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2551         lvaTable[tempNum].lvType = TYP_REF;
2552         arg                      = gtNewTempAssign(tempNum, arg);
2553
2554         hndBlk->bbStkTempsIn = tempNum;
2555
2556         /* Report the debug info. impImportBlockCode won't treat
2557          * the actual handler as an exception block and thus won't do it for us. */
2558         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2559         {
2560             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2561             arg            = gtNewStmt(arg, impCurStmtOffs);
2562         }
2563
2564         fgInsertStmtAtEnd(newBlk, arg);
2565
2566         arg = gtNewLclvNode(tempNum, TYP_REF);
2567     }
2568
2569     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2570
2571     return hndBlk;
2572 }
2573
2574 /*****************************************************************************
2575  *
2576  *  Given a tree, clone it. *pClone is set to the cloned tree.
2577  *  Returns the original tree if the cloning was easy,
2578  *   else returns the temp to which the tree had to be spilled.
2579  *  If the tree has side-effects, it will be spilled to a temp.
2580  */
2581
2582 GenTree* Compiler::impCloneExpr(GenTree*             tree,
2583                                 GenTree**            pClone,
2584                                 CORINFO_CLASS_HANDLE structHnd,
2585                                 unsigned             curLevel,
2586                                 GenTree** pAfterStmt DEBUGARG(const char* reason))
2587 {
2588     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2589     {
2590         GenTree* clone = gtClone(tree, true);
2591
2592         if (clone)
2593         {
2594             *pClone = clone;
2595             return tree;
2596         }
2597     }
2598
2599     /* Store the operand in a temp and return the temp */
2600
2601     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2602
2603     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2604     // return a struct type. It also may modify the struct type to a more
2605     // specialized type (e.g. a SIMD type).  So we will get the type from
2606     // the lclVar AFTER calling impAssignTempGen().
2607
2608     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2609     var_types type = genActualType(lvaTable[temp].TypeGet());
2610
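    // Hand back two independent uses of the temp: one as the clone and one as the tree returned
    // to the caller.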
2611     *pClone = gtNewLclvNode(temp, type);
2612     return gtNewLclvNode(temp, type);
2613 }
2614
2615 /*****************************************************************************
2616  * Remember the IL offset (including stack-empty info) for the trees we will
2617  * generate now.
2618  */
2619
2620 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2621 {
2622     if (compIsForInlining())
2623     {
2624         GenTree* callStmt = impInlineInfo->iciStmt;
2625         assert(callStmt->gtOper == GT_STMT);
2626         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2627     }
2628     else
2629     {
2630         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2631         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2632         impCurStmtOffs    = offs | stkBit;
2633     }
2634 }
2635
2636 /*****************************************************************************
2637  * Returns current IL offset with stack-empty and call-instruction info incorporated
2638  */
2639 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2640 {
2641     if (compIsForInlining())
2642     {
2643         return BAD_IL_OFFSET;
2644     }
2645     else
2646     {
2647         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2648         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2649         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2650         return offs | stkBit | callInstructionBit;
2651     }
2652 }
2653
2654 //------------------------------------------------------------------------
2655 // impCanSpillNow: check whether it is possible to spill all values from the eval stack to local variables.
2656 //
2657 // Arguments:
2658 //    prevOpcode - last importer opcode
2659 //
2660 // Return Value:
2661 //    true if it is legal, false if the stack could hold a sequence that we do not want to break up.
2662 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2663 {
2664     // Don't spill after ldtoken, newarr and newobj, because they could be part of an InitializeArray sequence.
2665     // Avoid breaking up the sequence so that impInitializeArrayIntrinsic can succeed.
2666     return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
2667 }
2668
2669 /*****************************************************************************
2670  *
2671  *  Remember the instr offset for the statements
2672  *
2673  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2674  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2675  *  as some of the trees corresponding to code up to impCurOpcOffs might
2676  *  still be sitting on the stack.
2677  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2678  *  This should be called when an opcode finally/explicitly causes
2679  *  impAppendTree(tree) to be called (as opposed to being called because of
2680  *  a spill caused by the opcode)
2681  */
2682
2683 #ifdef DEBUG
2684
2685 void Compiler::impNoteLastILoffs()
2686 {
2687     if (impLastILoffsStmt == nullptr)
2688     {
2689         // We should have added a statement for the current basic block
2690         // Is this assert correct ?
2691
2692         assert(impTreeLast);
2693         assert(impTreeLast->gtOper == GT_STMT);
2694
2695         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2696     }
2697     else
2698     {
2699         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2700         impLastILoffsStmt                          = nullptr;
2701     }
2702 }
2703
2704 #endif // DEBUG
2705
2706 /*****************************************************************************
2707  * We don't create any GenTree (excluding spills) for a branch.
2708  * For debugging info, we need a placeholder so that we can note
2709  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2710  */
2711
2712 void Compiler::impNoteBranchOffs()
2713 {
2714     if (opts.compDbgCode)
2715     {
2716         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2717     }
2718 }
2719
2720 /*****************************************************************************
2721  * Locate the next stmt boundary for which we need to record info.
2722  * We will have to spill the stack at such boundaries if it is not
2723  * already empty.
2724  * Returns the next stmt boundary (after the start of the block)
2725  */
2726
2727 unsigned Compiler::impInitBlockLineInfo()
2728 {
2729     /* Assume the block does not correspond with any IL offset. This prevents
2730        us from reporting extra offsets. Extra mappings can cause confusing
2731        stepping, especially if the extra mapping is a jump-target, and the
2732        debugger does not ignore extra mappings, but instead rewinds to the
2733        nearest known offset */
2734
2735     impCurStmtOffsSet(BAD_IL_OFFSET);
2736
2737     if (compIsForInlining())
2738     {
2739         return ~0;
2740     }
2741
2742     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2743
2744     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2745     {
2746         impCurStmtOffsSet(blockOffs);
2747     }
2748
2749     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2750     {
2751         impCurStmtOffsSet(blockOffs);
2752     }
2753
2754     /* Always report IL offset 0 or some tests get confused.
2755        Probably a good idea anyway */
2756
2757     if (blockOffs == 0)
2758     {
2759         impCurStmtOffsSet(blockOffs);
2760     }
2761
2762     if (!info.compStmtOffsetsCount)
2763     {
2764         return ~0;
2765     }
2766
2767     /* Find the lowest explicit stmt boundary within the block */
2768
2769     /* Start looking at an entry that is based on our instr offset */
2770
2771     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2772
2773     if (index >= info.compStmtOffsetsCount)
2774     {
2775         index = info.compStmtOffsetsCount - 1;
2776     }
2777
2778     /* If we've guessed too far, back up */
2779
2780     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2781     {
2782         index--;
2783     }
2784
2785     /* If we guessed short, advance ahead */
2786
2787     while (info.compStmtOffsets[index] < blockOffs)
2788     {
2789         index++;
2790
2791         if (index == info.compStmtOffsetsCount)
2792         {
2793             return info.compStmtOffsetsCount;
2794         }
2795     }
2796
2797     assert(index < info.compStmtOffsetsCount);
2798
2799     if (info.compStmtOffsets[index] == blockOffs)
2800     {
2801         /* There is an explicit boundary for the start of this basic block,
2802            so we will start with bbCodeOffs. Otherwise we would wait until we
2803            get to the next explicit boundary */
2804
2805         impCurStmtOffsSet(blockOffs);
2806
2807         index++;
2808     }
2809
2810     return index;
2811 }
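
/* Worked example (illustrative numbers only, not tied to any particular method):
   with info.compILCodeSize == 100, info.compStmtOffsetsCount == 10 and blockOffs == 37,
   the initial guess above is index = (10 * 37) / 100 == 3.  The two adjustment loops
   then slide the guess so that info.compStmtOffsets[index] is the first recorded
   boundary >= 37; if that boundary is exactly 37 it is reported as the current stmt
   offset and the following index is returned, otherwise this index is returned as the
   next boundary to watch for. */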
2812
2813 /*****************************************************************************/
2814
2815 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2816 {
2817     switch (opcode)
2818     {
2819         case CEE_CALL:
2820         case CEE_CALLI:
2821         case CEE_CALLVIRT:
2822             return true;
2823
2824         default:
2825             return false;
2826     }
2827 }
2828
2829 /*****************************************************************************/
2830
2831 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2832 {
2833     switch (opcode)
2834     {
2835         case CEE_CALL:
2836         case CEE_CALLI:
2837         case CEE_CALLVIRT:
2838         case CEE_JMP:
2839         case CEE_NEWOBJ:
2840         case CEE_NEWARR:
2841             return true;
2842
2843         default:
2844             return false;
2845     }
2846 }
2847
2848 /*****************************************************************************/
2849
2850 // One might think it is worth caching these values, but results indicate
2851 // that it isn't.
2852 // In addition, caching them causes SuperPMI to be unable to completely
2853 // encapsulate an individual method context.
2854 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2855 {
2856     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2857     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2858     return refAnyClass;
2859 }
2860
2861 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2862 {
2863     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2864     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2865     return typeHandleClass;
2866 }
2867
2868 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2869 {
2870     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2871     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2872     return argIteratorClass;
2873 }
2874
2875 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2876 {
2877     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2878     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2879     return stringClass;
2880 }
2881
2882 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2883 {
2884     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2885     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2886     return objectClass;
2887 }
2888
2889 /*****************************************************************************
2890  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2891  *  set its type to TYP_BYREF when we create it. We only know whether it can
2892  *  be changed to TYP_I_IMPL at the point where we use it.
2893  */
2894
2895 /* static */
2896 void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
2897 {
2898     if (tree1->IsVarAddr())
2899     {
2900         tree1->gtType = TYP_I_IMPL;
2901     }
2902
2903     if (tree2 && tree2->IsVarAddr())
2904     {
2905         tree2->gtType = TYP_I_IMPL;
2906     }
2907 }
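
// Illustrative scenario (hypothetical IL, for intuition only):
//     ldloca.s 0
//     ldc.i4   4
//     add
// The address of the local is created as TYP_BYREF, but once it takes part in plain
// integer arithmetic the importer can run impBashVarAddrsToI over the two operands,
// retyping the GT_ADDR node to TYP_I_IMPL before the add node is built.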
2908
2909 /*****************************************************************************
2910  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2911  *  to make that an explicit cast in our trees, so any implicit casts that
2912  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2913  *  turned into explicit casts here.
2914  *  We also allow an implicit conversion of ldnull into a TYP_I_IMPL(0)
2915  */
2916
2917 GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
2918 {
2919     var_types currType   = genActualType(tree->gtType);
2920     var_types wantedType = genActualType(dstTyp);
2921
2922     if (wantedType != currType)
2923     {
2924         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2925         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2926         {
2927             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2928             {
2929                 tree->gtType = TYP_I_IMPL;
2930             }
2931         }
2932 #ifdef _TARGET_64BIT_
2933         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2934         {
2935             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2936             tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
2937         }
2938         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2939         {
2940             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2941             tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
2942         }
2943 #endif // _TARGET_64BIT_
2944     }
2945
2946     return tree;
2947 }
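
// Illustrative use (hypothetical; 'lclTyp' is just a stand-in for the destination type):
//     op1 = impImplicitIorI4Cast(op1, lclTyp);
// On a 64-bit target, if op1 is TYP_INT and lclTyp is TYP_I_IMPL, op1 comes back wrapped
// in GT_CAST(TYP_I_IMPL); if the actual types already match, op1 is returned unchanged.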
2948
2949 /*****************************************************************************
2950  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2951  *  but we want to make that an explicit cast in our trees, so any implicit casts
2952  *  that exist in the IL are turned into explicit casts here.
2953  */
2954
2955 GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
2956 {
2957     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2958     {
2959         tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
2960     }
2961
2962     return tree;
2963 }
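
// Illustrative effect: given a TYP_FLOAT tree and dstTyp == TYP_DOUBLE, the tree comes
// back wrapped in GT_CAST(TYP_DOUBLE); a tree whose type already matches dstTyp (or a
// non-floating-point tree) is returned unchanged.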
2964
2965 //------------------------------------------------------------------------
2966 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2967 //    with a GT_COPYBLK node.
2968 //
2969 // Arguments:
2970 //    sig - The InitializeArray signature.
2971 //
2972 // Return Value:
2973 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2974 //    nullptr otherwise.
2975 //
2976 // Notes:
2977 //    The function recognizes the following IL pattern:
2978 //      ldc <length> or a list of ldc <lower bound>/<length>
2979 //      newarr or newobj
2980 //      dup
2981 //      ldtoken <field handle>
2982 //      call InitializeArray
2983 //    The lower bounds need not be constant except when the array rank is 1.
2984 //    The function recognizes all kinds of arrays, thus enabling a small runtime
2985 //    such as CoreRT to skip providing an implementation for InitializeArray.
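//    For example (illustrative C# source, not taken from any particular test):
//      static readonly int[] Data = { 10, 20, 30, 40 };
//    is typically compiled into the pattern above:
//      ldc.i4.4
//      newarr    int32
//      dup
//      ldtoken   <PrivateImplementationDetails> field holding the initializer blob
//      call      System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray
//    which this function then replaces with a block copy from the field's data blob
//    into the freshly allocated array.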
2986
2987 GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2988 {
2989     assert(sig->numArgs == 2);
2990
2991     GenTree* fieldTokenNode = impStackTop(0).val;
2992     GenTree* arrayLocalNode = impStackTop(1).val;
2993
2994     //
2995     // Verify that the field token is known and valid.  Note that it's also
2996     // possible for the token to come from reflection, in which case we cannot do
2997     // the optimization and must therefore revert to calling the helper.  You can
2998     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2999     //
3000
3001     // Check to see if the ldtoken helper call is what we see here.
3002     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
3003         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
3004     {
3005         return nullptr;
3006     }
3007
3008     // Strip helper call away
3009     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
3010
3011     if (fieldTokenNode->gtOper == GT_IND)
3012     {
3013         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
3014     }
3015
3016     // Check for constant
3017     if (fieldTokenNode->gtOper != GT_CNS_INT)
3018     {
3019         return nullptr;
3020     }
3021
3022     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
3023     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
3024     {
3025         return nullptr;
3026     }
3027
3028     //
3029     // We need to get the number of elements in the array and the size of each element.
3030     // We verify that the newarr statement is exactly what we expect it to be.
3031     // If it's not, we just return nullptr and don't optimize this call.
3032     //
3033
3034     //
3035     // It is possible that we don't have any statements in the block yet.
3036     //
3037     if (impTreeLast->gtOper != GT_STMT)
3038     {
3039         assert(impTreeLast->gtOper == GT_BEG_STMTS);
3040         return nullptr;
3041     }
3042
3043     //
3044     // We start by looking at the last statement, making sure it's an assignment, and
3045     // that the target of the assignment is the array passed to InitializeArray.
3046     //
3047     GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
3048     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
3049         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
3050         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
3051     {
3052         return nullptr;
3053     }
3054
3055     //
3056     // Make sure that the object being assigned is a helper call.
3057     //
3058
3059     GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
3060     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
3061     {
3062         return nullptr;
3063     }
3064
3065     //
3066     // Verify that it is one of the new array helpers.
3067     //
3068
3069     bool isMDArray = false;
3070
3071     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
3072         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
3073         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3074         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3075 #ifdef FEATURE_READYTORUN_COMPILER
3076         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3077         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3078 #endif
3079             )
3080     {
3081         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3082         {
3083             return nullptr;
3084         }
3085
3086         isMDArray = true;
3087     }
3088
3089     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3090
3091     //
3092     // Make sure we found a compile time handle to the array
3093     //
3094
3095     if (!arrayClsHnd)
3096     {
3097         return nullptr;
3098     }
3099
3100     unsigned rank = 0;
3101     S_UINT32 numElements;
3102
3103     if (isMDArray)
3104     {
3105         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3106
3107         if (rank == 0)
3108         {
3109             return nullptr;
3110         }
3111
3112         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3113         assert(tokenArg != nullptr);
3114         GenTreeArgList* numArgsArg = tokenArg->Rest();
3115         assert(numArgsArg != nullptr);
3116         GenTreeArgList* argsArg = numArgsArg->Rest();
3117         assert(argsArg != nullptr);
3118
3119         //
3120         // The number of arguments should be a constant between 1 and 64. The rank can't be 0,
3121         // so at least one length must be present, and the rank can't exceed 32, so there can
3122         // be at most 64 arguments: 32 lengths and 32 lower bounds.
3123         //
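        // For example (illustrative): a rank-2 creation such as "new int[2,3]" passes
        // numArgs == 2 (lengths only), whereas the IL constructor that also takes lower
        // bounds would pass numArgs == 4 (two lower bound/length pairs).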
3124
3125         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3126             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3127         {
3128             return nullptr;
3129         }
3130
3131         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3132         bool     lowerBoundsSpecified;
3133
3134         if (numArgs == rank * 2)
3135         {
3136             lowerBoundsSpecified = true;
3137         }
3138         else if (numArgs == rank)
3139         {
3140             lowerBoundsSpecified = false;
3141
3142             //
3143             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3144             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3145             // we get a SDArray as well, see the for loop below.
3146             //
3147
3148             if (rank == 1)
3149             {
3150                 isMDArray = false;
3151             }
3152         }
3153         else
3154         {
3155             return nullptr;
3156         }
3157
3158         //
3159         // The rank is known to be at least 1 so we can start with numElements being 1
3160         // to avoid the need to special case the first dimension.
3161         //
3162
3163         numElements = S_UINT32(1);
3164
3165         struct Match
3166         {
3167             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3168             {
3169                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3170                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3171             }
3172
3173             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3174             {
3175                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3176                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3177                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3178             }
3179
3180             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3181             {
3182                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3183                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3184             }
3185
3186             static bool IsComma(GenTree* tree)
3187             {
3188                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3189             }
3190         };
3191
3192         unsigned argIndex = 0;
3193         GenTree* comma;
3194
3195         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3196         {
3197             if (lowerBoundsSpecified)
3198             {
3199                 //
3200                 // In general lower bounds can be ignored because they're not needed to
3201                 // calculate the total number of elements. But for single dimensional arrays
3202                 // we need to know if the lower bound is 0 because in this case the runtime
3203                 // creates a SDArray and this affects the way the array data offset is calculated.
3204                 //
3205
3206                 if (rank == 1)
3207                 {
3208                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3209                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3210                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3211
3212                     if (lowerBoundNode->IsIntegralConst(0))
3213                     {
3214                         isMDArray = false;
3215                     }
3216                 }
3217
3218                 comma = comma->gtGetOp2();
3219                 argIndex++;
3220             }
3221
3222             GenTree* lengthNodeAssign = comma->gtGetOp1();
3223             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3224             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3225
3226             if (!lengthNode->IsCnsIntOrI())
3227             {
3228                 return nullptr;
3229             }
3230
3231             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3232             argIndex++;
3233         }
3234
3235         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3236
3237         if (argIndex != numArgs)
3238         {
3239             return nullptr;
3240         }
3241     }
3242     else
3243     {
3244         //
3245         // Make sure there are exactly two arguments:  the array class and
3246         // the number of elements.
3247         //
3248
3249         GenTree* arrayLengthNode;
3250
3251         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3252 #ifdef FEATURE_READYTORUN_COMPILER
3253         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3254         {
3255             // Array length is 1st argument for readytorun helper
3256             arrayLengthNode = args->Current();
3257         }
3258         else
3259 #endif
3260         {
3261             // Array length is 2nd argument for regular helper
3262             arrayLengthNode = args->Rest()->Current();
3263         }
3264
3265         //
3266         // Make sure that the number of elements looks valid.
3267         //
3268         if (arrayLengthNode->gtOper != GT_CNS_INT)
3269         {
3270             return nullptr;
3271         }
3272
3273         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3274
3275         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3276         {
3277             return nullptr;
3278         }
3279     }
3280
3281     CORINFO_CLASS_HANDLE elemClsHnd;
3282     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3283
3284     //
3285     // Note that genTypeSize will return zero for non-primitive types, which is exactly
3286     // what we want (size will then be 0, and we will catch this in the conditional below).
3287     // Note that we don't expect this to fail for valid binaries, so we assert in the
3288     // non-verification case (the verification case should not assert but rather correctly
3289     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3290     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3291     // why.
3292     //
3293
3294     S_UINT32 elemSize(genTypeSize(elementType));
3295     S_UINT32 size = elemSize * S_UINT32(numElements);
3296
3297     if (size.IsOverflow())
3298     {
3299         return nullptr;
3300     }
3301
3302     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3303     {
3304         assert(verNeedsVerification());
3305         return nullptr;
3306     }
3307
3308     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3309     if (!initData)
3310     {
3311         return nullptr;
3312     }
3313
3314     //
3315     // At this point we are ready to commit to implementing the InitializeArray
3316     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3317     // return the struct assignment node.
3318     //
3319
3320     impPopStack();
3321     impPopStack();
3322
3323     const unsigned blkSize = size.Value();
3324     unsigned       dataOffset;
3325
3326     if (isMDArray)
3327     {
3328         dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3329     }
3330     else
3331     {
3332         dataOffset = eeGetArrayDataOffset(elementType);
3333     }
3334
3335     GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3336     GenTree* blk = gtNewBlockVal(dst, blkSize);
3337     GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
3338
3339     return gtNewBlkOpNode(blk,     // dst
3340                           src,     // src
3341                           blkSize, // size
3342                           false,   // volatil
3343                           true);   // copyBlock
3344 }
3345
3346 //------------------------------------------------------------------------
3347 // impIntrinsic: possibly expand intrinsic call into alternate IR sequence
3348 //
3349 // Arguments:
3350 //    newobjThis - for constructor calls, the tree for the newly allocated object
3351 //    clsHnd - handle for the intrinsic method's class
3352 //    method - handle for the intrinsic method
3353 //    sig    - signature of the intrinsic method
3354 //    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
3355 //    memberRef - the token for the intrinsic method
3356 //    readonlyCall - true if call has a readonly prefix
3357 //    tailCall - true if call is in tail position
3358 //    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
3359 //       if call is not constrained
3360 //    constraintCallThisTransform -- this transform to apply for a constrained call
3361 //    pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
3362 //       for "traditional" jit intrinsics
3363 //    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
3364 //       that is amenable to special downstream optimization opportunities
3365 //
3366 // Returns:
3367 //    IR tree to use in place of the call, or nullptr if the jit should treat
3368 //    the intrinsic call like a normal call.
3369 //
3370 //    pIntrinsicID set to non-illegal value if the call is recognized as a
3371 //    traditional jit intrinsic, even if the intrinsic is not expanded.
3372 //
3373 //    isSpecial set true if the expansion is subject to special
3374 //    optimizations later in the jit processing
3375 //
3376 // Notes:
3377 //    On success the IR tree may be a call to a different method or an inline
3378 //    sequence. If it is a call, then the intrinsic processing here is responsible
3379 //    for handling all the special cases, as upon return to impImportCall
3380 //    expanded intrinsics bypass most of the normal call processing.
3381 //
3382 //    Intrinsics are generally not recognized in minopts and debug codegen.
3383 //
3384 //    However, certain traditional intrinsics are identified as "must expand"
3385 //    if there is no fallback implementation to invoke; these must be handled
3386 //    in all codegen modes.
3387 //
3388 //    New style intrinsics (where the fallback implementation is in IL) are
3389 //    identified as "must expand" if they are invoked from within their
3390 //    own method bodies.
3391 //
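//    Sketch of the expected call shape at a call-import site (simplified and illustrative;
//    the variable names below are placeholders, not from the surrounding code):
//
//      bool     isSpecial = false;
//      GenTree* expansion = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, memberRef,
//                                        readonlyCall, tailCall, pConstrainedToken,
//                                        thisTransform, &intrinsicID, &isSpecial);
//      if (expansion != nullptr)
//      {
//          // use the expansion in place of a normal GT_CALL for the method
//      }
//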
3392
3393 GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
3394                                 CORINFO_CLASS_HANDLE    clsHnd,
3395                                 CORINFO_METHOD_HANDLE   method,
3396                                 CORINFO_SIG_INFO*       sig,
3397                                 unsigned                methodFlags,
3398                                 int                     memberRef,
3399                                 bool                    readonlyCall,
3400                                 bool                    tailCall,
3401                                 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
3402                                 CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
3403                                 CorInfoIntrinsics*      pIntrinsicID,
3404                                 bool*                   isSpecialIntrinsic)
3405 {
3406     assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
3407
3408     bool              mustExpand  = false;
3409     bool              isSpecial   = false;
3410     CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
3411     NamedIntrinsic    ni          = NI_Illegal;
3412
3413     if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
3414     {
3415         intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3416     }
3417
3418     if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
3419     {
3420         // The recursive calls to Jit intrinsics are must-expand by convention.
3421         mustExpand = mustExpand || gtIsRecursiveCall(method);
3422
3423         if (intrinsicID == CORINFO_INTRINSIC_Illegal)
3424         {
3425             ni = lookupNamedIntrinsic(method);
3426
3427 #ifdef FEATURE_HW_INTRINSICS
3428             switch (ni)
3429             {
3430 #if defined(_TARGET_ARM64_)
3431                 case NI_Base_Vector64_AsByte:
3432                 case NI_Base_Vector64_AsInt16:
3433                 case NI_Base_Vector64_AsInt32:
3434                 case NI_Base_Vector64_AsSByte:
3435                 case NI_Base_Vector64_AsSingle:
3436                 case NI_Base_Vector64_AsUInt16:
3437                 case NI_Base_Vector64_AsUInt32:
3438 #endif // _TARGET_ARM64_
3439                 case NI_Base_Vector128_As:
3440                 case NI_Base_Vector128_AsByte:
3441                 case NI_Base_Vector128_AsDouble:
3442                 case NI_Base_Vector128_AsInt16:
3443                 case NI_Base_Vector128_AsInt32:
3444                 case NI_Base_Vector128_AsInt64:
3445                 case NI_Base_Vector128_AsSByte:
3446                 case NI_Base_Vector128_AsSingle:
3447                 case NI_Base_Vector128_AsUInt16:
3448                 case NI_Base_Vector128_AsUInt32:
3449                 case NI_Base_Vector128_AsUInt64:
3450 #if defined(_TARGET_XARCH_)
3451                 case NI_Base_Vector128_Zero:
3452                 case NI_Base_Vector256_As:
3453                 case NI_Base_Vector256_AsByte:
3454                 case NI_Base_Vector256_AsDouble:
3455                 case NI_Base_Vector256_AsInt16:
3456                 case NI_Base_Vector256_AsInt32:
3457                 case NI_Base_Vector256_AsInt64:
3458                 case NI_Base_Vector256_AsSByte:
3459                 case NI_Base_Vector256_AsSingle:
3460                 case NI_Base_Vector256_AsUInt16:
3461                 case NI_Base_Vector256_AsUInt32:
3462                 case NI_Base_Vector256_AsUInt64:
3463                 case NI_Base_Vector256_Zero:
3464 #endif // _TARGET_XARCH_
3465                 {
3466                     return impBaseIntrinsic(ni, method, sig);
3467                 }
3468
3469                 default:
3470                 {
3471                     break;
3472                 }
3473             }
3474
3475             if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END))
3476             {
3477                 GenTree* hwintrinsic = impHWIntrinsic(ni, method, sig, mustExpand);
3478
3479                 if (mustExpand && (hwintrinsic == nullptr))
3480                 {
3481                     return impUnsupportedHWIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand);
3482                 }
3483
3484                 return hwintrinsic;
3485             }
3486 #endif // FEATURE_HW_INTRINSICS
3487         }
3488     }
3489
3490     *pIntrinsicID = intrinsicID;
3491
3492 #ifndef _TARGET_ARM_
3493     genTreeOps interlockedOperator;
3494 #endif
3495
3496     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3497     {
3498         // must be done regardless of DbgCode and MinOpts
3499         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3500     }
3501 #ifdef _TARGET_64BIT_
3502     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3503     {
3504         // must be done regardless of DbgCode and MinOpts
3505         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3506     }
3507 #else
3508     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3509 #endif
3510
3511     GenTree* retNode = nullptr;
3512
3513     // Under debug and minopts, only expand what is required.
3514     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3515     {
3516         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3517         return retNode;
3518     }
3519
3520     var_types callType = JITtype2varType(sig->retType);
3521
3522     /* First do the intrinsics which are always smaller than a call */
3523
3524     switch (intrinsicID)
3525     {
3526         GenTree* op1;
3527         GenTree* op2;
3528
3529         case CORINFO_INTRINSIC_Sin:
3530         case CORINFO_INTRINSIC_Cbrt:
3531         case CORINFO_INTRINSIC_Sqrt:
3532         case CORINFO_INTRINSIC_Abs:
3533         case CORINFO_INTRINSIC_Cos:
3534         case CORINFO_INTRINSIC_Round:
3535         case CORINFO_INTRINSIC_Cosh:
3536         case CORINFO_INTRINSIC_Sinh:
3537         case CORINFO_INTRINSIC_Tan:
3538         case CORINFO_INTRINSIC_Tanh:
3539         case CORINFO_INTRINSIC_Asin:
3540         case CORINFO_INTRINSIC_Asinh:
3541         case CORINFO_INTRINSIC_Acos:
3542         case CORINFO_INTRINSIC_Acosh:
3543         case CORINFO_INTRINSIC_Atan:
3544         case CORINFO_INTRINSIC_Atan2:
3545         case CORINFO_INTRINSIC_Atanh:
3546         case CORINFO_INTRINSIC_Log10:
3547         case CORINFO_INTRINSIC_Pow:
3548         case CORINFO_INTRINSIC_Exp:
3549         case CORINFO_INTRINSIC_Ceiling:
3550         case CORINFO_INTRINSIC_Floor:
3551             retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
3552             break;
3553
3554 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3555         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3556
3557         // Note that CORINFO_INTRINSIC_InterlockedAdd32/64 are not actually used.
3558         // Anyway, we can import them as XADD and leave it to lowering/codegen to perform
3559         // whatever optimizations may arise from the fact that the result value is not used.
3560         case CORINFO_INTRINSIC_InterlockedAdd32:
3561         case CORINFO_INTRINSIC_InterlockedXAdd32:
3562             interlockedOperator = GT_XADD;
3563             goto InterlockedBinOpCommon;
3564         case CORINFO_INTRINSIC_InterlockedXchg32:
3565             interlockedOperator = GT_XCHG;
3566             goto InterlockedBinOpCommon;
3567
3568 #ifdef _TARGET_64BIT_
3569         case CORINFO_INTRINSIC_InterlockedAdd64:
3570         case CORINFO_INTRINSIC_InterlockedXAdd64:
3571             interlockedOperator = GT_XADD;
3572             goto InterlockedBinOpCommon;
3573         case CORINFO_INTRINSIC_InterlockedXchg64:
3574             interlockedOperator = GT_XCHG;
3575             goto InterlockedBinOpCommon;
3576 #endif // _TARGET_64BIT_
3577
3578         InterlockedBinOpCommon:
3579             assert(callType != TYP_STRUCT);
3580             assert(sig->numArgs == 2);
3581
3582             op2 = impPopStack().val;
3583             op1 = impPopStack().val;
3584
3585             // This creates:
3586             //   val
3587             // XAdd
3588             //   addr
3589             //     field (for example)
3590             //
3591             // In the case where the first argument is the address of a local, we might
3592             // want to make this *not* make the var address-taken -- but atomic instructions
3593             // on a local are probably pretty useless anyway, so we probably don't care.
3594
3595             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3596             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3597             retNode = op1;
3598             break;
3599 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3600
3601         case CORINFO_INTRINSIC_MemoryBarrier:
3602
3603             assert(sig->numArgs == 0);
3604
3605             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3606             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3607             retNode = op1;
3608             break;
3609
3610 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3611         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3612         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3613 #ifdef _TARGET_64BIT_
3614         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3615 #endif
3616         {
3617             assert(callType != TYP_STRUCT);
3618             assert(sig->numArgs == 3);
3619             GenTree* op3;
3620
3621             op3 = impPopStack().val; // comparand
3622             op2 = impPopStack().val; // value
3623             op1 = impPopStack().val; // location
3624
3625             GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3626
3627             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3628             retNode = node;
3629             break;
3630         }
3631 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3632
3633         case CORINFO_INTRINSIC_StringLength:
3634             op1 = impPopStack().val;
3635             if (!opts.MinOpts() && !opts.compDbgCode)
3636             {
3637                 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen);
3638                 op1                   = arrLen;
3639             }
3640             else
3641             {
3642                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3643                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3644                                     gtNewIconNode(OFFSETOF__CORINFO_String__stringLen, TYP_I_IMPL));
3645                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3646             }
3647
3648             // Getting the length of a null string should throw
3649             op1->gtFlags |= GTF_EXCEPT;
3650
3651             retNode = op1;
3652             break;
3653
3654         case CORINFO_INTRINSIC_StringGetChar:
3655             op2 = impPopStack().val;
3656             op1 = impPopStack().val;
3657             op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
3658             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3659             retNode = op1;
3660             break;
3661
3662         case CORINFO_INTRINSIC_InitializeArray:
3663             retNode = impInitializeArrayIntrinsic(sig);
3664             break;
3665
3666         case CORINFO_INTRINSIC_Array_Address:
3667         case CORINFO_INTRINSIC_Array_Get:
3668         case CORINFO_INTRINSIC_Array_Set:
3669             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3670             break;
3671
3672         case CORINFO_INTRINSIC_GetTypeFromHandle:
3673             op1 = impStackTop(0).val;
3674             CorInfoHelpFunc typeHandleHelper;
3675             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3676                 gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper))
3677             {
3678                 op1 = impPopStack().val;
3679                 // Replace helper with a more specialized helper that returns RuntimeType
3680                 if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE)
3681                 {
3682                     typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
3683                 }
3684                 else
3685                 {
3686                     assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL);
3687                     typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL;
3688                 }
3689                 assert(op1->gtCall.gtCallArgs->gtOp.gtOp2 == nullptr);
3690                 op1         = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->gtCall.gtCallArgs);
3691                 op1->gtType = TYP_REF;
3692                 retNode     = op1;
3693             }
3694             // Call the regular function.
3695             break;
3696
3697         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3698             op1 = impStackTop(0).val;
3699             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3700                 gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall()))
3701             {
3702                 // Old tree
3703                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3704                 //
3705                 // New tree
3706                 // TreeToGetNativeTypeHandle
3707
3708                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3709                 // to that helper.
3710
3711                 op1 = impPopStack().val;
3712
3713                 // Get native TypeHandle argument to old helper
3714                 op1 = op1->gtCall.gtCallArgs;
3715                 assert(op1->OperIsList());
3716                 assert(op1->gtOp.gtOp2 == nullptr);
3717                 op1     = op1->gtOp.gtOp1;
3718                 retNode = op1;
3719             }
3720             // Call the regular function.
3721             break;
3722
3723         case CORINFO_INTRINSIC_Object_GetType:
3724         {
3725             JITDUMP("\n impIntrinsic: call to Object.GetType\n");
3726             op1 = impStackTop(0).val;
3727
3728             // If we're calling GetType on a boxed value, just get the type directly.
3729             if (op1->IsBoxedValue())
3730             {
3731                 JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
3732
3733                 // Try and clean up the box. Obtain the handle we
3734                 // were going to pass to the newobj.
3735                 GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
3736
3737                 if (boxTypeHandle != nullptr)
3738                 {
3739                     // Note we don't need to play the TYP_STRUCT games here like
3740                     // do for LDTOKEN since the return value of this operator is Type,
3741                     // not RuntimeTypeHandle.
3742                     impPopStack();
3743                     GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
3744                     GenTree*        runtimeType =
3745                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3746                     retNode = runtimeType;
3747                 }
3748             }
3749
3750             // If we have a constrained callvirt with a "box this" transform
3751             // we know we have a value class and hence an exact type.
3752             //
3753             // If so, instead of boxing and then extracting the type, just
3754             // construct the type directly.
3755             if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
3756                 (constraintCallThisTransform == CORINFO_BOX_THIS))
3757             {
3758                 // Ensure this is one of the simple box cases (in particular, rule out nullables).
3759                 const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
3760                 const bool            isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
3761
3762                 if (isSafeToOptimize)
3763                 {
3764                     JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
3765                     impPopStack();
3766                     GenTree* typeHandleOp =
3767                         impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
3768                     if (typeHandleOp == nullptr)
3769                     {
3770                         assert(compDonotInline());
3771                         return nullptr;
3772                     }
3773                     GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
3774                     GenTree*        runtimeType =
3775                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3776                     retNode = runtimeType;
3777                 }
3778             }
3779
3780 #ifdef DEBUG
3781             if (retNode != nullptr)
3782             {
3783                 JITDUMP("Optimized result for call to GetType is\n");
3784                 if (verbose)
3785                 {
3786                     gtDispTree(retNode);
3787                 }
3788             }
3789 #endif
3790
3791             // Else expand as an intrinsic, unless the call is constrained,
3792                 // in which case we defer expansion to allow impImportCall to do the
3793             // special constraint processing.
3794             if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
3795             {
3796                 JITDUMP("Expanding as special intrinsic\n");
3797                 impPopStack();
3798                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3799
3800                 // Set the CALL flag to indicate that the operator is implemented by a call.
3801                 // Set also the EXCEPTION flag because the native implementation of
3802                 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3803                 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3804                 retNode = op1;
3805                 // Might be further optimizable, so arrange to leave a mark behind
3806                 isSpecial = true;
3807             }
3808
3809             if (retNode == nullptr)
3810             {
3811                 JITDUMP("Leaving as normal call\n");
3812                 // Might be further optimizable, so arrange to leave a mark behind
3813                 isSpecial = true;
3814             }
3815
3816             break;
3817         }
3818
3819         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3820         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3821         // substitution.  The parameter byref will be assigned into the newly allocated object.
3822         case CORINFO_INTRINSIC_ByReference_Ctor:
3823         {
3824             // Remove call to constructor and directly assign the byref passed
3825             // to the call to the first slot of the ByReference struct.
3826             op1                                    = impPopStack().val;
3827             GenTree*             thisptr           = newobjThis;
3828             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3829             GenTree*             field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0);
3830             GenTree*             assign            = gtNewAssignNode(field, op1);
3831             GenTree*             byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3832             assert(byReferenceStruct != nullptr);
3833             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3834             retNode = assign;
3835             break;
3836         }
3837         // Implement ptr value getter for ByReference struct.
3838         case CORINFO_INTRINSIC_ByReference_Value:
3839         {
3840             op1                         = impPopStack().val;
3841             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3842             GenTree*             field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0);
3843             retNode                     = field;
3844             break;
3845         }
3846         case CORINFO_INTRINSIC_Span_GetItem:
3847         case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3848         {
3849             // We have the index and a (byref) pointer to the Span<T> 's' on the stack. Expand to:
3850             //
3851             // For Span<T>
3852             //   Comma
3853             //     BoundsCheck(index, s->_length)
3854             //     s->_pointer + index * sizeof(T)
3855             //
3856             // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
3857             //
3858             // Signature should show one class type parameter, which
3859             // we need to examine.
3860             assert(sig->sigInst.classInstCount == 1);
3861             CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3862             const unsigned       elemSize    = info.compCompHnd->getClassSize(spanElemHnd);
3863             assert(elemSize > 0);
3864
3865             const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3866
3867             JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3868                     info.compCompHnd->getClassName(spanElemHnd), elemSize);
3869
3870             GenTree* index          = impPopStack().val;
3871             GenTree* ptrToSpan      = impPopStack().val;
3872             GenTree* indexClone     = nullptr;
3873             GenTree* ptrToSpanClone = nullptr;
3874             assert(varTypeIsIntegral(index));
3875             assert(ptrToSpan->TypeGet() == TYP_BYREF);
3876
3877 #if defined(DEBUG)
3878             if (verbose)
3879             {
3880                 printf("with ptr-to-span\n");
3881                 gtDispTree(ptrToSpan);
3882                 printf("and index\n");
3883                 gtDispTree(index);
3884             }
3885 #endif // defined(DEBUG)
3886
3887             // We need to use both index and ptr-to-span twice, so clone or spill.
3888             index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3889                                  nullptr DEBUGARG("Span.get_Item index"));
3890             ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3891                                      nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3892
3893             // Bounds check
3894             CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
3895             const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3896             GenTree*             length       = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset);
3897             GenTree*             boundsCheck  = new (this, GT_ARR_BOUNDS_CHECK)
3898                 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3899
3900             // Element access
3901             GenTree*             indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3902             GenTree*             sizeofNode  = gtNewIconNode(elemSize);
3903             GenTree*             mulNode     = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3904             CORINFO_FIELD_HANDLE ptrHnd      = info.compCompHnd->getFieldInClass(clsHnd, 0);
3905             const unsigned       ptrOffset   = info.compCompHnd->getFieldOffset(ptrHnd);
3906             GenTree*             data        = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset);
3907             GenTree*             result      = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3908
3909             // Prepare result
3910             var_types resultType = JITtype2varType(sig->retType);
3911             assert(resultType == result->TypeGet());
3912             retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3913
3914             break;
3915         }
3916
3917         case CORINFO_INTRINSIC_GetRawHandle:
3918         {
3919             noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3920             CORINFO_RESOLVED_TOKEN resolvedToken;
3921             resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3922             resolvedToken.tokenScope   = info.compScopeHnd;
3923             resolvedToken.token        = memberRef;
3924             resolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
3925
3926             CORINFO_GENERICHANDLE_RESULT embedInfo;
3927             info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3928
3929             GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3930                                                  embedInfo.compileTimeHandle);
3931             if (rawHandle == nullptr)
3932             {
3933                 return nullptr;
3934             }
3935
3936             noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3937
3938             unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3939             impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3940
3941             GenTree*  lclVar     = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3942             GenTree*  lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3943             var_types resultType = JITtype2varType(sig->retType);
3944             retNode              = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3945
3946             break;
3947         }
3948
3949         case CORINFO_INTRINSIC_TypeEQ:
3950         case CORINFO_INTRINSIC_TypeNEQ:
3951         {
3952             JITDUMP("Importing Type.op_*Equality intrinsic\n");
3953             op1              = impStackTop(1).val;
3954             op2              = impStackTop(0).val;
3955             GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
3956             if (optTree != nullptr)
3957             {
3958                 // Success, clean up the evaluation stack.
3959                 impPopStack();
3960                 impPopStack();
3961
3962                 // See if we can optimize even further, to a handle compare.
3963                 optTree = gtFoldTypeCompare(optTree);
3964
3965                 // See if we can now fold a handle compare to a constant.
3966                 optTree = gtFoldExpr(optTree);
3967
3968                 retNode = optTree;
3969             }
3970             else
3971             {
3972                 // Retry optimizing these later
3973                 isSpecial = true;
3974             }
3975             break;
3976         }
3977
3978         case CORINFO_INTRINSIC_GetCurrentManagedThread:
3979         case CORINFO_INTRINSIC_GetManagedThreadId:
3980         {
3981             // Retry optimizing these during morph
3982             isSpecial = true;
3983             break;
3984         }
3985
3986         default:
3987             /* Unknown intrinsic */
3988             intrinsicID = CORINFO_INTRINSIC_Illegal;
3989             break;
3990     }
3991
3992     // Look for new-style jit intrinsics by name
3993     if (ni != NI_Illegal)
3994     {
3995         assert(retNode == nullptr);
3996         switch (ni)
3997         {
3998             case NI_System_Enum_HasFlag:
3999             {
4000                 GenTree* thisOp  = impStackTop(1).val;
4001                 GenTree* flagOp  = impStackTop(0).val;
4002                 GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
4003
4004                 if (optTree != nullptr)
4005                 {
4006                     // Optimization successful. Pop the stack for real.
4007                     impPopStack();
4008                     impPopStack();
4009                     retNode = optTree;
4010                 }
4011                 else
4012                 {
4013                     // Retry optimizing this during morph.
4014                     isSpecial = true;
4015                 }
4016
4017                 break;
4018             }
4019
4020             case NI_MathF_Round:
4021             case NI_Math_Round:
4022             {
4023                 // Math.Round and MathF.Round used to be traditional JIT intrinsics. In order
4024                 // to simplify the transition, we will just treat it as if it was still the
4025                 // old intrinsic, CORINFO_INTRINSIC_Round. This should end up flowing properly
4026                 // everywhere else.
4027
4028                 retNode = impMathIntrinsic(method, sig, callType, CORINFO_INTRINSIC_Round, tailCall);
4029                 break;
4030             }
4031
4032             case NI_System_Collections_Generic_EqualityComparer_get_Default:
4033             {
4034                 // Flag for later handling during devirtualization.
4035                 isSpecial = true;
4036                 break;
4037             }
4038
4039             case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness:
4040             {
4041                 assert(sig->numArgs == 1);
4042
4043                 // We expect the return type of the ReverseEndianness routine to match the type of the
4044                 // one and only argument to the method. We use a special instruction for 16-bit
4045                 // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally,
4046                 // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a
4047                 // 64-bit byte swap on a 32-bit arch, we'll fall through to the default case in the switch block below.
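                //
                // For instance (illustrative): ReverseEndianness((ushort)0x1234) yields 0x3412,
                // which maps to GT_BSWAP16 below, while the 32-bit and 64-bit overloads map to GT_BSWAP.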
4048
4049                 switch (sig->retType)
4050                 {
4051                     case CorInfoType::CORINFO_TYPE_SHORT:
4052                     case CorInfoType::CORINFO_TYPE_USHORT:
4053                         retNode = gtNewOperNode(GT_BSWAP16, callType, impPopStack().val);
4054                         break;
4055
4056                     case CorInfoType::CORINFO_TYPE_INT:
4057                     case CorInfoType::CORINFO_TYPE_UINT:
4058 #ifdef _TARGET_64BIT_
4059                     case CorInfoType::CORINFO_TYPE_LONG:
4060                     case CorInfoType::CORINFO_TYPE_ULONG:
4061 #endif // _TARGET_64BIT_
4062                         retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val);
4063                         break;
4064
4065                     default:
4066                         // This default case gets hit on 32-bit archs when a call to a 64-bit overload
4067                         // of ReverseEndianness is encountered. In that case we'll let the JIT treat this as a standard
4068                         // method call, where the implementation decomposes the operation into two 32-bit
4069                         // bswap routines. If the input to the 64-bit function is a constant, then we rely
4070                         // on inlining + constant folding of 32-bit bswaps to effectively constant fold
4071                         // the 64-bit call site.
4072                         break;
4073                 }
4074
4075                 break;
4076             }
4077
4078             default:
4079                 break;
4080         }
4081     }
4082
4083     if (mustExpand && (retNode == nullptr))
4084     {
4085         NO_WAY("JIT must expand the intrinsic!");
4086     }
4087
4088     // Optionally report if this intrinsic is special
4089     // (that is, potentially re-optimizable during morph).
4090     if (isSpecialIntrinsic != nullptr)
4091     {
4092         *isSpecialIntrinsic = isSpecial;
4093     }
4094
4095     return retNode;
4096 }
4097
4098 #ifdef FEATURE_HW_INTRINSICS
4099 //------------------------------------------------------------------------
4100 // impBaseIntrinsic: dispatch intrinsics to their own implementation
4101 //
4102 // Arguments:
4103 //    intrinsic  -- id of the intrinsic function.
4104 //    method     -- method handle of the intrinsic function.
4105 //    sig        -- signature of the intrinsic call
4106 //
4107 // Return Value:
4108 //    the expanded intrinsic.
4109 //
4110 GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig)
4111 {
4112     GenTree*  retNode  = nullptr;
4113     unsigned  simdSize = 0;
4114     var_types baseType = getBaseTypeAndSizeOfSIMDType(sig->retTypeClass, &simdSize);
4115     var_types retType  = getSIMDTypeForSize(simdSize);
4116
4117     if (sig->hasThis())
4118     {
4119         CORINFO_CLASS_HANDLE thisClass = info.compCompHnd->getArgClass(sig, sig->args);
4120         var_types            thisType  = getBaseTypeOfSIMDType(thisClass);
4121
4122         if (!varTypeIsArithmetic(thisType))
4123         {
4124             return nullptr;
4125         }
4126     }
4127
4128     if (!varTypeIsArithmetic(baseType))
4129     {
4130         return nullptr;
4131     }
4132
4133     switch (intrinsic)
4134     {
4135 #if defined(_TARGET_ARM64_)
4136         case NI_Base_Vector64_AsByte:
4137         case NI_Base_Vector64_AsInt16:
4138         case NI_Base_Vector64_AsInt32:
4139         case NI_Base_Vector64_AsSByte:
4140         case NI_Base_Vector64_AsSingle:
4141         case NI_Base_Vector64_AsUInt16:
4142         case NI_Base_Vector64_AsUInt32:
4143 #endif // _TARGET_ARM64_
4144         case NI_Base_Vector128_As:
4145         case NI_Base_Vector128_AsByte:
4146         case NI_Base_Vector128_AsDouble:
4147         case NI_Base_Vector128_AsInt16:
4148         case NI_Base_Vector128_AsInt32:
4149         case NI_Base_Vector128_AsInt64:
4150         case NI_Base_Vector128_AsSByte:
4151         case NI_Base_Vector128_AsSingle:
4152         case NI_Base_Vector128_AsUInt16:
4153         case NI_Base_Vector128_AsUInt32:
4154         case NI_Base_Vector128_AsUInt64:
4155 #if defined(_TARGET_XARCH_)
4156         case NI_Base_Vector256_As:
4157         case NI_Base_Vector256_AsByte:
4158         case NI_Base_Vector256_AsDouble:
4159         case NI_Base_Vector256_AsInt16:
4160         case NI_Base_Vector256_AsInt32:
4161         case NI_Base_Vector256_AsInt64:
4162         case NI_Base_Vector256_AsSByte:
4163         case NI_Base_Vector256_AsSingle:
4164         case NI_Base_Vector256_AsUInt16:
4165         case NI_Base_Vector256_AsUInt32:
4166         case NI_Base_Vector256_AsUInt64:
4167 #endif // _TARGET_XARCH_
4168         {
4169             // We fold away the cast here, as it only exists to satisfy
4170             // the type system. It is safe to do this here since the retNode type
4171             // and the signature return type are both the same TYP_SIMD.
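                 // For example (C# call shape, for illustration only): calling AsInt32() on a Vector128<float>
                 // reinterprets the same 16 bytes as a Vector128<int>, so the popped SIMD value is returned
                 // as-is and no instruction needs to be generated for the cast.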
4172
4173             assert(sig->numArgs == 0);
4174             assert(sig->hasThis());
4175
4176             retNode = impSIMDPopStack(retType, true, sig->retTypeClass);
4177             SetOpLclRelatedToSIMDIntrinsic(retNode);
4178             assert(retNode->gtType == getSIMDTypeForSize(getSIMDTypeSizeInBytes(sig->retTypeSigClass)));
4179             break;
4180         }
4181
4182 #ifdef _TARGET_XARCH_
4183         case NI_Base_Vector128_Zero:
4184         {
4185             assert(sig->numArgs == 0);
4186
4187             if (compSupports(InstructionSet_SSE))
4188             {
4189                 retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
4190             }
4191             break;
4192         }
4193
4194         case NI_Base_Vector256_Zero:
4195         {
4196             assert(sig->numArgs == 0);
4197
4198             if (compSupports(InstructionSet_AVX))
4199             {
4200                 retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
4201             }
4202             break;
4203         }
4204 #endif // _TARGET_XARCH_
4205
4206         default:
4207         {
4208             unreached();
4209             break;
4210         }
4211     }
4212
4213     return retNode;
4214 }
4215 #endif // FEATURE_HW_INTRINSICS
4216
4217 GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
4218                                     CORINFO_SIG_INFO*     sig,
4219                                     var_types             callType,
4220                                     CorInfoIntrinsics     intrinsicID,
4221                                     bool                  tailCall)
4222 {
4223     GenTree* op1;
4224     GenTree* op2;
4225
4226     assert(callType != TYP_STRUCT);
4227     assert(IsMathIntrinsic(intrinsicID));
4228
4229     op1 = nullptr;
4230
4231 #if !defined(_TARGET_X86_)
4232     // Intrinsics that are not implemented directly by target instructions will
4233     // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
4234     // don't do this optimization, because:
4235     //  a) for backward compatibility reasons with desktop .NET 4.6 / 4.6.1, and
4236     //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
4237     //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
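     // For example (illustrative only), a tail.-prefixed call to a math intrinsic that is implemented
     // as a user call (say, Math.Pow on most targets) is left as an ordinary call here rather than
     // being turned into a GT_INTRINSIC node.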
4238     if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
4239 #else
4240     // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
4241     // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
4242     // code generation for certain EH constructs.
4243     if (!IsIntrinsicImplementedByUserCall(intrinsicID))
4244 #endif
4245     {
4246         switch (sig->numArgs)
4247         {
4248             case 1:
4249                 op1 = impPopStack().val;
4250
4251                 assert(varTypeIsFloating(op1));
4252
4253                 if (op1->TypeGet() != callType)
4254                 {
4255                     op1 = gtNewCastNode(callType, op1, false, callType);
4256                 }
4257
4258                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
4259                 break;
4260
4261             case 2:
4262                 op2 = impPopStack().val;
4263                 op1 = impPopStack().val;
4264
4265                 assert(varTypeIsFloating(op1));
4266                 assert(varTypeIsFloating(op2));
4267
4268                 if (op2->TypeGet() != callType)
4269                 {
4270                     op2 = gtNewCastNode(callType, op2, false, callType);
4271                 }
4272                 if (op1->TypeGet() != callType)
4273                 {
4274                     op1 = gtNewCastNode(callType, op1, false, callType);
4275                 }
4276
4277                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
4278                 break;
4279
4280             default:
4281                 NO_WAY("Unsupported number of args for Math Intrinsic");
4282         }
4283
4284         if (IsIntrinsicImplementedByUserCall(intrinsicID))
4285         {
4286             op1->gtFlags |= GTF_CALL;
4287         }
4288     }
4289
4290     return op1;
4291 }
4292
4293 //------------------------------------------------------------------------
4294 // lookupNamedIntrinsic: map method to jit named intrinsic value
4295 //
4296 // Arguments:
4297 //    method -- method handle for method
4298 //
4299 // Return Value:
4300 //    Id for the named intrinsic, or Illegal if none.
4301 //
4302 // Notes:
4303 //    method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
4304 //    otherwise it is not a named jit intrinsic.
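     //    For example, System.MathF.Round maps to NI_MathF_Round below, and
     //    System.Collections.Generic.EqualityComparer`1.get_Default maps to
     //    NI_System_Collections_Generic_EqualityComparer_get_Default.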
4305 //
4306
4307 NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
4308 {
4309     NamedIntrinsic result = NI_Illegal;
4310
4311     const char* className     = nullptr;
4312     const char* namespaceName = nullptr;
4313     const char* methodName    = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
4314
4315     if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
4316     {
4317         return result;
4318     }
4319
4320     if (strcmp(namespaceName, "System") == 0)
4321     {
4322         if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
4323         {
4324             result = NI_System_Enum_HasFlag;
4325         }
4326         else if ((strcmp(className, "MathF") == 0) && (strcmp(methodName, "Round") == 0))
4327         {
4328             result = NI_MathF_Round;
4329         }
4330         else if ((strcmp(className, "Math") == 0) && (strcmp(methodName, "Round") == 0))
4331         {
4332             result = NI_Math_Round;
4333         }
4334     }
4335 #if defined(_TARGET_XARCH_) // We currently only support BSWAP on x86/x64
4336     else if (strcmp(namespaceName, "System.Buffers.Binary") == 0)
4337     {
4338         if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0))
4339         {
4340             result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness;
4341         }
4342     }
4343 #endif // defined(_TARGET_XARCH_)
4344     else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
4345     {
4346         if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
4347         {
4348             result = NI_System_Collections_Generic_EqualityComparer_get_Default;
4349         }
4350     }
4351 #ifdef FEATURE_HW_INTRINSICS
4352     else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0)
4353     {
4354         namespaceName += 25;
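             // For example, for methods on System.Runtime.Intrinsics.X86.Sse2, namespaceName now points
             // at ".X86" (dispatched to the ISA-specific lookup below), while types directly under
             // System.Runtime.Intrinsics, such as Vector128`1, leave an empty remainder and are matched here.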
4355
4356         if (namespaceName[0] == '\0')
4357         {
4358             if (strncmp(className, "Vector", 6) == 0)
4359             {
4360                 className += 6;
4361
4362 #if defined(_TARGET_ARM64_)
4363                 if (strncmp(className, "64", 2) == 0)
4364                 {
4365                     className += 2;
4366
4367                     if (strcmp(className, "`1") == 0)
4368                     {
4369                         if (strncmp(methodName, "As", 2) == 0)
4370                         {
4371                             methodName += 2;
4372
4373                             // Vector64_As, Vector64_AsDouble, Vector64_AsInt64, and Vector64_AsUInt64
4374                             // are not currently supported as they require additional plumbing to be
4375                             // supported by the JIT as TYP_SIMD8.
4376
4377                             if (strcmp(methodName, "Byte") == 0)
4378                             {
4379                                 result = NI_Base_Vector64_AsByte;
4380                             }
4381                             else if (strcmp(methodName, "Int16") == 0)
4382                             {
4383                                 result = NI_Base_Vector64_AsInt16;
4384                             }
4385                             else if (strcmp(methodName, "Int32") == 0)
4386                             {
4387                                 result = NI_Base_Vector64_AsInt32;
4388                             }
4389                             else if (strcmp(methodName, "SByte") == 0)
4390                             {
4391                                 result = NI_Base_Vector64_AsSByte;
4392                             }
4393                             else if (strcmp(methodName, "Single") == 0)
4394                             {
4395                                 result = NI_Base_Vector64_AsSingle;
4396                             }
4397                             else if (strcmp(methodName, "UInt16") == 0)
4398                             {
4399                                 result = NI_Base_Vector64_AsUInt16;
4400                             }
4401                             else if (strcmp(methodName, "UInt32") == 0)
4402                             {
4403                                 result = NI_Base_Vector64_AsUInt32;
4404                             }
4405                         }
4406                     }
4407                 }
4408                 else
4409 #endif // _TARGET_ARM64_
4410                     if (strncmp(className, "128", 3) == 0)
4411                 {
4412                     className += 3;
4413
4414                     if (strcmp(className, "`1") == 0)
4415                     {
4416                         if (strncmp(methodName, "As", 2) == 0)
4417                         {
4418                             methodName += 2;
4419
4420                             if (strcmp(methodName, "`1") == 0)
4421                             {
4422                                 result = NI_Base_Vector128_As;
4423                             }
4424                             else if (strcmp(methodName, "Byte") == 0)
4425                             {
4426                                 result = NI_Base_Vector128_AsByte;
4427                             }
4428                             else if (strcmp(methodName, "Double") == 0)
4429                             {
4430                                 result = NI_Base_Vector128_AsDouble;
4431                             }
4432                             else if (strcmp(methodName, "Int16") == 0)
4433                             {
4434                                 result = NI_Base_Vector128_AsInt16;
4435                             }
4436                             else if (strcmp(methodName, "Int32") == 0)
4437                             {
4438                                 result = NI_Base_Vector128_AsInt32;
4439                             }
4440                             else if (strcmp(methodName, "Int64") == 0)
4441                             {
4442                                 result = NI_Base_Vector128_AsInt64;
4443                             }
4444                             else if (strcmp(methodName, "SByte") == 0)
4445                             {
4446                                 result = NI_Base_Vector128_AsSByte;
4447                             }
4448                             else if (strcmp(methodName, "Single") == 0)
4449                             {
4450                                 result = NI_Base_Vector128_AsSingle;
4451                             }
4452                             else if (strcmp(methodName, "UInt16") == 0)
4453                             {
4454                                 result = NI_Base_Vector128_AsUInt16;
4455                             }
4456                             else if (strcmp(methodName, "UInt32") == 0)
4457                             {
4458                                 result = NI_Base_Vector128_AsUInt32;
4459                             }
4460                             else if (strcmp(methodName, "UInt64") == 0)
4461                             {
4462                                 result = NI_Base_Vector128_AsUInt64;
4463                             }
4464                         }
4465 #if defined(_TARGET_XARCH_)
4466                         else if (strcmp(methodName, "get_Zero") == 0)
4467                         {
4468                             result = NI_Base_Vector128_Zero;
4469                         }
4470 #endif // _TARGET_XARCH_
4471                     }
4472                 }
4473 #if defined(_TARGET_XARCH_)
4474                 else if (strncmp(className, "256", 3) == 0)
4475                 {
4476                     className += 3;
4477
4478                     if (strcmp(className, "`1") == 0)
4479                     {
4480                         if (strncmp(methodName, "As", 2) == 0)
4481                         {
4482                             methodName += 2;
4483
4484                             if (strcmp(methodName, "`1") == 0)
4485                             {
4486                                 result = NI_Base_Vector256_As;
4487                             }
4488                             else if (strcmp(methodName, "Byte") == 0)
4489                             {
4490                                 result = NI_Base_Vector256_AsByte;
4491                             }
4492                             else if (strcmp(methodName, "Double") == 0)
4493                             {
4494                                 result = NI_Base_Vector256_AsDouble;
4495                             }
4496                             else if (strcmp(methodName, "Int16") == 0)
4497                             {
4498                                 result = NI_Base_Vector256_AsInt16;
4499                             }
4500                             else if (strcmp(methodName, "Int32") == 0)
4501                             {
4502                                 result = NI_Base_Vector256_AsInt32;
4503                             }
4504                             else if (strcmp(methodName, "Int64") == 0)
4505                             {
4506                                 result = NI_Base_Vector256_AsInt64;
4507                             }
4508                             else if (strcmp(methodName, "SByte") == 0)
4509                             {
4510                                 result = NI_Base_Vector256_AsSByte;
4511                             }
4512                             else if (strcmp(methodName, "Single") == 0)
4513                             {
4514                                 result = NI_Base_Vector256_AsSingle;
4515                             }
4516                             else if (strcmp(methodName, "UInt16") == 0)
4517                             {
4518                                 result = NI_Base_Vector256_AsUInt16;
4519                             }
4520                             else if (strcmp(methodName, "UInt32") == 0)
4521                             {
4522                                 result = NI_Base_Vector256_AsUInt32;
4523                             }
4524                             else if (strcmp(methodName, "UInt64") == 0)
4525                             {
4526                                 result = NI_Base_Vector256_AsUInt64;
4527                             }
4528                         }
4529                         else if (strcmp(methodName, "get_Zero") == 0)
4530                         {
4531                             result = NI_Base_Vector256_Zero;
4532                         }
4533                     }
4534                 }
4535 #endif // _TARGET_XARCH_
4536             }
4537         }
4538 #if defined(_TARGET_XARCH_)
4539         else if (strcmp(namespaceName, ".X86") == 0)
4540         {
4541             result = HWIntrinsicInfo::lookupId(className, methodName);
4542         }
4543 #elif defined(_TARGET_ARM64_)
4544         else if (strcmp(namespaceName, ".Arm.Arm64") == 0)
4545         {
4546             result = lookupHWIntrinsic(className, methodName);
4547         }
4548 #else // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4549 #error Unsupported platform
4550 #endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4551     }
4552 #endif // FEATURE_HW_INTRINSICS
4553
4554     return result;
4555 }
4556
4557 /*****************************************************************************/
4558
4559 GenTree* Compiler::impArrayAccessIntrinsic(
4560     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
4561 {
4562     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
4563        the following, as it generates fatter code.
4564     */
4565
4566     if (compCodeOpt() == SMALL_CODE)
4567     {
4568         return nullptr;
4569     }
4570
4571     /* These intrinsics generate fatter (but faster) code and are only
4572        done if we don't need SMALL_CODE */
4573
4574     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
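     // For example, for a rank-2 array T[,] (illustrative only): Get has signature T Get(int, int), so
     // rank == numArgs == 2, while Set has signature void Set(int, int, T), so rank == numArgs - 1 == 2.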
4575
4576     // The rank 1 case is special because it has to handle two array formats;
4577     // we simply don't handle that case here.
4578     if (rank > GT_ARR_MAX_RANK || rank <= 1)
4579     {
4580         return nullptr;
4581     }
4582
4583     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
4584     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
4585
4586     // For the ref case, we will only be able to inline if the types match
4587     // (the verifier checks for this; we don't care about the unverified case) and the
4588     // type is final (so we don't need to do the cast).
4589     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
4590     {
4591         // Get the call site signature
4592         CORINFO_SIG_INFO LocalSig;
4593         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
4594         assert(LocalSig.hasThis());
4595
4596         CORINFO_CLASS_HANDLE actualElemClsHnd;
4597
4598         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4599         {
4600             // Fetch the last argument, the one that indicates the type we are setting.
4601             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
4602             for (unsigned r = 0; r < rank; r++)
4603             {
4604                 argType = info.compCompHnd->getArgNext(argType);
4605             }
4606
4607             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
4608             actualElemClsHnd = argInfo.GetClassHandle();
4609         }
4610         else
4611         {
4612             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
4613
4614             // Fetch the return type
4615             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
4616             assert(retInfo.IsByRef());
4617             actualElemClsHnd = retInfo.GetClassHandle();
4618         }
4619
4620         // if it's not final, we can't do the optimization
4621         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
4622         {
4623             return nullptr;
4624         }
4625     }
4626
4627     unsigned arrayElemSize;
4628     if (elemType == TYP_STRUCT)
4629     {
4630         assert(arrElemClsHnd);
4631
4632         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
4633     }
4634     else
4635     {
4636         arrayElemSize = genTypeSize(elemType);
4637     }
4638
4639     if ((unsigned char)arrayElemSize != arrayElemSize)
4640     {
4641         // arrayElemSize would be truncated as an unsigned char.
4642         // This means the array element is too large. Don't do the optimization.
4643         return nullptr;
4644     }
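     // For instance, a struct element of, say, 300 bytes would not fit in the unsigned char element-size
     // field of the GenTreeArrElem node created below, so the expansion is skipped for such arrays.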
4645
4646     GenTree* val = nullptr;
4647
4648     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4649     {
4650         // Assignment of a struct is more work, and there are more gets than sets.
4651         if (elemType == TYP_STRUCT)
4652         {
4653             return nullptr;
4654         }
4655
4656         val = impPopStack().val;
4657         assert(genActualType(elemType) == genActualType(val->gtType) ||
4658                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
4659                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
4660                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
4661     }
4662
4663     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
4664
4665     GenTree* inds[GT_ARR_MAX_RANK];
4666     for (unsigned k = rank; k > 0; k--)
4667     {
4668         inds[k - 1] = impPopStack().val;
4669     }
4670
4671     GenTree* arr = impPopStack().val;
4672     assert(arr->gtType == TYP_REF);
4673
4674     GenTree* arrElem =
4675         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
4676                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
4677
4678     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
4679     {
4680         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
4681     }
4682
4683     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4684     {
4685         assert(val != nullptr);
4686         return gtNewAssignNode(arrElem, val);
4687     }
4688     else
4689     {
4690         return arrElem;
4691     }
4692 }
4693
4694 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
4695 {
4696     unsigned i;
4697
4698     // do some basic checks first
4699     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
4700     {
4701         return FALSE;
4702     }
4703
4704     if (verCurrentState.esStackDepth > 0)
4705     {
4706         // merge stack types
4707         StackEntry* parentStack = block->bbStackOnEntry();
4708         StackEntry* childStack  = verCurrentState.esStack;
4709
4710         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
4711         {
4712             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
4713             {
4714                 return FALSE;
4715             }
4716         }
4717     }
4718
4719     // merge initialization status of this ptr
4720
4721     if (verTrackObjCtorInitState)
4722     {
4723         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
4724         assert(verCurrentState.thisInitialized != TIS_Bottom);
4725
4726         // If the successor block's thisInit state is unknown, copy it from the current state.
4727         if (block->bbThisOnEntry() == TIS_Bottom)
4728         {
4729             *changed = true;
4730             verSetThisInit(block, verCurrentState.thisInitialized);
4731         }
4732         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
4733         {
4734             if (block->bbThisOnEntry() != TIS_Top)
4735             {
4736                 *changed = true;
4737                 verSetThisInit(block, TIS_Top);
4738
4739                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
4740                 {
4741                     // The block is bad. Control can flow through the block to any handler that catches the
4742                     // verification exception, but the importer ignores bad blocks and therefore won't model
4743                     // this flow in the normal way. To complete the merge into the bad block, the new state
4744                     // needs to be manually pushed to the handlers that may be reached after the verification
4745                     // exception occurs.
4746                     //
4747                     // Usually, the new state was already propagated to the relevant handlers while processing
4748                     // the predecessors of the bad block. The exception is when the bad block is at the start
4749                     // of a try region, meaning it is protected by additional handlers that do not protect its
4750                     // predecessors.
4751                     //
4752                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
4753                     {
4754                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
4755                         // recursive calls back into this code path (if successors of the current bad block are
4756                         // also bad blocks).
4757                         //
4758                         ThisInitState origTIS           = verCurrentState.thisInitialized;
4759                         verCurrentState.thisInitialized = TIS_Top;
4760                         impVerifyEHBlock(block, true);
4761                         verCurrentState.thisInitialized = origTIS;
4762                     }
4763                 }
4764             }
4765         }
4766     }
4767     else
4768     {
4769         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4770     }
4771
4772     return TRUE;
4773 }
4774
4775 /*****************************************************************************
4776  * 'logMsg' is true if a log message needs to be logged, false if the caller has
4777  *   already logged it (presumably in a more detailed fashion than done here)
4778  * 'bVerificationException' is true for a verification exception, false for a
4779  *   "call unauthorized by host" exception.
4780  */
4781
4782 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4783 {
4784     block->bbJumpKind = BBJ_THROW;
4785     block->bbFlags |= BBF_FAILED_VERIFICATION;
4786
4787     impCurStmtOffsSet(block->bbCodeOffs);
4788
4789 #ifdef DEBUG
4790     // we need this since BeginTreeList asserts otherwise
4791     impTreeList = impTreeLast = nullptr;
4792     block->bbFlags &= ~BBF_IMPORTED;
4793
4794     if (logMsg)
4795     {
4796         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4797                 block->bbCodeOffs, block->bbCodeOffsEnd));
4798         if (verbose)
4799         {
4800             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4801         }
4802     }
4803
4804     if (JitConfig.DebugBreakOnVerificationFailure())
4805     {
4806         DebugBreak();
4807     }
4808 #endif
4809
4810     impBeginTreeList();
4811
4812     // if the stack is non-empty evaluate all the side-effects
4813     if (verCurrentState.esStackDepth > 0)
4814     {
4815         impEvalSideEffects();
4816     }
4817     assert(verCurrentState.esStackDepth == 0);
4818
4819     GenTree* op1 =
4820         gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4821     // verCurrentState.esStackDepth = 0;
4822     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4823
4824     // The inliner is not able to handle methods that require a throw block, so
4825     // make sure this method never gets inlined.
4826     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4827 }
4828
4829 /*****************************************************************************
4830  *
4831  */
4832 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4833
4834 {
4835     // On AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4836     // slightly different mechanism in which it calls the JIT to perform IL verification:
4837     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4838     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4839     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4840     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
4841     // up the exception; instead it embeds a throw inside the offending basic block and lets the
4842     // jitted method fail at runtime.
4843     //
4844     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4845     // with the IMPORT_ONLY and IL Verification flags set), because this won't actually generate code;
4846     // it only tries to find out whether to fail this method before even actually jitting it.  So, when
4847     // we detect these two conditions, instead of generating a throw statement inside the offending
4848     // basic block, we immediately fail to JIT and notify the VM so that the IsVerifiable() predicate
4849     // returns false and RyuJIT behaves the same way JIT64 does.
4850     //
4851     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4852     // RyuJIT for the time being until we completely replace JIT64.
4853     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4854
4855     // On AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4856     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
4857     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4858     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4859     // be turned off during importation).
4860     CLANG_FORMAT_COMMENT_ANCHOR;
4861
4862 #ifdef _TARGET_64BIT_
4863
4864 #ifdef DEBUG
4865     bool canSkipVerificationResult =
4866         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4867     assert(tiVerificationNeeded || canSkipVerificationResult);
4868 #endif // DEBUG
4869
4870     // Add the non verifiable flag to the compiler
4871     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4872     {
4873         tiIsVerifiableCode = FALSE;
4874     }
4875 #endif //_TARGET_64BIT_
4876     verResetCurrentState(block, &verCurrentState);
4877     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4878
4879 #ifdef DEBUG
4880     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4881 #endif                   // DEBUG
4882 }
4883
4884 /******************************************************************************/
4885 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4886 {
4887     assert(ciType < CORINFO_TYPE_COUNT);
4888
4889     typeInfo tiResult;
4890     switch (ciType)
4891     {
4892         case CORINFO_TYPE_STRING:
4893         case CORINFO_TYPE_CLASS:
4894             tiResult = verMakeTypeInfo(clsHnd);
4895             if (!tiResult.IsType(TI_REF))
4896             { // type must be consistent with element type
4897                 return typeInfo();
4898             }
4899             break;
4900
4901 #ifdef _TARGET_64BIT_
4902         case CORINFO_TYPE_NATIVEINT:
4903         case CORINFO_TYPE_NATIVEUINT:
4904             if (clsHnd)
4905             {
4906                 // If we have more precise information, use it
4907                 return verMakeTypeInfo(clsHnd);
4908             }
4909             else
4910             {
4911                 return typeInfo::nativeInt();
4912             }
4913             break;
4914 #endif // _TARGET_64BIT_
4915
4916         case CORINFO_TYPE_VALUECLASS:
4917         case CORINFO_TYPE_REFANY:
4918             tiResult = verMakeTypeInfo(clsHnd);
4919             // type must be consistent with the element type
4920             if (!tiResult.IsValueClass())
4921             {
4922                 return typeInfo();
4923             }
4924             break;
4925         case CORINFO_TYPE_VAR:
4926             return verMakeTypeInfo(clsHnd);
4927
4928         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4929         case CORINFO_TYPE_VOID:
4930             return typeInfo();
4931             break;
4932
4933         case CORINFO_TYPE_BYREF:
4934         {
4935             CORINFO_CLASS_HANDLE childClassHandle;
4936             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4937             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4938         }
4939         break;
4940
4941         default:
4942             if (clsHnd)
4943             { // If we have more precise information, use it
4944                 return typeInfo(TI_STRUCT, clsHnd);
4945             }
4946             else
4947             {
4948                 return typeInfo(JITtype2tiType(ciType));
4949             }
4950     }
4951     return tiResult;
4952 }
4953
4954 /******************************************************************************/
4955
4956 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4957 {
4958     if (clsHnd == nullptr)
4959     {
4960         return typeInfo();
4961     }
4962
4963     // Byrefs should only occur in method and local signatures, which are accessed
4964     // using ICorClassInfo and ICorClassInfo.getChildType.
4965     // So findClass() and getClassAttribs() should not be called for byrefs
4966
4967     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4968     {
4969         assert(!"Did findClass() return a Byref?");
4970         return typeInfo();
4971     }
4972
4973     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4974
4975     if (attribs & CORINFO_FLG_VALUECLASS)
4976     {
4977         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4978
4979         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4980         // not occur here, so we may want to change this to an assert instead.
4981         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4982         {
4983             return typeInfo();
4984         }
4985
4986 #ifdef _TARGET_64BIT_
4987         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4988         {
4989             return typeInfo::nativeInt();
4990         }
4991 #endif // _TARGET_64BIT_
4992
4993         if (t != CORINFO_TYPE_UNDEF)
4994         {
4995             return (typeInfo(JITtype2tiType(t)));
4996         }
4997         else if (bashStructToRef)
4998         {
4999             return (typeInfo(TI_REF, clsHnd));
5000         }
5001         else
5002         {
5003             return (typeInfo(TI_STRUCT, clsHnd));
5004         }
5005     }
5006     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
5007     {
5008         // See comment in _typeInfo.h for why we do it this way.
5009         return (typeInfo(TI_REF, clsHnd, true));
5010     }
5011     else
5012     {
5013         return (typeInfo(TI_REF, clsHnd));
5014     }
5015 }
5016
5017 /******************************************************************************/
5018 BOOL Compiler::verIsSDArray(typeInfo ti)
5019 {
5020     if (ti.IsNullObjRef())
5021     { // nulls are SD arrays
5022         return TRUE;
5023     }
5024
5025     if (!ti.IsType(TI_REF))
5026     {
5027         return FALSE;
5028     }
5029
5030     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
5031     {
5032         return FALSE;
5033     }
5034     return TRUE;
5035 }
5036
5037 /******************************************************************************/
5038 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
5039 /* Returns an error type if anything goes wrong */
5040
5041 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
5042 {
5043     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
5044
5045     if (!verIsSDArray(arrayObjectType))
5046     {
5047         return typeInfo();
5048     }
5049
5050     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
5051     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
5052
5053     return verMakeTypeInfo(ciType, childClassHandle);
5054 }
5055
5056 /*****************************************************************************
5057  */
5058 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
5059 {
5060     CORINFO_CLASS_HANDLE classHandle;
5061     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
5062
5063     var_types type = JITtype2varType(ciType);
5064     if (varTypeIsGC(type))
5065     {
5066         // For efficiency, getArgType only returns something in classHandle for
5067         // value types.  For other types that have additional type info, you
5068         // have to call back explicitly.
5069         classHandle = info.compCompHnd->getArgClass(sig, args);
5070         if (!classHandle)
5071         {
5072             NO_WAY("Could not figure out Class specified in argument or local signature");
5073         }
5074     }
5075
5076     return verMakeTypeInfo(ciType, classHandle);
5077 }
5078
5079 /*****************************************************************************/
5080
5081 // This does the expensive check to figure out whether the method
5082 // needs to be verified. It is called only when we fail verification,
5083 // just before throwing the verification exception.
5084
5085 BOOL Compiler::verNeedsVerification()
5086 {
5087     // If we have previously determined that verification is NOT needed
5088     // (for example in Compiler::compCompile), that means verification is really not needed.
5089     // Return the same decision we made before.
5090     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
5091
5092     if (!tiVerificationNeeded)
5093     {
5094         return tiVerificationNeeded;
5095     }
5096
5097     assert(tiVerificationNeeded);
5098
5099     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
5100     // obtain the answer.
5101     CorInfoCanSkipVerificationResult canSkipVerificationResult =
5102         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
5103
5104     // canSkipVerification will return one of the following three values:
5105     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
5106     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
5107     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
5108     //     but need to insert a callout to the VM to ask during runtime
5109     //     whether to skip verification or not.
5110
5111     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
5112     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
5113     {
5114         tiRuntimeCalloutNeeded = true;
5115     }
5116
5117     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
5118     {
5119         // Dev10 706080 - Testers don't like the assert, so just silence it
5120         // by not using the macros that invoke debugAssert.
5121         badCode();
5122     }
5123
5124     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
5125     // The following line means we will NOT do jit time verification if canSkipVerification
5126     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
5127     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
5128     return tiVerificationNeeded;
5129 }
5130
5131 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
5132 {
5133     if (ti.IsByRef())
5134     {
5135         return TRUE;
5136     }
5137     if (!ti.IsType(TI_STRUCT))
5138     {
5139         return FALSE;
5140     }
5141     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
5142 }
5143
5144 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
5145 {
5146     if (ti.IsPermanentHomeByRef())
5147     {
5148         return TRUE;
5149     }
5150     else
5151     {
5152         return FALSE;
5153     }
5154 }
5155
5156 BOOL Compiler::verIsBoxable(const typeInfo& ti)
5157 {
5158     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
5159             || ti.IsUnboxedGenericTypeVar() ||
5160             (ti.IsType(TI_STRUCT) &&
5161              // exclude byreflike structs
5162              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
5163 }
5164
5165 // Is it a boxed value type?
5166 bool Compiler::verIsBoxedValueType(typeInfo ti)
5167 {
5168     if (ti.GetType() == TI_REF)
5169     {
5170         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
5171         return !!eeIsValueClass(clsHnd);
5172     }
5173     else
5174     {
5175         return false;
5176     }
5177 }
5178
5179 /*****************************************************************************
5180  *
5181  *  Check if a TailCall is legal.
5182  */
5183
5184 bool Compiler::verCheckTailCallConstraint(
5185     OPCODE                  opcode,
5186     CORINFO_RESOLVED_TOKEN* pResolvedToken,
5187     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
5188     bool                    speculative                // If true, won't throw if verification fails. Instead it will
5189                                                        // return false to the caller.
5190                                                        // If false, it will throw.
5191     )
5192 {
5193     DWORD            mflags;
5194     CORINFO_SIG_INFO sig;
5195     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
5196                                    // this counter is used to keep track of how many items have been
5197                                    // virtually popped
5198
5199     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
5200     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
5201     unsigned              methodClassFlgs = 0;
5202
5203     assert(impOpcodeIsCallOpcode(opcode));
5204
5205     if (compIsForInlining())
5206     {
5207         return false;
5208     }
5209
5210     // for calli, VerifyOrReturn that this is not a virtual method
5211     if (opcode == CEE_CALLI)
5212     {
5213         /* Get the call sig */
5214         eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5215
5216         // We don't know the target method, so we have to infer the flags, or
5217         // assume the worst-case.
5218         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
5219     }
5220     else
5221     {
5222         methodHnd = pResolvedToken->hMethod;
5223
5224         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
5225
5226         // When verifying generic code we pair the method handle with its
5227         // owning class to get the exact method signature.
5228         methodClassHnd = pResolvedToken->hClass;
5229         assert(methodClassHnd);
5230
5231         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
5232
5233         // opcode specific check
5234         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
5235     }
5236
5237     // We must have gotten the methodClassHnd if the opcode is not CEE_CALLI
5238     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
5239
5240     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
5241     {
5242         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5243     }
5244
5245     // check compatibility of the arguments
5246     unsigned int argCount;
5247     argCount = sig.numArgs;
5248     CORINFO_ARG_LIST_HANDLE args;
5249     args = sig.args;
5250     while (argCount--)
5251     {
5252         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
5253
5254         // check that the argument is not a byref for tailcalls
5255         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
5256
5257         // For unsafe code, we might have parameters containing a pointer to a stack location.
5258         // Disallow the tailcall for this kind.
5259         CORINFO_CLASS_HANDLE classHandle;
5260         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
5261         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
5262
5263         args = info.compCompHnd->getArgNext(args);
5264     }
5265
5266     // update popCount
5267     popCount += sig.numArgs;
5268
5269     // check for 'this', which is present on non-static methods not called via NEWOBJ
5270     if (!(mflags & CORINFO_FLG_STATIC))
5271     {
5272         // Always update the popCount.
5273         // This is crucial for the stack calculation to be correct.
5274         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5275         popCount++;
5276
5277         if (opcode == CEE_CALLI)
5278         {
5279             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
5280             // on the stack.
5281             if (tiThis.IsValueClass())
5282             {
5283                 tiThis.MakeByRef();
5284             }
5285             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
5286         }
5287         else
5288         {
5289             // Check type compatibility of the this argument
5290             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
5291             if (tiDeclaredThis.IsValueClass())
5292             {
5293                 tiDeclaredThis.MakeByRef();
5294             }
5295
5296             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
5297         }
5298     }
5299
5300     // Tail calls on constrained calls should be illegal too:
5301     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
5302     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
5303
5304     // Get the exact view of the signature for an array method
5305     if (sig.retType != CORINFO_TYPE_VOID)
5306     {
5307         if (methodClassFlgs & CORINFO_FLG_ARRAY)
5308         {
5309             assert(opcode != CEE_CALLI);
5310             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5311         }
5312     }
5313
5314     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
5315     typeInfo tiCallerRetType =
5316         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
5317
5318     // a void return type gets morphed into the error type, so we have to treat it specially here
5319     if (sig.retType == CORINFO_TYPE_VOID)
5320     {
5321         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
5322                                   speculative);
5323     }
5324     else
5325     {
5326         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
5327                                                    NormaliseForStack(tiCallerRetType), true),
5328                                   "tailcall return mismatch", speculative);
5329     }
5330
5331     // for tailcall, stack must be empty
5332     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
5333
5334     return true; // Yes, tailcall is legal
5335 }
5336
5337 /*****************************************************************************
5338  *
5339  *  Checks the IL verification rules for the call
5340  */
5341
5342 void Compiler::verVerifyCall(OPCODE                  opcode,
5343                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
5344                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5345                              bool                    tailCall,
5346                              bool                    readonlyCall,
5347                              const BYTE*             delegateCreateStart,
5348                              const BYTE*             codeAddr,
5349                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
5350 {
5351     DWORD             mflags;
5352     CORINFO_SIG_INFO* sig      = nullptr;
5353     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
5354                                     // this counter is used to keep track of how many items have been
5355                                     // virtually popped
5356
5357     // for calli, VerifyOrReturn that this is not a virtual method
5358     if (opcode == CEE_CALLI)
5359     {
5360         Verify(false, "Calli not verifiable");
5361         return;
5362     }
5363
5364     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
5365     mflags = callInfo->verMethodFlags;
5366
5367     sig = &callInfo->verSig;
5368
5369     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
5370     {
5371         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
5372     }
5373
5374     // opcode specific check
5375     unsigned methodClassFlgs = callInfo->classFlags;
5376     switch (opcode)
5377     {
5378         case CEE_CALLVIRT:
5379             // cannot do callvirt on valuetypes
5380             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
5381             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
5382             break;
5383
5384         case CEE_NEWOBJ:
5385         {
5386             assert(!tailCall); // Importer should not allow this
5387             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
5388                            "newobj must be on instance");
5389
5390             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
5391             {
5392                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
5393                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
5394                 typeInfo tiDeclaredFtn =
5395                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
5396                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
5397
5398                 assert(popCount == 0);
5399                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
5400                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
5401
5402                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
5403                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
5404                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
5405                                "delegate object type mismatch");
5406
5407                 CORINFO_CLASS_HANDLE objTypeHandle =
5408                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
5409
5410                 // the method signature must be compatible with the delegate's invoke method
5411
5412                 // check that for virtual functions, the type of the object used to get the
5413                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
5414                 // since this is a bit of work to determine in general, we pattern match stylized
5415                 // code sequences
5416
5417                 // the delegate creation code check, which used to be done later, is now done here
5418                 // so we can read delegateMethodRef directly from the preceding
5419                 // LDFTN or CEE_LDVIRTFN instruction sequence;
5420                 // we then use it in our call to isCompatibleDelegate().
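                     // The stylized sequences recognized here look roughly like this (illustrative IL only):
                     //     ldftn     <target method>                dup
                     //     newobj    <delegate ctor>         or     ldvirtftn <target method>
                     //                                              newobj    <delegate ctor>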
5421
5422                 mdMemberRef delegateMethodRef = mdMemberRefNil;
5423                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
5424                                "must create delegates with certain IL");
5425
5426                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
5427                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
5428                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
5429                 delegateResolvedToken.token        = delegateMethodRef;
5430                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
5431                 info.compCompHnd->resolveToken(&delegateResolvedToken);
5432
5433                 CORINFO_CALL_INFO delegateCallInfo;
5434                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
5435                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
5436
5437                 BOOL isOpenDelegate = FALSE;
5438                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
5439                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
5440                                                                       &isOpenDelegate),
5441                                "function incompatible with delegate");
5442
5443                 // check the constraints on the target method
5444                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
5445                                "delegate target has unsatisfied class constraints");
5446                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
5447                                                                             tiActualFtn.GetMethod()),
5448                                "delegate target has unsatisfied method constraints");
5449
5450                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
5451                 // for additional verification rules for delegates
5452                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
5453                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
5454                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5455                 {
5456
5457                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
5458 #ifdef DEBUG
5459                         && StrictCheckForNonVirtualCallToVirtualMethod()
5460 #endif
5461                             )
5462                     {
5463                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5464                         {
5465                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
5466                                                verIsBoxedValueType(tiActualObj),
5467                                            "The 'this' parameter to the call must be either the calling method's "
5468                                            "'this' parameter or "
5469                                            "a boxed value type.");
5470                         }
5471                     }
5472                 }
5473
5474                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
5475                 {
5476                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
5477
5478                     Verify(targetIsStatic || !isOpenDelegate,
5479                            "Unverifiable creation of an open instance delegate for a protected member.");
5480
5481                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
5482                                                                 ? info.compClassHnd
5483                                                                 : tiActualObj.GetClassHandleForObjRef();
5484
5485                     // In the case of protected methods, it is a requirement that the 'this'
5486                     // pointer be a subclass of the current context.  Perform this check.
5487                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5488                            "Accessing protected method through wrong type.");
5489                 }
5490                 goto DONE_ARGS;
5491             }
5492         }
5493         // fall thru to default checks
5494         default:
5495             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
5496     }
5497     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
5498                    "can only newobj a delegate constructor");
5499
5500     // check compatibility of the arguments
5501     unsigned int argCount;
5502     argCount = sig->numArgs;
5503     CORINFO_ARG_LIST_HANDLE args;
5504     args = sig->args;
5505     while (argCount--)
5506     {
5507         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
5508
5509         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
5510         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
5511
5512         args = info.compCompHnd->getArgNext(args);
5513     }
5514
5515 DONE_ARGS:
5516
5517     // update popCount
5518     popCount += sig->numArgs;
5519
5520     // check for 'this', which is present for non-static methods not called via NEWOBJ
5521     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
5522     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
5523     {
5524         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5525         popCount++;
5526
5527         // If it is null, we assume we can access it (since it will AV shortly)
5528         // If it is anything but a reference class, there is no hierarchy, so
5529         // again, we don't need the precise instance class to compute 'protected' access
5530         if (tiThis.IsType(TI_REF))
5531         {
5532             instanceClassHnd = tiThis.GetClassHandleForObjRef();
5533         }
5534
5535         // Check type compatibility of the this argument
5536         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
5537         if (tiDeclaredThis.IsValueClass())
5538         {
5539             tiDeclaredThis.MakeByRef();
5540         }
5541
5542         // If this is a call to the base class .ctor, set thisPtr Init for
5543         // this block.
5544         if (mflags & CORINFO_FLG_CONSTRUCTOR)
5545         {
5546             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
5547                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
5548             {
5549                 assert(verCurrentState.thisInitialized !=
5550                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
5551                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
5552                                "Call to base class constructor when 'this' is possibly initialized");
5553                 // Otherwise, 'this' is now initialized.
5554                 verCurrentState.thisInitialized = TIS_Init;
5555                 tiThis.SetInitialisedObjRef();
5556             }
5557             else
5558             {
5559                 // We allow direct calls to value type constructors
5560                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
5561                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
5562                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
5563                                "Bad call to a constructor");
5564             }
5565         }
5566
5567         if (pConstrainedResolvedToken != nullptr)
5568         {
5569             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
5570
5571             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
5572
5573             // We just dereference this and test for equality
5574             tiThis.DereferenceByRef();
5575             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
5576                            "this type mismatch with constrained type operand");
5577
5578             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
5579             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
5580         }
5581
5582         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
5583         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
5584         {
5585             tiDeclaredThis.SetIsReadonlyByRef();
5586         }
5587
5588         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
5589
5590         if (tiThis.IsByRef())
5591         {
5592             // Find the actual type where the method exists (as opposed to what is declared
5593             // in the metadata). This is to prevent passing a byref as the "this" argument
5594             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
5595
5596             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
5597             VerifyOrReturn(eeIsValueClass(actualClassHnd),
5598                            "Call to base type of valuetype (which is never a valuetype)");
5599         }
5600
5601         // Rules for non-virtual call to a non-final virtual method:
5602
5603         // Define:
5604         // The "this" pointer is considered to be "possibly written" if
5605         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
5606         //   (or)
5607         //   2. It has been stored to (STARG.0) anywhere in the method.
5608
5609         // A non-virtual call to a non-final virtual method is only allowed if
5610         //   1. The this pointer passed to the callee is an instance of a boxed value type.
5611         //   (or)
5612         //   2. The this pointer passed to the callee is the current method's this pointer.
5613         //      (and) The current method's this pointer is not "possibly written".
5614
5615         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
5616         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
5617         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
5618         // harder and more error prone.
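        //
        // For example (illustrative C#): "base.ToString()" inside a class compiles to a non-virtual
        // 'call' to the virtual Object::ToString; that call is verifiable only when the receiver is
        // the current method's unmodified 'this' parameter (or a boxed value type).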
5619
5620         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
5621 #ifdef DEBUG
5622             && StrictCheckForNonVirtualCallToVirtualMethod()
5623 #endif
5624                 )
5625         {
5626             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5627             {
5628                 VerifyOrReturn(
5629                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
5630                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
5631                     "a boxed value type.");
5632             }
5633         }
5634     }
5635
5636     // check any constraints on the callee's class and type parameters
5637     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
5638                    "method has unsatisfied class constraints");
5639     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
5640                    "method has unsatisfied method constraints");
5641
5642     if (mflags & CORINFO_FLG_PROTECTED)
5643     {
5644         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5645                        "Can't access protected method");
5646     }
5647
5648     // Get the exact view of the signature for an array method
5649     if (sig->retType != CORINFO_TYPE_VOID)
5650     {
5651         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
5652     }
5653
5654     // "readonly." prefixed calls are only allowed for the Address operation on arrays.
5655     // The methods supported by array types are under the control of the EE
5656     // so we can trust that only the Address operation returns a byref.
5657     if (readonlyCall)
5658     {
5659         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
5660         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
5661                        "unexpected use of readonly prefix");
5662     }
5663
5664     // Verify the tailcall
5665     if (tailCall)
5666     {
5667         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
5668     }
5669 }
5670
5671 /*****************************************************************************
5672  *  Checks that a delegate creation is done using the following pattern:
5673  *     dup
5674  *     ldvirtftn targetMemberRef
5675  *  OR
5676  *     ldftn targetMemberRef
5677  *
5678  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
5679  *  not in this basic block)
5680  *
5681  *  targetMemberRef is read from the code sequence.
5682  *  targetMemberRef is validated iff verificationNeeded.
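 *
 *  The token is read at a fixed offset from 'delegateCreateStart': offset 2 past a ldftn
 *  (a two-byte 0xFE-prefixed opcode) or offset 3 past a dup + ldvirtftn sequence, matching
 *  the reads in the body below.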
5683  */
5684
5685 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
5686                                         const BYTE*  codeAddr,
5687                                         mdMemberRef& targetMemberRef)
5688 {
5689     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5690     {
5691         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
5692         return TRUE;
5693     }
5694     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
5695     {
5696         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
5697         return TRUE;
5698     }
5699
5700     return FALSE;
5701 }
5702
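// Verify an indirect store: 'tiTo' must be a writable (non-readonly) byref, and 'value' must be
// stack-compatible with the pointee type implied by the st*ind instruction ('instrType').
// Returns the verified pointee type.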
5703 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
5704 {
5705     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
5706     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
5707     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
5708     if (!tiCompatibleWith(value, normPtrVal, true))
5709     {
5710         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
5711         compUnsafeCastUsed = true;
5712     }
5713     return ptrVal;
5714 }
5715
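// Verify an indirect load: 'ptr' must be a byref whose pointee type is consistent with the type
// implied by the ld*ind instruction ('instrType'). Returns the dereferenced (pointee) type.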
5716 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
5717 {
5718     assert(!instrType.IsStruct());
5719
5720     typeInfo ptrVal;
5721     if (ptr.IsByRef())
5722     {
5723         ptrVal = DereferenceByRef(ptr);
5724         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
5725         {
5726             Verify(false, "bad pointer");
5727             compUnsafeCastUsed = true;
5728         }
5729         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
5730         {
5731             Verify(false, "pointer not consistent with instr");
5732             compUnsafeCastUsed = true;
5733         }
5734     }
5735     else
5736     {
5737         Verify(false, "pointer not byref");
5738         compUnsafeCastUsed = true;
5739     }
5740
5741     return ptrVal;
5742 }
5743
5744 // Verify that the field is used properly.  'tiThis' is NULL for statics,
5745 // 'fieldFlags' is the field's attributes, and 'mutator' is TRUE if it is a
5746 // ld*flda or a st*fld.
5747 // 'enclosingClass' is given if we are accessing a field in some specific type.
5748
5749 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
5750                               const CORINFO_FIELD_INFO& fieldInfo,
5751                               const typeInfo*           tiThis,
5752                               BOOL                      mutator,
5753                               BOOL                      allowPlainStructAsThis)
5754 {
5755     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5756     unsigned             fieldFlags     = fieldInfo.fieldFlags;
5757     CORINFO_CLASS_HANDLE instanceClass =
5758         info.compClassHnd; // for statics, we imagine the instance is the current class.
5759
5760     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5761     if (mutator)
5762     {
5763         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
5764         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5765         {
5766             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5767                        info.compIsStatic == isStaticField,
5768                    "bad use of initonly field (set or address taken)");
5769         }
5770     }
5771
5772     if (tiThis == nullptr)
5773     {
5774         Verify(isStaticField, "used static opcode with non-static field");
5775     }
5776     else
5777     {
5778         typeInfo tThis = *tiThis;
5779
5780         if (allowPlainStructAsThis && tThis.IsValueClass())
5781         {
5782             tThis.MakeByRef();
5783         }
5784
5785         // If it is null, we assume we can access it (since it will AV shortly)
5786         // If it is anything but a reference class, there is no hierarchy, so
5787         // again, we don't need the precise instance class to compute 'protected' access
5788         if (tiThis->IsType(TI_REF))
5789         {
5790             instanceClass = tiThis->GetClassHandleForObjRef();
5791         }
5792
5793         // Note that even if the field is static, we require that the this pointer
5794         // satisfy the same constraints as a non-static field.  This happens to
5795         // be simpler and seems reasonable.
5796         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5797         if (tiDeclaredThis.IsValueClass())
5798         {
5799             tiDeclaredThis.MakeByRef();
5800
5801             // we allow read-only tThis, on any field access (even stores!), because if the
5802             // class implementor wants to prohibit stores he should make the field private.
5803             // we do this by setting the read-only bit on the type we compare tThis to.
5804             tiDeclaredThis.SetIsReadonlyByRef();
5805         }
5806         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5807         {
5808             // Any field access is legal on "uninitialized" this pointers.
5809             // The easiest way to implement this is to simply set the
5810             // initialized bit for the duration of the type check on the
5811             // field access only.  It does not change the state of the "this"
5812             // for the function as a whole. Note that the "tThis" is a copy
5813             // of the original "this" type (*tiThis) passed in.
5814             tThis.SetInitialisedObjRef();
5815         }
5816
5817         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5818     }
5819
5820     // Presently the JIT does not check that we don't store or take the address of init-only fields
5821     // since we cannot guarantee their immutability and it is not a security issue.
5822
5823     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5824     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5825                    "field has unsatisfied class constraints");
5826     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5827     {
5828         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5829                "Accessing protected method through wrong type.");
5830     }
5831 }
5832
5833 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5834 {
5835     if (tiOp1.IsNumberType())
5836     {
5837 #ifdef _TARGET_64BIT_
5838         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5839 #else  // _TARGET_64BIT
5840         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5841         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5842         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5843         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5844 #endif // !_TARGET_64BIT_
5845     }
5846     else if (tiOp1.IsObjRef())
5847     {
5848         switch (opcode)
5849         {
5850             case CEE_BEQ_S:
5851             case CEE_BEQ:
5852             case CEE_BNE_UN_S:
5853             case CEE_BNE_UN:
5854             case CEE_CEQ:
5855             case CEE_CGT_UN:
5856                 break;
5857             default:
5858                 Verify(FALSE, "Cond not allowed on object types");
5859         }
5860         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5861     }
5862     else if (tiOp1.IsByRef())
5863     {
5864         Verify(tiOp2.IsByRef(), "Cond type mismatch");
5865     }
5866     else
5867     {
5868         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5869     }
5870 }
5871
5872 void Compiler::verVerifyThisPtrInitialised()
5873 {
5874     if (verTrackObjCtorInitState)
5875     {
5876         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5877     }
5878 }
5879
5880 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5881 {
5882     // Either target == context, in which case we are calling an alternate .ctor,
5883     // or target is the immediate parent of context.
5884
5885     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
5886 }
5887
5888 GenTree* Compiler::impImportLdvirtftn(GenTree*                thisPtr,
5889                                       CORINFO_RESOLVED_TOKEN* pResolvedToken,
5890                                       CORINFO_CALL_INFO*      pCallInfo)
5891 {
5892     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5893     {
5894         NO_WAY("Virtual call to a function added via EnC is not supported");
5895     }
5896
5897     // CoreRT generic virtual method
5898     if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5899     {
5900         GenTree* runtimeMethodHandle = nullptr;
5901         if (pCallInfo->exactContextNeedsRuntimeLookup)
5902         {
5903             runtimeMethodHandle =
5904                 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5905         }
5906         else
5907         {
5908             runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5909         }
5910         return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5911                                    gtNewArgList(thisPtr, runtimeMethodHandle));
5912     }
5913
5914 #ifdef FEATURE_READYTORUN_COMPILER
5915     if (opts.IsReadyToRun())
5916     {
5917         if (!pCallInfo->exactContextNeedsRuntimeLookup)
5918         {
5919             GenTreeCall* call =
5920                 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5921
5922             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5923
5924             return call;
5925         }
5926
5927         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5928         if (IsTargetAbi(CORINFO_CORERT_ABI))
5929         {
5930             GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5931
5932             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5933                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5934         }
5935     }
5936 #endif
5937
5938     // Get the exact descriptor for the static callsite
5939     GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5940     if (exactTypeDesc == nullptr)
5941     { // compDonotInline()
5942         return nullptr;
5943     }
5944
5945     GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
5946     if (exactMethodDesc == nullptr)
5947     { // compDonotInline()
5948         return nullptr;
5949     }
5950
5951     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5952
5953     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5954
5955     helpArgs = gtNewListNode(thisPtr, helpArgs);
5956
5957     // Call helper function.  This gets the target address of the final destination callsite.
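    // (The arg list above was built by prepending, so the helper's arguments end up ordered as
    // (thisPtr, exactTypeDesc, exactMethodDesc).)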
5958
5959     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5960 }
5961
5962 //------------------------------------------------------------------------
5963 // impImportAndPushBox: build and import a value-type box
5964 //
5965 // Arguments:
5966 //   pResolvedToken - resolved token from the box operation
5967 //
5968 // Return Value:
5969 //   None.
5970 //
5971 // Side Effects:
5972 //   The value to be boxed is popped from the stack, and a tree for
5973 //   the boxed value is pushed. This method may create upstream
5974 //   statements, spill side effecting trees, and create new temps.
5975 //
5976 //   If importing an inlinee, we may also discover the inline must
5977 //   fail. If so there is no new value pushed on the stack. Callers
5978 //   should use CompDoNotInline after calling this method to see if
5979 //   ongoing importation should be aborted.
5980 //
5981 // Notes:
5982 //   Boxing of ref classes results in the same value as the value on
5983 //   the top of the stack, so is handled inline in impImportBlockCode
5984 //   for the CEE_BOX case. Only value or primitive type boxes make it
5985 //   here.
5986 //
5987 //   Boxing for nullable types is done via a helper call; boxing
5988 //   of other value types is expanded inline or handled via helper
5989 //   call, depending on the jit's codegen mode.
5990 //
5991 //   When the jit is operating in size and time constrained modes,
5992 //   using a helper call here can save jit time and code size. But it
5993 //   also may inhibit cleanup optimizations that could have also had a
5994 //   also may inhibit cleanup optimizations that could have had an
5995 //   even greater effect on code size and jit time. An optimal
5996 //   the box is being used. For now, we defer.
5997
5998 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5999 {
6000     // Spill any special side effects
6001     impSpillSpecialSideEff();
6002
6003     // Get the expression to box from the stack.
6004     GenTree*             op1       = nullptr;
6005     GenTree*             op2       = nullptr;
6006     StackEntry           se        = impPopStack();
6007     CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
6008     GenTree*             exprToBox = se.val;
6009
6010     // Look at what helper we should use.
6011     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
6012
6013     // Determine what expansion to prefer.
6014     //
6015     // In size/time/debuggable constrained modes, the helper call
6016     // expansion for box is generally smaller and is preferred, unless
6017     // the value to box is a struct that comes from a call. In that
6018     // case the call can construct its return value directly into the
6019     // box payload, saving possibly some up-front zeroing.
6020     //
6021     // Currently primitive type boxes always get inline expanded. We may
6022     // want to do the same for small structs if they don't come from
6023     // calls and don't have GC pointers, since explicitly copying such
6024     // structs is cheap.
6025     JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
6026     bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
6027     bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
6028     bool expandInline    = canExpandInline && !optForSize;
6029
6030     if (expandInline)
6031     {
6032         JITDUMP(" inline allocate/copy sequence\n");
6033
6034         // we are doing 'normal' boxing.  This means that we can inline the box operation
6035         // Box(expr) gets morphed into
6036         // temp = new(clsHnd)
6037         // cpobj(temp+4, expr, clsHnd)
6038         // push temp
6039         // The code paths differ slightly below for structs and primitives because
6040         // "cpobj" differs in these cases.  In one case you get
6041         //    impAssignStructPtr(temp+4, expr, clsHnd)
6042         // and the other you get
6043         //    *(temp+4) = expr
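        // (The "+4" above is shorthand for the offset of the box payload just past the method table
        // pointer; the code below actually adds TARGET_POINTER_SIZE, so it is 8 on 64-bit targets.)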
6044
6045         if (opts.MinOpts() || opts.compDbgCode)
6046         {
6047             // For minopts/debug code, try and minimize the total number
6048             // of box temps by reusing an existing temp when possible.
6049             if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
6050             {
6051                 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
6052             }
6053         }
6054         else
6055         {
6056             // When optimizing, use a new temp for each box operation
6057             // since we then know the exact class of the box temp.
6058             impBoxTemp                       = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
6059             lvaTable[impBoxTemp].lvType      = TYP_REF;
6060             lvaTable[impBoxTemp].lvSingleDef = 1;
6061             JITDUMP("Marking V%02u as a single def local\n", impBoxTemp);
6062             const bool isExact = true;
6063             lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
6064         }
6065
6066         // The box temp needs to stay in use until this box expression is appended to
6067         // some other node.  We approximate this by keeping it alive until
6068         // the opcode stack becomes empty.
6069         impBoxTempInUse = true;
6070
6071 #ifdef FEATURE_READYTORUN_COMPILER
6072         bool usingReadyToRunHelper = false;
6073
6074         if (opts.IsReadyToRun())
6075         {
6076             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
6077             usingReadyToRunHelper = (op1 != nullptr);
6078         }
6079
6080         if (!usingReadyToRunHelper)
6081 #endif
6082         {
6083             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
6084             // and the newfast call with a single call to a dynamic R2R cell that will:
6085             //      1) Load the context
6086             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
6087             //      3) Allocate and return the new object for boxing
6088             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
6089
6090             // Ensure that the value class is restored
6091             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
6092             if (op2 == nullptr)
6093             {
6094                 // We must be backing out of an inline.
6095                 assert(compDonotInline());
6096                 return;
6097             }
6098
6099             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
6100                                     pResolvedToken->hClass, TYP_REF, op2);
6101         }
6102
6103         /* Remember that this basic block contains 'new' of an object, and so does this method */
6104         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
6105         optMethodFlags |= OMF_HAS_NEWOBJ;
6106
6107         GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
6108
6109         GenTree* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6110
6111         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
6112         op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
6113         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
6114
6115         if (varTypeIsStruct(exprToBox))
6116         {
6117             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
6118             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
6119         }
6120         else
6121         {
6122             var_types lclTyp = exprToBox->TypeGet();
6123             if (lclTyp == TYP_BYREF)
6124             {
6125                 lclTyp = TYP_I_IMPL;
6126             }
6127             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
6128             if (impIsPrimitive(jitType))
6129             {
6130                 lclTyp = JITtype2varType(jitType);
6131             }
6132             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
6133                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
6134             var_types srcTyp = exprToBox->TypeGet();
6135             var_types dstTyp = lclTyp;
6136
6137             if (srcTyp != dstTyp)
6138             {
6139                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
6140                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
6141                 exprToBox = gtNewCastNode(dstTyp, exprToBox, false, dstTyp);
6142             }
6143             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
6144         }
6145
6146         // Spill eval stack to flush out any pending side effects.
6147         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
6148
6149         // Set up this copy as a second assignment.
6150         GenTree* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6151
6152         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
6153
6154         // Record that this is a "box" node and keep track of the matching parts.
6155         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
6156
6157         // If it is a value class, mark the "box" node.  We can use this information
6158         // to optimise several cases:
6159         //    "box(x) == null" --> false
6160         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
6161         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
6162
6163         op1->gtFlags |= GTF_BOX_VALUE;
6164         assert(op1->IsBoxedValue());
6165         assert(asg->gtOper == GT_ASG);
6166     }
6167     else
6168     {
6169         // Don't optimize, just call the helper and be done with it.
6170         JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
6171         assert(operCls != nullptr);
6172
6173         // Ensure that the value class is restored
6174         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
6175         if (op2 == nullptr)
6176         {
6177             // We must be backing out of an inline.
6178             assert(compDonotInline());
6179             return;
6180         }
6181
6182         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
6183         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, args);
6184     }
6185
6186     /* Push the result back on the stack, */
6187     /* even if clsHnd is a value class we want the TI_REF */
6188     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
6189     impPushOnStack(op1, tiRetVal);
6190 }
6191
6192 //------------------------------------------------------------------------
6193 // impImportNewObjArray: Build and import `new` of a multi-dimensional array
6194 //
6195 // Arguments:
6196 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6197 //                     by a call to CEEInfo::resolveToken().
6198 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
6199 //                by a call to CEEInfo::getCallInfo().
6200 //
6201 // Assumptions:
6202 //    The multi-dimensional array constructor arguments (array dimensions) are
6203 //    pushed on the IL stack on entry to this method.
6204 //
6205 // Notes:
6206 //    Multi-dimensional array constructors are imported as calls to a JIT
6207 //    helper, not as regular calls.
6208
6209 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
6210 {
6211     GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
6212     if (classHandle == nullptr)
6213     { // compDonotInline()
6214         return;
6215     }
6216
6217     assert(pCallInfo->sig.numArgs);
6218
6219     GenTree*        node;
6220     GenTreeArgList* args;
6221
6222     //
6223     // There are two different JIT helpers that can be used to allocate
6224     // multi-dimensional arrays:
6225     //
6226     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
6227     //      This variant is deprecated. It should be eventually removed.
6228     //
6229     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
6230     //      pointer to block of int32s. This variant is more portable.
6231     //
6232     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
6233     // unconditionally would require a ReadyToRun version bump.
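    //
    // For example (illustrative), "new int[2,3]" is imported here as a call
    //     CORINFO_HELP_NEW_MDARR_NONVARARG(classHandle-of-int[,], 2, &dims)
    // where the two dimension values have first been stored into the lvaNewObjArrayArgs block.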
6234     //
6235     CLANG_FORMAT_COMMENT_ANCHOR;
6236
6237     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
6238     {
6239
6240         // Reuse the temp used to pass the array dimensions to avoid bloating
6241         // the stack frame in case there are multiple calls to multi-dim array
6242         // constructors within a single method.
6243         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
6244         {
6245             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
6246             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
6247             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
6248         }
6249
6250         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
6251         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
6252         lvaTable[lvaNewObjArrayArgs].lvExactSize =
6253             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
6254
6255         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
6256         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
6257         // to one allocation at a time.
6258         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
6259
6260         //
6261         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
6262         //  - Array class handle
6263         //  - Number of dimension arguments
6264         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
6265         //
6266
6267         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
6268         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
6269
6270         // Pop the dimension arguments from the stack one at a time and store them
6271         // into the lvaNewObjArrayArgs temp.
6272         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
6273         {
6274             GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
6275
6276             GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
6277             dest          = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
6278             dest          = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
6279                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
6280             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
6281
6282             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
6283         }
6284
6285         args = gtNewArgList(node);
6286
6287         // pass number of arguments to the helper
6288         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
6289
6290         args = gtNewListNode(classHandle, args);
6291
6292         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
6293     }
6294     else
6295     {
6296         //
6297         // The varargs helper needs the type and method handles as last
6298         // and  last-1 param (this is a cdecl call, so args will be
6299         // pushed in reverse order on the CPU stack)
6300         //
6301
6302         args = gtNewArgList(classHandle);
6303
6304         // pass number of arguments to the helper
6305         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
6306
6307         unsigned argFlags = 0;
6308         args              = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
6309
6310         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
6311
6312         // varargs, so we pop the arguments
6313         node->gtFlags |= GTF_CALL_POP_ARGS;
6314
6315 #ifdef DEBUG
6316         // At the present time we don't track Caller pop arguments
6317         // that have GC references in them
6318         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
6319         {
6320             assert(temp->Current()->gtType != TYP_REF);
6321         }
6322 #endif
6323     }
6324
6325     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6326     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
6327
6328     // Remember that this basic block contains 'new' of a md array
6329     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
6330
6331     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
6332 }
6333
6334 GenTree* Compiler::impTransformThis(GenTree*                thisPtr,
6335                                     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6336                                     CORINFO_THIS_TRANSFORM  transform)
6337 {
6338     switch (transform)
6339     {
6340         case CORINFO_DEREF_THIS:
6341         {
6342             GenTree* obj = thisPtr;
6343
6344             // This does a LDIND on the obj, which should be a byref pointing to a ref
6345             impBashVarAddrsToI(obj);
6346             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
6347             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
6348
6349             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
6350             // ldind could point anywhere, for example a boxed class static int
6351             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
6352
6353             return obj;
6354         }
6355
6356         case CORINFO_BOX_THIS:
6357         {
6358             // Constraint calls where there might be no
6359             // unboxed entry point require us to implement the call via helper.
6360             // These only occur when a possible target of the call
6361             // may have inherited an implementation of an interface
6362             // method from System.Object or System.ValueType.  The EE does not provide us with
6363             // "unboxed" versions of these methods.
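            //
            // For example (illustrative C#): calling ToString() on a struct that does not override it
            // resolves to System.ValueType::ToString(), for which no unboxed entry point exists, so
            // the constrained call must box the value and dispatch on the boxed object.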
6364
6365             GenTree* obj = thisPtr;
6366
6367             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
6368             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
6369             obj->gtFlags |= GTF_EXCEPT;
6370
6371             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
6372             var_types   objType = JITtype2varType(jitTyp);
6373             if (impIsPrimitive(jitTyp))
6374             {
6375                 if (obj->OperIsBlk())
6376                 {
6377                     obj->ChangeOperUnchecked(GT_IND);
6378
6379                     // Obj could point anywhere, for example a boxed class static int
6380                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
6381                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
6382                 }
6383
6384                 obj->gtType = JITtype2varType(jitTyp);
6385                 assert(varTypeIsArithmetic(obj->gtType));
6386             }
6387
6388             // This pushes on the dereferenced byref
6389             // This is then used immediately to box.
6390             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
6391
6392             // This pops off the byref-to-a-value-type remaining on the stack and
6393             // replaces it with a boxed object.
6394             // This is then used as the object to the virtual call immediately below.
6395             impImportAndPushBox(pConstrainedResolvedToken);
6396             if (compDonotInline())
6397             {
6398                 return nullptr;
6399             }
6400
6401             obj = impPopStack().val;
6402             return obj;
6403         }
6404         case CORINFO_NO_THIS_TRANSFORM:
6405         default:
6406             return thisPtr;
6407     }
6408 }
6409
6410 //------------------------------------------------------------------------
6411 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
6412 //
6413 // Return Value:
6414 //    true if PInvoke inlining should be enabled in current method, false otherwise
6415 //
6416 // Notes:
6417 //    Checks a number of ambient conditions where we could pinvoke but choose not to
6418
6419 bool Compiler::impCanPInvokeInline()
6420 {
6421     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
6422            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
6423         ;
6424 }
6425
6426 //------------------------------------------------------------------------
6427 // impCanPInvokeInlineCallSite: basic legality checks using information
6428 // from a call to see if the call qualifies as an inline pinvoke.
6429 //
6430 // Arguments:
6431 //    block      - block containing the call, or for inlinees, block
6432 //                 containing the call being inlined
6433 //
6434 // Return Value:
6435 //    true if this call can legally qualify as an inline pinvoke, false otherwise
6436 //
6437 // Notes:
6438 //    For runtimes that support exception handling interop there are
6439 //    restrictions on using inline pinvoke in handler regions.
6440 //
6441 //    * We have to disable pinvoke inlining inside of filters because
6442 //    in case the main execution (i.e. in the try block) is inside
6443 //    unmanaged code, we cannot reuse the inlined stub (we still need
6444 //    the original state until we are in the catch handler)
6445 //
6446 //    * We disable pinvoke inlining inside handlers since the GSCookie
6447 //    is in the inlined Frame (see
6448 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
6449 //    this would not protect framelets/return-address of handlers.
6450 //
6451 //    These restrictions are currently also in place for CoreCLR but
6452 //    can be relaxed when coreclr/#8459 is addressed.
6453
6454 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
6455 {
6456     if (block->hasHndIndex())
6457     {
6458         return false;
6459     }
6460
6461     // The remaining limitations do not apply to CoreRT
6462     if (IsTargetAbi(CORINFO_CORERT_ABI))
6463     {
6464         return true;
6465     }
6466
6467 #ifdef _TARGET_AMD64_
6468     // On x64, we disable pinvoke inlining inside of try regions.
6469     // Here is the comment from JIT64 explaining why:
6470     //
6471     //   [VSWhidbey: 611015] - because the jitted code links in the
6472     //   Frame (instead of the stub) we rely on the Frame not being
6473     //   'active' until inside the stub.  This normally happens by the
6474     //   stub setting the return address pointer in the Frame object
6475     //   inside the stub.  On a normal return, the return address
6476     //   pointer is zeroed out so the Frame can be safely re-used, but
6477     //   if an exception occurs, nobody zeros out the return address
6478     //   pointer.  Thus if we re-used the Frame object, it would go
6479     //   'active' as soon as we link it into the Frame chain.
6480     //
6481     //   Technically we only need to disable PInvoke inlining if we're
6482     //   in a handler or if we're in a try body with a catch or
6483     //   filter/except where other non-handler code in this method
6484     //   might run and try to re-use the dirty Frame object.
6485     //
6486     //   A desktop test case where this seems to matter is
6487     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
6488     if (block->hasTryIndex())
6489     {
6490         return false;
6491     }
6492 #endif // _TARGET_AMD64_
6493
6494     return true;
6495 }
6496
6497 //------------------------------------------------------------------------
6498 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
6499 // whether it can be expressed as an inline pinvoke.
6500 //
6501 // Arguments:
6502 //    call       - tree for the call
6503 //    methHnd    - handle for the method being called (may be null)
6504 //    sig        - signature of the method being called
6505 //    mflags     - method flags for the method being called
6506 //    block      - block containing the call, or for inlinees, block
6507 //                 containing the call being inlined
6508 //
6509 // Notes:
6510 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
6511 //
6512 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
6513 //   call passes a combination of legality and profitability checks.
6514 //
6515 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
6516
6517 void Compiler::impCheckForPInvokeCall(
6518     GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
6519 {
6520     CorInfoUnmanagedCallConv unmanagedCallConv;
6521
6522     // If VM flagged it as Pinvoke, flag the call node accordingly
6523     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
6524     {
6525         call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
6526     }
6527
6528     if (methHnd)
6529     {
6530         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
6531         {
6532             return;
6533         }
6534
6535         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
6536     }
6537     else
6538     {
6539         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
6540         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
6541         {
6542             // Used by the IL Stubs.
6543             callConv = CORINFO_CALLCONV_C;
6544         }
6545         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
6546         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
6547         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
6548         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
6549
6550         assert(!call->gtCallCookie);
6551     }
6552
6553     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
6554         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
6555     {
6556         return;
6557     }
6558     optNativeCallCount++;
6559
6560     if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI)))
6561     {
6562         // PInvoke in the CoreRT ABI must always be inlined. Non-inlineable CALLI cases have been
6563         // converted to regular method calls earlier using convertPInvokeCalliToCall.
6564
6565         // PInvoke CALLI in IL stubs must be inlined
6566     }
6567     else
6568     {
6569         // Check legality
6570         if (!impCanPInvokeInlineCallSite(block))
6571         {
6572             return;
6573         }
6574
6575         // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive
6576         // inlining in CoreRT. Skip the ambient conditions checks and profitability checks.
6577         if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0)
6578         {
6579             if (!impCanPInvokeInline())
6580             {
6581                 return;
6582             }
6583
6584             // Size-speed tradeoff: don't use inline pinvoke at rarely
6585             // executed call sites.  The non-inline version is more
6586             // compact.
6587             if (block->isRunRarely())
6588             {
6589                 return;
6590             }
6591         }
6592
6593         // The expensive check should be last
6594         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
6595         {
6596             return;
6597         }
6598     }
6599
6600     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
6601
6602     call->gtFlags |= GTF_CALL_UNMANAGED;
6603     info.compCallUnmanaged++;
6604
6605     // AMD64 convention is same for native and managed
6606     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
6607     {
6608         call->gtFlags |= GTF_CALL_POP_ARGS;
6609     }
6610
6611     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
6612     {
6613         call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
6614     }
6615 }
6616
6617 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
6618 {
6619     var_types callRetTyp = JITtype2varType(sig->retType);
6620
6621     /* The function pointer is on top of the stack - It may be a
6622      * complex expression. As it is evaluated after the args,
6623      * it may cause registered args to be spilled. Simply spill it.
6624      */
6625
6626     // Ignore this trivial case.
6627     if (impStackTop().val->gtOper != GT_LCL_VAR)
6628     {
6629         impSpillStackEntry(verCurrentState.esStackDepth - 1,
6630                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
6631     }
6632
6633     /* Get the function pointer */
6634
6635     GenTree* fptr = impPopStack().val;
6636
6637     // The function pointer is typically sized to match the target pointer size
6638     // However, stubgen IL optimization can change LDC.I8 to LDC.I4
6639     // See ILCodeStream::LowerOpcode
6640     assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
6641
6642 #ifdef DEBUG
6643     // This temporary must never be converted to a double in stress mode,
6644     // because that can introduce a call to the cast helper after the
6645     // arguments have already been evaluated.
6646
6647     if (fptr->OperGet() == GT_LCL_VAR)
6648     {
6649         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
6650     }
6651 #endif
6652
6653     /* Create the call node */
6654
6655     GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6656
6657     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6658
6659     return call;
6660 }
6661
6662 /*****************************************************************************/
6663
6664 void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
6665 {
6666     assert(call->gtFlags & GTF_CALL_UNMANAGED);
6667
6668     /* Since we push the arguments in reverse order (i.e. right -> left)
6669      * spill any side effects from the stack
6670      *
6671      * OBS: If there is only one side effect we do not need to spill it
6672      *      thus we have to spill all side-effects except last one
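     *
     *      For example, with two side-effecting arguments A (pushed first) and B, A is spilled to a
     *      temp so its side effect still happens before B's, even though the argument list is popped
     *      in reverse order below.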
6673      */
6674
6675     unsigned lastLevelWithSideEffects = UINT_MAX;
6676
6677     unsigned argsToReverse = sig->numArgs;
6678
6679     // For "thiscall", the first argument goes in a register. Since its
6680     // order does not need to be changed, we do not need to spill it
6681
6682     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6683     {
6684         assert(argsToReverse);
6685         argsToReverse--;
6686     }
6687
6688 #ifndef _TARGET_X86_
6689     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
6690     argsToReverse = 0;
6691 #endif
6692
6693     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
6694     {
6695         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
6696         {
6697             assert(lastLevelWithSideEffects == UINT_MAX);
6698
6699             impSpillStackEntry(level,
6700                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
6701         }
6702         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
6703         {
6704             if (lastLevelWithSideEffects != UINT_MAX)
6705             {
6706                 /* We had a previous side effect - must spill it */
6707                 impSpillStackEntry(lastLevelWithSideEffects,
6708                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
6709
6710                 /* Record the level of the current side effect in case we need to spill it later */
6711                 lastLevelWithSideEffects = level;
6712             }
6713             else
6714             {
6715                 /* This is the first side effect encountered - record its level */
6716
6717                 lastLevelWithSideEffects = level;
6718             }
6719         }
6720     }
6721
6722     /* The argument list is now "clean" - no out-of-order side effects
6723      * Pop the argument list in reverse order */
6724
6725     GenTree* args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
6726
6727     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6728     {
6729         GenTree* thisPtr = args->Current();
6730         impBashVarAddrsToI(thisPtr);
6731         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
6732     }
6733
6734     if (args)
6735     {
6736         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6737     }
6738 }
6739
6740 //------------------------------------------------------------------------
6741 // impInitClass: Build a node to initialize the class before accessing the
6742 //               field if necessary
6743 //
6744 // Arguments:
6745 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6746 //                     by a call to CEEInfo::resolveToken().
6747 //
6748 // Return Value: If needed, a pointer to the node that will perform the class
6749 //               initialization.  Otherwise, nullptr.
6750 //
6751
6752 GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6753 {
6754     CorInfoInitClassResult initClassResult =
6755         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
6756
6757     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6758     {
6759         return nullptr;
6760     }
6761     BOOL runtimeLookup;
6762
6763     GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6764
6765     if (node == nullptr)
6766     {
6767         assert(compDonotInline());
6768         return nullptr;
6769     }
6770
6771     if (runtimeLookup)
6772     {
6773         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6774     }
6775     else
6776     {
6777             // Call the shared non-GC static helper, as it's the fastest
6778         node = fgGetSharedCCtor(pResolvedToken->hClass);
6779     }
6780
6781     return node;
6782 }
6783
6784 GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6785 {
6786     GenTree* op1 = nullptr;
6787
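         // Read the field's value directly out of memory and wrap it in the matching constant node
         // (GT_CNS_INT, GT_CNS_LNG, or GT_CNS_DBL), effectively folding the read-only static at jit time.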
6788     switch (lclTyp)
6789     {
6790         int     ival;
6791         __int64 lval;
6792         double  dval;
6793
6794         case TYP_BOOL:
6795             ival = *((bool*)fldAddr);
6796             goto IVAL_COMMON;
6797
6798         case TYP_BYTE:
6799             ival = *((signed char*)fldAddr);
6800             goto IVAL_COMMON;
6801
6802         case TYP_UBYTE:
6803             ival = *((unsigned char*)fldAddr);
6804             goto IVAL_COMMON;
6805
6806         case TYP_SHORT:
6807             ival = *((short*)fldAddr);
6808             goto IVAL_COMMON;
6809
6810         case TYP_USHORT:
6811             ival = *((unsigned short*)fldAddr);
6812             goto IVAL_COMMON;
6813
6814         case TYP_UINT:
6815         case TYP_INT:
6816             ival = *((int*)fldAddr);
6817         IVAL_COMMON:
6818             op1 = gtNewIconNode(ival);
6819             break;
6820
6821         case TYP_LONG:
6822         case TYP_ULONG:
6823             lval = *((__int64*)fldAddr);
6824             op1  = gtNewLconNode(lval);
6825             break;
6826
6827         case TYP_FLOAT:
6828             dval        = *((float*)fldAddr);
6829             op1         = gtNewDconNode(dval);
6830             op1->gtType = TYP_FLOAT;
6831             break;
6832
6833         case TYP_DOUBLE:
6834             dval = *((double*)fldAddr);
6835             op1  = gtNewDconNode(dval);
6836             break;
6837
6838         default:
6839             assert(!"Unexpected lclTyp");
6840             break;
6841     }
6842
6843     return op1;
6844 }
6845
6846 GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6847                                               CORINFO_ACCESS_FLAGS    access,
6848                                               CORINFO_FIELD_INFO*     pFieldInfo,
6849                                               var_types               lclTyp)
6850 {
6851     GenTree* op1;
6852
6853     switch (pFieldInfo->fieldAccessor)
6854     {
6855         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6856         {
6857             assert(!compIsForInlining());
6858
6859             // We first call a special helper to get the statics base pointer
6860             op1 = impParentClassTokenToHandle(pResolvedToken);
6861
6862             // compIsForInlining() is false, so we should never get NULL here
6863             assert(op1 != nullptr);
6864
6865             var_types type = TYP_BYREF;
6866
6867             switch (pFieldInfo->helper)
6868             {
6869                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6870                     type = TYP_I_IMPL;
6871                     break;
6872                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6873                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6874                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6875                     break;
6876                 default:
6877                     assert(!"unknown generic statics helper");
6878                     break;
6879             }
6880
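                     // Call the helper to get the statics base for this exact instantiation, then add the
                     // field's offset (tagged with its field sequence) to form the address of the field.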
6881             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6882
6883             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6884             op1              = gtNewOperNode(GT_ADD, type, op1,
6885                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6886         }
6887         break;
6888
6889         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6890         {
6891 #ifdef FEATURE_READYTORUN_COMPILER
6892             if (opts.IsReadyToRun())
6893             {
6894                 unsigned callFlags = 0;
6895
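                         // If the class has beforefieldinit semantics, its cctor does not have to run at a
                         // precise point, so the static-base helper call can be marked hoistable (e.g. out of loops).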
6896                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6897                 {
6898                     callFlags |= GTF_CALL_HOISTABLE;
6899                 }
6900
6901                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6902                 op1->gtFlags |= callFlags;
6903
6904                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6905             }
6906             else
6907 #endif
6908             {
6909                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6910             }
6911
6912             {
6913                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6914                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6915                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6916             }
6917             break;
6918         }
6919
6920         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6921         {
6922 #ifdef FEATURE_READYTORUN_COMPILER
6923             noway_assert(opts.IsReadyToRun());
6924             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6925             assert(kind.needsRuntimeLookup);
6926
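                 // Build the generic context for the current method and pass it to the R2R helper that
                 // returns the statics base for the runtime-determined type; the field offset is added below.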
6927             GenTree*        ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6928             GenTreeArgList* args    = gtNewArgList(ctxTree);
6929
6930             unsigned callFlags = 0;
6931
6932             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6933             {
6934                 callFlags |= GTF_CALL_HOISTABLE;
6935             }
6936             var_types type = TYP_BYREF;
6937             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6938             op1->gtFlags |= callFlags;
6939
6940             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6941             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6942             op1              = gtNewOperNode(GT_ADD, type, op1,
6943                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6944 #else
6945             unreached();
6946 #endif // FEATURE_READYTORUN_COMPILER
6947         }
6948         break;
6949
6950         default:
6951         {
6952             if (!(access & CORINFO_ACCESS_ADDRESS))
6953             {
6954                 // In the future, it may be better to just create the right tree here instead of folding it later.
6955                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6956
6957                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6958                 {
6959                     op1->gtFlags |= GTF_FLD_INITCLASS;
6960                 }
6961
6962                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6963                 {
6964                     op1->gtType = TYP_REF; // points at boxed object
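                             // The static lives in a boxed object on the GC heap; step over the method table
                             // pointer (TARGET_POINTER_SIZE) to reach the actual field data.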
6965                     FieldSeqNode* firstElemFldSeq =
6966                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6967                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6968                                         new (this, GT_CNS_INT)
6969                                             GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
6970
6971                     if (varTypeIsStruct(lclTyp))
6972                     {
6973                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
6974                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
6975                     }
6976                     else
6977                     {
6978                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6979                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6980                     }
6981                 }
6982
6983                 return op1;
6984             }
6985             else
6986             {
6987                 void** pFldAddr = nullptr;
6988                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6989
6990                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6991
6992                 /* Create the data member node */
6993                 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
6994                                           fldSeq);
6995
6996                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6997                 {
6998                     op1->gtFlags |= GTF_ICON_INITCLASS;
6999                 }
7000
7001                 if (pFldAddr != nullptr)
7002                 {
7003                     // There are two cases here: either the static is RVA-based,
7004                     // in which case the type of the FIELD node is not a GC type
7005                     // and the handle to the RVA is a TYP_I_IMPL; or the FIELD node is
7006                     // a GC type and the handle to it is a TYP_BYREF into the GC heap,
7007                     // because handles to statics now go into the large object heap.
7008
7009                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
7010                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
7011                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
7012                 }
7013             }
7014             break;
7015         }
7016     }
7017
7018     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
7019     {
7020         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
7021
7022         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
7023
7024         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
7025                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
7026     }
7027
7028     if (!(access & CORINFO_ACCESS_ADDRESS))
7029     {
7030         if (varTypeIsStruct(lclTyp))
7031         {
7032             // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
7033             op1 = gtNewObjNode(pFieldInfo->structType, op1);
7034         }
7035         else
7036         {
7037             op1 = gtNewOperNode(GT_IND, lclTyp, op1);
7038             op1->gtFlags |= GTF_GLOB_REF;
7039         }
7040     }
7041
7042     return op1;
7043 }
7044
7045 // In general, try to call this before most of the verification work.  Most people expect access
7046 // exceptions before verification exceptions.  If you do this afterwards, that usually doesn't happen; it turns
7047 // out that if you can't access something, we also consider you unverifiable for other reasons.
7048 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
7049 {
7050     if (result != CORINFO_ACCESS_ALLOWED)
7051     {
7052         impHandleAccessAllowedInternal(result, helperCall);
7053     }
7054 }
7055
7056 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
7057 {
7058     switch (result)
7059     {
7060         case CORINFO_ACCESS_ALLOWED:
7061             break;
7062         case CORINFO_ACCESS_ILLEGAL:
7063             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
7064             // method is verifiable.  Otherwise, delay the exception to runtime.
7065             if (compIsForImportOnly())
7066             {
7067                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
7068             }
7069             else
7070             {
7071                 impInsertHelperCall(helperCall);
7072             }
7073             break;
7074         case CORINFO_ACCESS_RUNTIME_CHECK:
7075             impInsertHelperCall(helperCall);
7076             break;
7077     }
7078 }
7079
7080 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
7081 {
7082     // Construct the argument list
7083     GenTreeArgList* args = nullptr;
7084     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
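         // Walk the helper's arguments from last to first, prepending each one, so the resulting
         // GT_LIST ends up in declaration order.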
7085     for (unsigned i = helperInfo->numArgs; i > 0; --i)
7086     {
7087         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
7088         GenTree*                  currentArg = nullptr;
7089         switch (helperArg.argType)
7090         {
7091             case CORINFO_HELPER_ARG_TYPE_Field:
7092                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
7093                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
7094                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
7095                 break;
7096             case CORINFO_HELPER_ARG_TYPE_Method:
7097                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
7098                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
7099                 break;
7100             case CORINFO_HELPER_ARG_TYPE_Class:
7101                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
7102                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
7103                 break;
7104             case CORINFO_HELPER_ARG_TYPE_Module:
7105                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
7106                 break;
7107             case CORINFO_HELPER_ARG_TYPE_Const:
7108                 currentArg = gtNewIconNode(helperArg.constant);
7109                 break;
7110             default:
7111                 NO_WAY("Illegal helper arg type");
7112         }
7113         args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
7114     }
7115
7116     /* TODO-Review:
7117      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
7118      * Also, consider sticking this in the first basic block.
7119      */
7120     GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
7121     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
7122 }
7123
7124 // Checks whether the return types of the caller and callee are compatible
7125 // so that the callee can be tail called. Note that here we don't check
7126 // compatibility in the IL Verifier sense, but rather whether the return type
7127 // sizes are equal and the values get returned in the same return register.
7128 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
7129                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
7130                                             var_types            calleeRetType,
7131                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
7132 {
7133     // Note that we cannot relax this condition with genActualType() as the
7134     // calling convention dictates that the caller of a function with a small
7135     // typed return value is responsible for normalizing the return value.
7136     if (callerRetType == calleeRetType)
7137     {
7138         return true;
7139     }
7140
7141     // If the class handles are the same and not null, the return types are compatible.
7142     if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
7143     {
7144         return true;
7145     }
7146
7147 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
7148     // Jit64 compat:
7149     if (callerRetType == TYP_VOID)
7150     {
7151         // This needs to be allowed to support the following IL pattern that Jit64 allows:
7152         //     tail.call
7153         //     pop
7154         //     ret
7155         //
7156         // Note that the above IL pattern is not valid as per IL verification rules.
7157         // Therefore, only full trust code can take advantage of this pattern.
7158         return true;
7159     }
7160
7161     // These checks return true if the return value type sizes are the same and
7162     // the values get returned in the same return register, i.e. the caller doesn't need
7163     // to normalize the return value. Some of the tail calls permitted by the checks below
7164     // would have been rejected by the IL Verifier before we reached here.  Therefore, only
7165     // full trust code can make those tail calls.
7166     unsigned callerRetTypeSize = 0;
7167     unsigned calleeRetTypeSize = 0;
7168     bool     isCallerRetTypMBEnreg =
7169         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true, info.compIsVarArgs);
7170     bool isCalleeRetTypMBEnreg =
7171         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true, info.compIsVarArgs);
7172
7173     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
7174     {
7175         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
7176     }
7177 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
7178
7179     return false;
7180 }
7181
7182 // For prefixFlags
7183 enum
7184 {
7185     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
7186     PREFIX_TAILCALL_IMPLICIT =
7187         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
7188     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
7189     PREFIX_VOLATILE    = 0x00000100,
7190     PREFIX_UNALIGNED   = 0x00001000,
7191     PREFIX_CONSTRAINED = 0x00010000,
7192     PREFIX_READONLY    = 0x00100000
7193 };
7194
7195 /********************************************************************************
7196  *
7197  * Returns true if the current opcode and the opcodes following it correspond
7198  * to a supported tail call IL pattern.
7199  *
7200  */
7201 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
7202                                       OPCODE      curOpcode,
7203                                       const BYTE* codeAddrOfNextOpcode,
7204                                       const BYTE* codeEnd,
7205                                       bool        isRecursive,
7206                                       bool*       isCallPopAndRet /* = nullptr */)
7207 {
7208     // Bail out if the current opcode is not a call.
7209     if (!impOpcodeIsCallOpcode(curOpcode))
7210     {
7211         return false;
7212     }
7213
7214 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
7215     // If shared ret tail opt is not enabled, we will enable
7216     // it for recursive methods.
7217     if (isRecursive)
7218 #endif
7219     {
7220         // We can actually handle the case where the ret is in a fall-through block, as long as that is the
7221         // only part of the sequence. Make sure we don't go past the end of the IL, however.
7222         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
7223     }
7224
7225     // Bail out if there is no next opcode after the call
7226     if (codeAddrOfNextOpcode >= codeEnd)
7227     {
7228         return false;
7229     }
7230
7231     // Scan the opcodes to look for the following IL patterns if either
7232     //   i) the call is not tail prefixed (i.e. implicit tail call) or
7233     //  ii) if tail prefixed, IL verification is not needed for the method.
7234     //
7235     // Only in the above two cases can we allow the tail call patterns below,
7236     // which violate the ECMA spec.
7237     //
7238     // Pattern1:
7239     //       call
7240     //       nop*
7241     //       ret
7242     //
7243     // Pattern2:
7244     //       call
7245     //       nop*
7246     //       pop
7247     //       nop*
7248     //       ret
7249     int    cntPop = 0;
7250     OPCODE nextOpcode;
7251
7252 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
7253     do
7254     {
7255         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
7256         codeAddrOfNextOpcode += sizeof(__int8);
7257     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
7258              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
7259              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
7260                                                                                          // one pop seen so far.
7261 #else
7262     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
7263 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
7264
7265     if (isCallPopAndRet)
7266     {
7267         // Allow call+pop+ret to be tail call optimized if caller ret type is void
7268         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
7269     }
7270
7271 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
7272     // Jit64 Compat:
7273     // Tail call IL pattern could be either of the following
7274     // 1) call/callvirt/calli + ret
7275     // 2) call/callvirt/calli + pop + ret in a method returning void.
7276     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
7277 #else
7278     return (nextOpcode == CEE_RET) && (cntPop == 0);
7279 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
7280 }
7281
7282 /*****************************************************************************
7283  *
7284  * Determine whether the call could be converted to an implicit tail call
7285  *
7286  */
7287 bool Compiler::impIsImplicitTailCallCandidate(
7288     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
7289 {
7290
7291 #if FEATURE_TAILCALL_OPT
7292     if (!opts.compTailCallOpt)
7293     {
7294         return false;
7295     }
7296
7297     if (opts.compDbgCode || opts.MinOpts())
7298     {
7299         return false;
7300     }
7301
7302     // must not be tail prefixed
7303     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
7304     {
7305         return false;
7306     }
7307
7308 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
7309     // The block containing the call must be marked as BBJ_RETURN.
7310     // We allow shared-ret tail call optimization on recursive calls even under
7311     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
7312     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
7313         return false;
7314 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
7315
7316     // must be call+ret or call+pop+ret
7317     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
7318     {
7319         return false;
7320     }
7321
7322     return true;
7323 #else
7324     return false;
7325 #endif // FEATURE_TAILCALL_OPT
7326 }
7327
7328 //------------------------------------------------------------------------
7329 // impImportCall: import a call-inspiring opcode
7330 //
7331 // Arguments:
7332 //    opcode                    - opcode that inspires the call
7333 //    pResolvedToken            - resolved token for the call target
7334 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
7335 //    newobjThis                - tree for the this pointer or uninitialized newobj temp (or nullptr)
7336 //    prefixFlags               - IL prefix flags for the call
7337 //    callInfo                  - EE supplied info for the call
7338 //    rawILOffset               - IL offset of the opcode
7339 //
7340 // Returns:
7341 //    Type of the call's return value.
7342 //    If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
7343 //    However we can't assert for this here yet because there are cases we miss. See issue #13272.
7344 //
7345 //
7346 // Notes:
7347 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
7348 //
7349 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
7350 //    uninitialized object.
7351
7352 #ifdef _PREFAST_
7353 #pragma warning(push)
7354 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
7355 #endif
7356
7357 var_types Compiler::impImportCall(OPCODE                  opcode,
7358                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
7359                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
7360                                   GenTree*                newobjThis,
7361                                   int                     prefixFlags,
7362                                   CORINFO_CALL_INFO*      callInfo,
7363                                   IL_OFFSET               rawILOffset)
7364 {
7365     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
7366
7367     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
7368     var_types              callRetTyp                     = TYP_COUNT;
7369     CORINFO_SIG_INFO*      sig                            = nullptr;
7370     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
7371     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
7372     unsigned               clsFlags                       = 0;
7373     unsigned               mflags                         = 0;
7374     unsigned               argFlags                       = 0;
7375     GenTree*               call                           = nullptr;
7376     GenTreeArgList*        args                           = nullptr;
7377     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
7378     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
7379     bool                   exactContextNeedsRuntimeLookup = false;
7380     bool                   canTailCall                    = true;
7381     const char*            szCanTailCallFailReason        = nullptr;
7382     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
7383     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
7384
7385     CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
7386
7387     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
7388     // do that before tailcalls, but that is probably not the intended
7389     // semantic. So just disallow tailcalls from synchronized methods.
7390     // Also, popping arguments in a varargs function is more work and NYI
7391     // If we have a security object, we have to keep our frame around for callers
7392     // to see any imperative security.
7393     if (info.compFlags & CORINFO_FLG_SYNCH)
7394     {
7395         canTailCall             = false;
7396         szCanTailCallFailReason = "Caller is synchronized";
7397     }
7398 #if !FEATURE_FIXED_OUT_ARGS
7399     else if (info.compIsVarArgs)
7400     {
7401         canTailCall             = false;
7402         szCanTailCallFailReason = "Caller is varargs";
7403     }
7404 #endif // FEATURE_FIXED_OUT_ARGS
7405     else if (opts.compNeedSecurityCheck)
7406     {
7407         canTailCall             = false;
7408         szCanTailCallFailReason = "Caller requires a security check.";
7409     }
7410
7411     // We only need to cast the return value of pinvoke inlined calls that return small types
7412
7413     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
7414     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
7415     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
7416     // the time being that the callee might be compiled by the other JIT and thus the return
7417     // value will need to be widened by us (or not widened at all...)
7418
7419     // ReadyToRun code sticks with default calling convention that does not widen small return types.
7420
7421     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
7422     bool bIntrinsicImported = false;
7423
7424     CORINFO_SIG_INFO calliSig;
7425     GenTreeArgList*  extraArg = nullptr;
7426
7427     /*-------------------------------------------------------------------------
7428      * First create the call node
7429      */
7430
7431     if (opcode == CEE_CALLI)
7432     {
7433         if (IsTargetAbi(CORINFO_CORERT_ABI))
7434         {
7435             // See comment in impCheckForPInvokeCall
7436             BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7437             if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block)))
7438             {
7439                 eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo);
7440                 return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset);
7441             }
7442         }
7443
7444         /* Get the call site sig */
7445         eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig);
7446
7447         callRetTyp = JITtype2varType(calliSig.retType);
7448
7449         call = impImportIndirectCall(&calliSig, ilOffset);
7450
7451         // We don't know the target method, so we have to infer the flags, or
7452         // assume the worst-case.
7453         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
7454
7455 #ifdef DEBUG
7456         if (verbose)
7457         {
7458             unsigned structSize =
7459                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
7460             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7461                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7462         }
7463 #endif
7464         // This should be checked in impImportBlockCode.
7465         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
7466
7467         sig = &calliSig;
7468
7469 #ifdef DEBUG
7470         // We cannot lazily obtain the signature of a CALLI call because it has no method
7471         // handle that we can use, so we need to save its full call signature here.
7472         assert(call->gtCall.callSig == nullptr);
7473         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7474         *call->gtCall.callSig = calliSig;
7475 #endif // DEBUG
7476
7477         if (IsTargetAbi(CORINFO_CORERT_ABI))
7478         {
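                 // For CoreRT, a managed calli target may be a "fat" function pointer that carries a
                 // hidden generic instantiation argument; flag such calls so they can be expanded
                 // appropriately later (see addFatPointerCandidate).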
7479             bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
7480                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
7481                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
7482                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
7483             if (managedCall)
7484             {
7485                 addFatPointerCandidate(call->AsCall());
7486             }
7487         }
7488     }
7489     else // (opcode != CEE_CALLI)
7490     {
7491         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
7492
7493         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
7494         // supply the instantiation parameters necessary to make direct calls to underlying
7495         // shared generic code, rather than calling through instantiating stubs.  If the
7496         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
7497         // must indeed pass an instantiation parameter.
7498
7499         methHnd = callInfo->hMethod;
7500
7501         sig        = &(callInfo->sig);
7502         callRetTyp = JITtype2varType(sig->retType);
7503
7504         mflags = callInfo->methodFlags;
7505
7506 #ifdef DEBUG
7507         if (verbose)
7508         {
7509             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
7510             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7511                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7512         }
7513 #endif
7514         if (compIsForInlining())
7515         {
7516             /* Does this call site have security boundary restrictions? */
7517
7518             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
7519             {
7520                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
7521                 return TYP_UNDEF;
7522             }
7523
7524             /* Does the inlinee need a security check token on the frame */
7525
7526             if (mflags & CORINFO_FLG_SECURITYCHECK)
7527             {
7528                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7529                 return TYP_UNDEF;
7530             }
7531
7532             /* Does the inlinee use StackCrawlMark */
7533
7534             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
7535             {
7536                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
7537                 return TYP_UNDEF;
7538             }
7539
7540             /* For now ignore delegate invoke */
7541
7542             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7543             {
7544                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
7545                 return TYP_UNDEF;
7546             }
7547
7548             /* For now ignore varargs */
7549             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7550             {
7551                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
7552                 return TYP_UNDEF;
7553             }
7554
7555             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7556             {
7557                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
7558                 return TYP_UNDEF;
7559             }
7560
7561             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
7562             {
7563                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
7564                 return TYP_UNDEF;
7565             }
7566         }
7567
7568         clsHnd = pResolvedToken->hClass;
7569
7570         clsFlags = callInfo->classFlags;
7571
7572 #ifdef DEBUG
7573         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
7574
7575         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
7576         // These should be in mscorlib.h, and available through a JIT/EE interface call.
7577         const char* modName;
7578         const char* className;
7579         const char* methodName;
7580         if ((className = eeGetClassName(clsHnd)) != nullptr &&
7581             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
7582             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
7583         {
7584             return impImportJitTestLabelMark(sig->numArgs);
7585         }
7586 #endif // DEBUG
7587
7588         // <NICE> Factor this into getCallInfo </NICE>
7589         bool isSpecialIntrinsic = false;
7590         if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
7591         {
7592             const bool isTail = canTailCall && (tailCall != 0);
7593
7594             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, readonlyCall, isTail,
7595                                 pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID, &isSpecialIntrinsic);
7596
7597             if (compDonotInline())
7598             {
7599                 return TYP_UNDEF;
7600             }
7601
7602             if (call != nullptr)
7603             {
7604                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
7605                        (clsFlags & CORINFO_FLG_FINAL));
7606
7607 #ifdef FEATURE_READYTORUN_COMPILER
7608                 if (call->OperGet() == GT_INTRINSIC)
7609                 {
7610                     if (opts.IsReadyToRun())
7611                     {
7612                         noway_assert(callInfo->kind == CORINFO_CALL);
7613                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
7614                     }
7615                     else
7616                     {
7617                         call->gtIntrinsic.gtEntryPoint.addr       = nullptr;
7618                         call->gtIntrinsic.gtEntryPoint.accessType = IAT_VALUE;
7619                     }
7620                 }
7621 #endif
7622
7623                 bIntrinsicImported = true;
7624                 goto DONE_CALL;
7625             }
7626         }
7627
7628 #ifdef FEATURE_SIMD
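             // Check for SIMD intrinsics (e.g. the System.Numerics vector types) that the JIT expands directly.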
7629         if (featureSIMD)
7630         {
7631             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
7632             if (call != nullptr)
7633             {
7634                 bIntrinsicImported = true;
7635                 goto DONE_CALL;
7636             }
7637         }
7638 #endif // FEATURE_SIMD
7639
7640         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
7641         {
7642             NO_WAY("Virtual call to a function added via EnC is not supported");
7643         }
7644
7645         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
7646             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7647             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
7648         {
7649             BADCODE("Bad calling convention");
7650         }
7651
7652         //-------------------------------------------------------------------------
7653         //  Construct the call node
7654         //
7655         // Work out what sort of call we're making.
7656         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
7657
7658         constraintCallThisTransform    = callInfo->thisTransform;
7659         exactContextHnd                = callInfo->contextHandle;
7660         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
7661
7662         // A recursive call is treated as a loop back to the beginning of the method.
7663         if (gtIsRecursiveCall(methHnd))
7664         {
7665 #ifdef DEBUG
7666             if (verbose)
7667             {
7668                 JITDUMP("\nFound recursive call in the method. Mark " FMT_BB " to " FMT_BB
7669                         " as having a backward branch.\n",
7670                         fgFirstBB->bbNum, compCurBB->bbNum);
7671             }
7672 #endif
7673             fgMarkBackwardJump(fgFirstBB, compCurBB);
7674         }
7675
7676         switch (callInfo->kind)
7677         {
7678
7679             case CORINFO_VIRTUALCALL_STUB:
7680             {
7681                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7682                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7683                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
7684                 {
7685
7686                     if (compIsForInlining())
7687                     {
7688                         // Don't import runtime lookups when inlining
7689                         // Inlining has to be aborted in such a case
7690                         /* XXX Fri 3/20/2009
7691                          * By the way, this would never succeed.  If the handle lookup is into the generic
7692                          * dictionary for a candidate, you'll generate different dictionary offsets and the
7693                          * inlined code will crash.
7694                          *
7695                          * To anyone reviewing this code: when could this ever succeed in the future?  It'll
7696                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
7697                          * failing here.
7698                          */
7699                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
7700                         return TYP_UNDEF;
7701                     }
7702
7703                     GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
7704                     assert(!compDonotInline());
7705
7706                     // This is the rough code to set up an indirect stub call
7707                     assert(stubAddr != nullptr);
7708
7709                     // The stubAddr may be a
7710                     // complex expression. As it is evaluated after the args,
7711                     // it may cause registered args to be spilled. Simply spill it.
7712
7713                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
7714                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
7715                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7716
7717                     // Create the actual call node
7718
7719                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7720                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7721
7722                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
7723
7724                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
7725                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7726
7727 #ifdef _TARGET_X86_
7728                     // No tailcalls allowed for these yet...
7729                     canTailCall             = false;
7730                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
7731 #endif
7732                 }
7733                 else
7734                 {
7735                     // OK, the stub is available at compile time.
7736
7737                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7738                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
7739                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7740                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
7741                            callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
7742                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
7743                     {
7744                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
7745                     }
7746                 }
7747
7748 #ifdef FEATURE_READYTORUN_COMPILER
7749                 if (opts.IsReadyToRun())
7750                 {
7751                     // Null check is sometimes needed for ready to run to handle
7752                     // non-virtual <-> virtual changes between versions
7753                     if (callInfo->nullInstanceCheck)
7754                     {
7755                         call->gtFlags |= GTF_CALL_NULLCHECK;
7756                     }
7757                 }
7758 #endif
7759
7760                 break;
7761             }
7762
7763             case CORINFO_VIRTUALCALL_VTABLE:
7764             {
7765                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7766                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7767                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7768                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
7769                 break;
7770             }
7771
7772             case CORINFO_VIRTUALCALL_LDVIRTFTN:
7773             {
7774                 if (compIsForInlining())
7775                 {
7776                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
7777                     return TYP_UNDEF;
7778                 }
7779
7780                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7781                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7782                 // OK, We've been told to call via LDVIRTFTN, so just
7783                 // take the call now....
7784
7785                 args = impPopList(sig->numArgs, sig);
7786
7787                 GenTree* thisPtr = impPopStack().val;
7788                 thisPtr          = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7789                 assert(thisPtr != nullptr);
7790
7791                 // Clone the (possibly transformed) "this" pointer
7792                 GenTree* thisPtrCopy;
7793                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7794                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
7795
7796                 GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7797                 assert(fptr != nullptr);
7798
7799                 thisPtr = nullptr; // can't reuse it
7800
7801                 // Now make an indirect call through the function pointer
7802
7803                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7804                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7805                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7806
7807                 // Create the actual call node
7808
7809                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7810                 call->gtCall.gtCallObjp = thisPtrCopy;
7811                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7812
7813                 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7814                 {
7815                     // CoreRT generic virtual method: need to handle potential fat function pointers
7816                     addFatPointerCandidate(call->AsCall());
7817                 }
7818 #ifdef FEATURE_READYTORUN_COMPILER
7819                 if (opts.IsReadyToRun())
7820                 {
7821                     // Null check is needed for ready to run to handle
7822                     // non-virtual <-> virtual changes between versions
7823                     call->gtFlags |= GTF_CALL_NULLCHECK;
7824                 }
7825 #endif
7826
7827                 // Since we are jumping over some code, check that it's OK to skip that code
7828                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7829                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7830                 goto DONE;
7831             }
7832
7833             case CORINFO_CALL:
7834             {
7835                 // This is for a non-virtual, non-interface etc. call
7836                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7837
7838                 // We remove the null check for the GetType call intrinsic.
7839                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7840                 // and intrinsics.
7841                 if (callInfo->nullInstanceCheck &&
7842                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7843                 {
7844                     call->gtFlags |= GTF_CALL_NULLCHECK;
7845                 }
7846
7847 #ifdef FEATURE_READYTORUN_COMPILER
7848                 if (opts.IsReadyToRun())
7849                 {
7850                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7851                 }
7852 #endif
7853                 break;
7854             }
7855
7856             case CORINFO_CALL_CODE_POINTER:
7857             {
7858                 // The EE has asked us to call by computing a code pointer and then doing an
7859                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
7860
7861                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7862                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7863
7864                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7865                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7866
7867                 GenTree* fptr =
7868                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7869
7870                 if (compDonotInline())
7871                 {
7872                     return TYP_UNDEF;
7873                 }
7874
7875                 // Now make an indirect call through the function pointer
7876
7877                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7878                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7879                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7880
7881                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7882                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7883                 if (callInfo->nullInstanceCheck)
7884                 {
7885                     call->gtFlags |= GTF_CALL_NULLCHECK;
7886                 }
7887
7888                 break;
7889             }
7890
7891             default:
7892                 assert(!"unknown call kind");
7893                 break;
7894         }
7895
7896         //-------------------------------------------------------------------------
7897         // Set more flags
7898
7899         PREFIX_ASSUME(call != nullptr);
7900
7901         if (mflags & CORINFO_FLG_NOGCCHECK)
7902         {
7903             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7904         }
7905
7906         // Mark the call if it's one of the ones we may later treat as an intrinsic
7907         if (isSpecialIntrinsic)
7908         {
7909             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7910         }
7911     }
7912     assert(sig);
7913     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7914
7915     /* Some sanity checks */
7916
7917     // CALL_VIRT and NEWOBJ must have a THIS pointer
7918     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7919     // static bit and hasThis are negations of one another
7920     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7921     assert(call != nullptr);
7922
7923     /*-------------------------------------------------------------------------
7924      * Check special-cases etc
7925      */
7926
7927     /* Special case - Check if it is a call to Delegate.Invoke(). */
7928
7929     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7930     {
7931         assert(!compIsForInlining());
7932         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7933         assert(mflags & CORINFO_FLG_FINAL);
7934
7935         /* Set the delegate flag */
7936         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7937
7938         if (callInfo->secureDelegateInvoke)
7939         {
7940             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7941         }
7942
7943         if (opcode == CEE_CALLVIRT)
7944         {
7945             assert(mflags & CORINFO_FLG_FINAL);
7946
7947             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7948             assert(call->gtFlags & GTF_CALL_NULLCHECK);
7949             call->gtFlags &= ~GTF_CALL_NULLCHECK;
7950         }
7951     }
7952
7953     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7954     actualMethodRetTypeSigClass = sig->retTypeSigClass;
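         // For struct returns, normalize the return type (e.g. recognized SIMD structs map to their
         // own var_types) and propagate the normalized type to the call node.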
7955     if (varTypeIsStruct(callRetTyp))
7956     {
7957         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
7958         call->gtType = callRetTyp;
7959     }
7960
7961 #if !FEATURE_VARARG
7962     /* Check for varargs */
7963     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7964         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7965     {
7966         BADCODE("Varargs not supported.");
7967     }
7968 #endif // !FEATURE_VARARG
7969
7970 #ifdef UNIX_X86_ABI
7971     if (call->gtCall.callSig == nullptr)
7972     {
7973         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7974         *call->gtCall.callSig = *sig;
7975     }
7976 #endif // UNIX_X86_ABI
7977
7978     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7979         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7980     {
7981         assert(!compIsForInlining());
7982
7983         /* Set the right flags */
7984
7985         call->gtFlags |= GTF_CALL_POP_ARGS;
7986         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7987
7988         /* Can't allow tailcall for varargs as it is caller-pop. The caller
7989            will be expecting to pop a certain number of arguments, but if we
7990            tailcall to a function with a different number of arguments, we
7991            are hosed. There are ways around this (caller remembers esp value,
7992            varargs is not caller-pop, etc), but not worth it. */
7993         CLANG_FORMAT_COMMENT_ANCHOR;
7994
7995 #ifdef _TARGET_X86_
7996         if (canTailCall)
7997         {
7998             canTailCall             = false;
7999             szCanTailCallFailReason = "Callee is varargs";
8000         }
8001 #endif
8002
8003         /* Get the total number of arguments - this is already correct
8004          * for CALLI - for methods we have to get it from the call site */
8005
8006         if (opcode != CEE_CALLI)
8007         {
8008 #ifdef DEBUG
8009             unsigned numArgsDef = sig->numArgs;
8010 #endif
8011             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8012
8013 #ifdef DEBUG
8014             // We cannot lazily obtain the signature of a vararg call because using its method
8015             // handle will give us only the declared argument list, not the full argument list.
8016             assert(call->gtCall.callSig == nullptr);
8017             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8018             *call->gtCall.callSig = *sig;
8019 #endif
8020
8021             // For vararg calls we must be sure to load the return type of the
8022             // method actually being called, as well as the return types
8023             // specified in the vararg signature. With type equivalency, these types
8024             // may not be the same.
8025             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
8026             {
8027                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
8028                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
8029                     sig->retType != CORINFO_TYPE_VAR)
8030                 {
8031                     // Make sure that all valuetypes (including enums) that we push are loaded.
8032                     // This is to guarantee that if a GC is triggered from the prestub of this method,
8033                     // all valuetypes in the method signature are already loaded.
8034                     // We need to be able to find the size of the valuetypes, but we cannot
8035                     // do a class-load from within GC.
8036                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
8037                 }
8038             }
8039
8040             assert(numArgsDef <= sig->numArgs);
8041         }
8042
8043         /* We will have "cookie" as the last argument but we cannot push
8044          * it on the operand stack because we may overflow, so we append it
8045          * to the arg list next after we pop them */
8046     }
8047
8048     if (mflags & CORINFO_FLG_SECURITYCHECK)
8049     {
8050         assert(!compIsForInlining());
8051
8052         // Need security prolog/epilog callouts when there is
8053         // imperative security in the method. This is to give security a
8054         // chance to do any setup in the prolog and cleanup in the epilog if needed.
8055
8056         if (compIsForInlining())
8057         {
8058             // Cannot handle this if the method being imported is itself an inlinee,
8059             // because an inlinee method does not have its own frame.
8060
8061             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
8062             return TYP_UNDEF;
8063         }
8064         else
8065         {
8066             tiSecurityCalloutNeeded = true;
8067
8068             // If the current method calls a method which needs a security check,
8069             // (i.e. the method being compiled has imperative security)
8070             // we need to reserve a slot for the security object in
8071             // the current method's stack frame
8072             opts.compNeedSecurityCheck = true;
8073         }
8074     }
8075
8076     //--------------------------- Inline NDirect ------------------------------
8077
8078     // For inline cases we technically should look at both the current
8079     // block and the call site block (or just the latter if we've
8080     // fused the EH trees). However the block-related checks pertain to
8081     // EH and we currently won't inline a method with EH. So for
8082     // inlinees, just checking the call site block is sufficient.
8083     {
8084         // New lexical block here to avoid compilation errors because of GOTOs.
8085         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
8086         impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
8087     }
8088
8089     if (call->gtFlags & GTF_CALL_UNMANAGED)
8090     {
8091         // We set up the unmanaged call by linking the frame, disabling GC, etc
8092         // This needs to be cleaned up on return
8093         if (canTailCall)
8094         {
8095             canTailCall             = false;
8096             szCanTailCallFailReason = "Callee is native";
8097         }
8098
8099         checkForSmallType = true;
8100
8101         impPopArgsForUnmanagedCall(call, sig);
8102
8103         goto DONE;
8104     }
8105     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
8106                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
8107                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
8108                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
8109     {
8110         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
8111         {
8112             // Normally this only happens with inlining.
8113             // However, a generic method (or type) being NGENd into another module
8114             // can run into this issue as well.  There's not an easy fall-back for NGEN,
8115             // so instead we fall back to the JIT.
8116             if (compIsForInlining())
8117             {
8118                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
8119             }
8120             else
8121             {
8122                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
8123             }
8124
8125             return TYP_UNDEF;
8126         }
8127
8128         GenTree* cookie = eeGetPInvokeCookie(sig);
8129
8130         // This cookie is required to be either a simple GT_CNS_INT or
8131         // an indirection of a GT_CNS_INT
8132         //
8133         GenTree* cookieConst = cookie;
8134         if (cookie->gtOper == GT_IND)
8135         {
8136             cookieConst = cookie->gtOp.gtOp1;
8137         }
8138         assert(cookieConst->gtOper == GT_CNS_INT);
8139
8140         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
8141         // we won't allow this tree to participate in any CSE logic
8142         //
8143         cookie->gtFlags |= GTF_DONT_CSE;
8144         cookieConst->gtFlags |= GTF_DONT_CSE;
8145
8146         call->gtCall.gtCallCookie = cookie;
8147
8148         if (canTailCall)
8149         {
8150             canTailCall             = false;
8151             szCanTailCallFailReason = "PInvoke calli";
8152         }
8153     }
8154
8155     /*-------------------------------------------------------------------------
8156      * Create the argument list
8157      */
8158
8159     //-------------------------------------------------------------------------
8160     // Special case - for varargs we have an implicit last argument
8161
8162     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
8163     {
8164         assert(!compIsForInlining());
8165
8166         void *varCookie, *pVarCookie;
8167         if (!info.compCompHnd->canGetVarArgsHandle(sig))
8168         {
8169             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
8170             return TYP_UNDEF;
8171         }
8172
8173         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
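        // Exactly one of the direct cookie and its indirection cell should be non-null.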
8174         assert((!varCookie) != (!pVarCookie));
8175         GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
8176
8177         assert(extraArg == nullptr);
8178         extraArg = gtNewArgList(cookie);
8179     }
8180
8181     //-------------------------------------------------------------------------
8182     // Extra arg for shared generic code and array methods
8183     //
8184     // Extra argument containing instantiation information is passed in the
8185     // following circumstances:
8186     // (a) To the "Address" method on array classes; the extra parameter is
8187     //     the array's type handle (a TypeDesc)
8188     // (b) To shared-code instance methods in generic structs; the extra parameter
8189     //     is the struct's type handle (a vtable ptr)
8190     // (c) To shared-code per-instantiation non-generic static methods in generic
8191     //     classes and structs; the extra parameter is the type handle
8192     // (d) To shared-code generic methods; the extra parameter is an
8193     //     exact-instantiation MethodDesc
8194     //
8195     // We also set the exact type context associated with the call so we can
8196     // inline the call correctly later on.
8197
8198     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
8199     {
8200         assert(call->gtCall.gtCallType == CT_USER_FUNC);
8201         if (clsHnd == nullptr)
8202         {
8203             NO_WAY("CALLI on parameterized type");
8204         }
8205
8206         assert(opcode != CEE_CALLI);
8207
8208         GenTree* instParam;
8209         BOOL     runtimeLookup;
8210
8211         // Instantiated generic method
8212         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
8213         {
8214             CORINFO_METHOD_HANDLE exactMethodHandle =
8215                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
8216
8217             if (!exactContextNeedsRuntimeLookup)
8218             {
8219 #ifdef FEATURE_READYTORUN_COMPILER
8220                 if (opts.IsReadyToRun())
8221                 {
8222                     instParam =
8223                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
8224                     if (instParam == nullptr)
8225                     {
8226                         assert(compDonotInline());
8227                         return TYP_UNDEF;
8228                     }
8229                 }
8230                 else
8231 #endif
8232                 {
8233                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
8234                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
8235                 }
8236             }
8237             else
8238             {
8239                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
8240                 if (instParam == nullptr)
8241                 {
8242                     assert(compDonotInline());
8243                     return TYP_UNDEF;
8244                 }
8245             }
8246         }
8247
8248         // otherwise must be an instance method in a generic struct,
8249         // a static method in a generic type, or a runtime-generated array method
8250         else
8251         {
8252             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
8253             CORINFO_CLASS_HANDLE exactClassHandle =
8254                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
8255
8256             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
8257             {
8258                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
8259                 return TYP_UNDEF;
8260             }
8261
8262             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
8263             {
8264                 // We indicate "readonly" to the Address operation by using a null
8265                 // instParam.
8266                 instParam = gtNewIconNode(0, TYP_REF);
8267             }
8268             else if (!exactContextNeedsRuntimeLookup)
8269             {
8270 #ifdef FEATURE_READYTORUN_COMPILER
8271                 if (opts.IsReadyToRun())
8272                 {
8273                     instParam =
8274                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
8275                     if (instParam == nullptr)
8276                     {
8277                         assert(compDonotInline());
8278                         return TYP_UNDEF;
8279                     }
8280                 }
8281                 else
8282 #endif
8283                 {
8284                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
8285                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
8286                 }
8287             }
8288             else
8289             {
8290                 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
8291                 // that the call was constrained with. We embed pConstrainedResolvedToken as the extra argument
8292                 // because pResolvedToken is an interface method and interface types make a poor generic context.
8293                 if (pConstrainedResolvedToken)
8294                 {
8295                     instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
8296                                                  FALSE /* importParent */);
8297                 }
8298                 else
8299                 {
8300                     instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
8301                 }
8302
8303                 if (instParam == nullptr)
8304                 {
8305                     assert(compDonotInline());
8306                     return TYP_UNDEF;
8307                 }
8308             }
8309         }
8310
8311         assert(extraArg == nullptr);
8312         extraArg = gtNewArgList(instParam);
8313     }
8314
8315     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
8316     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
8317     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
8318     // exactContextHnd is not currently required when inlining shared generic code into shared
8319     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
8320     // (e.g. anything marked needsRuntimeLookup)
8321     if (exactContextNeedsRuntimeLookup)
8322     {
8323         exactContextHnd = nullptr;
8324     }
8325
8326     if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
8327     {
8328         // Only verifiable cases are supported.
8329         // dup; ldvirtftn; newobj; or ldftn; newobj.
8330         // The IL could contain an unverifiable sequence; in that case the optimization should not be done.
8331         if (impStackHeight() > 0)
8332         {
8333             typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
8334             if (delegateTypeInfo.IsToken())
8335             {
8336                 ldftnToken = delegateTypeInfo.GetToken();
8337             }
8338         }
8339     }
8340
8341     //-------------------------------------------------------------------------
8342     // The main group of arguments
8343
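    // Pop the declared arguments off the IL stack and attach them to the call, along with
    // any extra cookie/instantiation argument collected above.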
8344     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
8345
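    // Propagate any global side-effect flags from the arguments up to the call node.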
8346     if (args)
8347     {
8348         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
8349     }
8350
8351     //-------------------------------------------------------------------------
8352     // The "this" pointer
8353
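    // Note: for NEWOBJ, newobjThis may be null (e.g. for variable-sized objects, handled
    // below); in that case there is no 'this' to store on the call here.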
8354     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
8355     {
8356         GenTree* obj;
8357
8358         if (opcode == CEE_NEWOBJ)
8359         {
8360             obj = newobjThis;
8361         }
8362         else
8363         {
8364             obj = impPopStack().val;
8365             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
8366             if (compDonotInline())
8367             {
8368                 return TYP_UNDEF;
8369             }
8370         }
8371
8372         // Store the "this" value in the call
8373         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
8374         call->gtCall.gtCallObjp = obj;
8375
8376         // Is this a virtual or interface call?
8377         if (call->gtCall.IsVirtual())
8378         {
8379             // only true object pointers can be virtual
8380             assert(obj->gtType == TYP_REF);
8381
8382             // See if we can devirtualize.
8383             impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
8384                                 &exactContextHnd);
8385         }
8386
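        // Record when the call receives the caller's own 'this' pointer.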
8387         if (impIsThis(obj))
8388         {
8389             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
8390         }
8391     }
8392
8393     //-------------------------------------------------------------------------
8394     // The "this" pointer for "newobj"
8395
8396     if (opcode == CEE_NEWOBJ)
8397     {
8398         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
8399         {
8400             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
8401             // This is a 'new' of a variable-sized object, where
8402             // the constructor is to return the object.  In this case
8403             // the constructor claims to return VOID but we know it
8404             // actually returns the new object.
8405             assert(callRetTyp == TYP_VOID);
8406             callRetTyp   = TYP_REF;
8407             call->gtType = TYP_REF;
8408             impSpillSpecialSideEff();
8409
8410             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
8411         }
8412         else
8413         {
8414             if (clsFlags & CORINFO_FLG_DELEGATE)
8415             {
8416                 // The new inliner morphs it in impImportCall.
8417                 // This will allow us to inline the call to the delegate constructor.
8418                 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
8419             }
8420
8421             if (!bIntrinsicImported)
8422             {
8423
8424 #if defined(DEBUG) || defined(INLINE_DATA)
8425
8426                 // Keep track of the raw IL offset of the call
8427                 call->gtCall.gtRawILOffset = rawILOffset;
8428
8429 #endif // defined(DEBUG) || defined(INLINE_DATA)
8430
8431                 // Is it an inline candidate?
8432                 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8433             }
8434
8435             // append the call node.
8436             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8437
8438             // Now push the value of the 'new' onto the stack
8439
8440             // This is a 'new' of a non-variable sized object.
8441             // Append the new node (op1) to the statement list,
8442             // and then push the local holding the value of this
8443             // new instruction on the stack.
8444
8445             if (clsFlags & CORINFO_FLG_VALUECLASS)
8446             {
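                // For value classes, newobjThis is the address of a local; push that local's value.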
8447                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
8448
8449                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
8450                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
8451             }
8452             else
8453             {
8454                 if (newobjThis->gtOper == GT_COMMA)
8455                 {
8456                     // In coreclr the callout can be inserted even if verification is disabled
8457                     // so we cannot rely on tiVerificationNeeded alone
8458
8459                     // We must have inserted the callout. Get the real newobj.
8460                     newobjThis = newobjThis->gtOp.gtOp2;
8461                 }
8462
8463                 assert(newobjThis->gtOper == GT_LCL_VAR);
8464                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
8465             }
8466         }
8467         return callRetTyp;
8468     }
8469
8470 DONE:
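    // Reached by fall-through for ordinary calls, and via 'goto DONE' for unmanaged calls
    // whose arguments were already popped above.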
8471
8472     if (tailCall)
8473     {
8474         // This check cannot be performed for implicit tail calls because
8475         // impIsImplicitTailCallCandidate() does not check whether return
8476         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
8477         // As a result it is possible that in the following case, we find that
8478         // the type stack is non-empty if Callee() is considered for implicit
8479         // tail calling.
8480         //      int Caller(..) { .... void Callee(); ret val; ... }
8481         //
8482         // Note that we cannot check return type compatibility before impImportCall()
8483         // as we don't have the required info, or we would need to duplicate some of
8484         // the logic of impImportCall().
8485         //
8486         // For implicit tail calls, we perform this check after return types are
8487         // known to be compatible.
8488         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
8489         {
8490             BADCODE("Stack should be empty after tailcall");
8491         }
8492
8493         // Note that we can not relax this condition with genActualType() as
8494         // the calling convention dictates that the caller of a function with
8495         // a small-typed return value is responsible for normalizing the return val
8496
8497         if (canTailCall &&
8498             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
8499                                           callInfo->sig.retTypeClass))
8500         {
8501             canTailCall             = false;
8502             szCanTailCallFailReason = "Return types are not tail call compatible";
8503         }
8504
8505         // Stack empty check for implicit tail calls.
8506         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
8507         {
8508 #ifdef _TARGET_AMD64_
8509             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
8510             // in JIT64, not an InvalidProgramException.
8511             Verify(false, "Stack should be empty after tailcall");
8512 #else  // !_TARGET_AMD64_
8513             BADCODE("Stack should be empty after tailcall");
8514 #endif // !_TARGET_AMD64_
8515         }
8516
8517         // assert(compCurBB is not a catch, finally or filter block);
8518         // assert(compCurBB is not a try block protected by a finally block);
8519
8520         // Check for permission to tailcall
8521         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
8522
8523         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
8524
8525         if (canTailCall)
8526         {
8527             // True virtual or indirect calls shouldn't pass in a callee handle.
8528             CORINFO_METHOD_HANDLE exactCalleeHnd =
8529                 ((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
8530             GenTree* thisArg = call->gtCall.gtCallObjp;
8531
8532             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
8533             {
8534                 canTailCall = true;
8535                 if (explicitTailCall)
8536                 {
8537                     // In the case of explicit tail calls, mark it so that it is not considered
8538                     // for inlining.
8539                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
8540 #ifdef DEBUG
8541                     if (verbose)
8542                     {
8543                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
8544                         printTreeID(call);
8545                         printf("\n");
8546                     }
8547 #endif
8548                 }
8549                 else
8550                 {
8551 #if FEATURE_TAILCALL_OPT
8552                     // Must be an implicit tail call.
8553                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
8554
8555                     // It is possible that a call node is both an inline candidate and marked
8556                     // for opportunistic tail calling.  Inlining happens before morphing of
8557                     // trees.  If inlining of an inline candidate gets aborted for whatever
8558                     // reason, it will survive to the morphing stage at which point it will be
8559                     // transformed into a tail call after performing additional checks.
8560
8561                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
8562 #ifdef DEBUG
8563                     if (verbose)
8564                     {
8565                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
8566                         printTreeID(call);
8567                         printf("\n");
8568                     }
8569 #endif
8570
8571 #else //! FEATURE_TAILCALL_OPT
8572                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
8573
8574 #endif // FEATURE_TAILCALL_OPT
8575                 }
8576
8577                 // we can't report success just yet...
8578             }
8579             else
8580             {
8581                 canTailCall = false;
8582 // canTailCall reported its reasons already
8583 #ifdef DEBUG
8584                 if (verbose)
8585                 {
8586                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
8587                     printTreeID(call);
8588                     printf("\n");
8589                 }
8590 #endif
8591             }
8592         }
8593         else
8594         {
8595             // If this assert fires it means that canTailCall was set to false without setting a reason!
8596             assert(szCanTailCallFailReason != nullptr);
8597
8598 #ifdef DEBUG
8599             if (verbose)
8600             {
8601                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
8602                 printTreeID(call);
8603                 printf(": %s\n", szCanTailCallFailReason);
8604             }
8605 #endif
8606             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
8607                                                      szCanTailCallFailReason);
8608         }
8609     }
8610
8611     // Note: we assume that small return types are already normalized by the managed callee
8612     // or by the pinvoke stub for calls to unmanaged code.
8613
8614     if (!bIntrinsicImported)
8615     {
8616         //
8617         // Things needed to be checked when bIntrinsicImported is false.
8618         //
8619
8620         assert(call->gtOper == GT_CALL);
8621         assert(sig != nullptr);
8622
8623         // Tail calls require us to save the call site's sig info so we can obtain an argument
8624         // copying thunk from the EE later on.
8625         if (call->gtCall.callSig == nullptr)
8626         {
8627             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8628             *call->gtCall.callSig = *sig;
8629         }
8630
8631         if (compIsForInlining() && opcode == CEE_CALLVIRT)
8632         {
8633             GenTree* callObj = call->gtCall.gtCallObjp;
8634             assert(callObj != nullptr);
8635
8636             if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
8637                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
8638                                                                    impInlineInfo->inlArgInfo))
8639             {
8640                 impInlineInfo->thisDereferencedFirst = true;
8641             }
8642         }
8643
8644 #if defined(DEBUG) || defined(INLINE_DATA)
8645
8646         // Keep track of the raw IL offset of the call
8647         call->gtCall.gtRawILOffset = rawILOffset;
8648
8649 #endif // defined(DEBUG) || defined(INLINE_DATA)
8650
8651         // Is it an inline candidate?
8652         impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8653     }
8654
8655 DONE_CALL:
8656     // Push or append the result of the call
8657     if (callRetTyp == TYP_VOID)
8658     {
8659         if (opcode == CEE_NEWOBJ)
8660         {
8661             // we actually did push something, so don't spill the thing we just pushed.
8662             assert(verCurrentState.esStackDepth > 0);
8663             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
8664         }
8665         else
8666         {
8667             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8668         }
8669     }
8670     else
8671     {
8672         impSpillSpecialSideEff();
8673
8674         if (clsFlags & CORINFO_FLG_ARRAY)
8675         {
8676             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8677         }
8678
8679         // Find the return type used for verification by interpreting the method signature.
8680         // NB: we are clobbering the already established sig.
8681         if (tiVerificationNeeded)
8682         {
8683             // Actually, we never get the sig for the original method.
8684             sig = &(callInfo->verSig);
8685         }
8686
8687         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
8688         tiRetVal.NormaliseForStack();
8689
8690         // The CEE_READONLY prefix modifies the verification semantics of an Address
8691         // operation on an array type.
8692         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
8693         {
8694             tiRetVal.SetIsReadonlyByRef();
8695         }
8696
8697         if (tiVerificationNeeded)
8698         {
8699             // We assume all calls return permanent home byrefs. If they
8700             // didn't they wouldn't be verifiable. This is also covering
8701             // the Address() helper for multidimensional arrays.
8702             if (tiRetVal.IsByRef())
8703             {
8704                 tiRetVal.SetIsPermanentHomeByRef();
8705             }
8706         }
8707
8708         if (call->IsCall())
8709         {
8710             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
8711
8712             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
8713             if (varTypeIsStruct(callRetTyp))
8714             {
8715                 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
8716             }
8717
8718             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
8719             {
8720                 assert(opts.OptEnabled(CLFLG_INLINING));
8721                 assert(!fatPointerCandidate); // We should not try to inline calli.
8722
8723                 // Make the call its own tree (spill the stack if needed).
8724                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8725
8726                 // TODO: Still using the widened type.
8727                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
8728             }
8729             else
8730             {
8731                 if (fatPointerCandidate)
8732                 {
8733                     // fatPointer candidates should be in statements of the form call() or var = call().
8734                     // Such a form allows us to find statements with fat calls without walking whole trees,
8735                     // and avoids problems with cutting trees.
8736                     assert(!bIntrinsicImported);
8737                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
8738                     if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
8739                     {
8740                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
8741                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
8742                         varDsc->lvVerTypeInfo = tiRetVal;
8743                         impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
8744                         // impAssignTempGen can change the src arg list and return type for a call that returns a struct.
8745                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
8746                         call           = gtNewLclvNode(calliSlot, type);
8747                     }
8748                 }
8749
8750                 // For non-candidates we must also spill, since we
8751                 // might have locals live on the eval stack that this
8752                 // call can modify.
8753                 //
8754                 // Suppress this for certain well-known call targets
8755                 // that we know won't modify locals, eg calls that are
8756                 // recognized in gtCanOptimizeTypeEquality. Otherwise
8757                 // we may break key fragile pattern matches later on.
8758                 bool spillStack = true;
8759                 if (call->IsCall())
8760                 {
8761                     GenTreeCall* callNode = call->AsCall();
8762                     if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) ||
8763                                                                 gtIsTypeHandleToRuntimeTypeHandleHelper(callNode)))
8764                     {
8765                         spillStack = false;
8766                     }
8767                     else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8768                     {
8769                         spillStack = false;
8770                     }
8771                 }
8772
8773                 if (spillStack)
8774                 {
8775                     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8776                 }
8777             }
8778         }
8779
8780         if (!bIntrinsicImported)
8781         {
8782             //-------------------------------------------------------------------------
8783             //
8784             /* If the call is of a small type and the callee is managed, the callee will normalize the result
8785                 before returning.
8786                 However, we need to normalize small type values returned by unmanaged
8787                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8788                 if we use the shorter inlined pinvoke stub. */
8789
8790             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8791             {
8792                 call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
8793             }
8794         }
8795
8796         impPushOnStack(call, tiRetVal);
8797     }
8798
8799     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
8800     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8801     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8802     //  callInfoCache.uncacheCallInfo();
8803
8804     return callRetTyp;
8805 }
8806 #ifdef _PREFAST_
8807 #pragma warning(pop)
8808 #endif
8809
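//-----------------------------------------------------------------------------------
//  impMethodInfo_hasRetBuffArg: Determine whether calls to the given method must pass a
//  hidden return buffer argument, i.e. whether its struct result is returned by reference
//  (SPK_ByReference).
//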
8810 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8811 {
8812     CorInfoType corType = methInfo->args.retType;
8813
8814     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8815     {
8816         // We have some kind of STRUCT being returned
8817
8818         structPassingKind howToReturnStruct = SPK_Unknown;
8819
8820         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8821
8822         if (howToReturnStruct == SPK_ByReference)
8823         {
8824             return true;
8825         }
8826     }
8827
8828     return false;
8829 }
8830
8831 #ifdef DEBUG
8832 //
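// impImportJitTestLabelMark: Consume a JitTestLabel marker call with 'numArgs' arguments: pop the
// constant label (and optional number) arguments, attach the resulting TestLabelAndNum annotation
// to the value left on top of the stack (or to one of its subnodes in special cases), and push
// that value back.
//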
8833 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8834 {
8835     TestLabelAndNum tlAndN;
8836     if (numArgs == 2)
8837     {
8838         tlAndN.m_num  = 0;
8839         StackEntry se = impPopStack();
8840         assert(se.seTypeInfo.GetType() == TI_INT);
8841         GenTree* val = se.val;
8842         assert(val->IsCnsIntOrI());
8843         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8844     }
8845     else if (numArgs == 3)
8846     {
8847         StackEntry se = impPopStack();
8848         assert(se.seTypeInfo.GetType() == TI_INT);
8849         GenTree* val = se.val;
8850         assert(val->IsCnsIntOrI());
8851         tlAndN.m_num = val->AsIntConCommon()->IconValue();
8852         se           = impPopStack();
8853         assert(se.seTypeInfo.GetType() == TI_INT);
8854         val = se.val;
8855         assert(val->IsCnsIntOrI());
8856         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8857     }
8858     else
8859     {
8860         assert(false);
8861     }
8862
8863     StackEntry expSe = impPopStack();
8864     GenTree*   node  = expSe.val;
8865
8866     // There are a small number of special cases, where we actually put the annotation on a subnode.
8867     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8868     {
8869         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8870         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8871         // offset within the static field block whose address is returned by the helper call.
8872         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
8873         GenTree* helperCall = nullptr;
8874         assert(node->OperGet() == GT_IND);
8875         tlAndN.m_num -= 100;
8876         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8877         GetNodeTestData()->Remove(node);
8878     }
8879     else
8880     {
8881         GetNodeTestData()->Set(node, tlAndN);
8882     }
8883
8884     impPushOnStack(node, expSe.seTypeInfo);
8885     return node->TypeGet();
8886 }
8887 #endif // DEBUG
8888
8889 //-----------------------------------------------------------------------------------
8890 //  impFixupCallStructReturn: For a call node that returns a struct type either
8891 //  adjust the return type to an enregisterable type, or set the flag to indicate
8892 //  struct return via retbuf arg.
8893 //
8894 //  Arguments:
8895 //    call       -  GT_CALL GenTree node
8896 //    retClsHnd  -  Class handle of return type of the call
8897 //
8898 //  Return Value:
8899 //    Returns new GenTree node after fixing struct return of call node
8900 //
8901 GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8902 {
8903     if (!varTypeIsStruct(call))
8904     {
8905         return call;
8906     }
8907
8908     call->gtRetClsHnd = retClsHnd;
8909
8910 #if FEATURE_MULTIREG_RET
8911     // Initialize Return type descriptor of call node
8912     ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8913     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8914 #endif // FEATURE_MULTIREG_RET
8915
8916 #ifdef UNIX_AMD64_ABI
8917
8918     // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
8919     assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8920
8921     // The return type will remain as the incoming struct type unless normalized to a
8922     // single eightbyte return type below.
8923     call->gtReturnType = call->gtType;
8924
8925     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8926     if (retRegCount != 0)
8927     {
8928         if (retRegCount == 1)
8929         {
8930             // See if the struct size is smaller than the return
8931             // type size...
8932             if (retTypeDesc->IsEnclosingType())
8933             {
8934                 // If we know for sure this call will remain a call,
8935                 // retype and return value via a suitable temp.
8936                 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8937                 {
8938                     call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8939                     return impAssignSmallStructTypeToVar(call, retClsHnd);
8940                 }
8941             }
8942             else
8943             {
8944                 // Return type is same size as struct, so we can
8945                 // simply retype the call.
8946                 call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8947             }
8948         }
8949         else
8950         {
8951             // must be a struct returned in two registers
8952             assert(retRegCount == 2);
8953
8954             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8955             {
8956                 // Force a call returning multi-reg struct to be always of the IR form
8957                 //   tmp = call
8958                 //
8959                 // No need to assign a multi-reg struct to a local var if:
8960                 //  - It is a tail call or
8961                 //  - The call is marked for in-lining later
8962                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8963             }
8964         }
8965     }
8966     else
8967     {
8968         // struct not returned in registers, i.e. returned via hidden retbuf arg.
8969         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8970     }
8971
8972 #else // not UNIX_AMD64_ABI
8973
8974     // Check for TYP_STRUCT type that wraps a primitive type
8975     // Such structs are returned using a single register
8976     // and we change the return type on those calls here.
8977     //
8978     structPassingKind howToReturnStruct;
8979     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8980
8981     if (howToReturnStruct == SPK_ByReference)
8982     {
8983         assert(returnType == TYP_UNKNOWN);
8984         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8985     }
8986     else
8987     {
8988         assert(returnType != TYP_UNKNOWN);
8989
8990         // See if the struct size is smaller than the return
8991         // type size...
8992         if (howToReturnStruct == SPK_EnclosingType)
8993         {
8994             // If we know for sure this call will remain a call,
8995             // retype and return value via a suitable temp.
8996             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8997             {
8998                 call->gtReturnType = returnType;
8999                 return impAssignSmallStructTypeToVar(call, retClsHnd);
9000             }
9001         }
9002         else
9003         {
9004             // Return type is same size as struct, so we can
9005             // simply retype the call.
9006             call->gtReturnType = returnType;
9007         }
9008
9009         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
9010         if ((returnType == TYP_LONG) && (compLongUsed == false))
9011         {
9012             compLongUsed = true;
9013         }
9014         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
9015         {
9016             compFloatingPointUsed = true;
9017         }
9018
9019 #if FEATURE_MULTIREG_RET
9020         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
9021         assert(retRegCount != 0);
9022
9023         if (retRegCount >= 2)
9024         {
9025             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
9026             {
9027                 // Force a call returning multi-reg struct to be always of the IR form
9028                 //   tmp = call
9029                 //
9030                 // No need to assign a multi-reg struct to a local var if:
9031                 //  - It is a tail call or
9032                 //  - The call is marked for in-lining later
9033                 return impAssignMultiRegTypeToVar(call, retClsHnd);
9034             }
9035         }
9036 #endif // FEATURE_MULTIREG_RET
9037     }
9038
9039 #endif // not UNIX_AMD64_ABI
9040
9041     return call;
9042 }
9043
9044 /*****************************************************************************
9045    For struct return values, re-type the operand in the case where the ABI
9046    does not use a struct return buffer
9047    Note that this method is only called for !_TARGET_X86_
9048  */
9049
9050 GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
9051 {
9052     assert(varTypeIsStruct(info.compRetType));
9053     assert(info.compRetBuffArg == BAD_VAR_NUM);
9054
9055     JITDUMP("\nimpFixupStructReturnType: retyping\n");
9056     DISPTREE(op);
9057
9058 #if defined(_TARGET_XARCH_)
9059
9060 #ifdef UNIX_AMD64_ABI
9061     // No VarArgs for CoreCLR on x64 Unix
9062     assert(!info.compIsVarArgs);
9063
9064     // Is method returning a multi-reg struct?
9065     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
9066     {
9067         // In case of multi-reg struct return, we force IR to be one of the following:
9068         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
9069         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
9070
9071         if (op->gtOper == GT_LCL_VAR)
9072         {
9073             // Make sure that this struct stays in memory and doesn't get promoted.
9074             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
9075             lvaTable[lclNum].lvIsMultiRegRet = true;
9076
9077             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9078             op->gtFlags |= GTF_DONT_CSE;
9079
9080             return op;
9081         }
9082
9083         if (op->gtOper == GT_CALL)
9084         {
9085             return op;
9086         }
9087
9088         return impAssignMultiRegTypeToVar(op, retClsHnd);
9089     }
9090 #else  // !UNIX_AMD64_ABI
9091     assert(info.compRetNativeType != TYP_STRUCT);
9092 #endif // !UNIX_AMD64_ABI
9093
9094 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
9095
9096     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
9097     {
9098         if (op->gtOper == GT_LCL_VAR)
9099         {
9100             // This LCL_VAR is an HFA return value; it stays as a TYP_STRUCT
9101             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
9102             // Make sure this struct type stays as struct so that we can return it as an HFA
9103             lvaTable[lclNum].lvIsMultiRegRet = true;
9104
9105             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9106             op->gtFlags |= GTF_DONT_CSE;
9107
9108             return op;
9109         }
9110
9111         if (op->gtOper == GT_CALL)
9112         {
9113             if (op->gtCall.IsVarargs())
9114             {
9115                 // We cannot tail call because control needs to return to fixup the calling
9116                 // convention for result return.
9117                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
9118                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
9119             }
9120             else
9121             {
9122                 return op;
9123             }
9124         }
9125         return impAssignMultiRegTypeToVar(op, retClsHnd);
9126     }
9127
9128 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
9129
9130     // Is method returning a multi-reg struct?
9131     if (IsMultiRegReturnedType(retClsHnd))
9132     {
9133         if (op->gtOper == GT_LCL_VAR)
9134         {
9135             // This LCL_VAR stays as a TYP_STRUCT
9136             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
9137
9138             // Make sure this struct type is not struct promoted
9139             lvaTable[lclNum].lvIsMultiRegRet = true;
9140
9141             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9142             op->gtFlags |= GTF_DONT_CSE;
9143
9144             return op;
9145         }
9146
9147         if (op->gtOper == GT_CALL)
9148         {
9149             if (op->gtCall.IsVarargs())
9150             {
9151                 // We cannot tail call because control needs to return to fixup the calling
9152                 // convention for result return.
9153                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
9154                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
9155             }
9156             else
9157             {
9158                 return op;
9159             }
9160         }
9161         return impAssignMultiRegTypeToVar(op, retClsHnd);
9162     }
9163
9164 #endif //  FEATURE_MULTIREG_RET && FEATURE_HFA
9165
9166 REDO_RETURN_NODE:
9167     // Adjust the type away from struct to integral,
9168     // with no normalizing.
9169     if (op->gtOper == GT_LCL_VAR)
9170     {
9171         // It is possible that we now have a lclVar of scalar type.
9172         // If so, don't transform it to GT_LCL_FLD.
9173         if (varTypeIsStruct(lvaTable[op->AsLclVar()->gtLclNum].lvType))
9174         {
9175             op->ChangeOper(GT_LCL_FLD);
9176         }
9177     }
9178     else if (op->gtOper == GT_OBJ)
9179     {
9180         GenTree* op1 = op->AsObj()->Addr();
9181
9182         // We will fold away OBJ/ADDR
9183         // except for OBJ/ADDR/INDEX
9184         //     as the array type influences the array element's offset
9185         //     Later in this method we change op->gtType to info.compRetNativeType
9186         //     This is not correct when op is a GT_INDEX as the starting offset
9187         //     for the array elements 'elemOffs' is different for an array of
9188         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
9189         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
9190         //
9191         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
9192         {
9193             // Change '*(&X)' to 'X' and see if we can do better
9194             op = op1->gtOp.gtOp1;
9195             goto REDO_RETURN_NODE;
9196         }
9197         op->gtObj.gtClass = NO_CLASS_HANDLE;
9198         op->ChangeOperUnchecked(GT_IND);
9199         op->gtFlags |= GTF_IND_TGTANYWHERE;
9200     }
9201     else if (op->gtOper == GT_CALL)
9202     {
9203         if (op->AsCall()->TreatAsHasRetBufArg(this))
9204         {
9205             // This must be one of those 'special' helpers that don't
9206             // really have a return buffer, but instead use it as a way
9207             // to keep the trees cleaner with fewer address-taken temps.
9208             //
9209             // Well now we have to materialize the return buffer as
9210             // an address-taken temp. Then we can return the temp.
9211             //
9212             // NOTE: this code assumes that since the call directly
9213             // feeds the return, then the call must be returning the
9214             // same structure/class/type.
9215             //
9216             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
9217
9218             // No need to spill anything as we're about to return.
9219             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
9220
9221             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
9222             // jump directly to a GT_LCL_FLD.
9223             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
9224             op->ChangeOper(GT_LCL_FLD);
9225         }
9226         else
9227         {
9228             // Don't change the gtType of the call just yet, it will get changed later.
9229             return op;
9230         }
9231     }
9232 #if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_ARM64_)
9233     else if ((op->gtOper == GT_HWIntrinsic) && varTypeIsSIMD(op->gtType))
9234     {
9235         // TODO-ARM64-FIXME Implement ARM64 ABI for Short Vectors properly
9236         // assert(op->gtType == info.compRetNativeType)
9237         if (op->gtType != info.compRetNativeType)
9238         {
9239             // Insert a register move to keep target type of SIMD intrinsic intact
9240             op = gtNewScalarHWIntrinsicNode(info.compRetNativeType, op, NI_ARM64_NONE_MOV);
9241         }
9242     }
9243 #endif
9244     else if (op->gtOper == GT_COMMA)
9245     {
9246         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
9247     }
9248
9249     op->gtType = info.compRetNativeType;
9250
9251     JITDUMP("\nimpFixupStructReturnType: result of retyping is\n");
9252     DISPTREE(op);
9253
9254     return op;
9255 }
9256
9257 /*****************************************************************************
9258    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
9259    finally-protected try. We find the finally blocks protecting the current
9260    offset (in order) by walking over the complete exception table and
9261    finding enclosing clauses. This assumes that the table is sorted.
9262    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
9263
9264    If we are leaving a catch handler, we need to attach the
9265    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
9266
9267    After this function, the BBJ_LEAVE block has been converted to a different type.
9268  */
9269
9270 #if !FEATURE_EH_FUNCLETS
9271
9272 void Compiler::impImportLeave(BasicBlock* block)
9273 {
9274 #ifdef DEBUG
9275     if (verbose)
9276     {
9277         printf("\nBefore import CEE_LEAVE:\n");
9278         fgDispBasicBlocks();
9279         fgDispHandlerTab();
9280     }
9281 #endif // DEBUG
9282
9283     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9284     unsigned    blkAddr         = block->bbCodeOffs;
9285     BasicBlock* leaveTarget     = block->bbJumpDest;
9286     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
9287
9288     // LEAVE clears the stack: spill side effects and set the stack depth to 0
9289
9290     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9291     verCurrentState.esStackDepth = 0;
9292
9293     assert(block->bbJumpKind == BBJ_LEAVE);
9294     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
9295
9296     BasicBlock* step         = DUMMY_INIT(NULL);
9297     unsigned    encFinallies = 0; // Number of enclosing finallies.
9298     GenTree*    endCatches   = NULL;
9299     GenTree*    endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
9300
9301     unsigned  XTnum;
9302     EHblkDsc* HBtab;
9303
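    // Walk the EH table, handling each catch handler we are leaving and each
    // finally-protected try we are jumping out of.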
9304     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9305     {
9306         // Grab the handler offsets
9307
9308         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9309         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9310         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9311         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9312
9313         /* Is this a catch-handler we are CEE_LEAVEing out of?
9314          * If so, we need to call CORINFO_HELP_ENDCATCH.
9315          */
9316
9317         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9318         {
9319             // Can't CEE_LEAVE out of a finally/fault handler
9320             if (HBtab->HasFinallyOrFaultHandler())
9321                 BADCODE("leave out of fault/finally block");
9322
9323             // Create the call to CORINFO_HELP_ENDCATCH
9324             GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
9325
9326             // Make a list of all the currently pending endCatches
9327             if (endCatches)
9328                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
9329             else
9330                 endCatches = endCatch;
9331
9332 #ifdef DEBUG
9333             if (verbose)
9334             {
9335                 printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to "
9336                        "CORINFO_HELP_ENDCATCH\n",
9337                        block->bbNum, XTnum);
9338             }
9339 #endif
9340         }
9341         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9342                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9343         {
9344             /* This is a finally-protected try we are jumping out of */
9345
9346             /* If there are any pending endCatches, and we have already
9347                jumped out of a finally-protected try, then the endCatches
9348                have to be put in a block in an outer try for async
9349                exceptions to work correctly.
9350                Otherwise, just append them to the original block */
9351
9352             BasicBlock* callBlock;
9353
9354             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
9355
9356             if (encFinallies == 0)
9357             {
9358                 assert(step == DUMMY_INIT(NULL));
9359                 callBlock             = block;
9360                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9361
9362                 if (endCatches)
9363                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9364
9365 #ifdef DEBUG
9366                 if (verbose)
9367                 {
9368                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
9369                            "block %s\n",
9370                            callBlock->dspToString());
9371                 }
9372 #endif
9373             }
9374             else
9375             {
9376                 assert(step != DUMMY_INIT(NULL));
9377
9378                 /* Calling the finally block */
9379                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
9380                 assert(step->bbJumpKind == BBJ_ALWAYS);
9381                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9382                                               // finally in the chain)
9383                 step->bbJumpDest->bbRefs++;
9384
9385                 /* The new block will inherit this block's weight */
9386                 callBlock->setBBWeight(block->bbWeight);
9387                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
9388
9389 #ifdef DEBUG
9390                 if (verbose)
9391                 {
9392                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
9393                            callBlock->dspToString());
9394                 }
9395 #endif
9396
9397                 GenTree* lastStmt;
9398
9399                 if (endCatches)
9400                 {
9401                     lastStmt         = gtNewStmt(endCatches);
9402                     endLFin->gtNext  = lastStmt;
9403                     lastStmt->gtPrev = endLFin;
9404                 }
9405                 else
9406                 {
9407                     lastStmt = endLFin;
9408                 }
9409
9410                 // note that this sets BBF_IMPORTED on the block
9411                 impEndTreeList(callBlock, endLFin, lastStmt);
9412             }
9413
9414             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9415             /* The new block will inherit this block's weight */
9416             step->setBBWeight(block->bbWeight);
9417             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9418
9419 #ifdef DEBUG
9420             if (verbose)
9421             {
9422                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
9423                        step->dspToString());
9424             }
9425 #endif
9426
9427             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
9428             assert(finallyNesting <= compHndBBtabCount);
9429
9430             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9431             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
9432             endLFin               = gtNewStmt(endLFin);
9433             endCatches            = NULL;
9434
9435             encFinallies++;
9436
9437             invalidatePreds = true;
9438         }
9439     }
9440
9441     /* Append any remaining endCatches, if any */
9442
9443     assert(!encFinallies == !endLFin);
9444
9445     if (encFinallies == 0)
9446     {
9447         assert(step == DUMMY_INIT(NULL));
9448         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9449
9450         if (endCatches)
9451             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9452
9453 #ifdef DEBUG
9454         if (verbose)
9455         {
9456             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
9457                    "block %s\n",
9458                    block->dspToString());
9459         }
9460 #endif
9461     }
9462     else
9463     {
9464         // If leaveTarget is the start of another try block, we want to make sure that
9465         // we do not insert finalStep into that try block. Hence, we find the enclosing
9466         // try block.
9467         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
9468
9469         // Insert a new BB either in the try region indicated by tryIndex or
9470         // the handler region indicated by leaveTarget->bbHndIndex,
9471         // depending on which is the inner region.
9472         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
9473         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
9474         step->bbJumpDest = finalStep;
9475
9476         /* The new block will inherit this block's weight */
9477         finalStep->setBBWeight(block->bbWeight);
9478         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
9479
9480 #ifdef DEBUG
9481         if (verbose)
9482         {
9483             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
9484                    finalStep->dspToString());
9485         }
9486 #endif
9487
9488         GenTree* lastStmt;
9489
9490         if (endCatches)
9491         {
9492             lastStmt         = gtNewStmt(endCatches);
9493             endLFin->gtNext  = lastStmt;
9494             lastStmt->gtPrev = endLFin;
9495         }
9496         else
9497         {
9498             lastStmt = endLFin;
9499         }
9500
9501         impEndTreeList(finalStep, endLFin, lastStmt);
9502
9503         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9504
9505         // Queue up the jump target for importing
9506
9507         impImportBlockPending(leaveTarget);
9508
9509         invalidatePreds = true;
9510     }
9511
9512     if (invalidatePreds && fgComputePredsDone)
9513     {
9514         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9515         fgRemovePreds();
9516     }
9517
9518 #ifdef DEBUG
9519     fgVerifyHandlerTab();
9520
9521     if (verbose)
9522     {
9523         printf("\nAfter import CEE_LEAVE:\n");
9524         fgDispBasicBlocks();
9525         fgDispHandlerTab();
9526     }
9527 #endif // DEBUG
9528 }
9529
9530 #else // FEATURE_EH_FUNCLETS
9531
9532 void Compiler::impImportLeave(BasicBlock* block)
9533 {
9534 #ifdef DEBUG
9535     if (verbose)
9536     {
9537         printf("\nBefore import CEE_LEAVE in " FMT_BB " (targeting " FMT_BB "):\n", block->bbNum,
9538                block->bbJumpDest->bbNum);
9539         fgDispBasicBlocks();
9540         fgDispHandlerTab();
9541     }
9542 #endif // DEBUG
9543
9544     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9545     unsigned    blkAddr         = block->bbCodeOffs;
9546     BasicBlock* leaveTarget     = block->bbJumpDest;
9547     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
9548
9549     // LEAVE clears the stack: spill side effects and set the stack depth to 0
9550
9551     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9552     verCurrentState.esStackDepth = 0;
9553
9554     assert(block->bbJumpKind == BBJ_LEAVE);
9555     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
9556
9557     BasicBlock* step = nullptr;
9558
9559     enum StepType
9560     {
9561         // No step type; step == NULL.
9562         ST_None,
9563
9564         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
9565         // That is, is step->bbJumpDest where a finally will return to?
9566         ST_FinallyReturn,
9567
9568         // The step block is a catch return.
9569         ST_Catch,
9570
9571         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
9572         ST_Try
9573     };
9574     StepType stepType = ST_None;
9575
9576     unsigned  XTnum;
9577     EHblkDsc* HBtab;
9578
9579     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9580     {
9581         // Grab the handler offsets
9582
9583         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9584         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9585         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9586         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9587
9588         /* Is this a catch-handler we are CEE_LEAVEing out of?
9589          */
9590
9591         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9592         {
9593             // Can't CEE_LEAVE out of a finally/fault handler
9594             if (HBtab->HasFinallyOrFaultHandler())
9595             {
9596                 BADCODE("leave out of fault/finally block");
9597             }
9598
9599             /* We are jumping out of a catch */
9600
9601             if (step == nullptr)
9602             {
9603                 step             = block;
9604                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
9605                 stepType         = ST_Catch;
9606
9607 #ifdef DEBUG
9608                 if (verbose)
9609                 {
9610                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB
9611                            " to BBJ_EHCATCHRET "
9612                            "block\n",
9613                            XTnum, step->bbNum);
9614                 }
9615 #endif
9616             }
9617             else
9618             {
9619                 BasicBlock* exitBlock;
9620
9621                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
9622                  * scope */
9623                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
9624
9625                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9626                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
9627                                               // exit) returns to this block
9628                 step->bbJumpDest->bbRefs++;
9629
9630 #if defined(_TARGET_ARM_)
9631                 if (stepType == ST_FinallyReturn)
9632                 {
9633                     assert(step->bbJumpKind == BBJ_ALWAYS);
9634                     // Mark the target of a finally return
9635                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9636                 }
9637 #endif // defined(_TARGET_ARM_)
9638
9639                 /* The new block will inherit this block's weight */
9640                 exitBlock->setBBWeight(block->bbWeight);
9641                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9642
9643                 /* This exit block is the new step */
9644                 step     = exitBlock;
9645                 stepType = ST_Catch;
9646
9647                 invalidatePreds = true;
9648
9649 #ifdef DEBUG
9650                 if (verbose)
9651                 {
9652                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n",
9653                            XTnum, exitBlock->bbNum);
9654                 }
9655 #endif
9656             }
9657         }
9658         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9659                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9660         {
9661             /* We are jumping out of a finally-protected try */
9662
9663             BasicBlock* callBlock;
9664
9665             if (step == nullptr)
9666             {
9667 #if FEATURE_EH_CALLFINALLY_THUNKS
9668
9669                 // Put the call to the finally in the enclosing region.
9670                 unsigned callFinallyTryIndex =
9671                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9672                 unsigned callFinallyHndIndex =
9673                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9674                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
9675
9676                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
9677                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
9678                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
9679                 // next block, and flow optimizations will remove it.
9680                 block->bbJumpKind = BBJ_ALWAYS;
9681                 block->bbJumpDest = callBlock;
9682                 block->bbJumpDest->bbRefs++;
9683
9684                 /* The new block will inherit this block's weight */
9685                 callBlock->setBBWeight(block->bbWeight);
9686                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9687
9688 #ifdef DEBUG
9689                 if (verbose)
9690                 {
9691                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
9692                            " to "
9693                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n",
9694                            XTnum, block->bbNum, callBlock->bbNum);
9695                 }
9696 #endif
9697
9698 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9699
9700                 callBlock             = block;
9701                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9702
9703 #ifdef DEBUG
9704                 if (verbose)
9705                 {
9706                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
9707                            " to "
9708                            "BBJ_CALLFINALLY block\n",
9709                            XTnum, callBlock->bbNum);
9710                 }
9711 #endif
9712
9713 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9714             }
9715             else
9716             {
9717                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
9718                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
9719                 // a 'finally'), or the step block is the return from a catch.
9720                 //
9721                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
9722                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
9723                 // automatically re-raise the exception, using the return address of the catch (that is, the target
9724                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
9725                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
9726                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
9727                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
9728                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
9729                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
9730                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
9731                 // stack walks.)
9732
9733                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9734
9735 #if FEATURE_EH_CALLFINALLY_THUNKS
9736                 if (step->bbJumpKind == BBJ_EHCATCHRET)
9737                 {
9738                     // Need to create another step block in the 'try' region that will actually branch to the
9739                     // call-to-finally thunk.
9740                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9741                     step->bbJumpDest  = step2;
9742                     step->bbJumpDest->bbRefs++;
9743                     step2->setBBWeight(block->bbWeight);
9744                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9745
9746 #ifdef DEBUG
9747                     if (verbose)
9748                     {
9749                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
9750                                "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n",
9751                                XTnum, step->bbNum, step2->bbNum);
9752                     }
9753 #endif
9754
9755                     step = step2;
9756                     assert(stepType == ST_Catch); // Leave it as catch type for now.
9757                 }
9758 #endif // FEATURE_EH_CALLFINALLY_THUNKS
9759
9760 #if FEATURE_EH_CALLFINALLY_THUNKS
9761                 unsigned callFinallyTryIndex =
9762                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9763                 unsigned callFinallyHndIndex =
9764                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9765 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
9766                 unsigned callFinallyTryIndex = XTnum + 1;
9767                 unsigned callFinallyHndIndex = 0; // don't care
9768 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9769
9770                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
9771                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9772                                               // finally in the chain)
9773                 step->bbJumpDest->bbRefs++;
9774
9775 #if defined(_TARGET_ARM_)
9776                 if (stepType == ST_FinallyReturn)
9777                 {
9778                     assert(step->bbJumpKind == BBJ_ALWAYS);
9779                     // Mark the target of a finally return
9780                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9781                 }
9782 #endif // defined(_TARGET_ARM_)
9783
9784                 /* The new block will inherit this block's weight */
9785                 callBlock->setBBWeight(block->bbWeight);
9786                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9787
9788 #ifdef DEBUG
9789                 if (verbose)
9790                 {
9791                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY "
9792                            "block " FMT_BB "\n",
9793                            XTnum, callBlock->bbNum);
9794                 }
9795 #endif
9796             }
9797
9798             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9799             stepType = ST_FinallyReturn;
9800
9801             /* The new block will inherit this block's weight */
9802             step->setBBWeight(block->bbWeight);
9803             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9804
9805 #ifdef DEBUG
9806             if (verbose)
9807             {
9808                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9809                        "block " FMT_BB "\n",
9810                        XTnum, step->bbNum);
9811             }
9812 #endif
9813
9814             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9815
9816             invalidatePreds = true;
9817         }
9818         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9819                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9820         {
9821             // We are jumping out of a catch-protected try.
9822             //
9823             // If we are returning from a call to a finally, then we must have a step block within a try
9824             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
9825             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9826             // and invoke the appropriate catch.
9827             //
9828             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9829             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9830             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9831             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9832             // address of the catch return as the new exception address. That is, the re-raised exception appears to
9833             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9834             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
9835             // For example:
9836             //
9837             // try {
9838             //    try {
9839             //       // something here raises ThreadAbortException
9840             //       LEAVE LABEL_1; // no need to stop at LABEL_2
9841             //    } catch (Exception) {
9842             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9843             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9844             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9845             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9846             //       // need to do this transformation if the current EH block is a try/catch that catches
9847             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
9848             //       // information, so currently we do it for all catch types.
9849             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
9850             //    }
9851             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9852             // } catch (ThreadAbortException) {
9853             // }
9854             // LABEL_1:
9855             //
9856             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
9857             // compiler.
9858
9859             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9860             {
9861                 BasicBlock* catchStep;
9862
9863                 assert(step);
9864
9865                 if (stepType == ST_FinallyReturn)
9866                 {
9867                     assert(step->bbJumpKind == BBJ_ALWAYS);
9868                 }
9869                 else
9870                 {
9871                     assert(stepType == ST_Catch);
9872                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
9873                 }
9874
9875                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9876                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9877                 step->bbJumpDest = catchStep;
9878                 step->bbJumpDest->bbRefs++;
9879
9880 #if defined(_TARGET_ARM_)
9881                 if (stepType == ST_FinallyReturn)
9882                 {
9883                     // Mark the target of a finally return
9884                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9885                 }
9886 #endif // defined(_TARGET_ARM_)
9887
9888                 /* The new block will inherit this block's weight */
9889                 catchStep->setBBWeight(block->bbWeight);
9890                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9891
9892 #ifdef DEBUG
9893                 if (verbose)
9894                 {
9895                     if (stepType == ST_FinallyReturn)
9896                     {
9897                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9898                                "BBJ_ALWAYS block " FMT_BB "\n",
9899                                XTnum, catchStep->bbNum);
9900                     }
9901                     else
9902                     {
9903                         assert(stepType == ST_Catch);
9904                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9905                                "BBJ_ALWAYS block " FMT_BB "\n",
9906                                XTnum, catchStep->bbNum);
9907                     }
9908                 }
9909 #endif // DEBUG
9910
9911                 /* This block is the new step */
9912                 step     = catchStep;
9913                 stepType = ST_Try;
9914
9915                 invalidatePreds = true;
9916             }
9917         }
9918     }
9919
9920     if (step == nullptr)
9921     {
9922         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9923
9924 #ifdef DEBUG
9925         if (verbose)
9926         {
9927             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9928                    "block " FMT_BB " to BBJ_ALWAYS\n",
9929                    block->bbNum);
9930         }
9931 #endif
9932     }
9933     else
9934     {
9935         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9936
9937 #if defined(_TARGET_ARM_)
9938         if (stepType == ST_FinallyReturn)
9939         {
9940             assert(step->bbJumpKind == BBJ_ALWAYS);
9941             // Mark the target of a finally return
9942             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9943         }
9944 #endif // defined(_TARGET_ARM_)
9945
9946 #ifdef DEBUG
9947         if (verbose)
9948         {
9949             printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum);
9950         }
9951 #endif
9952
9953         // Queue up the jump target for importing
9954
9955         impImportBlockPending(leaveTarget);
9956     }
9957
9958     if (invalidatePreds && fgComputePredsDone)
9959     {
9960         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9961         fgRemovePreds();
9962     }
9963
9964 #ifdef DEBUG
9965     fgVerifyHandlerTab();
9966
9967     if (verbose)
9968     {
9969         printf("\nAfter import CEE_LEAVE:\n");
9970         fgDispBasicBlocks();
9971         fgDispHandlerTab();
9972     }
9973 #endif // DEBUG
9974 }
9975
9976 #endif // FEATURE_EH_FUNCLETS
9977
9978 /*****************************************************************************/
9979 // This is called when reimporting a leave block. It resets the JumpKind,
9980 // JumpDest, and bbNext to the original values
9981
9982 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9983 {
9984 #if FEATURE_EH_FUNCLETS
9985     // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
9986     // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY.   Say for some reason we reimport B0,
9987     // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
9988     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
9989     // only predecessor are also considered orphans and attempted to be deleted.
9990     //
9991     //  try  {
9992     //     ....
9993     //     try
9994     //     {
9995     //         ....
9996     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
9997     //     } finally { }
9998     //  } finally { }
9999     //  OUTSIDE:
10000     //
10001     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
10002     // that a finally would branch to (and such a block is marked as a finally target).  Block B1 branches to the
10003     // step block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it
10004     // cannot be removed.  To work around this we will duplicate B0 (call it B0Dup) before resetting it. B0Dup is
10005     // marked as BBJ_CALLFINALLY and only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during
10006     // orphan block deletion B0Dup and B1 will be treated as a pair and handled correctly.
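    //
    // A rough sketch of the intent (B0, B0Dup, B1 and Bstep are the hypothetical names used
    // in the comment above, not actual block numbers):
    //
    //    before reset:  B0 (BBJ_CALLFINALLY), B1 (BBJ_ALWAYS) -> Bstep (finally target)
    //    after reset:   B0 (BBJ_LEAVE), B0Dup (BBJ_CALLFINALLY), B1 (BBJ_ALWAYS) -> Bstep
    //
    // B0Dup is inserted immediately after B0 so that B1 still has a BBJ_CALLFINALLY partner,
    // and the orphan-block cleanup handles B0Dup and B1 as a pair.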
10007     if (block->bbJumpKind == BBJ_CALLFINALLY)
10008     {
10009         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
10010         dupBlock->bbFlags    = block->bbFlags;
10011         dupBlock->bbJumpDest = block->bbJumpDest;
10012         dupBlock->copyEHRegion(block);
10013         dupBlock->bbCatchTyp = block->bbCatchTyp;
10014
10015         // Mark this block as
10016         //  a) not referenced by any other block to make sure that it gets deleted
10017         //  b) weight zero
10018         //  c) prevented from being imported
10019         //  d) as internal
10020         //  e) as rarely run
10021         dupBlock->bbRefs   = 0;
10022         dupBlock->bbWeight = 0;
10023         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
10024
10025         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
10026         // will be next to each other.
10027         fgInsertBBafter(block, dupBlock);
10028
10029 #ifdef DEBUG
10030         if (verbose)
10031         {
10032             printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum);
10033         }
10034 #endif
10035     }
10036 #endif // FEATURE_EH_FUNCLETS
10037
10038     block->bbJumpKind = BBJ_LEAVE;
10039     fgInitBBLookup();
10040     block->bbJumpDest = fgLookupBB(jmpAddr);
10041
10042     // We will leave the BBJ_ALWAYS block we introduced in place. When it's reimported
10043     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
10044     // reason we don't want to remove the block at this point is that if we call
10045     // fgInitBBLookup() again we will get it wrong, as the BBJ_ALWAYS block won't be
10046     // added and the linked list length will differ from fgBBcount.
10047 }
10048
10049 /*****************************************************************************/
10050 // Get the first non-prefix opcode. Used for verification of valid combinations
10051 // of prefixes and actual opcodes.
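// For example (illustrative only): given the IL byte sequence "unaligned. 4 volatile. ldind.i4",
// this skips both prefixes (and the unaligned. operand) and returns CEE_LDIND_I4.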
10052
10053 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
10054 {
10055     while (codeAddr < codeEndp)
10056     {
10057         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10058         codeAddr += sizeof(__int8);
10059
10060         if (opcode == CEE_PREFIX1)
10061         {
10062             if (codeAddr >= codeEndp)
10063             {
10064                 break;
10065             }
10066             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10067             codeAddr += sizeof(__int8);
10068         }
10069
10070         switch (opcode)
10071         {
10072             case CEE_UNALIGNED:
10073             case CEE_VOLATILE:
10074             case CEE_TAILCALL:
10075             case CEE_CONSTRAINED:
10076             case CEE_READONLY:
10077                 break;
10078             default:
10079                 return opcode;
10080         }
10081
10082         codeAddr += opcodeSizes[opcode];
10083     }
10084
10085     return CEE_ILLEGAL;
10086 }
10087
10088 /*****************************************************************************/
10089 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
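// For example (illustrative only): "volatile. ldind.i4" and "unaligned. 2 cpblk" are accepted,
// while something like "volatile. add" is rejected with BADCODE; volatile. is additionally
// accepted on ldsfld/stsfld.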
10090
10091 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
10092 {
10093     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
10094
10095     if (!(
10096             // The opcodes for all the ldind and stind variants happen to be contiguous, except stind.i.
10097             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
10098             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
10099             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
10100             // The volatile. prefix is also allowed with ldsfld and stsfld
10101             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
10102     {
10103         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
10104     }
10105 }
10106
10107 /*****************************************************************************/
10108
10109 #ifdef DEBUG
10110
10111 #undef RETURN // undef contracts RETURN macro
10112
10113 enum controlFlow_t
10114 {
10115     NEXT,
10116     CALL,
10117     RETURN,
10118     THROW,
10119     BRANCH,
10120     COND_BRANCH,
10121     BREAK,
10122     PHI,
10123     META,
10124 };
10125
10126 const static controlFlow_t controlFlow[] = {
10127 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
10128 #include "opcode.def"
10129 #undef OPDEF
10130 };
10131
10132 #endif // DEBUG
10133
10134 /*****************************************************************************
10135  *  Determine the result type of an arithmetic operation
10136  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
10137  */
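//
// Rough summary of the rules implemented below (illustrative, not exhaustive):
//
//    byref - byref               => native int (TYP_I_IMPL)
//    byref +/- [native] int      => byref
//    [native] int - byref        => native int
//    native int mixed with int32 => native int (an upcast of the int32 operand is inserted on 64-bit)
//    int32 op int32              => int32;  float mixed with double => double
//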
10138 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
10139 {
10140     var_types type = TYP_UNDEF;
10141     GenTree*  op1  = *pOp1;
10142     GenTree*  op2  = *pOp2;
10143
10144     // Arithmetic operations are generally only allowed with
10145     // primitive types, but certain operations are allowed
10146     // with byrefs
10147
10148     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
10149     {
10150         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
10151         {
10152             // byref1-byref2 => gives a native int
10153             type = TYP_I_IMPL;
10154         }
10155         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
10156         {
10157             // [native] int - byref => gives a native int
10158
10159             //
10160             // The reason is that it is possible, in managed C++,
10161             // to have a tree like this:
10162             //
10163             //              -
10164             //             / \
10165             //            /   \
10166             //           /     \
10167             //          /       \
10168             // const(h) int     addr byref
10169             //
10170             // <BUGNUM> VSW 318822 </BUGNUM>
10171             //
10172             // So here we decide to make the resulting type to be a native int.
10173             CLANG_FORMAT_COMMENT_ANCHOR;
10174
10175 #ifdef _TARGET_64BIT_
10176             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10177             {
10178                 // insert an explicit upcast
10179                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10180             }
10181 #endif // _TARGET_64BIT_
10182
10183             type = TYP_I_IMPL;
10184         }
10185         else
10186         {
10187             // byref - [native] int => gives a byref
10188             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
10189
10190 #ifdef _TARGET_64BIT_
10191             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
10192             {
10193                 // insert an explicit upcast
10194                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10195             }
10196 #endif // _TARGET_64BIT_
10197
10198             type = TYP_BYREF;
10199         }
10200     }
10201     else if ((oper == GT_ADD) &&
10202              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
10203     {
10204         // byref + [native] int => gives a byref
10205         // (or)
10206         // [native] int + byref => gives a byref
10207
10208         // Only one operand can be a byref: byref op byref is not allowed
10209         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
10210         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
10211
10212 #ifdef _TARGET_64BIT_
10213         if (genActualType(op2->TypeGet()) == TYP_BYREF)
10214         {
10215             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10216             {
10217                 // insert an explicit upcast
10218                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10219             }
10220         }
10221         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
10222         {
10223             // insert an explicit upcast
10224             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10225         }
10226 #endif // _TARGET_64BIT_
10227
10228         type = TYP_BYREF;
10229     }
10230 #ifdef _TARGET_64BIT_
10231     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
10232     {
10233         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
10234
10235         // int + long => gives long
10236         // long + int => gives long
10237         // We get this because in the IL the 'long' isn't Int64, it's just IntPtr (native int), which may be mixed with int32.
10238
10239         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10240         {
10241             // insert an explicit upcast
10242             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10243         }
10244         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
10245         {
10246             // insert an explicit upcast
10247             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10248         }
10249
10250         type = TYP_I_IMPL;
10251     }
10252 #else  // 32-bit TARGET
10253     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
10254     {
10255         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
10256
10257         // int + long => gives long
10258         // long + int => gives long
10259
10260         type = TYP_LONG;
10261     }
10262 #endif // _TARGET_64BIT_
10263     else
10264     {
10265         // int + int => gives an int
10266         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
10267
10268         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
10269                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
10270
10271         type = genActualType(op1->gtType);
10272
10273         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
10274         // Otherwise, turn floats into doubles
10275         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
10276         {
10277             assert(genActualType(op2->gtType) == TYP_DOUBLE);
10278             type = TYP_DOUBLE;
10279         }
10280     }
10281
10282     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
10283     return type;
10284 }
10285
10286 //------------------------------------------------------------------------
10287 // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
10288 //
10289 // Arguments:
10290 //   op1 - value to cast
10291 //   pResolvedToken - resolved token for type to cast to
10292 //   isCastClass - true if this is a castclass, false if isinst
10293 //
10294 // Return Value:
10295 //   tree representing optimized cast, or null if no optimization possible
10296
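//
// Notes:
//   A hypothetical example: if gtGetClassHandle reports that op1 is exactly of some sealed
//   class Foo, and the runtime reports the cast to the token's class as TypeCompareState::Must,
//   the result is simply op1; if it instead reports MustNot and this is an 'isinst', the result
//   folds to a null TYP_REF constant (and an upstream box, if any, is removed).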
10297 GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
10298 {
10299     assert(op1->TypeGet() == TYP_REF);
10300
10301     // Don't optimize for minopts or debug codegen.
10302     if (opts.compDbgCode || opts.MinOpts())
10303     {
10304         return nullptr;
10305     }
10306
10307     // See what we know about the type of the object being cast.
10308     bool                 isExact   = false;
10309     bool                 isNonNull = false;
10310     CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
10311     GenTree*             optResult = nullptr;
10312
10313     if (fromClass != nullptr)
10314     {
10315         CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
10316         JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
10317                 isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
10318                 info.compCompHnd->getClassName(toClass));
10319
10320         // Perhaps we know if the cast will succeed or fail.
10321         TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
10322
10323         if (castResult == TypeCompareState::Must)
10324         {
10325             // Cast will succeed, result is simply op1.
10326             JITDUMP("Cast will succeed, optimizing to simply return input\n");
10327             return op1;
10328         }
10329         else if (castResult == TypeCompareState::MustNot)
10330         {
10331             // See if we can sharpen exactness by looking for final classes
10332             if (!isExact)
10333             {
10334                 DWORD flags     = info.compCompHnd->getClassAttribs(fromClass);
10335                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL |
10336                                   CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
10337                 isExact = ((flags & flagsMask) == CORINFO_FLG_FINAL);
10338             }
10339
10340             // Cast to exact type will fail. Handle case where we have
10341             // an exact type (that is, fromClass is not a subtype)
10342             // and we're not going to throw on failure.
10343             if (isExact && !isCastClass)
10344             {
10345                 JITDUMP("Cast will fail, optimizing to return null\n");
10346                 GenTree* result = gtNewIconNode(0, TYP_REF);
10347
10348                 // If the cast was fed by a box, we can remove that too.
10349                 if (op1->IsBoxedValue())
10350                 {
10351                     JITDUMP("Also removing upstream box\n");
10352                     gtTryRemoveBoxUpstreamEffects(op1);
10353                 }
10354
10355                 return result;
10356             }
10357             else if (isExact)
10358             {
10359                 JITDUMP("Not optimizing failing castclass (yet)\n");
10360             }
10361             else
10362             {
10363                 JITDUMP("Can't optimize since fromClass is inexact\n");
10364             }
10365         }
10366         else
10367         {
10368             JITDUMP("Result of cast unknown, must generate runtime test\n");
10369         }
10370     }
10371     else
10372     {
10373         JITDUMP("\nCan't optimize since fromClass is unknown\n");
10374     }
10375
10376     return nullptr;
10377 }
10378
10379 //------------------------------------------------------------------------
10380 // impCastClassOrIsInstToTree: build and import castclass/isinst
10381 //
10382 // Arguments:
10383 //   op1 - value to cast
10384 //   op2 - type handle for type to cast to
10385 //   pResolvedToken - resolved token from the cast operation
10386 //   isCastClass - true if this is castclass, false means isinst
10387 //
10388 // Return Value:
10389 //   Tree representing the cast
10390 //
10391 // Notes:
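//   When expanded inline, the overall shape of the result is roughly (sketch only):
//
//       (op1 == null) ? op1
//                     : (methodTable(op1) != op2) ? <helper call, or null for isinst> : op1
//
//   built from nested QMARK/COLON trees and spilled to a single-def temp whose class is set
//   from pResolvedToken.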
10392 //   May expand into a series of runtime checks or a helper call.
10393
10394 GenTree* Compiler::impCastClassOrIsInstToTree(GenTree*                op1,
10395                                               GenTree*                op2,
10396                                               CORINFO_RESOLVED_TOKEN* pResolvedToken,
10397                                               bool                    isCastClass)
10398 {
10399     assert(op1->TypeGet() == TYP_REF);
10400
10401     // Optimistically assume the jit should expand this as an inline test
10402     bool shouldExpandInline = true;
10403
10404     // Profitability check.
10405     //
10406     // Don't bother with inline expansion when jit is trying to
10407     // generate code quickly, or the cast is in code that won't run very
10408     // often, or the method already is pretty big.
10409     if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
10410     {
10411         // not worth the code expansion if jitting fast or in a rarely run block
10412         shouldExpandInline = false;
10413     }
10414     else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
10415     {
10416         // not worth creating an untracked local variable
10417         shouldExpandInline = false;
10418     }
10419
10420     // Pessimistically assume the jit cannot expand this as an inline test
10421     bool                  canExpandInline = false;
10422     const CorInfoHelpFunc helper          = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
10423
10424     // Legality check.
10425     //
10426     // Not all castclass/isinst operations can be inline expanded.
10427     // Check legality only if an inline expansion is desirable.
10428     if (shouldExpandInline)
10429     {
10430         if (isCastClass)
10431         {
10432             // Jit can only inline expand the normal CHKCASTCLASS helper.
10433             canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
10434         }
10435         else
10436         {
10437             if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
10438             {
10439                 // Check the class attributes.
10440                 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
10441
10442                 // If the class is final and is not marshal byref or
10443                 // contextful, the jit can expand the IsInst check inline.
10444                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
10445                 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
10446             }
10447         }
10448     }
10449
10450     const bool expandInline = canExpandInline && shouldExpandInline;
10451
10452     if (!expandInline)
10453     {
10454         JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
10455                 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
10456
10457         // If we CSE this class handle we prevent assertionProp from making SubType assertions
10458         // so instead we force the CSE logic to not consider CSE-ing this class handle.
10459         //
10460         op2->gtFlags |= GTF_DONT_CSE;
10461
10462         return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
10463     }
10464
10465     JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
10466
10467     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
10468
10469     GenTree* temp;
10470     GenTree* condMT;
10471     //
10472     // expand the methodtable match:
10473     //
10474     //  condMT ==>   GT_NE
10475     //               /    \
10476     //           GT_IND   op2 (typically CNS_INT)
10477     //              |
10478     //           op1Copy
10479     //
10480
10481     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
10482     //
10483     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
10484     //
10485     // op1 is now known to be a non-complex tree
10486     // thus we can use gtClone(op1) from now on
10487     //
10488
10489     GenTree* op2Var = op2;
10490     if (isCastClass)
10491     {
10492         op2Var                                                  = fgInsertCommaFormTemp(&op2);
10493         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
10494     }
10495     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
10496     temp->gtFlags |= GTF_EXCEPT;
10497     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
10498
10499     GenTree* condNull;
10500     //
10501     // expand the null check:
10502     //
10503     //  condNull ==>   GT_EQ
10504     //                 /    \
10505     //             op1Copy CNS_INT
10506     //                      null
10507     //
10508     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
10509
10510     //
10511     // expand the true and false trees for the condMT
10512     //
10513     GenTree* condFalse = gtClone(op1);
10514     GenTree* condTrue;
10515     if (isCastClass)
10516     {
10517         //
10518         // use the special helper that skips the cases checked by our inlined cast
10519         //
10520         const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
10521
10522         condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
10523     }
10524     else
10525     {
10526         condTrue = gtNewIconNode(0, TYP_REF);
10527     }
10528
10529 #define USE_QMARK_TREES
10530
10531 #ifdef USE_QMARK_TREES
10532     GenTree* qmarkMT;
10533     //
10534     // Generate first QMARK - COLON tree
10535     //
10536     //  qmarkMT ==>   GT_QMARK
10537     //                 /     \
10538     //            condMT   GT_COLON
10539     //                      /     \
10540     //                condFalse  condTrue
10541     //
10542     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
10543     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
10544
10545     GenTree* qmarkNull;
10546     //
10547     // Generate second QMARK - COLON tree
10548     //
10549     //  qmarkNull ==>  GT_QMARK
10550     //                 /     \
10551     //           condNull  GT_COLON
10552     //                      /     \
10553     //                qmarkMT   op1Copy
10554     //
10555     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
10556     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
10557     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
10558
10559     // Make QMark node a top level node by spilling it.
10560     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
10561     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
10562
10563     // TODO-CQ: Is it possible op1 has a better type?
10564     //
10565     // See also gtGetHelperCallClassHandle where we make the same
10566     // determination for the helper call variants.
10567     LclVarDsc* lclDsc = lvaGetDesc(tmp);
10568     assert(lclDsc->lvSingleDef == 0);
10569     lclDsc->lvSingleDef = 1;
10570     JITDUMP("Marked V%02u as a single def temp\n", tmp);
10571     lvaSetClass(tmp, pResolvedToken->hClass);
10572     return gtNewLclvNode(tmp, TYP_REF);
10573 #endif
10574 }
10575
10576 #ifndef DEBUG
10577 #define assertImp(cond) ((void)0)
10578 #else
10579 #define assertImp(cond)                                                                                                \
10580     do                                                                                                                 \
10581     {                                                                                                                  \
10582         if (!(cond))                                                                                                   \
10583         {                                                                                                              \
10584             const int cchAssertImpBuf = 600;                                                                           \
10585             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
10586             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
10587                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
10588                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
10589                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
10590             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
10591         }                                                                                                              \
10592     } while (0)
10593 #endif // DEBUG
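
// Note: assertImp() is for conditions that should hold for well-formed IL rather than for JIT
// invariants; on failure (debug builds only) it reports the current opcode, IL offset, operand
// types and stack depth, since the usual suspect is bad or unexpected IL.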
10594
10595 #ifdef _PREFAST_
10596 #pragma warning(push)
10597 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
10598 #endif
10599 /*****************************************************************************
10600  *  Import the instr for the given basic block
10601  */
10602 void Compiler::impImportBlockCode(BasicBlock* block)
10603 {
10604 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
10605
10606 #ifdef DEBUG
10607
10608     if (verbose)
10609     {
10610         printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
10611     }
10612 #endif
10613
10614     unsigned  nxtStmtIndex = impInitBlockLineInfo();
10615     IL_OFFSET nxtStmtOffs;
10616
10617     GenTree*                     arrayNodeFrom;
10618     GenTree*                     arrayNodeTo;
10619     GenTree*                     arrayNodeToIndex;
10620     CorInfoHelpFunc              helper;
10621     CorInfoIsAccessAllowedResult accessAllowedResult;
10622     CORINFO_HELPER_DESC          calloutHelper;
10623     const BYTE*                  lastLoadToken = nullptr;
10624
10625     // reject cyclic constraints
10626     if (tiVerificationNeeded)
10627     {
10628         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
10629         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
10630     }
10631
10632     /* Get the tree list started */
10633
10634     impBeginTreeList();
10635
10636     /* Walk the opcodes that comprise the basic block */
10637
10638     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
10639     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
10640
10641     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
10642     IL_OFFSET lastSpillOffs = opcodeOffs;
10643
10644     signed jmpDist;
10645
10646     /* remember the start of the delegate creation sequence (used for verification) */
10647     const BYTE* delegateCreateStart = nullptr;
10648
10649     int  prefixFlags = 0;
10650     bool explicitTailCall, constraintCall, readonlyCall;
10651
10652     typeInfo tiRetVal;
10653
10654     unsigned numArgs = info.compArgsCount;
10655
10656     /* Now process all the opcodes in the block */
10657
10658     var_types callTyp    = TYP_COUNT;
10659     OPCODE    prevOpcode = CEE_ILLEGAL;
10660
10661     if (block->bbCatchTyp)
10662     {
10663         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
10664         {
10665             impCurStmtOffsSet(block->bbCodeOffs);
10666         }
10667
10668         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
10669         // to a temp. This is a trade-off for code simplicity.
10670         impSpillSpecialSideEff();
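        // (On entry to a catch handler the caught exception object is represented by the special
        //  GT_CATCH_ARG node; spilling it to a temp up front means later uses refer to an
        //  ordinary local rather than to the special node.)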
10671     }
10672
10673     while (codeAddr < codeEndp)
10674     {
10675         bool                   usingReadyToRunHelper = false;
10676         CORINFO_RESOLVED_TOKEN resolvedToken;
10677         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
10678         CORINFO_CALL_INFO      callInfo;
10679         CORINFO_FIELD_INFO     fieldInfo;
10680
10681         tiRetVal = typeInfo(); // Default type info
10682
10683         //---------------------------------------------------------------------
10684
10685         /* We need to restrict the max tree depth as many of the Compiler
10686            functions are recursive. We do this by spilling the stack */
10687
10688         if (verCurrentState.esStackDepth)
10689         {
10690             /* Has it been a while since we last saw an empty stack (which
10691                guarantees that the tree depth isn't accumulating)? */
10692
10693             if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
10694             {
10695                 impSpillStackEnsure();
10696                 lastSpillOffs = opcodeOffs;
10697             }
10698         }
10699         else
10700         {
10701             lastSpillOffs   = opcodeOffs;
10702             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
10703         }
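
        // Without the periodic spill above, a long stretch of IL that never empties the
        // evaluation stack (e.g. one very large expression) could keep deepening a single tree,
        // and with it the recursion depth of the Compiler phases mentioned above.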
10704
10705         /* Compute the current instr offset */
10706
10707         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10708
10709 #ifndef DEBUG
10710         if (opts.compDbgInfo)
10711 #endif
10712         {
10713             if (!compIsForInlining())
10714             {
10715                 nxtStmtOffs =
10716                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
10717
10718                 /* Have we reached the next stmt boundary ? */
10719
10720                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
10721                 {
10722                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
10723
10724                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
10725                     {
10726                         /* We need to provide accurate IP-mapping at this point.
10727                            So spill anything on the stack so that it will form
10728                            gtStmts with the correct stmt offset noted */
10729
10730                         impSpillStackEnsure(true);
10731                     }
10732
10733                     // Has impCurStmtOffs been reported in any tree?
10734
10735                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
10736                     {
10737                         GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
10738                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10739
10740                         assert(impCurStmtOffs == BAD_IL_OFFSET);
10741                     }
10742
10743                     if (impCurStmtOffs == BAD_IL_OFFSET)
10744                     {
10745                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
10746                            If opcodeOffs has gone past nxtStmtIndex, catch up */
10747
10748                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
10749                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
10750                         {
10751                             nxtStmtIndex++;
10752                         }
10753
10754                         /* Go to the new stmt */
10755
10756                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
10757
10758                         /* Update the stmt boundary index */
10759
10760                         nxtStmtIndex++;
10761                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
10762
10763                         /* Are there any more line# entries after this one? */
10764
10765                         if (nxtStmtIndex < info.compStmtOffsetsCount)
10766                         {
10767                             /* Remember where the next line# starts */
10768
10769                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
10770                         }
10771                         else
10772                         {
10773                             /* No more line# entries */
10774
10775                             nxtStmtOffs = BAD_IL_OFFSET;
10776                         }
10777                     }
10778                 }
10779                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
10780                          (verCurrentState.esStackDepth == 0))
10781                 {
10782                     /* At stack-empty locations, we have already added the tree to
10783                        the stmt list with the last offset. We just need to update
10784                        impCurStmtOffs
10785                      */
10786
10787                     impCurStmtOffsSet(opcodeOffs);
10788                 }
10789                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
10790                          impOpcodeIsCallSiteBoundary(prevOpcode))
10791                 {
10792                     /* Make sure we have a type cached */
10793                     assert(callTyp != TYP_COUNT);
10794
10795                     if (callTyp == TYP_VOID)
10796                     {
10797                         impCurStmtOffsSet(opcodeOffs);
10798                     }
10799                     else if (opts.compDbgCode)
10800                     {
10801                         impSpillStackEnsure(true);
10802                         impCurStmtOffsSet(opcodeOffs);
10803                     }
10804                 }
10805                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
10806                 {
10807                     if (opts.compDbgCode)
10808                     {
10809                         impSpillStackEnsure(true);
10810                     }
10811
10812                     impCurStmtOffsSet(opcodeOffs);
10813                 }
10814
10815                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
10816                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
10817             }
10818         }
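
        // In short: when debug info is requested, the code above advances impCurStmtOffs to the
        // next statement boundary (explicit offsets from info.compStmtOffsets plus any implicit
        // stack-empty, call-site or nop boundaries), spilling the stack first under compDbgCode
        // so that every tree ends up in a statement tagged with the right IL offset.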
10819
10820         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
10821         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
10822         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
10823
10824         var_types       lclTyp, ovflType = TYP_UNKNOWN;
10825         GenTree*        op1           = DUMMY_INIT(NULL);
10826         GenTree*        op2           = DUMMY_INIT(NULL);
10827         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
10828         GenTree*        newObjThisPtr = DUMMY_INIT(NULL);
10829         bool            uns           = DUMMY_INIT(false);
10830         bool            isLocal       = false;
10831
10832         /* Get the next opcode and the size of its parameters */
10833
10834         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10835         codeAddr += sizeof(__int8);
10836
10837 #ifdef DEBUG
10838         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10839         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
10840 #endif
10841
10842     DECODE_OPCODE:
10843
10844         // Return if any previous code has caused inline to fail.
10845         if (compDonotInline())
10846         {
10847             return;
10848         }
10849
10850         /* Get the size of additional parameters */
10851
10852         signed int sz = opcodeSizes[opcode];
10853
10854 #ifdef DEBUG
10855         clsHnd  = NO_CLASS_HANDLE;
10856         lclTyp  = TYP_COUNT;
10857         callTyp = TYP_COUNT;
10858
10859         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10860         impCurOpcName = opcodeNames[opcode];
10861
10862         if (verbose && (opcode != CEE_PREFIX1))
10863         {
10864             printf("%s", impCurOpcName);
10865         }
10866
10867         /* Use assertImp() to display the opcode */
10868
10869         op1 = op2 = nullptr;
10870 #endif
10871
10872         /* See what kind of an opcode we have, then */
10873
10874         unsigned mflags   = 0;
10875         unsigned clsFlags = 0;
10876
10877         switch (opcode)
10878         {
10879             unsigned  lclNum;
10880             var_types type;
10881
10882             GenTree*   op3;
10883             genTreeOps oper;
10884             unsigned   size;
10885
10886             int val;
10887
10888             CORINFO_SIG_INFO     sig;
10889             IL_OFFSET            jmpAddr;
10890             bool                 ovfl, unordered, callNode;
10891             bool                 ldstruct;
10892             CORINFO_CLASS_HANDLE tokenType;
10893
10894             union {
10895                 int     intVal;
10896                 float   fltVal;
10897                 __int64 lngVal;
10898                 double  dblVal;
10899             } cval;
10900
10901             case CEE_PREFIX1:
10902                 opcode     = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10903                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10904                 codeAddr += sizeof(__int8);
10905                 goto DECODE_OPCODE;
10906
10907             SPILL_APPEND:
10908
10909                 // We need to call impSpillLclRefs() for a struct type lclVar.
10910                 // This is done for non-block assignments in the handling of stloc.
10911                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10912                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10913                 {
10914                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10915                 }
10916
10917                 /* Append 'op1' to the list of statements */
10918                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10919                 goto DONE_APPEND;
10920
10921             APPEND:
10922
10923                 /* Append 'op1' to the list of statements */
10924
10925                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10926                 goto DONE_APPEND;
10927
10928             DONE_APPEND:
10929
10930 #ifdef DEBUG
10931                 // Remember at which BC offset the tree was finished
10932                 impNoteLastILoffs();
10933 #endif
10934                 break;
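
                // The three labels above differ only in how much of the evaluation stack is
                // spilled before op1 is appended: SPILL_APPEND uses CHECK_SPILL_ALL, APPEND uses
                // CHECK_SPILL_NONE, and both fall through to DONE_APPEND.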
10935
10936             case CEE_LDNULL:
10937                 impPushNullObjRefOnStack();
10938                 break;
10939
10940             case CEE_LDC_I4_M1:
10941             case CEE_LDC_I4_0:
10942             case CEE_LDC_I4_1:
10943             case CEE_LDC_I4_2:
10944             case CEE_LDC_I4_3:
10945             case CEE_LDC_I4_4:
10946             case CEE_LDC_I4_5:
10947             case CEE_LDC_I4_6:
10948             case CEE_LDC_I4_7:
10949             case CEE_LDC_I4_8:
10950                 cval.intVal = (opcode - CEE_LDC_I4_0);
10951                 assert(-1 <= cval.intVal && cval.intVal <= 8);
10952                 goto PUSH_I4CON;
10953
10954             case CEE_LDC_I4_S:
10955                 cval.intVal = getI1LittleEndian(codeAddr);
10956                 goto PUSH_I4CON;
10957             case CEE_LDC_I4:
10958                 cval.intVal = getI4LittleEndian(codeAddr);
10959                 goto PUSH_I4CON;
10960             PUSH_I4CON:
10961                 JITDUMP(" %d", cval.intVal);
10962                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10963                 break;
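
                // The short forms encode their operand in the opcode itself (e.g. CEE_LDC_I4_3
                // pushes the constant 3), while CEE_LDC_I4_S carries a signed 1-byte operand and
                // CEE_LDC_I4 a full 4-byte operand, as read above.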
10964
10965             case CEE_LDC_I8:
10966                 cval.lngVal = getI8LittleEndian(codeAddr);
10967                 JITDUMP(" 0x%016llx", cval.lngVal);
10968                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10969                 break;
10970
10971             case CEE_LDC_R8:
10972                 cval.dblVal = getR8LittleEndian(codeAddr);
10973                 JITDUMP(" %#.17g", cval.dblVal);
10974                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10975                 break;
10976
10977             case CEE_LDC_R4:
10978                 cval.dblVal = getR4LittleEndian(codeAddr);
10979                 JITDUMP(" %#.17g", cval.dblVal);
10980                 {
10981                     GenTree* cnsOp = gtNewDconNode(cval.dblVal);
10982                     cnsOp->gtType  = TYP_FLOAT;
10983                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10984                 }
10985                 break;
10986
10987             case CEE_LDSTR:
10988
10989                 if (compIsForInlining())
10990                 {
10991                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10992                     {
10993                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
10994                         return;
10995                     }
10996                 }
10997
10998                 val = getU4LittleEndian(codeAddr);
10999                 JITDUMP(" %08X", val);
11000                 if (tiVerificationNeeded)
11001                 {
11002                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
11003                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
11004                 }
11005                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
11006
11007                 break;
11008
11009             case CEE_LDARG:
11010                 lclNum = getU2LittleEndian(codeAddr);
11011                 JITDUMP(" %u", lclNum);
11012                 impLoadArg(lclNum, opcodeOffs + sz + 1);
11013                 break;
11014
11015             case CEE_LDARG_S:
11016                 lclNum = getU1LittleEndian(codeAddr);
11017                 JITDUMP(" %u", lclNum);
11018                 impLoadArg(lclNum, opcodeOffs + sz + 1);
11019                 break;
11020
11021             case CEE_LDARG_0:
11022             case CEE_LDARG_1:
11023             case CEE_LDARG_2:
11024             case CEE_LDARG_3:
11025                 lclNum = (opcode - CEE_LDARG_0);
11026                 assert(lclNum >= 0 && lclNum < 4);
11027                 impLoadArg(lclNum, opcodeOffs + sz + 1);
11028                 break;
11029
11030             case CEE_LDLOC:
11031                 lclNum = getU2LittleEndian(codeAddr);
11032                 JITDUMP(" %u", lclNum);
11033                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11034                 break;
11035
11036             case CEE_LDLOC_S:
11037                 lclNum = getU1LittleEndian(codeAddr);
11038                 JITDUMP(" %u", lclNum);
11039                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11040                 break;
11041
11042             case CEE_LDLOC_0:
11043             case CEE_LDLOC_1:
11044             case CEE_LDLOC_2:
11045             case CEE_LDLOC_3:
11046                 lclNum = (opcode - CEE_LDLOC_0);
11047                 assert(lclNum >= 0 && lclNum < 4);
11048                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11049                 break;
11050
11051             case CEE_STARG:
11052                 lclNum = getU2LittleEndian(codeAddr);
11053                 goto STARG;
11054
11055             case CEE_STARG_S:
11056                 lclNum = getU1LittleEndian(codeAddr);
11057             STARG:
11058                 JITDUMP(" %u", lclNum);
11059
11060                 if (tiVerificationNeeded)
11061                 {
11062                     Verify(lclNum < info.compILargsCount, "bad arg num");
11063                 }
11064
11065                 if (compIsForInlining())
11066                 {
11067                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
11068                     noway_assert(op1->gtOper == GT_LCL_VAR);
11069                     lclNum = op1->AsLclVar()->gtLclNum;
11070
11071                     goto VAR_ST_VALID;
11072                 }
11073
11074                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
11075                 assertImp(lclNum < numArgs);
11076
11077                 if (lclNum == info.compThisArg)
11078                 {
11079                     lclNum = lvaArg0Var;
11080                 }
11081
11082                 // We should have seen this arg write in the prescan
11083                 assert(lvaTable[lclNum].lvHasILStoreOp);
11084
11085                 if (tiVerificationNeeded)
11086                 {
11087                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
11088                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
11089                            "type mismatch");
11090
11091                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
11092                     {
11093                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
11094                     }
11095                 }
11096
11097                 goto VAR_ST;
11098
11099             case CEE_STLOC:
11100                 lclNum  = getU2LittleEndian(codeAddr);
11101                 isLocal = true;
11102                 JITDUMP(" %u", lclNum);
11103                 goto LOC_ST;
11104
11105             case CEE_STLOC_S:
11106                 lclNum  = getU1LittleEndian(codeAddr);
11107                 isLocal = true;
11108                 JITDUMP(" %u", lclNum);
11109                 goto LOC_ST;
11110
11111             case CEE_STLOC_0:
11112             case CEE_STLOC_1:
11113             case CEE_STLOC_2:
11114             case CEE_STLOC_3:
11115                 isLocal = true;
11116                 lclNum  = (opcode - CEE_STLOC_0);
11117                 assert(lclNum >= 0 && lclNum < 4);
11118
11119             LOC_ST:
11120                 if (tiVerificationNeeded)
11121                 {
11122                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
11123                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
11124                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
11125                            "type mismatch");
11126                 }
11127
11128                 if (compIsForInlining())
11129                 {
11130                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
11131
11132                     /* Have we allocated a temp for this local? */
11133
11134                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
11135
11136                     goto _PopValue;
11137                 }
11138
11139                 lclNum += numArgs;
11140
11141             VAR_ST:
11142
11143                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
11144                 {
11145                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11146                     BADCODE("Bad IL");
11147                 }
11148
11149             VAR_ST_VALID:
11150
11151                 /* if it is a struct assignment, make certain we don't overflow the buffer */
11152                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
11153
11154                 if (lvaTable[lclNum].lvNormalizeOnLoad())
11155                 {
11156                     lclTyp = lvaGetRealType(lclNum);
11157                 }
11158                 else
11159                 {
11160                     lclTyp = lvaGetActualType(lclNum);
11161                 }
11162
11163             _PopValue:
11164                 /* Pop the value being assigned */
11165
11166                 {
11167                     StackEntry se = impPopStack();
11168                     clsHnd        = se.seTypeInfo.GetClassHandle();
11169                     op1           = se.val;
11170                     tiRetVal      = se.seTypeInfo;
11171                 }
11172
11173 #ifdef FEATURE_SIMD
11174                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
11175                 {
11176                     assert(op1->TypeGet() == TYP_STRUCT);
11177                     op1->gtType = lclTyp;
11178                 }
11179 #endif // FEATURE_SIMD
11180
11181                 op1 = impImplicitIorI4Cast(op1, lclTyp);
11182
11183 #ifdef _TARGET_64BIT_
11184                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
11185                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
11186                 {
11187                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11188                     op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
11189                 }
11190 #endif // _TARGET_64BIT_
11191
11192                 // We had better assign it a value of the correct type
11193                 assertImp(
11194                     genActualType(lclTyp) == genActualType(op1->gtType) ||
11195                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
11196                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
11197                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
11198                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
11199                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
11200
11201                 /* If op1 is "&var" then its type is the transient "*" and it can
11202                    be used either as TYP_BYREF or TYP_I_IMPL */
11203
11204                 if (op1->IsVarAddr())
11205                 {
11206                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
11207
11208                     /* When "&var" is created, we assume it is a byref. If it is
11209                        being assigned to a TYP_I_IMPL var, change the type to
11210                        prevent unnecessary GC info */
11211
11212                     if (genActualType(lclTyp) == TYP_I_IMPL)
11213                     {
11214                         op1->gtType = TYP_I_IMPL;
11215                     }
11216                 }
11217
11218                 // If this is a local and the local is a ref type, see
11219                 // if we can improve type information based on the
11220                 // value being assigned.
11221                 if (isLocal && (lclTyp == TYP_REF))
11222                 {
11223                     // We should have seen a stloc in our IL prescan.
11224                     assert(lvaTable[lclNum].lvHasILStoreOp);
11225
11226                     // Is there just one place this local is defined?
11227                     const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef;
11228
11229                     // Conservative check that there is just one
11230                     // definition that reaches this store.
11231                     const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
11232
11233                     if (isSingleDefLocal && hasSingleReachingDef)
11234                     {
11235                         lvaUpdateClass(lclNum, op1, clsHnd);
11236                     }
11237                 }
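
                // For example, for IL equivalent to 'object o = new Foo();' where this store is
                // the only definition of 'o', lvaUpdateClass can record the exact class of the
                // value; later phases can then use that, e.g. to devirtualize calls made
                // through 'o'.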
11238
11239                 /* Filter out simple assignments to itself */
11240
11241                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
11242                 {
11243                     if (opts.compDbgCode)
11244                     {
11245                         op1 = gtNewNothingNode();
11246                         goto SPILL_APPEND;
11247                     }
11248                     else
11249                     {
11250                         break;
11251                     }
11252                 }
11253
11254                 /* Create the assignment node */
11255
11256                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
11257
11258                 /* If the local is aliased or pinned, we need to spill calls and
11259                    indirections from the stack. */
11260
11261                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
11262                     (verCurrentState.esStackDepth > 0))
11263                 {
11264                     impSpillSideEffects(false,
11265                                         (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
11266                 }
11267
11268                 /* Spill any refs to the local from the stack */
11269
11270                 impSpillLclRefs(lclNum);
11271
11272                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE;
11273                 // we insert a cast to the dest 'op2' type.
11274                 //
11275                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
11276                     varTypeIsFloating(op2->gtType))
11277                 {
11278                     op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
11279                 }
11280
11281                 if (varTypeIsStruct(lclTyp))
11282                 {
11283                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
11284                 }
11285                 else
11286                 {
11287                     // The code generator generates GC tracking information
11288                     // based on the RHS of the assignment. Later the LHS (which
11289                     // is a BYREF) gets used and the emitter checks that that variable
11290                     // is being tracked. It is not (since the RHS was an int and did
11291                     // not need tracking). To keep this assert happy, we change the RHS.
11292                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
11293                     {
11294                         op1->gtType = TYP_BYREF;
11295                     }
11296                     op1 = gtNewAssignNode(op2, op1);
11297                 }
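
                // At this point op1 is the complete store: roughly ASG(LCL_VAR<lclTyp>, value)
                // for the non-struct cases above, or whatever impAssignStruct produced for a
                // struct store. SPILL_APPEND spills any interfering stack entries and then
                // appends it as a top-level statement.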
11298
11299                 goto SPILL_APPEND;
11300
11301             case CEE_LDLOCA:
11302                 lclNum = getU2LittleEndian(codeAddr);
11303                 goto LDLOCA;
11304
11305             case CEE_LDLOCA_S:
11306                 lclNum = getU1LittleEndian(codeAddr);
11307             LDLOCA:
11308                 JITDUMP(" %u", lclNum);
11309                 if (tiVerificationNeeded)
11310                 {
11311                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
11312                     Verify(info.compInitMem, "initLocals not set");
11313                 }
11314
11315                 if (compIsForInlining())
11316                 {
11317                     // Get the local type
11318                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
11319
11320                     /* Have we allocated a temp for this local? */
11321
11322                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
11323
11324                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
11325
11326                     goto _PUSH_ADRVAR;
11327                 }
11328
11329                 lclNum += numArgs;
11330                 assertImp(lclNum < info.compLocalsCount);
11331                 goto ADRVAR;
11332
11333             case CEE_LDARGA:
11334                 lclNum = getU2LittleEndian(codeAddr);
11335                 goto LDARGA;
11336
11337             case CEE_LDARGA_S:
11338                 lclNum = getU1LittleEndian(codeAddr);
11339             LDARGA:
11340                 JITDUMP(" %u", lclNum);
11341                 Verify(lclNum < info.compILargsCount, "bad arg num");
11342
11343                 if (compIsForInlining())
11344                 {
11345                     // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
11346                     // followed by a ldfld to load the field.
11347
11348                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
11349                     if (op1->gtOper != GT_LCL_VAR)
11350                     {
11351                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
11352                         return;
11353                     }
11354
11355                     assert(op1->gtOper == GT_LCL_VAR);
11356
11357                     goto _PUSH_ADRVAR;
11358                 }
11359
11360                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
11361                 assertImp(lclNum < numArgs);
11362
11363                 if (lclNum == info.compThisArg)
11364                 {
11365                     lclNum = lvaArg0Var;
11366                 }
11367
11368                 goto ADRVAR;
11369
11370             ADRVAR:
11371
11372                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
11373
11374             _PUSH_ADRVAR:
11375                 assert(op1->gtOper == GT_LCL_VAR);
11376
11377                 /* Note that this is supposed to create the transient type "*"
11378                    which may be used as a TYP_I_IMPL. However, we catch places
11379                    where it is used as a TYP_I_IMPL and change the node if needed.
11380                    Thus we are pessimistic and may report byrefs in the GC info
11381                    where it was not absolutely needed, but it is safer this way.
11382                  */
11383                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
11384
11385                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
11386                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
11387
11388                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
11389                 if (tiVerificationNeeded)
11390                 {
11391                     // Don't allow taking address of uninit this ptr.
11392                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
11393                     {
11394                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
11395                     }
11396
11397                     if (!tiRetVal.IsByRef())
11398                     {
11399                         tiRetVal.MakeByRef();
11400                     }
11401                     else
11402                     {
11403                         Verify(false, "byref to byref");
11404                     }
11405                 }
11406
11407                 impPushOnStack(op1, tiRetVal);
11408                 break;
11409
11410             case CEE_ARGLIST:
11411
11412                 if (!info.compIsVarArgs)
11413                 {
11414                     BADCODE("arglist in non-vararg method");
11415                 }
11416
11417                 if (tiVerificationNeeded)
11418                 {
11419                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
11420                 }
11421                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
11422
11423                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
11424                    adjusted the arg count because this is like fetching the last param */
11425                 assertImp(0 < numArgs);
11426                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
11427                 lclNum = lvaVarargsHandleArg;
11428                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
11429                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
11430                 impPushOnStack(op1, tiRetVal);
11431                 break;
11432
11433             case CEE_ENDFINALLY:
11434
11435                 if (compIsForInlining())
11436                 {
11437                     assert(!"Shouldn't have exception handlers in the inliner!");
11438                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
11439                     return;
11440                 }
11441
11442                 if (verCurrentState.esStackDepth > 0)
11443                 {
11444                     impEvalSideEffects();
11445                 }
11446
11447                 if (info.compXcptnsCount == 0)
11448                 {
11449                     BADCODE("endfinally outside finally");
11450                 }
11451
11452                 assert(verCurrentState.esStackDepth == 0);
11453
11454                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
11455                 goto APPEND;
11456
11457             case CEE_ENDFILTER:
11458
11459                 if (compIsForInlining())
11460                 {
11461                     assert(!"Shouldn't have exception handlers in the inliner!");
11462                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
11463                     return;
11464                 }
11465
11466                 block->bbSetRunRarely(); // filters are rare
11467
11468                 if (info.compXcptnsCount == 0)
11469                 {
11470                     BADCODE("endfilter outside filter");
11471                 }
11472
11473                 if (tiVerificationNeeded)
11474                 {
11475                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
11476                 }
11477
11478                 op1 = impPopStack().val;
11479                 assertImp(op1->gtType == TYP_INT);
11480                 if (!bbInFilterILRange(block))
11481                 {
11482                     BADCODE("EndFilter outside a filter handler");
11483                 }
11484
11485                 /* Mark current bb as end of filter */
11486
11487                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
11488                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
11489
11490                 /* Mark catch handler as successor */
11491
11492                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
11493                 if (verCurrentState.esStackDepth != 0)
11494                 {
11495                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
11496                                                 DEBUGARG(__LINE__));
11497                 }
11498                 goto APPEND;
11499
11500             case CEE_RET:
11501                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
11502             RET:
11503                 if (!impReturnInstruction(block, prefixFlags, opcode))
11504                 {
11505                     return; // abort
11506                 }
11507                 else
11508                 {
11509                     break;
11510                 }
11511
11512             case CEE_JMP:
11513
11514                 assert(!compIsForInlining());
11515
11516                 if (tiVerificationNeeded)
11517                 {
11518                     Verify(false, "Invalid opcode: CEE_JMP");
11519                 }
11520
11521                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
11522                 {
11523                     /* CEE_JMP does not make sense in some "protected" regions. */
11524
11525                     BADCODE("Jmp not allowed in protected region");
11526                 }
11527
11528                 if (verCurrentState.esStackDepth != 0)
11529                 {
11530                     BADCODE("Stack must be empty after CEE_JMPs");
11531                 }
11532
11533                 _impResolveToken(CORINFO_TOKENKIND_Method);
11534
11535                 JITDUMP(" %08X", resolvedToken.token);
11536
11537                 /* The signature of the target has to be identical to ours.
11538                    At least check that argCnt and returnType match */
11539
11540                 eeGetMethodSig(resolvedToken.hMethod, &sig);
11541                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
11542                     sig.retType != info.compMethodInfo->args.retType ||
11543                     sig.callConv != info.compMethodInfo->args.callConv)
11544                 {
11545                     BADCODE("Incompatible target for CEE_JMPs");
11546                 }
11547
11548                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
11549
11550                 /* Mark the basic block as being a JUMP instead of RETURN */
11551
11552                 block->bbFlags |= BBF_HAS_JMP;
11553
11554                 /* Set this flag to make sure register arguments have a location assigned
11555                  * even if we don't use them inside the method */
11556
11557                 compJmpOpUsed = true;
11558
11559                 fgNoStructPromotion = true;
11560
11561                 goto APPEND;
11562
11563             case CEE_LDELEMA:
11564                 assertImp(sz == sizeof(unsigned));
11565
11566                 _impResolveToken(CORINFO_TOKENKIND_Class);
11567
11568                 JITDUMP(" %08X", resolvedToken.token);
11569
11570                 ldelemClsHnd = resolvedToken.hClass;
11571
11572                 if (tiVerificationNeeded)
11573                 {
11574                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11575                     typeInfo tiIndex = impStackTop().seTypeInfo;
11576
11577                     // As per ECMA, the specified 'index' can be either int32 or native int.
11578                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11579
11580                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
11581                     Verify(tiArray.IsNullObjRef() ||
11582                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
11583                            "bad array");
11584
11585                     tiRetVal = arrayElemType;
11586                     tiRetVal.MakeByRef();
11587                     if (prefixFlags & PREFIX_READONLY)
11588                     {
11589                         tiRetVal.SetIsReadonlyByRef();
11590                     }
11591
11592                     // an array interior pointer is always in the heap
11593                     tiRetVal.SetIsPermanentHomeByRef();
11594                 }
11595
11596                 // If it's a value class array we just do a simple address-of
11597                 if (eeIsValueClass(ldelemClsHnd))
11598                 {
11599                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
11600                     if (cit == CORINFO_TYPE_UNDEF)
11601                     {
11602                         lclTyp = TYP_STRUCT;
11603                     }
11604                     else
11605                     {
11606                         lclTyp = JITtype2varType(cit);
11607                     }
11608                     goto ARR_LD_POST_VERIFY;
11609                 }
11610
11611                 // Similarly, if it's a readonly access, we can do a simple address-of
11612                 // without doing a runtime type-check
11613                 if (prefixFlags & PREFIX_READONLY)
11614                 {
11615                     lclTyp = TYP_REF;
11616                     goto ARR_LD_POST_VERIFY;
11617                 }
11618
11619                 // Otherwise we need the full helper function with run-time type check
11620                 op1 = impTokenToHandle(&resolvedToken);
11621                 if (op1 == nullptr)
11622                 { // compDonotInline()
11623                     return;
11624                 }
11625
11626                 args = gtNewArgList(op1);                      // Type
11627                 args = gtNewListNode(impPopStack().val, args); // index
11628                 args = gtNewListNode(impPopStack().val, args); // array
11629                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
11630
11631                 impPushOnStack(op1, tiRetVal);
11632                 break;
11633
11634             // ldelem for reference and value types
11635             case CEE_LDELEM:
11636                 assertImp(sz == sizeof(unsigned));
11637
11638                 _impResolveToken(CORINFO_TOKENKIND_Class);
11639
11640                 JITDUMP(" %08X", resolvedToken.token);
11641
11642                 ldelemClsHnd = resolvedToken.hClass;
11643
11644                 if (tiVerificationNeeded)
11645                 {
11646                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11647                     typeInfo tiIndex = impStackTop().seTypeInfo;
11648
11649                     // As per ECMA, the specified 'index' can be either int32 or native int.
11650                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11651                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
11652
11653                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
11654                            "type of array incompatible with type operand");
11655                     tiRetVal.NormaliseForStack();
11656                 }
11657
11658                 // If it's a reference type or generic variable type
11659                 // then just generate code as though it's a ldelem.ref instruction
11660                 if (!eeIsValueClass(ldelemClsHnd))
11661                 {
11662                     lclTyp = TYP_REF;
11663                     opcode = CEE_LDELEM_REF;
11664                 }
11665                 else
11666                 {
11667                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
11668                     lclTyp             = JITtype2varType(jitTyp);
11669                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
11670                     tiRetVal.NormaliseForStack();
11671                 }
11672                 goto ARR_LD_POST_VERIFY;
11673
11674             case CEE_LDELEM_I1:
11675                 lclTyp = TYP_BYTE;
11676                 goto ARR_LD;
11677             case CEE_LDELEM_I2:
11678                 lclTyp = TYP_SHORT;
11679                 goto ARR_LD;
11680             case CEE_LDELEM_I:
11681                 lclTyp = TYP_I_IMPL;
11682                 goto ARR_LD;
11683
11684             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
11685             // and treating it as TYP_INT avoids other asserts.
11686             case CEE_LDELEM_U4:
11687                 lclTyp = TYP_INT;
11688                 goto ARR_LD;
11689
11690             case CEE_LDELEM_I4:
11691                 lclTyp = TYP_INT;
11692                 goto ARR_LD;
11693             case CEE_LDELEM_I8:
11694                 lclTyp = TYP_LONG;
11695                 goto ARR_LD;
11696             case CEE_LDELEM_REF:
11697                 lclTyp = TYP_REF;
11698                 goto ARR_LD;
11699             case CEE_LDELEM_R4:
11700                 lclTyp = TYP_FLOAT;
11701                 goto ARR_LD;
11702             case CEE_LDELEM_R8:
11703                 lclTyp = TYP_DOUBLE;
11704                 goto ARR_LD;
11705             case CEE_LDELEM_U1:
11706                 lclTyp = TYP_UBYTE;
11707                 goto ARR_LD;
11708             case CEE_LDELEM_U2:
11709                 lclTyp = TYP_USHORT;
11710                 goto ARR_LD;
11711
11712             ARR_LD:
11713
11714                 if (tiVerificationNeeded)
11715                 {
11716                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11717                     typeInfo tiIndex = impStackTop().seTypeInfo;
11718
11719                     // As per ECMA, the specified 'index' can be either int32 or native int.
11720                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11721                     if (tiArray.IsNullObjRef())
11722                     {
11723                         if (lclTyp == TYP_REF)
11724                         { // we will say a deref of a null array yields a null ref
11725                             tiRetVal = typeInfo(TI_NULL);
11726                         }
11727                         else
11728                         {
11729                             tiRetVal = typeInfo(lclTyp);
11730                         }
11731                     }
11732                     else
11733                     {
11734                         tiRetVal             = verGetArrayElemType(tiArray);
11735                         typeInfo arrayElemTi = typeInfo(lclTyp);
11736 #ifdef _TARGET_64BIT_
11737                         if (opcode == CEE_LDELEM_I)
11738                         {
11739                             arrayElemTi = typeInfo::nativeInt();
11740                         }
11741
11742                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
11743                         {
11744                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
11745                         }
11746                         else
11747 #endif // _TARGET_64BIT_
11748                         {
11749                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
11750                         }
11751                     }
11752                     tiRetVal.NormaliseForStack();
11753                 }
11754             ARR_LD_POST_VERIFY:
11755
11756                 /* Pull the index value and array address */
11757                 op2 = impPopStack().val;
11758                 op1 = impPopStack().val;
11759                 assertImp(op1->gtType == TYP_REF);
11760
11761                 /* Check for null pointer - in the inliner case we simply abort */
11762
11763                 if (compIsForInlining())
11764                 {
11765                     if (op1->gtOper == GT_CNS_INT)
11766                     {
11767                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
11768                         return;
11769                     }
11770                 }
11771
11772                 op1 = impCheckForNullPointer(op1);
11773
11774                 /* Mark the block as containing an index expression */
11775
11776                 if (op1->gtOper == GT_LCL_VAR)
11777                 {
11778                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
11779                     {
11780                         block->bbFlags |= BBF_HAS_IDX_LEN;
11781                         optMethodFlags |= OMF_HAS_ARRAYREF;
11782                     }
11783                 }
11784
11785                 /* Create the index node and push it on the stack */
11786
11787                 op1 = gtNewIndexRef(lclTyp, op1, op2);
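
                // For example, 'ldelem.i4' produces GT_INDEX(TYP_INT, array, index) here; the
                // explicit bounds check and address computation are introduced later, when the
                // GT_INDEX node is expanded during morph.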
11788
11789                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
11790
11791                 if ((opcode == CEE_LDELEMA) || ldstruct ||
11792                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
11793                 {
11794                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
11795
11796                     // remember the element size
11797                     if (lclTyp == TYP_REF)
11798                     {
11799                         op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
11800                     }
11801                     else
11802                     {
11803                         // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
11804                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
11805                         {
11806                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
11807                         }
11808                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
11809                         if (lclTyp == TYP_STRUCT)
11810                         {
11811                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
11812                             op1->gtIndex.gtIndElemSize = size;
11813                             op1->gtType                = lclTyp;
11814                         }
11815                     }
11816
11817                     if ((opcode == CEE_LDELEMA) || ldstruct)
11818                     {
11819                         // wrap it in a &
11820                         lclTyp = TYP_BYREF;
11821
11822                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
11823                     }
11824                     else
11825                     {
11826                         assert(lclTyp != TYP_STRUCT);
11827                     }
11828                 }
11829
11830                 if (ldstruct)
11831                 {
11832                     // Create an OBJ for the result
11833                     op1 = gtNewObjNode(ldelemClsHnd, op1);
11834                     op1->gtFlags |= GTF_EXCEPT;
11835                 }
11836                 impPushOnStack(op1, tiRetVal);
11837                 break;
11838
11839             // stelem for reference and value types
11840             case CEE_STELEM:
11841
11842                 assertImp(sz == sizeof(unsigned));
11843
11844                 _impResolveToken(CORINFO_TOKENKIND_Class);
11845
11846                 JITDUMP(" %08X", resolvedToken.token);
11847
11848                 stelemClsHnd = resolvedToken.hClass;
11849
11850                 if (tiVerificationNeeded)
11851                 {
11852                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11853                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11854                     typeInfo tiValue = impStackTop().seTypeInfo;
11855
11856                     // As per ECMA, the specified 'index' can be either int32 or native int.
11857                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11858                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11859
11860                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11861                            "type operand incompatible with array element type");
11862                     arrayElem.NormaliseForStack();
11863                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11864                 }
11865
11866                 // If it's a reference type just behave as though it's a stelem.ref instruction
11867                 if (!eeIsValueClass(stelemClsHnd))
11868                 {
11869                     goto STELEM_REF_POST_VERIFY;
11870                 }
11871
11872                 // Otherwise extract the type
11873                 {
11874                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11875                     lclTyp             = JITtype2varType(jitTyp);
11876                     goto ARR_ST_POST_VERIFY;
11877                 }
11878
11879             case CEE_STELEM_REF:
11880
11881                 if (tiVerificationNeeded)
11882                 {
11883                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11884                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11885                     typeInfo tiValue = impStackTop().seTypeInfo;
11886
11887                     // As per ECMA, the specified 'index' can be either int32 or native int.
11888                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11889                     Verify(tiValue.IsObjRef(), "bad value");
11890
11891                     // We only check that it is an object reference; the helper does additional checks.
11892                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11893                 }
11894
11895             STELEM_REF_POST_VERIFY:
11896
11897                 arrayNodeTo      = impStackTop(2).val;
11898                 arrayNodeToIndex = impStackTop(1).val;
11899                 arrayNodeFrom    = impStackTop().val;
11900
11901                 //
11902                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11903                 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
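                // For example, a String[] may be on the stack where an Object[] is expected;
                // storing an arbitrary object through the Object[] view must then fail at run
                // time (ArrayTypeMismatchException), and only the helper performs that check.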
11904                 //
11905
11906                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j].
11907                 // This does not need CORINFO_HELP_ARRADDR_ST
11908                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11909                     arrayNodeTo->gtOper == GT_LCL_VAR &&
11910                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11911                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11912                 {
11913                     JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
11914                     lclTyp = TYP_REF;
11915                     goto ARR_ST_POST_VERIFY;
11916                 }
11917
11918                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11919                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11920                 {
11921                     JITDUMP("\nstelem of null: skipping covariant store check\n");
11922                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11923                     lclTyp = TYP_REF;
11924                     goto ARR_ST_POST_VERIFY;
11925                 }
11926
11927                 /* Call a helper function to do the assignment */
11928                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11929
11930                 goto SPILL_APPEND;
11931
11932             case CEE_STELEM_I1:
11933                 lclTyp = TYP_BYTE;
11934                 goto ARR_ST;
11935             case CEE_STELEM_I2:
11936                 lclTyp = TYP_SHORT;
11937                 goto ARR_ST;
11938             case CEE_STELEM_I:
11939                 lclTyp = TYP_I_IMPL;
11940                 goto ARR_ST;
11941             case CEE_STELEM_I4:
11942                 lclTyp = TYP_INT;
11943                 goto ARR_ST;
11944             case CEE_STELEM_I8:
11945                 lclTyp = TYP_LONG;
11946                 goto ARR_ST;
11947             case CEE_STELEM_R4:
11948                 lclTyp = TYP_FLOAT;
11949                 goto ARR_ST;
11950             case CEE_STELEM_R8:
11951                 lclTyp = TYP_DOUBLE;
11952                 goto ARR_ST;
11953
11954             ARR_ST:
11955
11956                 if (tiVerificationNeeded)
11957                 {
11958                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11959                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11960                     typeInfo tiValue = impStackTop().seTypeInfo;
11961
11962                     // As per ECMA, the 'index' specified can be either an int32 or a native int.
11963                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11964                     typeInfo arrayElem = typeInfo(lclTyp);
11965 #ifdef _TARGET_64BIT_
11966                     if (opcode == CEE_STELEM_I)
11967                     {
11968                         arrayElem = typeInfo::nativeInt();
11969                     }
11970 #endif // _TARGET_64BIT_
11971                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11972                            "bad array");
11973
11974                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11975                            "bad value");
11976                 }
11977
11978             ARR_ST_POST_VERIFY:
11979                 /* The strict order of evaluation is LHS-operands, RHS-operands,
11980                    range-check, and then assignment. However, codegen currently
11981                    does the range-check before evaluating the RHS-operands. So to
11982                    maintain strict ordering, we spill the stack. */
11983
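                // Illustrative example (not from the original source): for IL equivalent to
                // "a[i] = 1 / x", a DivideByZeroException from the value operand must be observed
                // before the range check on "a[i]"; spilling side-effecting stack entries here
                // preserves that ordering.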
11984                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11985                 {
11986                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11987                                                    "Strict ordering of exceptions for Array store"));
11988                 }
11989
11990                 /* Pull the new value from the stack */
11991                 op2 = impPopStack().val;
11992
11993                 /* Pull the index value */
11994                 op1 = impPopStack().val;
11995
11996                 /* Pull the array address */
11997                 op3 = impPopStack().val;
11998
11999                 assertImp(op3->gtType == TYP_REF);
12000                 if (op2->IsVarAddr())
12001                 {
12002                     op2->gtType = TYP_I_IMPL;
12003                 }
12004
12005                 op3 = impCheckForNullPointer(op3);
12006
12007                 // Mark the block as containing an index expression
12008
12009                 if (op3->gtOper == GT_LCL_VAR)
12010                 {
12011                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
12012                     {
12013                         block->bbFlags |= BBF_HAS_IDX_LEN;
12014                         optMethodFlags |= OMF_HAS_ARRAYREF;
12015                     }
12016                 }
12017
12018                 /* Create the index node */
12019
12020                 op1 = gtNewIndexRef(lclTyp, op3, op1);
12021
12022                 /* Create the assignment node and append it */
12023
12024                 if (lclTyp == TYP_STRUCT)
12025                 {
12026                     assert(stelemClsHnd != DUMMY_INIT(NULL));
12027
12028                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
12029                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
12030                 }
12031                 if (varTypeIsStruct(op1))
12032                 {
12033                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
12034                 }
12035                 else
12036                 {
12037                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
12038                     op1 = gtNewAssignNode(op1, op2);
12039                 }
12040
12041                 /* Mark the expression as containing an assignment */
12042
12043                 op1->gtFlags |= GTF_ASG;
12044
12045                 goto SPILL_APPEND;
12046
12047             case CEE_ADD:
12048                 oper = GT_ADD;
12049                 goto MATH_OP2;
12050
12051             case CEE_ADD_OVF:
12052                 uns = false;
12053                 goto ADD_OVF;
12054             case CEE_ADD_OVF_UN:
12055                 uns = true;
12056                 goto ADD_OVF;
12057
12058             ADD_OVF:
12059                 ovfl     = true;
12060                 callNode = false;
12061                 oper     = GT_ADD;
12062                 goto MATH_OP2_FLAGS;
12063
12064             case CEE_SUB:
12065                 oper = GT_SUB;
12066                 goto MATH_OP2;
12067
12068             case CEE_SUB_OVF:
12069                 uns = false;
12070                 goto SUB_OVF;
12071             case CEE_SUB_OVF_UN:
12072                 uns = true;
12073                 goto SUB_OVF;
12074
12075             SUB_OVF:
12076                 ovfl     = true;
12077                 callNode = false;
12078                 oper     = GT_SUB;
12079                 goto MATH_OP2_FLAGS;
12080
12081             case CEE_MUL:
12082                 oper = GT_MUL;
12083                 goto MATH_MAYBE_CALL_NO_OVF;
12084
12085             case CEE_MUL_OVF:
12086                 uns = false;
12087                 goto MUL_OVF;
12088             case CEE_MUL_OVF_UN:
12089                 uns = true;
12090                 goto MUL_OVF;
12091
12092             MUL_OVF:
12093                 ovfl = true;
12094                 oper = GT_MUL;
12095                 goto MATH_MAYBE_CALL_OVF;
12096
12097             // Other binary math operations
12098
12099             case CEE_DIV:
12100                 oper = GT_DIV;
12101                 goto MATH_MAYBE_CALL_NO_OVF;
12102
12103             case CEE_DIV_UN:
12104                 oper = GT_UDIV;
12105                 goto MATH_MAYBE_CALL_NO_OVF;
12106
12107             case CEE_REM:
12108                 oper = GT_MOD;
12109                 goto MATH_MAYBE_CALL_NO_OVF;
12110
12111             case CEE_REM_UN:
12112                 oper = GT_UMOD;
12113                 goto MATH_MAYBE_CALL_NO_OVF;
12114
12115             MATH_MAYBE_CALL_NO_OVF:
12116                 ovfl = false;
12117             MATH_MAYBE_CALL_OVF:
12118                 // The morpher has some complex logic about when to turn differently
12119                 // typed nodes on different platforms into helper calls. We
12120                 // need to either duplicate that logic here, or just
12121                 // pessimistically make all the nodes large enough to become
12122                 // call nodes.  Since call nodes aren't that much larger and
12123                 // these opcodes are infrequent enough I chose the latter.
12124                 callNode = true;
12125                 goto MATH_OP2_FLAGS;
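                // Illustrative note (not from the original source): an example of such a
                // transformation is 64-bit division on 32-bit targets, which the morpher turns
                // into a helper call; allocating these nodes call-sized up front avoids having
                // to resize them later.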
12126
12127             case CEE_AND:
12128                 oper = GT_AND;
12129                 goto MATH_OP2;
12130             case CEE_OR:
12131                 oper = GT_OR;
12132                 goto MATH_OP2;
12133             case CEE_XOR:
12134                 oper = GT_XOR;
12135                 goto MATH_OP2;
12136
12137             MATH_OP2: // For default values of 'ovfl' and 'callNode'
12138
12139                 ovfl     = false;
12140                 callNode = false;
12141
12142             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
12143
12144                 /* Pull two values and push back the result */
12145
12146                 if (tiVerificationNeeded)
12147                 {
12148                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
12149                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
12150
12151                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
12152                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
12153                     {
12154                         Verify(tiOp1.IsNumberType(), "not number");
12155                     }
12156                     else
12157                     {
12158                         Verify(tiOp1.IsIntegerType(), "not integer");
12159                     }
12160
12161                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
12162
12163                     tiRetVal = tiOp1;
12164
12165 #ifdef _TARGET_64BIT_
12166                     if (tiOp2.IsNativeIntType())
12167                     {
12168                         tiRetVal = tiOp2;
12169                     }
12170 #endif // _TARGET_64BIT_
12171                 }
12172
12173                 op2 = impPopStack().val;
12174                 op1 = impPopStack().val;
12175
12176 #if !CPU_HAS_FP_SUPPORT
12177                 if (varTypeIsFloating(op1->gtType))
12178                 {
12179                     callNode = true;
12180                 }
12181 #endif
12182                 /* Can't do arithmetic with references */
12183                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
12184
12185                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
12186                 // if it is on the stack)
12187                 impBashVarAddrsToI(op1, op2);
12188
12189                 type = impGetByRefResultType(oper, uns, &op1, &op2);
12190
12191                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
12192
12193                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
12194
12195                 if (op2->gtOper == GT_CNS_INT)
12196                 {
12197                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
12198                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
12199
12200                     {
12201                         impPushOnStack(op1, tiRetVal);
12202                         break;
12203                     }
12204                 }
12205
12206                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
12207                 //
12208                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
12209                 {
12210                     if (op1->TypeGet() != type)
12211                     {
12212                         // We insert a cast of op1 to 'type'
12213                         op1 = gtNewCastNode(type, op1, false, type);
12214                     }
12215                     if (op2->TypeGet() != type)
12216                     {
12217                         // We insert a cast of op2 to 'type'
12218                         op2 = gtNewCastNode(type, op2, false, type);
12219                     }
12220                 }
12221
12222 #if SMALL_TREE_NODES
12223                 if (callNode)
12224                 {
12225                     /* These operators can later be transformed into 'GT_CALL' */
12226
12227                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
12228 #ifndef _TARGET_ARM_
12229                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
12230                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
12231                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
12232                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
12233 #endif
12234                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
12235                     // that we'll need to transform into a general large node, but rather specifically
12236                     // to a call: by doing it this way, things keep working if there are multiple sizes,
12237                     // and a CALL is no longer the largest.
12238                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
12239                     // than an "if".
12240                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
12241                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
12242                 }
12243                 else
12244 #endif // SMALL_TREE_NODES
12245                 {
12246                     op1 = gtNewOperNode(oper, type, op1, op2);
12247                 }
12248
12249                 /* Special case: integer/long division may throw an exception */
12250
12251                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
12252                 {
12253                     op1->gtFlags |= GTF_EXCEPT;
12254                 }
12255
12256                 if (ovfl)
12257                 {
12258                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
12259                     if (ovflType != TYP_UNKNOWN)
12260                     {
12261                         op1->gtType = ovflType;
12262                     }
12263                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
12264                     if (uns)
12265                     {
12266                         op1->gtFlags |= GTF_UNSIGNED;
12267                     }
12268                 }
12269
12270                 impPushOnStack(op1, tiRetVal);
12271                 break;
12272
12273             case CEE_SHL:
12274                 oper = GT_LSH;
12275                 goto CEE_SH_OP2;
12276
12277             case CEE_SHR:
12278                 oper = GT_RSH;
12279                 goto CEE_SH_OP2;
12280             case CEE_SHR_UN:
12281                 oper = GT_RSZ;
12282                 goto CEE_SH_OP2;
12283
12284             CEE_SH_OP2:
12285                 if (tiVerificationNeeded)
12286                 {
12287                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
12288                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
12289                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
12290                     tiRetVal = tiVal;
12291                 }
12292                 op2 = impPopStack().val;
12293                 op1 = impPopStack().val; // operand to be shifted
12294                 impBashVarAddrsToI(op1, op2);
12295
12296                 type = genActualType(op1->TypeGet());
12297                 op1  = gtNewOperNode(oper, type, op1, op2);
12298
12299                 impPushOnStack(op1, tiRetVal);
12300                 break;
12301
12302             case CEE_NOT:
12303                 if (tiVerificationNeeded)
12304                 {
12305                     tiRetVal = impStackTop().seTypeInfo;
12306                     Verify(tiRetVal.IsIntegerType(), "bad int value");
12307                 }
12308
12309                 op1 = impPopStack().val;
12310                 impBashVarAddrsToI(op1, nullptr);
12311                 type = genActualType(op1->TypeGet());
12312                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
12313                 break;
12314
12315             case CEE_CKFINITE:
12316                 if (tiVerificationNeeded)
12317                 {
12318                     tiRetVal = impStackTop().seTypeInfo;
12319                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
12320                 }
12321                 op1  = impPopStack().val;
12322                 type = op1->TypeGet();
12323                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
12324                 op1->gtFlags |= GTF_EXCEPT;
12325
12326                 impPushOnStack(op1, tiRetVal);
12327                 break;
12328
12329             case CEE_LEAVE:
12330
12331                 val     = getI4LittleEndian(codeAddr); // jump distance
12332                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
12333                 goto LEAVE;
12334
12335             case CEE_LEAVE_S:
12336                 val     = getI1LittleEndian(codeAddr); // jump distance
12337                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
12338
12339             LEAVE:
12340
12341                 if (compIsForInlining())
12342                 {
12343                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
12344                     return;
12345                 }
12346
12347                 JITDUMP(" %04X", jmpAddr);
12348                 if (block->bbJumpKind != BBJ_LEAVE)
12349                 {
12350                     impResetLeaveBlock(block, jmpAddr);
12351                 }
12352
12353                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
12354                 impImportLeave(block);
12355                 impNoteBranchOffs();
12356
12357                 break;
12358
12359             case CEE_BR:
12360             case CEE_BR_S:
12361                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
12362
12363                 if (compIsForInlining() && jmpDist == 0)
12364                 {
12365                     break; /* NOP */
12366                 }
12367
12368                 impNoteBranchOffs();
12369                 break;
12370
12371             case CEE_BRTRUE:
12372             case CEE_BRTRUE_S:
12373             case CEE_BRFALSE:
12374             case CEE_BRFALSE_S:
12375
12376                 /* Pop the comparand (now there's a neat term) from the stack */
12377                 if (tiVerificationNeeded)
12378                 {
12379                     typeInfo& tiVal = impStackTop().seTypeInfo;
12380                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
12381                            "bad value");
12382                 }
12383
12384                 op1  = impPopStack().val;
12385                 type = op1->TypeGet();
12386
12387                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
12388                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12389                 {
12390                     block->bbJumpKind = BBJ_NONE;
12391
12392                     if (op1->gtFlags & GTF_GLOB_EFFECT)
12393                     {
12394                         op1 = gtUnusedValNode(op1);
12395                         goto SPILL_APPEND;
12396                     }
12397                     else
12398                     {
12399                         break;
12400                     }
12401                 }
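                // Illustrative note (not from the original source): the block above handles
                // branches to the lexically next block, e.g. "brtrue.s L; L: ..."; the jump is
                // dropped (BBJ_NONE) and the popped condition survives only as an unused value
                // when it has global side effects.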
12402
12403                 if (op1->OperIsCompare())
12404                 {
12405                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
12406                     {
12407                         // Flip the sense of the compare
12408
12409                         op1 = gtReverseCond(op1);
12410                     }
12411                 }
12412                 else
12413                 {
12414                     /* We'll compare against an equally-sized integer 0 */
12415                     /* For small types, we always compare against int   */
12416                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
12417
12418                     /* Create the comparison operator and try to fold it */
12419
12420                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
12421                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
12422                 }
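                // Illustrative example (not from the original source): "ldloc.0; brtrue L" where
                // the operand is not already a relational node becomes JTRUE(NE(lcl0, 0)) once
                // control reaches COND_JUMP below, while brfalse would produce JTRUE(EQ(lcl0, 0)).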
12423
12424             // fall through
12425
12426             COND_JUMP:
12427
12428                 /* Fold comparison if we can */
12429
12430                 op1 = gtFoldExpr(op1);
12431
12432                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
12433                 /* Don't make any blocks unreachable in import only mode */
12434
12435                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
12436                 {
12437                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
12438                        unreachable under compDbgCode */
12439                     assert(!opts.compDbgCode);
12440
12441                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
12442                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
12443                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
12444                                                                          // block for the second time
12445
12446                     block->bbJumpKind = foldedJumpKind;
12447 #ifdef DEBUG
12448                     if (verbose)
12449                     {
12450                         if (op1->gtIntCon.gtIconVal)
12451                         {
12452                             printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n",
12453                                    block->bbJumpDest->bbNum);
12454                         }
12455                         else
12456                         {
12457                             printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum);
12458                         }
12459                     }
12460 #endif
12461                     break;
12462                 }
12463
12464                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
12465
12466                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
12467                    in impImportBlock(block). For correct line numbers, spill stack. */
12468
12469                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
12470                 {
12471                     impSpillStackEnsure(true);
12472                 }
12473
12474                 goto SPILL_APPEND;
12475
12476             case CEE_CEQ:
12477                 oper = GT_EQ;
12478                 uns  = false;
12479                 goto CMP_2_OPs;
12480             case CEE_CGT_UN:
12481                 oper = GT_GT;
12482                 uns  = true;
12483                 goto CMP_2_OPs;
12484             case CEE_CGT:
12485                 oper = GT_GT;
12486                 uns  = false;
12487                 goto CMP_2_OPs;
12488             case CEE_CLT_UN:
12489                 oper = GT_LT;
12490                 uns  = true;
12491                 goto CMP_2_OPs;
12492             case CEE_CLT:
12493                 oper = GT_LT;
12494                 uns  = false;
12495                 goto CMP_2_OPs;
12496
12497             CMP_2_OPs:
12498                 if (tiVerificationNeeded)
12499                 {
12500                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12501                     tiRetVal = typeInfo(TI_INT);
12502                 }
12503
12504                 op2 = impPopStack().val;
12505                 op1 = impPopStack().val;
12506
12507 #ifdef _TARGET_64BIT_
12508                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
12509                 {
12510                     op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12511                 }
12512                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
12513                 {
12514                     op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12515                 }
12516 #endif // _TARGET_64BIT_
12517
12518                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12519                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12520                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12521
12522                 /* Create the comparison node */
12523
12524                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12525
12526                 /* TODO: setting both flags when only one is appropriate */
12527                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
12528                 {
12529                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
12530                 }
12531
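                // Illustrative note (not from the original source): GTF_RELOP_NAN_UN marks the
                // unordered comparison; per ECMA, "clt.un" on floating point values yields 1 when
                // either operand is NaN, whereas "clt" yields 0 in that case.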
12532                 // Fold result, if possible.
12533                 op1 = gtFoldExpr(op1);
12534
12535                 impPushOnStack(op1, tiRetVal);
12536                 break;
12537
12538             case CEE_BEQ_S:
12539             case CEE_BEQ:
12540                 oper = GT_EQ;
12541                 goto CMP_2_OPs_AND_BR;
12542
12543             case CEE_BGE_S:
12544             case CEE_BGE:
12545                 oper = GT_GE;
12546                 goto CMP_2_OPs_AND_BR;
12547
12548             case CEE_BGE_UN_S:
12549             case CEE_BGE_UN:
12550                 oper = GT_GE;
12551                 goto CMP_2_OPs_AND_BR_UN;
12552
12553             case CEE_BGT_S:
12554             case CEE_BGT:
12555                 oper = GT_GT;
12556                 goto CMP_2_OPs_AND_BR;
12557
12558             case CEE_BGT_UN_S:
12559             case CEE_BGT_UN:
12560                 oper = GT_GT;
12561                 goto CMP_2_OPs_AND_BR_UN;
12562
12563             case CEE_BLE_S:
12564             case CEE_BLE:
12565                 oper = GT_LE;
12566                 goto CMP_2_OPs_AND_BR;
12567
12568             case CEE_BLE_UN_S:
12569             case CEE_BLE_UN:
12570                 oper = GT_LE;
12571                 goto CMP_2_OPs_AND_BR_UN;
12572
12573             case CEE_BLT_S:
12574             case CEE_BLT:
12575                 oper = GT_LT;
12576                 goto CMP_2_OPs_AND_BR;
12577
12578             case CEE_BLT_UN_S:
12579             case CEE_BLT_UN:
12580                 oper = GT_LT;
12581                 goto CMP_2_OPs_AND_BR_UN;
12582
12583             case CEE_BNE_UN_S:
12584             case CEE_BNE_UN:
12585                 oper = GT_NE;
12586                 goto CMP_2_OPs_AND_BR_UN;
12587
12588             CMP_2_OPs_AND_BR_UN:
12589                 uns       = true;
12590                 unordered = true;
12591                 goto CMP_2_OPs_AND_BR_ALL;
12592             CMP_2_OPs_AND_BR:
12593                 uns       = false;
12594                 unordered = false;
12595                 goto CMP_2_OPs_AND_BR_ALL;
12596             CMP_2_OPs_AND_BR_ALL:
12597
12598                 if (tiVerificationNeeded)
12599                 {
12600                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12601                 }
12602
12603                 /* Pull two values */
12604                 op2 = impPopStack().val;
12605                 op1 = impPopStack().val;
12606
12607 #ifdef _TARGET_64BIT_
12608                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
12609                 {
12610                     op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12611                 }
12612                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
12613                 {
12614                     op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12615                 }
12616 #endif // _TARGET_64BIT_
12617
12618                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12619                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12620                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12621
12622                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12623                 {
12624                     block->bbJumpKind = BBJ_NONE;
12625
12626                     if (op1->gtFlags & GTF_GLOB_EFFECT)
12627                     {
12628                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12629                                                        "Branch to next Optimization, op1 side effect"));
12630                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12631                     }
12632                     if (op2->gtFlags & GTF_GLOB_EFFECT)
12633                     {
12634                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12635                                                        "Branch to next Optimization, op2 side effect"));
12636                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12637                     }
12638
12639 #ifdef DEBUG
12640                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
12641                     {
12642                         impNoteLastILoffs();
12643                     }
12644 #endif
12645                     break;
12646                 }
12647
12648                 // We can generate a compare of differently sized floating point op1 and op2;
12649                 // we insert a cast to make the types match.
12650                 //
12651                 if (varTypeIsFloating(op1->TypeGet()))
12652                 {
12653                     if (op1->TypeGet() != op2->TypeGet())
12654                     {
12655                         assert(varTypeIsFloating(op2->TypeGet()));
12656
12657                         // Say op1 = double, op2 = float. To avoid loss of precision
12658                         // while comparing, op2 is converted to double and a double
12659                         // comparison is done.
12660                         if (op1->TypeGet() == TYP_DOUBLE)
12661                         {
12662                             // We insert a cast of op2 to TYP_DOUBLE
12663                             op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
12664                         }
12665                         else if (op2->TypeGet() == TYP_DOUBLE)
12666                         {
12667                             // We insert a cast of op1 to TYP_DOUBLE
12668                             op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
12669                         }
12670                     }
12671                 }
12672
12673                 /* Create and append the operator */
12674
12675                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12676
12677                 if (uns)
12678                 {
12679                     op1->gtFlags |= GTF_UNSIGNED;
12680                 }
12681
12682                 if (unordered)
12683                 {
12684                     op1->gtFlags |= GTF_RELOP_NAN_UN;
12685                 }
12686
12687                 goto COND_JUMP;
12688
12689             case CEE_SWITCH:
12690                 assert(!compIsForInlining());
12691
12692                 if (tiVerificationNeeded)
12693                 {
12694                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
12695                 }
12696                 /* Pop the switch value off the stack */
12697                 op1 = impPopStack().val;
12698                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
12699
12700                 /* We can create a switch node */
12701
12702                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
12703
12704                 val = (int)getU4LittleEndian(codeAddr);
12705                 codeAddr += 4 + val * 4; // skip over the switch-table
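                // Illustrative note (not from the original source): the switch operand is a
                // 4-byte case count N followed by N 4-byte relative jump offsets, hence the
                // "4 + val * 4" skip; the jump targets themselves were resolved when the basic
                // blocks were created.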
12706
12707                 goto SPILL_APPEND;
12708
12709             /************************** Casting OPCODES ***************************/
12710
12711             case CEE_CONV_OVF_I1:
12712                 lclTyp = TYP_BYTE;
12713                 goto CONV_OVF;
12714             case CEE_CONV_OVF_I2:
12715                 lclTyp = TYP_SHORT;
12716                 goto CONV_OVF;
12717             case CEE_CONV_OVF_I:
12718                 lclTyp = TYP_I_IMPL;
12719                 goto CONV_OVF;
12720             case CEE_CONV_OVF_I4:
12721                 lclTyp = TYP_INT;
12722                 goto CONV_OVF;
12723             case CEE_CONV_OVF_I8:
12724                 lclTyp = TYP_LONG;
12725                 goto CONV_OVF;
12726
12727             case CEE_CONV_OVF_U1:
12728                 lclTyp = TYP_UBYTE;
12729                 goto CONV_OVF;
12730             case CEE_CONV_OVF_U2:
12731                 lclTyp = TYP_USHORT;
12732                 goto CONV_OVF;
12733             case CEE_CONV_OVF_U:
12734                 lclTyp = TYP_U_IMPL;
12735                 goto CONV_OVF;
12736             case CEE_CONV_OVF_U4:
12737                 lclTyp = TYP_UINT;
12738                 goto CONV_OVF;
12739             case CEE_CONV_OVF_U8:
12740                 lclTyp = TYP_ULONG;
12741                 goto CONV_OVF;
12742
12743             case CEE_CONV_OVF_I1_UN:
12744                 lclTyp = TYP_BYTE;
12745                 goto CONV_OVF_UN;
12746             case CEE_CONV_OVF_I2_UN:
12747                 lclTyp = TYP_SHORT;
12748                 goto CONV_OVF_UN;
12749             case CEE_CONV_OVF_I_UN:
12750                 lclTyp = TYP_I_IMPL;
12751                 goto CONV_OVF_UN;
12752             case CEE_CONV_OVF_I4_UN:
12753                 lclTyp = TYP_INT;
12754                 goto CONV_OVF_UN;
12755             case CEE_CONV_OVF_I8_UN:
12756                 lclTyp = TYP_LONG;
12757                 goto CONV_OVF_UN;
12758
12759             case CEE_CONV_OVF_U1_UN:
12760                 lclTyp = TYP_UBYTE;
12761                 goto CONV_OVF_UN;
12762             case CEE_CONV_OVF_U2_UN:
12763                 lclTyp = TYP_USHORT;
12764                 goto CONV_OVF_UN;
12765             case CEE_CONV_OVF_U_UN:
12766                 lclTyp = TYP_U_IMPL;
12767                 goto CONV_OVF_UN;
12768             case CEE_CONV_OVF_U4_UN:
12769                 lclTyp = TYP_UINT;
12770                 goto CONV_OVF_UN;
12771             case CEE_CONV_OVF_U8_UN:
12772                 lclTyp = TYP_ULONG;
12773                 goto CONV_OVF_UN;
12774
12775             CONV_OVF_UN:
12776                 uns = true;
12777                 goto CONV_OVF_COMMON;
12778             CONV_OVF:
12779                 uns = false;
12780                 goto CONV_OVF_COMMON;
12781
12782             CONV_OVF_COMMON:
12783                 ovfl = true;
12784                 goto _CONV;
12785
12786             case CEE_CONV_I1:
12787                 lclTyp = TYP_BYTE;
12788                 goto CONV;
12789             case CEE_CONV_I2:
12790                 lclTyp = TYP_SHORT;
12791                 goto CONV;
12792             case CEE_CONV_I:
12793                 lclTyp = TYP_I_IMPL;
12794                 goto CONV;
12795             case CEE_CONV_I4:
12796                 lclTyp = TYP_INT;
12797                 goto CONV;
12798             case CEE_CONV_I8:
12799                 lclTyp = TYP_LONG;
12800                 goto CONV;
12801
12802             case CEE_CONV_U1:
12803                 lclTyp = TYP_UBYTE;
12804                 goto CONV;
12805             case CEE_CONV_U2:
12806                 lclTyp = TYP_USHORT;
12807                 goto CONV;
12808 #if (REGSIZE_BYTES == 8)
12809             case CEE_CONV_U:
12810                 lclTyp = TYP_U_IMPL;
12811                 goto CONV_UN;
12812 #else
12813             case CEE_CONV_U:
12814                 lclTyp = TYP_U_IMPL;
12815                 goto CONV;
12816 #endif
12817             case CEE_CONV_U4:
12818                 lclTyp = TYP_UINT;
12819                 goto CONV;
12820             case CEE_CONV_U8:
12821                 lclTyp = TYP_ULONG;
12822                 goto CONV_UN;
12823
12824             case CEE_CONV_R4:
12825                 lclTyp = TYP_FLOAT;
12826                 goto CONV;
12827             case CEE_CONV_R8:
12828                 lclTyp = TYP_DOUBLE;
12829                 goto CONV;
12830
12831             case CEE_CONV_R_UN:
12832                 lclTyp = TYP_DOUBLE;
12833                 goto CONV_UN;
12834
12835             CONV_UN:
12836                 uns  = true;
12837                 ovfl = false;
12838                 goto _CONV;
12839
12840             CONV:
12841                 uns  = false;
12842                 ovfl = false;
12843                 goto _CONV;
12844
12845             _CONV:
12846                 // just check that we have a number on the stack
12847                 if (tiVerificationNeeded)
12848                 {
12849                     const typeInfo& tiVal = impStackTop().seTypeInfo;
12850                     Verify(tiVal.IsNumberType(), "bad arg");
12851
12852 #ifdef _TARGET_64BIT_
12853                     bool isNative = false;
12854
12855                     switch (opcode)
12856                     {
12857                         case CEE_CONV_OVF_I:
12858                         case CEE_CONV_OVF_I_UN:
12859                         case CEE_CONV_I:
12860                         case CEE_CONV_OVF_U:
12861                         case CEE_CONV_OVF_U_UN:
12862                         case CEE_CONV_U:
12863                             isNative = true;
12864                         default:
12865                             // leave 'isNative' = false;
12866                             break;
12867                     }
12868                     if (isNative)
12869                     {
12870                         tiRetVal = typeInfo::nativeInt();
12871                     }
12872                     else
12873 #endif // _TARGET_64BIT_
12874                     {
12875                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12876                     }
12877                 }
12878
12879                 // Only conversions from FLOAT or DOUBLE to an integer type,
12880                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls.
12881
12882                 if (varTypeIsFloating(lclTyp))
12883                 {
12884                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12885 #ifdef _TARGET_64BIT_
12886                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12887                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
12888                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12889                                // and generate SSE2 code instead of going through helper calls.
12890                                || (impStackTop().val->TypeGet() == TYP_BYREF)
12891 #endif
12892                         ;
12893                 }
12894                 else
12895                 {
12896                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12897                 }
12898
12899                 // At this point uns, ovfl, and callNode are all set.
12900
12901                 op1 = impPopStack().val;
12902                 impBashVarAddrsToI(op1);
12903
12904                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12905                 {
12906                     op2 = op1->gtOp.gtOp2;
12907
12908                     if (op2->gtOper == GT_CNS_INT)
12909                     {
12910                         ssize_t ival = op2->gtIntCon.gtIconVal;
12911                         ssize_t mask, umask;
12912
12913                         switch (lclTyp)
12914                         {
12915                             case TYP_BYTE:
12916                             case TYP_UBYTE:
12917                                 mask  = 0x00FF;
12918                                 umask = 0x007F;
12919                                 break;
12920                             case TYP_USHORT:
12921                             case TYP_SHORT:
12922                                 mask  = 0xFFFF;
12923                                 umask = 0x7FFF;
12924                                 break;
12925
12926                             default:
12927                                 assert(!"unexpected type");
12928                                 return;
12929                         }
12930
12931                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12932                         {
12933                             /* Toss the cast, it's a waste of time */
12934
12935                             impPushOnStack(op1, tiRetVal);
12936                             break;
12937                         }
12938                         else if (ival == mask)
12939                         {
12940                             /* Toss the masking, it's a waste of time, since
12941                                we sign-extend from the small value anyway */
12942
12943                             op1 = op1->gtOp.gtOp1;
12944                         }
12945                     }
12946                 }
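                // Worked example (illustrative, not from the original source): for "conv.i1" on
                // "x & 0x7F" the masked value already fits in a signed byte, so the cast is
                // dropped entirely; for "conv.u1" on "x & 0xFF" the same holds because uns is
                // true; for "conv.i1" on "x & 0xFF" only the redundant AND is removed, since the
                // cast itself still performs the required sign extension.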
12947
12948                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
12949                     since the result of a cast to one of the 'small' integer
12950                     types is an integer.
12951                  */
12952
12953                 type = genActualType(lclTyp);
12954
12955                 // If this is a no-op cast, just use op1.
12956                 if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp)))
12957                 {
12958                     // Nothing needs to change
12959                 }
12960                 // Work is evidently required, add cast node
12961                 else
12962                 {
12963 #if SMALL_TREE_NODES
12964                     if (callNode)
12965                     {
12966                         op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
12967                     }
12968                     else
12969 #endif // SMALL_TREE_NODES
12970                     {
12971                         op1 = gtNewCastNode(type, op1, uns, lclTyp);
12972                     }
12973
12974                     if (ovfl)
12975                     {
12976                         op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12977                     }
12978                 }
12979
12980                 impPushOnStack(op1, tiRetVal);
12981                 break;
12982
12983             case CEE_NEG:
12984                 if (tiVerificationNeeded)
12985                 {
12986                     tiRetVal = impStackTop().seTypeInfo;
12987                     Verify(tiRetVal.IsNumberType(), "Bad arg");
12988                 }
12989
12990                 op1 = impPopStack().val;
12991                 impBashVarAddrsToI(op1, nullptr);
12992                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12993                 break;
12994
12995             case CEE_POP:
12996             {
12997                 /* Pull the top value from the stack */
12998
12999                 StackEntry se = impPopStack();
13000                 clsHnd        = se.seTypeInfo.GetClassHandle();
13001                 op1           = se.val;
13002
13003                 /* Get hold of the type of the value being duplicated */
13004
13005                 lclTyp = genActualType(op1->gtType);
13006
13007                 /* Does the value have any side effects? */
13008
13009                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
13010                 {
13011                     // Since we are throwing away the value, just normalize
13012                     // it to its address.  This is more efficient.
13013
13014                     if (varTypeIsStruct(op1))
13015                     {
13016                         JITDUMP("\n ... CEE_POP struct ...\n");
13017                         DISPTREE(op1);
13018 #ifdef UNIX_AMD64_ABI
13019                         // Non-calls, such as obj or ret_expr, have to go through this.
13020                         // Calls with large struct return value have to go through this.
13021                         // Helper calls with small struct return value also have to go
13022                         // through this since they do not follow Unix calling convention.
13023                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
13024                             op1->AsCall()->gtCallType == CT_HELPER)
13025 #endif // UNIX_AMD64_ABI
13026                         {
13027                             // If the value being produced comes from loading
13028                             // via an underlying address, just null check the address.
13029                             if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ))
13030                             {
13031                                 op1->ChangeOper(GT_NULLCHECK);
13032                                 op1->gtType = TYP_BYTE;
13033                             }
13034                             else
13035                             {
13036                                 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
13037                             }
13038
13039                             JITDUMP("\n ... optimized to ...\n");
13040                             DISPTREE(op1);
13041                         }
13042                     }
13043
13044                     // If op1 is a non-overflow cast, throw it away since it is useless.
13045                     // Another reason for throwing away the useless cast is in the context of
13046                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
13047                     // The cast gets added as part of importing GT_CALL, which gets in the way
13048                     // of fgMorphCall() on the forms of tail call nodes that we assert.
13049                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
13050                     {
13051                         op1 = op1->gtOp.gtOp1;
13052                     }
13053
13054                     // If 'op1' is an expression, create an assignment node.
13055                     // This helps analyses (like CSE) work correctly.
13056
13057                     if (op1->gtOper != GT_CALL)
13058                     {
13059                         op1 = gtUnusedValNode(op1);
13060                     }
13061
13062                     /* Append the value to the tree list */
13063                     goto SPILL_APPEND;
13064                 }
13065
13066                 /* No side effects - just throw the <BEEP> thing away */
13067             }
13068             break;
13069
13070             case CEE_DUP:
13071             {
13072                 if (tiVerificationNeeded)
13073                 {
13074                     // Dup could be the start of a delegate creation sequence; remember that.
13075                     delegateCreateStart = codeAddr - 1;
13076                     impStackTop(0);
13077                 }
13078
13079                 // If the expression to dup is simple, just clone it.
13080                 // Otherwise spill it to a temp, and reload the temp
13081                 // twice.
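
                // Illustrative example (not from the original source): outside of debuggable-code
                // mode, anything other than a zero constant or a local is first stored to a fresh
                // temp,
                //
                //     tmp = <expr>;          // single evaluation
                //     push tmp; push tmp;    // the temp is cheap to clone
                //
                // so the expression's side effects happen exactly once.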
13082                 StackEntry se   = impPopStack();
13083                 GenTree*   tree = se.val;
13084                 tiRetVal        = se.seTypeInfo;
13085                 op1             = tree;
13086
13087                 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
13088                 {
13089                     const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
13090                     impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
13091                     var_types type = genActualType(lvaTable[tmpNum].TypeGet());
13092                     op1            = gtNewLclvNode(tmpNum, type);
13093
13094                     // Propagate type info to the temp from the stack and the original tree
13095                     if (type == TYP_REF)
13096                     {
13097                         assert(lvaTable[tmpNum].lvSingleDef == 0);
13098                         lvaTable[tmpNum].lvSingleDef = 1;
13099                         JITDUMP("Marked V%02u as a single def local\n", tmpNum);
13100                         lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
13101                     }
13102                 }
13103
13104                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
13105                                    nullptr DEBUGARG("DUP instruction"));
13106
13107                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
13108                 impPushOnStack(op1, tiRetVal);
13109                 impPushOnStack(op2, tiRetVal);
13110             }
13111             break;
13112
13113             case CEE_STIND_I1:
13114                 lclTyp = TYP_BYTE;
13115                 goto STIND;
13116             case CEE_STIND_I2:
13117                 lclTyp = TYP_SHORT;
13118                 goto STIND;
13119             case CEE_STIND_I4:
13120                 lclTyp = TYP_INT;
13121                 goto STIND;
13122             case CEE_STIND_I8:
13123                 lclTyp = TYP_LONG;
13124                 goto STIND;
13125             case CEE_STIND_I:
13126                 lclTyp = TYP_I_IMPL;
13127                 goto STIND;
13128             case CEE_STIND_REF:
13129                 lclTyp = TYP_REF;
13130                 goto STIND;
13131             case CEE_STIND_R4:
13132                 lclTyp = TYP_FLOAT;
13133                 goto STIND;
13134             case CEE_STIND_R8:
13135                 lclTyp = TYP_DOUBLE;
13136                 goto STIND;
13137             STIND:
13138
13139                 if (tiVerificationNeeded)
13140                 {
13141                     typeInfo instrType(lclTyp);
13142 #ifdef _TARGET_64BIT_
13143                     if (opcode == CEE_STIND_I)
13144                     {
13145                         instrType = typeInfo::nativeInt();
13146                     }
13147 #endif // _TARGET_64BIT_
13148                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
13149                 }
13150                 else
13151                 {
13152                     compUnsafeCastUsed = true; // Have to go conservative
13153                 }
13154
13155             STIND_POST_VERIFY:
13156
13157                 op2 = impPopStack().val; // value to store
13158                 op1 = impPopStack().val; // address to store to
13159
13160                 // You can indirect off a TYP_I_IMPL (if we are in C) or a BYREF.
13161                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
13162
13163                 impBashVarAddrsToI(op1, op2);
13164
13165                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
13166
13167 #ifdef _TARGET_64BIT_
13168                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13169                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13170                 {
13171                     op2->gtType = TYP_I_IMPL;
13172                 }
13173                 else
13174                 {
13175                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13176                     //
13177                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13178                     {
13179                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13180                         op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
13181                     }
13182                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13183                     //
13184                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13185                     {
13186                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13187                         op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
13188                     }
13189                 }
13190 #endif // _TARGET_64BIT_
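                // Illustrative example (not from the original source): IL such as
                // "ldloc addr; ldc.i4 1; stind.i" stores a 32-bit constant through a native-int
                // pointer; on 64-bit targets the value is widened to TYP_I_IMPL above rather than
                // rejecting the (technically mismatched) IL.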
13191
13192                 if (opcode == CEE_STIND_REF)
13193                 {
13194                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
13195                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
13196                     lclTyp = genActualType(op2->TypeGet());
13197                 }
13198
13199 // Check target type.
13200 #ifdef DEBUG
13201                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
13202                 {
13203                     if (op2->gtType == TYP_BYREF)
13204                     {
13205                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
13206                     }
13207                     else if (lclTyp == TYP_BYREF)
13208                     {
13209                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
13210                     }
13211                 }
13212                 else
13213                 {
13214                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
13215                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
13216                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
13217                 }
13218 #endif
13219
13220                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
13221
13222                 // stind could point anywhere, for example a boxed class static int
13223                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13224
13225                 if (prefixFlags & PREFIX_VOLATILE)
13226                 {
13227                     assert(op1->OperGet() == GT_IND);
13228                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13229                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13230                     op1->gtFlags |= GTF_IND_VOLATILE;
13231                 }
13232
13233                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13234                 {
13235                     assert(op1->OperGet() == GT_IND);
13236                     op1->gtFlags |= GTF_IND_UNALIGNED;
13237                 }
13238
13239                 op1 = gtNewAssignNode(op1, op2);
13240                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
13241
13242                 // Spill side-effects AND global-data-accesses
13243                 if (verCurrentState.esStackDepth > 0)
13244                 {
13245                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
13246                 }
13247
13248                 goto APPEND;
13249
13250             case CEE_LDIND_I1:
13251                 lclTyp = TYP_BYTE;
13252                 goto LDIND;
13253             case CEE_LDIND_I2:
13254                 lclTyp = TYP_SHORT;
13255                 goto LDIND;
13256             case CEE_LDIND_U4:
13257             case CEE_LDIND_I4:
13258                 lclTyp = TYP_INT;
13259                 goto LDIND;
13260             case CEE_LDIND_I8:
13261                 lclTyp = TYP_LONG;
13262                 goto LDIND;
13263             case CEE_LDIND_REF:
13264                 lclTyp = TYP_REF;
13265                 goto LDIND;
13266             case CEE_LDIND_I:
13267                 lclTyp = TYP_I_IMPL;
13268                 goto LDIND;
13269             case CEE_LDIND_R4:
13270                 lclTyp = TYP_FLOAT;
13271                 goto LDIND;
13272             case CEE_LDIND_R8:
13273                 lclTyp = TYP_DOUBLE;
13274                 goto LDIND;
13275             case CEE_LDIND_U1:
13276                 lclTyp = TYP_UBYTE;
13277                 goto LDIND;
13278             case CEE_LDIND_U2:
13279                 lclTyp = TYP_USHORT;
13280                 goto LDIND;
13281             LDIND:
13282
13283                 if (tiVerificationNeeded)
13284                 {
13285                     typeInfo lclTiType(lclTyp);
13286 #ifdef _TARGET_64BIT_
13287                     if (opcode == CEE_LDIND_I)
13288                     {
13289                         lclTiType = typeInfo::nativeInt();
13290                     }
13291 #endif // _TARGET_64BIT_
13292                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
13293                     tiRetVal.NormaliseForStack();
13294                 }
13295                 else
13296                 {
13297                     compUnsafeCastUsed = true; // Have to go conservative
13298                 }
13299
13300             LDIND_POST_VERIFY:
13301
13302                 op1 = impPopStack().val; // address to load from
13303                 impBashVarAddrsToI(op1);
13304
13305 #ifdef _TARGET_64BIT_
13306                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13307                 //
13308                 if (genActualType(op1->gtType) == TYP_INT)
13309                 {
13310                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13311                     op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
13312                 }
13313 #endif
13314
13315                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
13316
13317                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
13318
13319                 // ldind could point anywhere, for example a boxed class static int
13320                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
13321
13322                 if (prefixFlags & PREFIX_VOLATILE)
13323                 {
13324                     assert(op1->OperGet() == GT_IND);
13325                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13326                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13327                     op1->gtFlags |= GTF_IND_VOLATILE;
13328                 }
13329
13330                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13331                 {
13332                     assert(op1->OperGet() == GT_IND);
13333                     op1->gtFlags |= GTF_IND_UNALIGNED;
13334                 }
13335
13336                 impPushOnStack(op1, tiRetVal);
13337
13338                 break;
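                // Example (illustrative): the IL sequence
                //     ldloca.s 0
                //     ldind.u2
                // pops the local's address and pushes a GT_IND(TYP_USHORT, addr) node here;
                // on the IL evaluation stack the unsigned 16-bit value is treated as a
                // 32-bit int, while lclTyp keeps the small type for the memory access size.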
13339
13340             case CEE_UNALIGNED:
13341
13342                 assert(sz == 1);
13343                 val = getU1LittleEndian(codeAddr);
13344                 ++codeAddr;
13345                 JITDUMP(" %u", val);
13346                 if ((val != 1) && (val != 2) && (val != 4))
13347                 {
13348                     BADCODE("Alignment for unaligned. must be 1, 2, or 4");
13349                 }
13350
13351                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
13352                 prefixFlags |= PREFIX_UNALIGNED;
13353
13354                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
13355
13356             PREFIX:
13357                 opcode     = (OPCODE)getU1LittleEndian(codeAddr);
13358                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
13359                 codeAddr += sizeof(__int8);
13360                 goto DECODE_OPCODE;
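            // Example (illustrative): "unaligned. 1" followed by "ldind.i4" tells the JIT
            // that the 4-byte load may only be 1-byte aligned; the prefix sets
            // PREFIX_UNALIGNED here, and the LDIND/STIND/field cases add
            // GTF_IND_UNALIGNED to the indirection they build.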
13361
13362             case CEE_VOLATILE:
13363
13364                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
13365                 prefixFlags |= PREFIX_VOLATILE;
13366
13367                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
13368
13369                 assert(sz == 0);
13370                 goto PREFIX;
13371
13372             case CEE_LDFTN:
13373             {
13374                 // Need to do a lookup here so that we perform an access check
13375                 // and do a NOWAY if protections are violated
13376                 _impResolveToken(CORINFO_TOKENKIND_Method);
13377
13378                 JITDUMP(" %08X", resolvedToken.token);
13379
13380                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13381                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
13382                               &callInfo);
13383
13384                 // This check really only applies to intrinsic Array.Address methods
13385                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
13386                 {
13387                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
13388                 }
13389
13390                 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
13391                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13392
13393                 if (tiVerificationNeeded)
13394                 {
13395                     // LDFTN could start the beginning of a delegate creation sequence; remember that
13396                     delegateCreateStart = codeAddr - 2;
13397
13398                     // check any constraints on the callee's class and type parameters
13399                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
13400                                    "method has unsatisfied class constraints");
13401                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
13402                                                                                 resolvedToken.hMethod),
13403                                    "method has unsatisfied method constraints");
13404
13405                     mflags = callInfo.verMethodFlags;
13406                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
13407                 }
13408
13409             DO_LDFTN:
13410                 op1 = impMethodPointer(&resolvedToken, &callInfo);
13411
13412                 if (compDonotInline())
13413                 {
13414                     return;
13415                 }
13416
13417                 // Call info may have more precise information about the function than
13418                 // the resolved token.
13419                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13420                 assert(callInfo.hMethod != nullptr);
13421                 heapToken->hMethod = callInfo.hMethod;
13422                 impPushOnStack(op1, typeInfo(heapToken));
13423
13424                 break;
13425             }
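            // Example (illustrative): a typical delegate-creation sequence beginning with
            // ldftn looks like
            //     ldarg.0
            //     ldftn  instance void C::M()
            //     newobj instance void D::.ctor(object, native int)
            // (C and D are placeholder names); this is why delegateCreateStart is recorded
            // above when verification is needed.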
13426
13427             case CEE_LDVIRTFTN:
13428             {
13429                 /* Get the method token */
13430
13431                 _impResolveToken(CORINFO_TOKENKIND_Method);
13432
13433                 JITDUMP(" %08X", resolvedToken.token);
13434
13435                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
13436                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
13437                                                     CORINFO_CALLINFO_CALLVIRT)),
13438                               &callInfo);
13439
13440                 // This check really only applies to intrinsic Array.Address methods
13441                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
13442                 {
13443                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
13444                 }
13445
13446                 mflags = callInfo.methodFlags;
13447
13448                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13449
13450                 if (compIsForInlining())
13451                 {
13452                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13453                     {
13454                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
13455                         return;
13456                     }
13457                 }
13458
13459                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
13460
13461                 if (tiVerificationNeeded)
13462                 {
13463
13464                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
13465                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
13466
13467                     // JIT32 verifier rejects verifiable ldvirtftn pattern
13468                     typeInfo declType =
13469                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
13470
13471                     typeInfo arg = impStackTop().seTypeInfo;
13472                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
13473                            "bad ldvirtftn");
13474
13475                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
13476                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
13477                     {
13478                         instanceClassHnd = arg.GetClassHandleForObjRef();
13479                     }
13480
13481                     // check any constraints on the method's class and type parameters
13482                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
13483                                    "method has unsatisfied class constraints");
13484                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
13485                                                                                 resolvedToken.hMethod),
13486                                    "method has unsatisfied method constraints");
13487
13488                     if (mflags & CORINFO_FLG_PROTECTED)
13489                     {
13490                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
13491                                "Accessing protected method through wrong type.");
13492                     }
13493                 }
13494
13495                 /* Get the object-ref */
13496                 op1 = impPopStack().val;
13497                 assertImp(op1->gtType == TYP_REF);
13498
13499                 if (opts.IsReadyToRun())
13500                 {
13501                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
13502                     {
13503                         if (op1->gtFlags & GTF_SIDE_EFFECT)
13504                         {
13505                             op1 = gtUnusedValNode(op1);
13506                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13507                         }
13508                         goto DO_LDFTN;
13509                     }
13510                 }
13511                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13512                 {
13513                     if (op1->gtFlags & GTF_SIDE_EFFECT)
13514                     {
13515                         op1 = gtUnusedValNode(op1);
13516                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13517                     }
13518                     goto DO_LDFTN;
13519                 }
13520
13521                 GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
13522                 if (compDonotInline())
13523                 {
13524                     return;
13525                 }
13526
13527                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13528
13529                 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
13530                 assert(callInfo.hMethod != nullptr);
13531
13532                 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
13533                 heapToken->hMethod   = callInfo.hMethod;
13534                 impPushOnStack(fptr, typeInfo(heapToken));
13535
13536                 break;
13537             }
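            // Note (illustrative summary): when the callee is final, static, or not virtual
            // (or, for ReadyToRun, whenever no ldvirtftn helper is required), the object is
            // only evaluated for side effects (via gtUnusedValNode) and the case reduces to
            // DO_LDFTN, since the function pointer does not depend on the runtime type;
            // otherwise impImportLdvirtftn builds the virtual lookup tree.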
13538
13539             case CEE_CONSTRAINED:
13540
13541                 assertImp(sz == sizeof(unsigned));
13542                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
13543                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
13544                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
13545
13546                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
13547                 prefixFlags |= PREFIX_CONSTRAINED;
13548
13549                 {
13550                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13551                     if (actualOpcode != CEE_CALLVIRT)
13552                     {
13553                         BADCODE("constrained. has to be followed by callvirt");
13554                     }
13555                 }
13556
13557                 goto PREFIX;
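            // Example (illustrative): in shared generic code
            //     constrained. !!T
            //     callvirt instance string [mscorlib]System.Object::ToString()
            // lets the same IL call ToString() whether T is a reference type or a value
            // type; the prefix only records constrainedResolvedToken here, and the
            // following CALLVIRT consumes it.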
13558
13559             case CEE_READONLY:
13560                 JITDUMP(" readonly.");
13561
13562                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
13563                 prefixFlags |= PREFIX_READONLY;
13564
13565                 {
13566                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13567                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
13568                     {
13569                         BADCODE("readonly. has to be followed by ldelema or call");
13570                     }
13571                 }
13572
13573                 assert(sz == 0);
13574                 goto PREFIX;
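            // Example (illustrative):
            //     readonly. ldelema !!T
            // yields an element address that will only be read through, which allows the
            // JIT to omit the array type check that a normal ldelema on a reference-type
            // element would require.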
13575
13576             case CEE_TAILCALL:
13577                 JITDUMP(" tail.");
13578
13579                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
13580                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13581
13582                 {
13583                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13584                     if (!impOpcodeIsCallOpcode(actualOpcode))
13585                     {
13586                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
13587                     }
13588                 }
13589                 assert(sz == 0);
13590                 goto PREFIX;
13591
13592             case CEE_NEWOBJ:
13593
13594                 /* Since we will implicitly insert newObjThisPtr at the start of the
13595                    argument list, spill any GTF_ORDER_SIDEEFF */
13596                 impSpillSpecialSideEff();
13597
13598                 /* NEWOBJ does not respond to TAIL */
13599                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
13600
13601                 /* NEWOBJ does not respond to CONSTRAINED */
13602                 prefixFlags &= ~PREFIX_CONSTRAINED;
13603
13604                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
13605
13606                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13607                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
13608                               &callInfo);
13609
13610                 if (compIsForInlining())
13611                 {
13612                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13613                     {
13614                         // Check to see if this call violates the boundary.
13615                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
13616                         return;
13617                     }
13618                 }
13619
13620                 mflags = callInfo.methodFlags;
13621
13622                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
13623                 {
13624                     BADCODE("newobj on static or abstract method");
13625                 }
13626
13627                 // Insert the security callout before any actual code is generated
13628                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13629
13630                 // There are three different cases for newobj:
13631                 //      1) Object is an array (arrays are treated specially by the EE)
13632                 //      2) Object is some other variable-sized object (e.g. String), whose
13633                 //         size depends on the constructor arguments
13634                 //      3) Class size can be determined beforehand (the normal case)
13635                 // In the first case, we need to call a NEWOBJ helper (multinewarray),
13636                 // in the second case we call the constructor with a null 'this' pointer,
13637                 // and in the third case we allocate the memory and then call the constructor.
13638
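                // Examples (illustrative) of the three cases:
                //     newobj instance void int32[0...,0...]::.ctor(int32, int32)  // 1) array
                //     newobj instance void System.String::.ctor(char[])           // 2) other variable-size object
                //     newobj instance void C::.ctor()                             // 3) fixed-size class or struct (C is a placeholder)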
13639                 clsFlags = callInfo.classFlags;
13640                 if (clsFlags & CORINFO_FLG_ARRAY)
13641                 {
13642                     if (tiVerificationNeeded)
13643                     {
13644                         CORINFO_CLASS_HANDLE elemTypeHnd;
13645                         INDEBUG(CorInfoType corType =)
13646                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13647                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
13648                         Verify(elemTypeHnd == nullptr ||
13649                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13650                                "newarr of byref-like objects");
13651                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
13652                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
13653                                       &callInfo DEBUGARG(info.compFullName));
13654                     }
13655                     // Arrays need to call the NEWOBJ helper.
13656                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
13657
13658                     impImportNewObjArray(&resolvedToken, &callInfo);
13659                     if (compDonotInline())
13660                     {
13661                         return;
13662                     }
13663
13664                     callTyp = TYP_REF;
13665                     break;
13666                 }
13667                 // At present this can only be String
13668                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
13669                 {
13670                     if (IsTargetAbi(CORINFO_CORERT_ABI))
13671                     {
13672                         // The dummy argument does not exist in CoreRT
13673                         newObjThisPtr = nullptr;
13674                     }
13675                     else
13676                     {
13677                         // This is the case for variable-sized objects that are not
13678                         // arrays.  In this case, call the constructor with a null 'this'
13679                         // pointer
13680                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
13681                     }
13682
13683                     /* Remember that this basic block contains 'new' of an object */
13684                     block->bbFlags |= BBF_HAS_NEWOBJ;
13685                     optMethodFlags |= OMF_HAS_NEWOBJ;
13686                 }
13687                 else
13688                 {
13689                     // This is the normal case where the size of the object is
13690                     // fixed.  Allocate the memory and call the constructor.
13691
13692                     // Note: We cannot add a peephole to avoid use of the temp here
13693                     // because we don't have enough interference info to detect when
13694                     // sources and destination interfere, for example: s = new S(ref);
13695
13696                     // TODO: Find the correct place to introduce a general
13697                     // reverse copy prop for struct return values from newobj or
13698                     // any function returning structs.
13699
13700                     /* get a temporary for the new object */
13701                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
13702                     if (compDonotInline())
13703                     {
13704                         // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
13705                         assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
13706                         return;
13707                     }
13708
13709                     // In the value class case we only need clsHnd for size calcs.
13710                     //
13711                     // The lookup of the code pointer will be handled by CALL in this case
13712                     if (clsFlags & CORINFO_FLG_VALUECLASS)
13713                     {
13714                         if (compIsForInlining())
13715                         {
13716                             // If the value class has GC fields, inform the inliner. It may choose to
13717                             // bail out on the inline.
13718                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13719                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
13720                             {
13721                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
13722                                 if (compInlineResult->IsFailure())
13723                                 {
13724                                     return;
13725                                 }
13726
13727                                 // Do further notification in the case where the call site is rare;
13728                                 // some policies do not track the relative hotness of call sites for
13729                                 // "always" inline cases.
13730                                 if (impInlineInfo->iciBlock->isRunRarely())
13731                                 {
13732                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
13733                                     if (compInlineResult->IsFailure())
13734                                     {
13735                                         return;
13736                                     }
13737                                 }
13738                             }
13739                         }
13740
13741                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
13742                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
13743
13744                         if (impIsPrimitive(jitTyp))
13745                         {
13746                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
13747                         }
13748                         else
13749                         {
13750                             // The local variable itself is the allocated space.
13751                             // Here we need the unsafe value class check, since the address of the struct is
13752                             // taken for further use and is potentially exploitable.
13753                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
13754                         }
13755                         if (compIsForInlining() || fgStructTempNeedsExplicitZeroInit(lvaTable + lclNum, block))
13756                         {
13757                             // Append a tree to zero-out the temp
13758                             newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
13759
13760                             newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
13761                                                            gtNewIconNode(0), // Value
13762                                                            size,             // Size
13763                                                            false,            // isVolatile
13764                                                            false);           // not copyBlock
13765                             impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
13766                         }
13767
13768                         // Obtain the address of the temp
13769                         newObjThisPtr =
13770                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
13771                     }
13772                     else
13773                     {
13774 #ifdef FEATURE_READYTORUN_COMPILER
13775                         if (opts.IsReadyToRun())
13776                         {
13777                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
13778                             usingReadyToRunHelper = (op1 != nullptr);
13779                         }
13780
13781                         if (!usingReadyToRunHelper)
13782 #endif
13783                         {
13784                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
13785                             if (op1 == nullptr)
13786                             { // compDonotInline()
13787                                 return;
13788                             }
13789
13790                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13791                             // and the newfast call with a single call to a dynamic R2R cell that will:
13792                             //      1) Load the context
13793                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
13794                             //      stub
13795                             //      3) Allocate and return the new object
13796                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13797
13798                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
13799                                                     resolvedToken.hClass, TYP_REF, op1);
13800                         }
13801
13802                         // Remember that this basic block contains 'new' of an object
13803                         block->bbFlags |= BBF_HAS_NEWOBJ;
13804                         optMethodFlags |= OMF_HAS_NEWOBJ;
13805
13806                         // Append the assignment to the temp/local. We don't need to spill
13807                         // at all, as we are just calling an EE/JIT helper which can only
13808                         // cause an (async) OutOfMemoryException.
13809
13810                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
13811                         // to a temp. Note that the pattern "temp = allocObj" is required
13812                         // by the ObjectAllocator phase to be able to find GT_ALLOCOBJ nodes
13813                         // without an exhaustive walk over all expressions.
13814
13815                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
13816
13817                         assert(lvaTable[lclNum].lvSingleDef == 0);
13818                         lvaTable[lclNum].lvSingleDef = 1;
13819                         JITDUMP("Marked V%02u as a single def local\n", lclNum);
13820                         lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
13821
13822                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
13823                     }
13824                 }
13825                 goto CALL;
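                // Note (illustrative summary): for the fixed-size case above, a reference
                // class is allocated with a GT_ALLOCOBJ assigned to a temp and newObjThisPtr
                // becomes that temp, while for a value class newObjThisPtr is the address of
                // a (possibly zero-initialized) struct temp; either way control falls into
                // the common CALL path, which imports the constructor call with
                // newObjThisPtr as the implicit 'this' argument.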
13826
13827             case CEE_CALLI:
13828
13829                 /* CALLI does not respond to CONSTRAINED */
13830                 prefixFlags &= ~PREFIX_CONSTRAINED;
13831
13832                 if (compIsForInlining())
13833                 {
13834                     // CALLI doesn't have a method handle, so assume the worst.
13835                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13836                     {
13837                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
13838                         return;
13839                     }
13840                 }
13841
13842             // fall through
13843
13844             case CEE_CALLVIRT:
13845             case CEE_CALL:
13846
13847                 // We can't call getCallInfo on the token from a CALLI, but we need it in
13848                 // many other places.  We unfortunately embed that knowledge here.
13849                 if (opcode != CEE_CALLI)
13850                 {
13851                     _impResolveToken(CORINFO_TOKENKIND_Method);
13852
13853                     eeGetCallInfo(&resolvedToken,
13854                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
13855                                   // this is how impImportCall invokes getCallInfo
13856                                   addVerifyFlag(
13857                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
13858                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
13859                                                                        : CORINFO_CALLINFO_NONE)),
13860                                   &callInfo);
13861                 }
13862                 else
13863                 {
13864                     // Suppress uninitialized use warning.
13865                     memset(&resolvedToken, 0, sizeof(resolvedToken));
13866                     memset(&callInfo, 0, sizeof(callInfo));
13867
13868                     resolvedToken.token        = getU4LittleEndian(codeAddr);
13869                     resolvedToken.tokenContext = impTokenLookupContextHandle;
13870                     resolvedToken.tokenScope   = info.compScopeHnd;
13871                 }
13872
13873             CALL: // memberRef should be set.
13874                 // newObjThisPtr should be set for CEE_NEWOBJ
13875
13876                 JITDUMP(" %08X", resolvedToken.token);
13877                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13878
13879                 bool newBBcreatedForTailcallStress;
13880
13881                 newBBcreatedForTailcallStress = false;
13882
13883                 if (compIsForInlining())
13884                 {
13885                     if (compDonotInline())
13886                     {
13887                         return;
13888                     }
13889                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13890                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13891                 }
13892                 else
13893                 {
13894                     if (compTailCallStress())
13895                     {
13896                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13897                         // Tail call stress only recognizes call+ret patterns and forces them to be
13898                         // explicit tail-prefixed calls.  Also, under tail call stress fgMakeBasicBlocks()
13899                         // doesn't import the 'ret' opcode following the call into the basic block containing
13900                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
13901                         // already checks that there is an opcode following the call, and hence it is
13902                         // safe here to read the next opcode without a bounds check.
13903                         newBBcreatedForTailcallStress =
13904                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
13905                                                              // make it jump to RET.
13906                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
13907
13908                         bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT);
13909                         if (newBBcreatedForTailcallStress && !hasTailPrefix && // User hasn't set "tail." prefix yet.
13910                             verCheckTailCallConstraint(opcode, &resolvedToken,
13911                                                        constraintCall ? &constrainedResolvedToken : nullptr,
13912                                                        true) // Is it legal to do tailcall?
13913                             )
13914                         {
13915                             CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod;
13916                             bool                  isVirtual         = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) ||
13917                                              (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE);
13918                             CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd;
13919                             if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd,
13920                                                               hasTailPrefix)) // Is it legal to do tailcall?
13921                             {
13922                                 // Stress the tailcall.
13923                                 JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13924                                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13925                             }
13926                         }
13927                     }
13928                 }
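                // Example (illustrative): under tail call stress a plain
                //     call int32 C::M()
                //     ret
                // pair (C is a placeholder) is upgraded to an explicit tail call by setting
                // PREFIX_TAILCALL_EXPLICIT, provided the VM agrees via canTailCall, even
                // though the IL carried no "tail." prefix.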
13929
13930                 // This is split up to avoid goto flow warnings.
13931                 bool isRecursive;
13932                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13933
13934                 // Note that when running under tail call stress, a call will be marked as explicitly tail prefixed
13935                 // and hence will not be considered for implicit tail calling.
13936                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13937                 {
13938                     if (compIsForInlining())
13939                     {
13940 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13941                         // Are we inlining at an implicit tail call site? If so then we can flag
13942                         // implicit tail call sites in the inline body. These call sites
13943                         // often end up in non BBJ_RETURN blocks, so only flag them when
13944                         // we're able to handle shared returns.
13945                         if (impInlineInfo->iciCall->IsImplicitTailCall())
13946                         {
13947                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13948                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13949                         }
13950 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13951                     }
13952                     else
13953                     {
13954                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13955                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13956                     }
13957                 }
13958
13959                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13960                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13961                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
13962
13963                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13964                 {
13965                     // All calls and delegates need a security callout.
13966                     // For delegates, this is the call to the delegate constructor, not the access check on the
13967                     // LD(virt)FTN.
13968                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13969                 }
13970
13971                 if (tiVerificationNeeded)
13972                 {
13973                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13974                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13975                                   &callInfo DEBUGARG(info.compFullName));
13976                 }
13977
13978                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13979                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13980                 if (compDonotInline())
13981                 {
13982                     // We do not check failures after lvaGrabTemp; they are covered by the CoreCLR_13272 issue.
13983                     assert((callTyp == TYP_UNDEF) ||
13984                            (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
13985                     return;
13986                 }
13987
13988                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
13989                                                                        // have created a new BB after the "call"
13990                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
13991                 {
13992                     assert(!compIsForInlining());
13993                     goto RET;
13994                 }
13995
13996                 break;
13997
13998             case CEE_LDFLD:
13999             case CEE_LDSFLD:
14000             case CEE_LDFLDA:
14001             case CEE_LDSFLDA:
14002             {
14003
14004                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
14005                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
14006
14007                 /* Get the CP_Fieldref index */
14008                 assertImp(sz == sizeof(unsigned));
14009
14010                 _impResolveToken(CORINFO_TOKENKIND_Field);
14011
14012                 JITDUMP(" %08X", resolvedToken.token);
14013
14014                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
14015
14016                 GenTree*             obj     = nullptr;
14017                 typeInfo*            tiObj   = nullptr;
14018                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
14019
14020                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
14021                 {
14022                     tiObj         = &impStackTop().seTypeInfo;
14023                     StackEntry se = impPopStack();
14024                     objType       = se.seTypeInfo.GetClassHandle();
14025                     obj           = se.val;
14026
14027                     if (impIsThis(obj))
14028                     {
14029                         aflags |= CORINFO_ACCESS_THIS;
14030
14031                         // An optimization for Contextful classes:
14032                         // we unwrap the proxy when we have a 'this reference'
14033
14034                         if (info.compUnwrapContextful)
14035                         {
14036                             aflags |= CORINFO_ACCESS_UNWRAP;
14037                         }
14038                     }
14039                 }
14040
14041                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
14042
14043                 // Figure out the type of the member.  We always call canAccessField, so you always need this
14044                 // handle
14045                 CorInfoType ciType = fieldInfo.fieldType;
14046                 clsHnd             = fieldInfo.structType;
14047
14048                 lclTyp = JITtype2varType(ciType);
14049
14050 #ifdef _TARGET_AMD64_
14051                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
14052 #endif // _TARGET_AMD64_
14053
14054                 if (compIsForInlining())
14055                 {
14056                     switch (fieldInfo.fieldAccessor)
14057                     {
14058                         case CORINFO_FIELD_INSTANCE_HELPER:
14059                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14060                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
14061                         case CORINFO_FIELD_STATIC_TLS:
14062
14063                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
14064                             return;
14065
14066                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14067                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14068                             /* We may be able to inline the field accessors in specific instantiations of generic
14069                              * methods */
14070                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
14071                             return;
14072
14073                         default:
14074                             break;
14075                     }
14076
14077                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
14078                         clsHnd)
14079                     {
14080                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
14081                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
14082                         {
14083                             // Loading a static value-type field will usually cause a JIT helper to be called
14084                             // for the static base, which will bloat the code.
14085                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
14086
14087                             if (compInlineResult->IsFailure())
14088                             {
14089                                 return;
14090                             }
14091                         }
14092                     }
14093                 }
14094
14095                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
14096                 if (isLoadAddress)
14097                 {
14098                     tiRetVal.MakeByRef();
14099                 }
14100                 else
14101                 {
14102                     tiRetVal.NormaliseForStack();
14103                 }
14104
14105                 // Perform this check always to ensure that we get field access exceptions even with
14106                 // SkipVerification.
14107                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
14108
14109                 if (tiVerificationNeeded)
14110                 {
14111                     // You can also pass the unboxed struct to LDFLD
14112                     BOOL bAllowPlainValueTypeAsThis = FALSE;
14113                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
14114                     {
14115                         bAllowPlainValueTypeAsThis = TRUE;
14116                     }
14117
14118                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
14119
14120                     // If we're doing this on a heap object or from a 'safe' byref
14121                     // then the result is a safe byref too
14122                     if (isLoadAddress) // load address
14123                     {
14124                         if (fieldInfo.fieldFlags &
14125                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
14126                         {
14127                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
14128                             {
14129                                 tiRetVal.SetIsPermanentHomeByRef();
14130                             }
14131                         }
14132                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
14133                         {
14134                             // ldflda of a byref is safe if done on a GC object or on a
14135                             // safe byref
14136                             tiRetVal.SetIsPermanentHomeByRef();
14137                         }
14138                     }
14139                 }
14140                 else
14141                 {
14142                     // tiVerificationNeeded is false.
14143                     // Raise InvalidProgramException if static load accesses non-static field
14144                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14145                     {
14146                         BADCODE("static access on an instance field");
14147                     }
14148                 }
14149
14150                 // We are using ldfld/a on a static field. We allow it, but we need to evaluate obj for its side effects.
14151                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14152                 {
14153                     if (obj->gtFlags & GTF_SIDE_EFFECT)
14154                     {
14155                         obj = gtUnusedValNode(obj);
14156                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14157                     }
14158                     obj = nullptr;
14159                 }
14160
14161                 /* Preserve 'small' int types */
14162                 if (!varTypeIsSmall(lclTyp))
14163                 {
14164                     lclTyp = genActualType(lclTyp);
14165                 }
14166
14167                 bool usesHelper = false;
14168
14169                 switch (fieldInfo.fieldAccessor)
14170                 {
14171                     case CORINFO_FIELD_INSTANCE:
14172 #ifdef FEATURE_READYTORUN_COMPILER
14173                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
14174 #endif
14175                     {
14176                         obj = impCheckForNullPointer(obj);
14177
14178                         // If the object is a struct, what we really want is
14179                         // for the field to operate on the address of the struct.
14180                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
14181                         {
14182                             assert(opcode == CEE_LDFLD && objType != nullptr);
14183
14184                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
14185                         }
14186
14187                         /* Create the data member node */
14188                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14189
14190 #ifdef FEATURE_READYTORUN_COMPILER
14191                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14192                         {
14193                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14194                         }
14195 #endif
14196
14197                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14198
14199                         if (fgAddrCouldBeNull(obj))
14200                         {
14201                             op1->gtFlags |= GTF_EXCEPT;
14202                         }
14203
14204                         // If gtFldObj is a BYREF then our target is a value class and
14205                         // it could point anywhere, for example a boxed class static int
14206                         if (obj->gtType == TYP_BYREF)
14207                         {
14208                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
14209                         }
14210
14211                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14212                         if (StructHasOverlappingFields(typeFlags))
14213                         {
14214                             op1->gtField.gtFldMayOverlap = true;
14215                         }
14216
14217                         // wrap it in an address-of operator if necessary
14218                         if (isLoadAddress)
14219                         {
14220                             op1 = gtNewOperNode(GT_ADDR,
14221                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
14222                         }
14223                         else
14224                         {
14225                             if (compIsForInlining() &&
14226                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
14227                                                                                    impInlineInfo->inlArgInfo))
14228                             {
14229                                 impInlineInfo->thisDereferencedFirst = true;
14230                             }
14231                         }
14232                     }
14233                     break;
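                    // Sketch (illustrative): for "ldfld int32 C::m_x" (placeholder names) the
                    // code above produces
                    //
                    //     GT_FIELD(int, hField, offset)
                    //       +-- obj          <- the object/byref popped from the stack
                    //
                    // and for ldflda the GT_FIELD is wrapped in a GT_ADDR node (TYP_BYREF
                    // when obj is a GC pointer, TYP_I_IMPL otherwise).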
14234
14235                     case CORINFO_FIELD_STATIC_TLS:
14236 #ifdef _TARGET_X86_
14237                         // Legacy TLS access is implemented as an intrinsic on x86 only
14238
14239                         /* Create the data member node */
14240                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14241                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14242
14243                         if (isLoadAddress)
14244                         {
14245                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
14246                         }
14247                         break;
14248 #else
14249                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14250
14251                         __fallthrough;
14252 #endif
14253
14254                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
14255                     case CORINFO_FIELD_INSTANCE_HELPER:
14256                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14257                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14258                                                clsHnd, nullptr);
14259                         usesHelper = true;
14260                         break;
14261
14262                     case CORINFO_FIELD_STATIC_ADDRESS:
14263                         // Replace static read-only fields with constant if possible
14264                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
14265                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
14266                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
14267                         {
14268                             CorInfoInitClassResult initClassResult =
14269                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
14270                                                             impTokenLookupContextHandle);
14271
14272                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
14273                             {
14274                                 void** pFldAddr = nullptr;
14275                                 void*  fldAddr =
14276                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
14277
14278                                 // We should always be able to access this static's address directly
14279                                 assert(pFldAddr == nullptr);
14280
14281                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
14282                                 goto FIELD_DONE;
14283                             }
14284                         }
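                        // Note (illustrative): when the class has already been initialized, a
                        // "static readonly" field of primitive type is imported as the
                        // constant value read from fldAddr rather than as a memory load;
                        // otherwise the code falls through to the normal static field access
                        // path below.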
14285
14286                         __fallthrough;
14287
14288                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14289                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14290                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14291                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14292                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14293                                                          lclTyp);
14294                         break;
14295
14296                     case CORINFO_FIELD_INTRINSIC_ZERO:
14297                     {
14298                         assert(aflags & CORINFO_ACCESS_GET);
14299                         op1 = gtNewIconNode(0, lclTyp);
14300                         goto FIELD_DONE;
14301                     }
14302                     break;
14303
14304                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
14305                     {
14306                         assert(aflags & CORINFO_ACCESS_GET);
14307
14308                         LPVOID         pValue;
14309                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
14310                         op1                = gtNewStringLiteralNode(iat, pValue);
14311                         goto FIELD_DONE;
14312                     }
14313                     break;
14314
14315                     case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
14316                     {
14317                         assert(aflags & CORINFO_ACCESS_GET);
14318 #if BIGENDIAN
14319                         op1 = gtNewIconNode(0, lclTyp);
14320 #else
14321                         op1                     = gtNewIconNode(1, lclTyp);
14322 #endif
14323                         goto FIELD_DONE;
14324                     }
14325                     break;
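                    // Note (illustrative): this intrinsic folds reads of a field such as
                    // BitConverter.IsLittleEndian into a compile-time constant (1 on
                    // little-endian targets, 0 on big-endian ones), so no memory load is
                    // generated for it.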
14326
14327                     default:
14328                         assert(!"Unexpected fieldAccessor");
14329                 }
14330
14331                 if (!isLoadAddress)
14332                 {
14333
14334                     if (prefixFlags & PREFIX_VOLATILE)
14335                     {
14336                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
14337                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14338
14339                         if (!usesHelper)
14340                         {
14341                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
14342                                    (op1->OperGet() == GT_OBJ));
14343                             op1->gtFlags |= GTF_IND_VOLATILE;
14344                         }
14345                     }
14346
14347                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14348                     {
14349                         if (!usesHelper)
14350                         {
14351                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
14352                                    (op1->OperGet() == GT_OBJ));
14353                             op1->gtFlags |= GTF_IND_UNALIGNED;
14354                         }
14355                     }
14356                 }
14357
14358                 /* Check if the class needs explicit initialization */
14359
14360                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14361                 {
14362                     GenTree* helperNode = impInitClass(&resolvedToken);
14363                     if (compDonotInline())
14364                     {
14365                         return;
14366                     }
14367                     if (helperNode != nullptr)
14368                     {
14369                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14370                     }
14371                 }
14372
14373             FIELD_DONE:
14374                 impPushOnStack(op1, tiRetVal);
14375             }
14376             break;
14377
14378             case CEE_STFLD:
14379             case CEE_STSFLD:
14380             {
14381
14382                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
14383
14384                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
14385
14386                 /* Get the CP_Fieldref index */
14387
14388                 assertImp(sz == sizeof(unsigned));
14389
14390                 _impResolveToken(CORINFO_TOKENKIND_Field);
14391
14392                 JITDUMP(" %08X", resolvedToken.token);
14393
14394                 int       aflags = CORINFO_ACCESS_SET;
14395                 GenTree*  obj    = nullptr;
14396                 typeInfo* tiObj  = nullptr;
14397                 typeInfo  tiVal;
14398
14399                 /* Pull the value from the stack */
14400                 StackEntry se = impPopStack();
14401                 op2           = se.val;
14402                 tiVal         = se.seTypeInfo;
14403                 clsHnd        = tiVal.GetClassHandle();
14404
14405                 if (opcode == CEE_STFLD)
14406                 {
14407                     tiObj = &impStackTop().seTypeInfo;
14408                     obj   = impPopStack().val;
14409
14410                     if (impIsThis(obj))
14411                     {
14412                         aflags |= CORINFO_ACCESS_THIS;
14413
14414                         // An optimization for Contextful classes:
14415                         // we unwrap the proxy when we have a 'this reference'
14416
14417                         if (info.compUnwrapContextful)
14418                         {
14419                             aflags |= CORINFO_ACCESS_UNWRAP;
14420                         }
14421                     }
14422                 }
14423
14424                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
14425
14426                 // Figure out the type of the member.  We always call canAccessField, so you always need this
14427                 // handle
14428                 CorInfoType ciType = fieldInfo.fieldType;
14429                 fieldClsHnd        = fieldInfo.structType;
14430
14431                 lclTyp = JITtype2varType(ciType);
14432
14433                 if (compIsForInlining())
14434                 {
14435                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC
14436                      * heap, or a per-instantiation static? */
14437
14438                     switch (fieldInfo.fieldAccessor)
14439                     {
14440                         case CORINFO_FIELD_INSTANCE_HELPER:
14441                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14442                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
14443                         case CORINFO_FIELD_STATIC_TLS:
14444
14445                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
14446                             return;
14447
14448                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14449                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14450                             /* We may be able to inline the field accessors in specific instantiations of generic
14451                              * methods */
14452                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
14453                             return;
14454
14455                         default:
14456                             break;
14457                     }
14458                 }
14459
14460                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
14461
14462                 if (tiVerificationNeeded)
14463                 {
14464                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
14465                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
14466                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
14467                 }
14468                 else
14469                 {
14470                     // tiVerificationNeeded is false.
14471                     // Raise InvalidProgramException if static store accesses non-static field
14472                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14473                     {
14474                         BADCODE("static access on an instance field");
14475                     }
14476                 }
14477
14478                 // We are using stfld on a static field.
14479                 // We allow it, but we need to evaluate any side effects of obj
14480                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14481                 {
14482                     if (obj->gtFlags & GTF_SIDE_EFFECT)
14483                     {
14484                         obj = gtUnusedValNode(obj);
14485                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14486                     }
14487                     obj = nullptr;
14488                 }
14489
14490                 /* Preserve 'small' int types */
14491                 if (!varTypeIsSmall(lclTyp))
14492                 {
14493                     lclTyp = genActualType(lclTyp);
14494                 }
14495
14496                 switch (fieldInfo.fieldAccessor)
14497                 {
14498                     case CORINFO_FIELD_INSTANCE:
14499 #ifdef FEATURE_READYTORUN_COMPILER
14500                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
14501 #endif
14502                     {
14503                         obj = impCheckForNullPointer(obj);
14504
14505                         /* Create the data member node */
14506                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14507                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14508                         if (StructHasOverlappingFields(typeFlags))
14509                         {
14510                             op1->gtField.gtFldMayOverlap = true;
14511                         }
14512
14513 #ifdef FEATURE_READYTORUN_COMPILER
14514                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14515                         {
14516                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14517                         }
14518 #endif
14519
14520                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14521
14522                         if (fgAddrCouldBeNull(obj))
14523                         {
14524                             op1->gtFlags |= GTF_EXCEPT;
14525                         }
14526
14527                         // If gtFldObj is a BYREF then our target is a value class and
14528                         // it could point anywhere, for example at a boxed class static int
14529                         if (obj->gtType == TYP_BYREF)
14530                         {
14531                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
14532                         }
14533
14534                         if (compIsForInlining() &&
14535                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
14536                         {
14537                             impInlineInfo->thisDereferencedFirst = true;
14538                         }
14539                     }
14540                     break;
14541
14542                     case CORINFO_FIELD_STATIC_TLS:
14543 #ifdef _TARGET_X86_
14544                         // Legacy TLS access is implemented as an intrinsic on x86 only
14545
14546                         /* Create the data member node */
14547                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14548                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14549
14550                         break;
14551 #else
14552                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14553
14554                         __fallthrough;
14555 #endif
14556
14557                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
14558                     case CORINFO_FIELD_INSTANCE_HELPER:
14559                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14560                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14561                                                clsHnd, op2);
14562                         goto SPILL_APPEND;
14563
14564                     case CORINFO_FIELD_STATIC_ADDRESS:
14565                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14566                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14567                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14568                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14569                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14570                                                          lclTyp);
14571                         break;
14572
14573                     default:
14574                         assert(!"Unexpected fieldAccessor");
14575                 }
14576
14577                 // Create the member assignment, unless we have a struct.
14578                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
14579                 bool deferStructAssign = varTypeIsStruct(lclTyp);
14580
14581                 if (!deferStructAssign)
14582                 {
14583                     if (prefixFlags & PREFIX_VOLATILE)
14584                     {
14585                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14586                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
14587                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14588                         op1->gtFlags |= GTF_IND_VOLATILE;
14589                     }
14590                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14591                     {
14592                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14593                         op1->gtFlags |= GTF_IND_UNALIGNED;
14594                     }
14595
14596                     /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is bypassed (full
14597                        trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during
14598                        importation and reads from the union as if it were a long during code generation. Though this
14599                        can potentially read garbage, one may get lucky and have it work correctly.
14600
14601                        This code pattern is generated by the Dev10 MC++ compiler when storing to fields in code compiled
14602                        with the /O2 switch (the default when compiling retail configs in Dev10), and a customer app has
14603                        taken a dependency on it. To be backward compatible, we explicitly add an upward cast here so that
14604                        it always works correctly.
14605
14606                        Note that this is limited to x86 alone, as there is no back compat to be addressed for the ARM JIT
14607                        for V4.0.
14608                     */
14609                     CLANG_FORMAT_COMMENT_ANCHOR;
14610
14611 #ifndef _TARGET_64BIT_
14612                     // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
14613                     // generated for ARM as well as x86, so the following IR will be accepted:
14614                     //     *  STMT      void
14615                     //         |  /--*  CNS_INT   int    2
14616                     //         \--*  ASG       long
14617                     //            \--*  CLS_VAR   long
14618
14619                     if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
14620                         varTypeIsLong(op1->TypeGet()))
14621                     {
14622                         op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14623                     }
14624 #endif
14625
14626 #ifdef _TARGET_64BIT_
14627                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
14628                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
14629                     {
14630                         op2->gtType = TYP_I_IMPL;
14631                     }
14632                     else
14633                     {
14634                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
14635                         //
14636                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
14637                         {
14638                             op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
14639                         }
14640                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
14641                         //
14642                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
14643                         {
14644                             op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
14645                         }
14646                     }
14647 #endif
14648
14649                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
14650                     // We insert a cast to the dest 'op1' type
14651                     //
14652                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
14653                         varTypeIsFloating(op2->gtType))
14654                     {
14655                         op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14656                     }
14657
14658                     op1 = gtNewAssignNode(op1, op2);
14659
14660                     /* Mark the expression as containing an assignment */
14661
14662                     op1->gtFlags |= GTF_ASG;
14663                 }
14664
14665                 /* Check if the class needs explicit initialization */
14666
14667                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14668                 {
14669                     GenTree* helperNode = impInitClass(&resolvedToken);
14670                     if (compDonotInline())
14671                     {
14672                         return;
14673                     }
14674                     if (helperNode != nullptr)
14675                     {
14676                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14677                     }
14678                 }
14679
14680                 /* stfld can interfere with value classes (consider the sequence
14681                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
14682                    spill all value class references from the stack. */
14683
14684                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
14685                 {
14686                     assert(tiObj);
14687
14688                     if (impIsValueType(tiObj))
14689                     {
14690                         impSpillEvalStack();
14691                     }
14692                     else
14693                     {
14694                         impSpillValueClasses();
14695                     }
14696                 }
14697
14698                 /* Spill any refs to the same member from the stack */
14699
14700                 impSpillLclRefs((ssize_t)resolvedToken.hField);
14701
14702                 /* stsfld also interferes with indirect accesses (for aliased
14703                    statics) and calls. But we don't need to spill other statics
14704                    as we have explicitly spilled this particular static field. */
14705
14706                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
14707
14708                 if (deferStructAssign)
14709                 {
14710                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
14711                 }
14712             }
14713                 goto APPEND;
14714
14715             case CEE_NEWARR:
14716             {
14717
14718                 /* Get the class type index operand */
14719
14720                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
14721
14722                 JITDUMP(" %08X", resolvedToken.token);
14723
14724                 if (!opts.IsReadyToRun())
14725                 {
14726                     // Need to restore array classes before creating array objects on the heap
14727                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14728                     if (op1 == nullptr)
14729                     { // compDonotInline()
14730                         return;
14731                     }
14732                 }
14733
14734                 if (tiVerificationNeeded)
14735                 {
14736                     // As per ECMA, the 'numElems' specified can be either int32 or native int.
14737                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
14738
14739                     CORINFO_CLASS_HANDLE elemTypeHnd;
14740                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
14741                     Verify(elemTypeHnd == nullptr ||
14742                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
14743                            "array of byref-like type");
14744                 }
14745
14746                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14747
14748                 accessAllowedResult =
14749                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14750                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14751
14752                 /* Form the arglist: array class handle, size */
14753                 op2 = impPopStack().val;
14754                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14755
14756 #ifdef _TARGET_64BIT_
14757                 // The array helper takes a native int for array length.
14758                 // So if we have an int, explicitly extend it to be a native int.
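                      // As an illustrative example: for C# 'new byte[n]' where 'n' is an int32, the length
                      // below is either retyped in place (when it is a constant) or wrapped in a cast to
                      // TYP_I_IMPL so that it matches the helper's native int length parameter.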
14759                 if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
14760                 {
14761                     if (op2->IsIntegralConst())
14762                     {
14763                         op2->gtType = TYP_I_IMPL;
14764                     }
14765                     else
14766                     {
14767                         bool isUnsigned = false;
14768                         op2             = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL);
14769                     }
14770                 }
14771 #endif // _TARGET_64BIT_
14772
14773 #ifdef FEATURE_READYTORUN_COMPILER
14774                 if (opts.IsReadyToRun())
14775                 {
14776                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
14777                                                     gtNewArgList(op2));
14778                     usingReadyToRunHelper = (op1 != nullptr);
14779
14780                     if (!usingReadyToRunHelper)
14781                     {
14782                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14783                         // and the newarr call with a single call to a dynamic R2R cell that will:
14784                         //      1) Load the context
14785                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14786                         //      3) Allocate the new array
14787                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14788
14789                         // Need to restore array classes before creating array objects on the heap
14790                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14791                         if (op1 == nullptr)
14792                         { // compDonotInline()
14793                             return;
14794                         }
14795                     }
14796                 }
14797
14798                 if (!usingReadyToRunHelper)
14799 #endif
14800                 {
14801                     args = gtNewArgList(op1, op2);
14802
14803                     /* Create a call to 'new' */
14804
14805                     // Note that this only works for shared generic code because the same helper is used for all
14806                     // reference array types
14807                     op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
14808                 }
14809
14810                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
14811
14812                 /* Remember that this basic block contains a 'new' of a single-dimensional (SD) array */
14813
14814                 block->bbFlags |= BBF_HAS_NEWARRAY;
14815                 optMethodFlags |= OMF_HAS_NEWARRAY;
14816
14817                 /* Push the result of the call on the stack */
14818
14819                 impPushOnStack(op1, tiRetVal);
14820
14821                 callTyp = TYP_REF;
14822             }
14823             break;
14824
14825             case CEE_LOCALLOC:
14826                 if (tiVerificationNeeded)
14827                 {
14828                     Verify(false, "bad opcode");
14829                 }
14830
14831                 // We don't allow locallocs inside handlers
14832                 if (block->hasHndIndex())
14833                 {
14834                     BADCODE("Localloc can't be inside handler");
14835                 }
14836
14837                 // Get the size to allocate
14838
14839                 op2 = impPopStack().val;
14840                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14841
14842                 if (verCurrentState.esStackDepth != 0)
14843                 {
14844                     BADCODE("Localloc can only be used when the stack is empty");
14845                 }
14846
14847                 // If the localloc is not in a loop and its size is a small constant,
14848                 // create a new local var of TYP_BLK and return its address.
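                      // As an illustrative example: a C# 'stackalloc byte[32]' in a block with no backward jump
                      // reaches here with a folded constant size and is turned into a TYP_BLK local whose address
                      // is pushed, avoiding GT_LCLHEAP entirely; non-constant or over-threshold sizes, and
                      // locallocs in blocks reached by a backward jump, fall through to the GT_LCLHEAP path below.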
14849                 {
14850                     bool convertedToLocal = false;
14851
14852                     // Need to aggressively fold here, as even fixed-size locallocs
14853                     // will have casts in the way.
14854                     op2 = gtFoldExpr(op2);
14855
14856                     if (op2->IsIntegralConst())
14857                     {
14858                         const ssize_t allocSize = op2->AsIntCon()->IconValue();
14859
14860                         if (allocSize == 0)
14861                         {
14862                             // Result is nullptr
14863                             JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
14864                             op1              = gtNewIconNode(0, TYP_I_IMPL);
14865                             convertedToLocal = true;
14866                         }
14867                         else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
14868                         {
14869                             // Get the size threshold for local conversion
14870                             ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
14871
14872 #ifdef DEBUG
14873                             // Optionally allow this to be modified
14874                             maxSize = JitConfig.JitStackAllocToLocalSize();
14875 #endif // DEBUG
14876
14877                             if (allocSize <= maxSize)
14878                             {
14879                                 const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
14880                                 JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
14881                                         stackallocAsLocal);
14882                                 lvaTable[stackallocAsLocal].lvType           = TYP_BLK;
14883                                 lvaTable[stackallocAsLocal].lvExactSize      = (unsigned)allocSize;
14884                                 lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
14885                                 op1              = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
14886                                 op1              = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
14887                                 convertedToLocal = true;
14888
14889                                 if (!this->opts.compDbgEnC)
14890                                 {
14891                                     // Ensure we have stack security for this method.
14892                                     // Reorder layout since the converted localloc is treated as an unsafe buffer.
14893                                     setNeedsGSSecurityCookie();
14894                                     compGSReorderStackLayout = true;
14895                                 }
14896                             }
14897                         }
14898                     }
14899
14900                     if (!convertedToLocal)
14901                     {
14902                         // Bail out if inlining and the localloc was not converted.
14903                         //
14904                         // Note we might consider allowing the inline, if the call
14905                         // site is not in a loop.
14906                         if (compIsForInlining())
14907                         {
14908                             InlineObservation obs = op2->IsIntegralConst()
14909                                                         ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
14910                                                         : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
14911                             compInlineResult->NoteFatal(obs);
14912                             return;
14913                         }
14914
14915                         op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14916                         // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14917                         op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14918
14919                         // Ensure we have stack security for this method.
14920                         setNeedsGSSecurityCookie();
14921
14922                         /* The FP register may not be back to the original value at the end
14923                            of the method, even if the frame size is 0, as localloc may
14924                            have modified it. So we will HAVE to reset it */
14925                         compLocallocUsed = true;
14926                     }
14927                     else
14928                     {
14929                         compLocallocOptimized = true;
14930                     }
14931                 }
14932
14933                 impPushOnStack(op1, tiRetVal);
14934                 break;
14935
14936             case CEE_ISINST:
14937             {
14938                 /* Get the type token */
14939                 assertImp(sz == sizeof(unsigned));
14940
14941                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14942
14943                 JITDUMP(" %08X", resolvedToken.token);
14944
14945                 if (!opts.IsReadyToRun())
14946                 {
14947                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14948                     if (op2 == nullptr)
14949                     { // compDonotInline()
14950                         return;
14951                     }
14952                 }
14953
14954                 if (tiVerificationNeeded)
14955                 {
14956                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14957                     // Even if this is a value class, we know it is boxed.
14958                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14959                 }
14960                 accessAllowedResult =
14961                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14962                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14963
14964                 op1 = impPopStack().val;
14965
14966                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
14967
14968                 if (optTree != nullptr)
14969                 {
14970                     impPushOnStack(optTree, tiRetVal);
14971                 }
14972                 else
14973                 {
14974
14975 #ifdef FEATURE_READYTORUN_COMPILER
14976                     if (opts.IsReadyToRun())
14977                     {
14978                         GenTreeCall* opLookup =
14979                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14980                                                       gtNewArgList(op1));
14981                         usingReadyToRunHelper = (opLookup != nullptr);
14982                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
14983
14984                         if (!usingReadyToRunHelper)
14985                         {
14986                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14987                             // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14988                             //      1) Load the context
14989                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
14990                             //      stub
14991                             //      3) Perform the 'is instance' check on the input object
14992                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14993
14994                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14995                             if (op2 == nullptr)
14996                             { // compDonotInline()
14997                                 return;
14998                             }
14999                         }
15000                     }
15001
15002                     if (!usingReadyToRunHelper)
15003 #endif
15004                     {
15005                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
15006                     }
15007                     if (compDonotInline())
15008                     {
15009                         return;
15010                     }
15011
15012                     impPushOnStack(op1, tiRetVal);
15013                 }
15014                 break;
15015             }
15016
15017             case CEE_REFANYVAL:
15018
15019                 // get the class handle and make an ICON node out of it
15020
15021                 _impResolveToken(CORINFO_TOKENKIND_Class);
15022
15023                 JITDUMP(" %08X", resolvedToken.token);
15024
15025                 op2 = impTokenToHandle(&resolvedToken);
15026                 if (op2 == nullptr)
15027                 { // compDonotInline()
15028                     return;
15029                 }
15030
15031                 if (tiVerificationNeeded)
15032                 {
15033                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
15034                            "need refany");
15035                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
15036                 }
15037
15038                 op1 = impPopStack().val;
15039                 // make certain it is normalized;
15040                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
15041
15042                 // Call helper GETREFANY(classHandle, op1);
15043                 args = gtNewArgList(op2, op1);
15044                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
15045
15046                 impPushOnStack(op1, tiRetVal);
15047                 break;
15048
15049             case CEE_REFANYTYPE:
15050
15051                 if (tiVerificationNeeded)
15052                 {
15053                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
15054                            "need refany");
15055                 }
15056
15057                 op1 = impPopStack().val;
15058
15059                 // make certain it is normalized;
15060                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
15061
15062                 if (op1->gtOper == GT_OBJ)
15063                 {
15064                     // Get the address of the refany
15065                     op1 = op1->gtOp.gtOp1;
15066
15067                     // Fetch the type from the correct slot
15068                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15069                                         gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL));
15070                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
15071                 }
15072                 else
15073                 {
15074                     assertImp(op1->gtOper == GT_MKREFANY);
15075
15076                     // The pointer may have side-effects
15077                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
15078                     {
15079                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
15080 #ifdef DEBUG
15081                         impNoteLastILoffs();
15082 #endif
15083                     }
15084
15085                     // We already have the class handle
15086                     op1 = op1->gtOp.gtOp2;
15087                 }
15088
15089                 // convert native TypeHandle to RuntimeTypeHandle
15090                 {
15091                     GenTreeArgList* helperArgs = gtNewArgList(op1);
15092
15093                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT,
15094                                               helperArgs);
15095
15096                     // The handle struct is returned in a register
15097                     op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
15098
15099                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
15100                 }
15101
15102                 impPushOnStack(op1, tiRetVal);
15103                 break;
15104
15105             case CEE_LDTOKEN:
15106             {
15107                 /* Get the Class index */
15108                 assertImp(sz == sizeof(unsigned));
15109                 lastLoadToken = codeAddr;
15110                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
15111
15112                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
15113
15114                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15115                 if (op1 == nullptr)
15116                 { // compDonotInline()
15117                     return;
15118                 }
15119
15120                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
15121                 assert(resolvedToken.hClass != nullptr);
15122
15123                 if (resolvedToken.hMethod != nullptr)
15124                 {
15125                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
15126                 }
15127                 else if (resolvedToken.hField != nullptr)
15128                 {
15129                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
15130                 }
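                      // Summary of the helper choice above: ldtoken of a type, method, or field pushes a
                      // RuntimeTypeHandle, RuntimeMethodHandle, or RuntimeFieldHandle respectively, so the helper
                      // is picked based on which handle (hMethod, hField, or just the type) the token resolved to.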
15131
15132                 GenTreeArgList* helperArgs = gtNewArgList(op1);
15133
15134                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
15135
15136                 // The handle struct is returned in a register
15137                 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
15138
15139                 tiRetVal = verMakeTypeInfo(tokenType);
15140                 impPushOnStack(op1, tiRetVal);
15141             }
15142             break;
15143
15144             case CEE_UNBOX:
15145             case CEE_UNBOX_ANY:
15146             {
15147                 /* Get the Class index */
15148                 assertImp(sz == sizeof(unsigned));
15149
15150                 _impResolveToken(CORINFO_TOKENKIND_Class);
15151
15152                 JITDUMP(" %08X", resolvedToken.token);
15153
15154                 BOOL runtimeLookup;
15155                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
15156                 if (op2 == nullptr)
15157                 {
15158                     assert(compDonotInline());
15159                     return;
15160                 }
15161
15162                 // Run this always so we can get access exceptions even with SkipVerification.
15163                 accessAllowedResult =
15164                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15165                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15166
15167                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
15168                 {
15169                     if (tiVerificationNeeded)
15170                     {
15171                         typeInfo tiUnbox = impStackTop().seTypeInfo;
15172                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
15173                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15174                         tiRetVal.NormaliseForStack();
15175                     }
15176                     JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
15177                     op1 = impPopStack().val;
15178                     goto CASTCLASS;
15179                 }
15180
15181                 /* Pop the object and create the unbox helper call */
15182                 /* You might think that for UNBOX_ANY we need to push a different */
15183                 /* (non-byref) type, but here we're making the tiRetVal that is used */
15184                 /* for the intermediate pointer which we then transfer onto the OBJ */
15185                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
15186                 if (tiVerificationNeeded)
15187                 {
15188                     typeInfo tiUnbox = impStackTop().seTypeInfo;
15189                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
15190
15191                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15192                     Verify(tiRetVal.IsValueClass(), "not value class");
15193                     tiRetVal.MakeByRef();
15194
15195                     // We always come from an objref, so this is safe byref
15196                     // We always come from an objref, so this is a safe byref
15197                     tiRetVal.SetIsReadonlyByRef();
15198                 }
15199
15200                 op1 = impPopStack().val;
15201                 assertImp(op1->gtType == TYP_REF);
15202
15203                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
15204                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
15205
15206                 // Check legality and profitability of inline expansion for unboxing.
15207                 const bool canExpandInline    = (helper == CORINFO_HELP_UNBOX);
15208                 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
15209
15210                 if (canExpandInline && shouldExpandInline)
15211                 {
15212                     JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
15213                     // we are doing normal unboxing
15214                     // inline the common case of the unbox helper
15215                     // UNBOX(exp) morphs into
15216                     // clone = pop(exp);
15217                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
15218                     // push(clone + TARGET_POINTER_SIZE)
15219                     //
15220                     GenTree* cloneOperand;
15221                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
15222                                        nullptr DEBUGARG("inline UNBOX clone1"));
15223                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
15224
15225                     GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
15226
15227                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
15228                                        nullptr DEBUGARG("inline UNBOX clone2"));
15229                     op2 = impTokenToHandle(&resolvedToken);
15230                     if (op2 == nullptr)
15231                     { // compDonotInline()
15232                         return;
15233                     }
15234                     args = gtNewArgList(op2, op1);
15235                     op1  = gtNewHelperCallNode(helper, TYP_VOID, args);
15236
15237                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
15238                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
15239
15240                     // QMARK nodes cannot reside on the evaluation stack. Because there
15241                     // may be other trees on the evaluation stack that side-effect the
15242                     // sources of the UNBOX operation we must spill the stack.
15243
15244                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
15245
15246                     // Create the address-expression to reference past the object header
15247                     // to the beginning of the value-type. Today this means adjusting
15248                     // past the base of the object's vtable field, which is pointer sized.
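                          // As an illustrative example: for a boxed Int32 the payload starts immediately after the
                          // pointer-sized method table pointer, so 'cloneOperand + TARGET_POINTER_SIZE' yields a
                          // byref to the unboxed value, the same result the CORINFO_HELP_UNBOX helper would return.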
15249
15250                     op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
15251                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
15252                 }
15253                 else
15254                 {
15255                     JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
15256                             canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
15257
15258                     // Don't optimize, just call the helper and be done with it
15259                     args = gtNewArgList(op2, op1);
15260                     op1 =
15261                         gtNewHelperCallNode(helper,
15262                                             (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
15263                 }
15264
15265                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
15266                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
15267                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
15268                        );
15269
15270                 /*
15271                   ----------------------------------------------------------------------
15272                   | \ helper  |                         |                              |
15273                   |   \       |                         |                              |
15274                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
15275                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
15276                   | opcode  \ |                         |                              |
15277                   |---------------------------------------------------------------------
15278                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
15279                   |           |                         | push the BYREF to this local |
15280                   |---------------------------------------------------------------------
15281                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
15282                   |           | the BYREF               | On Linux, when the           |
15283                   |           |                         |  struct is returned in two   |
15284                   |           |                         |  registers, create a temp    |
15285                   |           |                         |  whose address is passed to  |
15286                   |           |                         |  the unbox_nullable helper.  |
15287                   |---------------------------------------------------------------------
15288                 */
15289
15290                 if (opcode == CEE_UNBOX)
15291                 {
15292                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
15293                     {
15294                         // Unbox nullable helper returns a struct type.
15295                         // We need to spill it to a temp so that we can take the address of it.
15296                         // Here we need an unsafe value cls check, since the address of the struct is taken to be used
15297                         // further along and could potentially be exploited.
15298
15299                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
15300                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
15301
15302                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15303                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15304                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
15305
15306                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15307                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
15308                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
15309                     }
15310
15311                     assert(op1->gtType == TYP_BYREF);
15312                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
15313                 }
15314                 else
15315                 {
15316                     assert(opcode == CEE_UNBOX_ANY);
15317
15318                     if (helper == CORINFO_HELP_UNBOX)
15319                     {
15320                         // Normal unbox helper returns a TYP_BYREF.
15321                         impPushOnStack(op1, tiRetVal);
15322                         oper = GT_OBJ;
15323                         goto OBJ;
15324                     }
15325
15326                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
15327
15328 #if FEATURE_MULTIREG_RET
15329
15330                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
15331                     {
15332                         // Unbox nullable helper returns a TYP_STRUCT.
15333                         // For the multi-reg case we need to spill it to a temp so that
15334                         // we can pass the address to the unbox_nullable jit helper.
15335
15336                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
15337                         lvaTable[tmp].lvIsMultiRegArg = true;
15338                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
15339
15340                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15341                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15342                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
15343
15344                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15345                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
15346                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
15347
15348                         // In this case the return value of the unbox helper is TYP_BYREF.
15349                         // Make sure the right type is placed on the operand type stack.
15350                         impPushOnStack(op1, tiRetVal);
15351
15352                         // Load the struct.
15353                         oper = GT_OBJ;
15354
15355                         assert(op1->gtType == TYP_BYREF);
15356                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
15357
15358                         goto OBJ;
15359                     }
15360                     else
15361
15362 #endif // FEATURE_MULTIREG_RET
15363
15364                     {
15365                         // If the struct is not returned in registers, it is materialized in the RetBuf.
15366                         assert(op1->gtType == TYP_STRUCT);
15367                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15368                         assert(tiRetVal.IsValueClass());
15369                     }
15370                 }
15371
15372                 impPushOnStack(op1, tiRetVal);
15373             }
15374             break;
15375
15376             case CEE_BOX:
15377             {
15378                 /* Get the Class index */
15379                 assertImp(sz == sizeof(unsigned));
15380
15381                 _impResolveToken(CORINFO_TOKENKIND_Box);
15382
15383                 JITDUMP(" %08X", resolvedToken.token);
15384
15385                 if (tiVerificationNeeded)
15386                 {
15387                     typeInfo tiActual = impStackTop().seTypeInfo;
15388                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
15389
15390                     Verify(verIsBoxable(tiBox), "boxable type expected");
15391
15392                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
15393                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
15394                            "boxed type has unsatisfied class constraints");
15395
15396                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
15397
15398                     // Observation: the following code introduces a boxed value class on the stack, but,
15399                     // according to the ECMA spec, one would simply expect: tiRetVal =
15400                     // typeInfo(TI_REF,impGetObjectClass());
15401
15402                     // Push the result back on the stack,
15403                     // even if clsHnd is a value class we want the TI_REF
15404                     // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
15405                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
15406                 }
15407
15408                 accessAllowedResult =
15409                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15410                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15411
15412                 // Note BOX can be used on things that are not value classes, in which
15413                 // case we get a NOP.  However the verifier's view of the type on the
15414                 // stack changes (in generic code a 'T' becomes a 'boxed T')
15415                 if (!eeIsValueClass(resolvedToken.hClass))
15416                 {
15417                     JITDUMP("\n Importing BOX(refClass) as NOP\n");
15418                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
15419                     break;
15420                 }
15421
15422                 // Look ahead for unbox.any
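                      // As an illustrative example: shared generic code such as C# '(T)(object)value' emits
                      // 'box !!T; unbox.any !!T'; when the two tokens compare as TypeCompareState::Must below,
                      // the box/unbox.any pair is a no-op and we simply skip past the unbox.any.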
15423                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
15424                 {
15425                     CORINFO_RESOLVED_TOKEN unboxResolvedToken;
15426
15427                     impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
15428
15429                     // See if the resolved tokens describe types that are equal.
15430                     const TypeCompareState compare =
15431                         info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
15432
15433                     // If so, box/unbox.any is a nop.
15434                     if (compare == TypeCompareState::Must)
15435                     {
15436                         JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
15437                         // Skip the next unbox.any instruction
15438                         sz += sizeof(mdToken) + 1;
15439                         break;
15440                     }
15441                 }
15442
15443                 impImportAndPushBox(&resolvedToken);
15444                 if (compDonotInline())
15445                 {
15446                     return;
15447                 }
15448             }
15449             break;
15450
15451             case CEE_SIZEOF:
15452
15453                 /* Get the Class index */
15454                 assertImp(sz == sizeof(unsigned));
15455
15456                 _impResolveToken(CORINFO_TOKENKIND_Class);
15457
15458                 JITDUMP(" %08X", resolvedToken.token);
15459
15460                 if (tiVerificationNeeded)
15461                 {
15462                     tiRetVal = typeInfo(TI_INT);
15463                 }
15464
15465                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
15466                 impPushOnStack(op1, tiRetVal);
15467                 break;
15468
15469             case CEE_CASTCLASS:
15470
15471                 /* Get the Class index */
15472
15473                 assertImp(sz == sizeof(unsigned));
15474
15475                 _impResolveToken(CORINFO_TOKENKIND_Casting);
15476
15477                 JITDUMP(" %08X", resolvedToken.token);
15478
15479                 if (!opts.IsReadyToRun())
15480                 {
15481                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15482                     if (op2 == nullptr)
15483                     { // compDonotInline()
15484                         return;
15485                     }
15486                 }
15487
15488                 if (tiVerificationNeeded)
15489                 {
15490                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
15491                     // box it
15492                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
15493                 }
15494
15495                 accessAllowedResult =
15496                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15497                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15498
15499                 op1 = impPopStack().val;
15500
15501             /* Pop the object and create the 'checked cast' helper call */
15502
15503             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
15504             // and op2 to contain code that creates the type handle corresponding to typeRef
15505             CASTCLASS:
15506             {
15507                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
15508
15509                 if (optTree != nullptr)
15510                 {
15511                     impPushOnStack(optTree, tiRetVal);
15512                 }
15513                 else
15514                 {
15515
15516 #ifdef FEATURE_READYTORUN_COMPILER
15517                     if (opts.IsReadyToRun())
15518                     {
15519                         GenTreeCall* opLookup =
15520                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
15521                                                       gtNewArgList(op1));
15522                         usingReadyToRunHelper = (opLookup != nullptr);
15523                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
15524
15525                         if (!usingReadyToRunHelper)
15526                         {
15527                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
15528                             // and the chkcastany call with a single call to a dynamic R2R cell that will:
15529                             //      1) Load the context
15530                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
15531                             //      stub
15532                             //      3) Check the object on the stack for the type-cast
15533                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15534
15535                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15536                             if (op2 == nullptr)
15537                             { // compDonotInline()
15538                                 return;
15539                             }
15540                         }
15541                     }
15542
15543                     if (!usingReadyToRunHelper)
15544 #endif
15545                     {
15546                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
15547                     }
15548                     if (compDonotInline())
15549                     {
15550                         return;
15551                     }
15552
15553                     /* Push the result back on the stack */
15554                     impPushOnStack(op1, tiRetVal);
15555                 }
15556             }
15557             break;
15558
15559             case CEE_THROW:
15560
15561                 if (compIsForInlining())
15562                 {
15563                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15564                     // TODO: Will this be too strict, given that we will inline many basic blocks?
15565                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15566
15567                     /* Do we have just the exception on the stack ?*/
15568
15569                     if (verCurrentState.esStackDepth != 1)
15570                     {
15571                         /* if not, just don't inline the method */
15572
15573                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
15574                         return;
15575                     }
15576                 }
15577
15578                 if (tiVerificationNeeded)
15579                 {
15580                     tiRetVal = impStackTop().seTypeInfo;
15581                     Verify(tiRetVal.IsObjRef(), "object ref expected");
15582                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15583                     {
15584                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
15585                     }
15586                 }
15587
15588                 block->bbSetRunRarely(); // any block with a throw is rare
15589                 /* Pop the exception object and create the 'throw' helper call */
15590
15591                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
15592
15593             EVAL_APPEND:
15594                 if (verCurrentState.esStackDepth > 0)
15595                 {
15596                     impEvalSideEffects();
15597                 }
15598
15599                 assert(verCurrentState.esStackDepth == 0);
15600
15601                 goto APPEND;
15602
15603             case CEE_RETHROW:
15604
15605                 assert(!compIsForInlining());
15606
15607                 if (info.compXcptnsCount == 0)
15608                 {
15609                     BADCODE("rethrow outside catch");
15610                 }
15611
15612                 if (tiVerificationNeeded)
15613                 {
15614                     Verify(block->hasHndIndex(), "rethrow outside catch");
15615                     if (block->hasHndIndex())
15616                     {
15617                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
15618                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
15619                         if (HBtab->HasFilter())
15620                         {
15621                             // we better be in the handler clause part, not the filter part
15622                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
15623                                    "rethrow in filter");
15624                         }
15625                     }
15626                 }
15627
15628                 /* Create the 'rethrow' helper call */
15629
15630                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
15631
15632                 goto EVAL_APPEND;
15633
15634             case CEE_INITOBJ:
15635
15636                 assertImp(sz == sizeof(unsigned));
15637
15638                 _impResolveToken(CORINFO_TOKENKIND_Class);
15639
15640                 JITDUMP(" %08X", resolvedToken.token);
15641
15642                 if (tiVerificationNeeded)
15643                 {
15644                     typeInfo tiTo    = impStackTop().seTypeInfo;
15645                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15646
15647                     Verify(tiTo.IsByRef(), "byref expected");
15648                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15649
15650                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15651                            "type operand incompatible with type of address");
15652                 }
15653
15654                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
15655                 op2  = gtNewIconNode(0);                                     // Value
15656                 op1  = impPopStack().val;                                    // Dest
15657                 op1  = gtNewBlockVal(op1, size);
15658                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15659                 goto SPILL_APPEND;
15660
15661             case CEE_INITBLK:
15662
15663                 if (tiVerificationNeeded)
15664                 {
15665                     Verify(false, "bad opcode");
15666                 }
15667
15668                 op3 = impPopStack().val; // Size
15669                 op2 = impPopStack().val; // Value
15670                 op1 = impPopStack().val; // Dest
15671
15672                 if (op3->IsCnsIntOrI())
15673                 {
15674                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15675                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15676                 }
15677                 else
15678                 {
15679                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15680                     size = 0;
15681                 }
15682                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15683
15684                 goto SPILL_APPEND;
15685
15686             case CEE_CPBLK:
15687
15688                 if (tiVerificationNeeded)
15689                 {
15690                     Verify(false, "bad opcode");
15691                 }
15692                 op3 = impPopStack().val; // Size
15693                 op2 = impPopStack().val; // Src
15694                 op1 = impPopStack().val; // Dest
15695
15696                 if (op3->IsCnsIntOrI())
15697                 {
15698                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15699                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15700                 }
15701                 else
15702                 {
15703                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15704                     size = 0;
15705                 }
15706                 if (op2->OperGet() == GT_ADDR)
15707                 {
15708                     op2 = op2->gtOp.gtOp1;
15709                 }
15710                 else
15711                 {
15712                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
15713                 }
15714
15715                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
15716                 goto SPILL_APPEND;
15717
15718             case CEE_CPOBJ:
15719
15720                 assertImp(sz == sizeof(unsigned));
15721
15722                 _impResolveToken(CORINFO_TOKENKIND_Class);
15723
15724                 JITDUMP(" %08X", resolvedToken.token);
15725
15726                 if (tiVerificationNeeded)
15727                 {
15728                     typeInfo tiFrom  = impStackTop().seTypeInfo;
15729                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
15730                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15731
15732                     Verify(tiFrom.IsByRef(), "expected byref source");
15733                     Verify(tiTo.IsByRef(), "expected byref destination");
15734
15735                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
15736                            "type of source address incompatible with type operand");
15737                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15738                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15739                            "type operand incompatible with type of destination address");
15740                 }
15741
15742                 if (!eeIsValueClass(resolvedToken.hClass))
15743                 {
15744                     op1 = impPopStack().val; // address to load from
15745
15746                     impBashVarAddrsToI(op1);
15747
15748                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
15749
15750                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
15751                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
15752
15753                     impPushOnStack(op1, typeInfo());
15754                     opcode = CEE_STIND_REF;
15755                     lclTyp = TYP_REF;
15756                     goto STIND_POST_VERIFY;
15757                 }
15758
15759                 op2 = impPopStack().val; // Src
15760                 op1 = impPopStack().val; // Dest
15761                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
15762                 goto SPILL_APPEND;
15763
15764             case CEE_STOBJ:
15765             {
15766                 assertImp(sz == sizeof(unsigned));
15767
15768                 _impResolveToken(CORINFO_TOKENKIND_Class);
15769
15770                 JITDUMP(" %08X", resolvedToken.token);
15771
15772                 if (eeIsValueClass(resolvedToken.hClass))
15773                 {
15774                     lclTyp = TYP_STRUCT;
15775                 }
15776                 else
15777                 {
15778                     lclTyp = TYP_REF;
15779                 }
15780
15781                 if (tiVerificationNeeded)
15782                 {
15783
15784                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
15785
15786                     // Make sure we have a good looking byref
15787                     Verify(tiPtr.IsByRef(), "pointer not byref");
15788                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
15789                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
15790                     {
15791                         compUnsafeCastUsed = true;
15792                     }
15793
15794                     typeInfo ptrVal = DereferenceByRef(tiPtr);
15795                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
15796
15797                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
15798                     {
15799                         Verify(false, "type of value incompatible with type operand");
15800                         compUnsafeCastUsed = true;
15801                     }
15802
15803                     if (!tiCompatibleWith(argVal, ptrVal, false))
15804                     {
15805                         Verify(false, "type operand incompatible with type of address");
15806                         compUnsafeCastUsed = true;
15807                     }
15808                 }
15809                 else
15810                 {
15811                     compUnsafeCastUsed = true;
15812                 }
15813
15814                 if (lclTyp == TYP_REF)
15815                 {
15816                     opcode = CEE_STIND_REF;
15817                     goto STIND_POST_VERIFY;
15818                 }
15819
15820                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15821                 if (impIsPrimitive(jitTyp))
15822                 {
15823                     lclTyp = JITtype2varType(jitTyp);
15824                     goto STIND_POST_VERIFY;
15825                 }
15826
15827                 op2 = impPopStack().val; // Value
15828                 op1 = impPopStack().val; // Ptr
15829
15830                 assertImp(varTypeIsStruct(op2));
15831
15832                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15833
15834                 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
15835                 {
15836                     op1->gtFlags |= GTF_BLK_UNALIGNED;
15837                 }
15838                 goto SPILL_APPEND;
15839             }
15840
15841             case CEE_MKREFANY:
15842
15843                 assert(!compIsForInlining());
15844
15845                 // Being lazy here. Refanys are tricky in terms of gc tracking.
15846                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
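                // (Editorial note: a refany, i.e. System.TypedReference, is a struct pairing a byref with a
                // type handle; precisely reporting that embedded byref to the GC through promoted fields is
                // what makes the tracking tricky here.)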
15847
15848                 JITDUMP("disabling struct promotion because of mkrefany\n");
15849                 fgNoStructPromotion = true;
15850
15851                 oper = GT_MKREFANY;
15852                 assertImp(sz == sizeof(unsigned));
15853
15854                 _impResolveToken(CORINFO_TOKENKIND_Class);
15855
15856                 JITDUMP(" %08X", resolvedToken.token);
15857
15858                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15859                 if (op2 == nullptr)
15860                 { // compDonotInline()
15861                     return;
15862                 }
15863
15864                 if (tiVerificationNeeded)
15865                 {
15866                     typeInfo tiPtr   = impStackTop().seTypeInfo;
15867                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15868
15869                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
15870                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
15871                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
15872                 }
15873
15874                 accessAllowedResult =
15875                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15876                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15877
15878                 op1 = impPopStack().val;
15879
15880                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
15881                 // But JIT32 allowed it, so we continue to allow it.
15882                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
15883
15884                 // MKREFANY returns a struct.  op2 is the class token.
15885                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
15886
15887                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
15888                 break;
15889
15890             case CEE_LDOBJ:
15891             {
15892                 oper = GT_OBJ;
15893                 assertImp(sz == sizeof(unsigned));
15894
15895                 _impResolveToken(CORINFO_TOKENKIND_Class);
15896
15897                 JITDUMP(" %08X", resolvedToken.token);
15898
15899             OBJ:
15900
15901                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15902
15903                 if (tiVerificationNeeded)
15904                 {
15905                     typeInfo tiPtr = impStackTop().seTypeInfo;
15906
15907                     // Make sure we have a byref
15908                     if (!tiPtr.IsByRef())
15909                     {
15910                         Verify(false, "pointer not byref");
15911                         compUnsafeCastUsed = true;
15912                     }
15913                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
15914
15915                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
15916                     {
15917                         Verify(false, "type of address incompatible with type operand");
15918                         compUnsafeCastUsed = true;
15919                     }
15920                     tiRetVal.NormaliseForStack();
15921                 }
15922                 else
15923                 {
15924                     compUnsafeCastUsed = true;
15925                 }
15926
15927                 if (eeIsValueClass(resolvedToken.hClass))
15928                 {
15929                     lclTyp = TYP_STRUCT;
15930                 }
15931                 else
15932                 {
15933                     lclTyp = TYP_REF;
15934                     opcode = CEE_LDIND_REF;
15935                     goto LDIND_POST_VERIFY;
15936                 }
15937
15938                 op1 = impPopStack().val;
15939
15940                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
15941
15942                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15943                 if (impIsPrimitive(jitTyp))
15944                 {
15945                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
15946
15947                     // Could point anywhere, for example a boxed class static int
15948                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15949                     assertImp(varTypeIsArithmetic(op1->gtType));
15950                 }
15951                 else
15952                 {
15953                     // OBJ returns a struct
15954                     // and takes an inline argument which is the class token of the loaded obj
15955                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
15956                 }
15957                 op1->gtFlags |= GTF_EXCEPT;
15958
15959                 if (prefixFlags & PREFIX_UNALIGNED)
15960                 {
15961                     op1->gtFlags |= GTF_IND_UNALIGNED;
15962                 }
15963
15964                 impPushOnStack(op1, tiRetVal);
15965                 break;
15966             }
15967
15968             case CEE_LDLEN:
15969                 if (tiVerificationNeeded)
15970                 {
15971                     typeInfo tiArray = impStackTop().seTypeInfo;
15972                     Verify(verIsSDArray(tiArray), "bad array");
15973                     tiRetVal = typeInfo(TI_INT);
15974                 }
15975
15976                 op1 = impPopStack().val;
15977                 if (!opts.MinOpts() && !opts.compDbgCode)
15978                 {
15979                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
15980                     GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length);
15981
15982                     /* Mark the block as containing a length expression */
15983
15984                     if (op1->gtOper == GT_LCL_VAR)
15985                     {
15986                         block->bbFlags |= BBF_HAS_IDX_LEN;
15987                     }
15988
15989                     op1 = arrLen;
15990                 }
15991                 else
15992                 {
15993                     /* Create the expression "*(array_addr + ArrLenOffs)" */
15994                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15995                                         gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL));
15996                     op1 = gtNewIndir(TYP_INT, op1);
15997                 }
15998
15999                 /* Push the result back on the stack */
16000                 impPushOnStack(op1, tiRetVal);
16001                 break;
16002
16003             case CEE_BREAK:
16004                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
16005                 goto SPILL_APPEND;
16006
16007             case CEE_NOP:
16008                 if (opts.compDbgCode)
16009                 {
16010                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
16011                     goto SPILL_APPEND;
16012                 }
16013                 break;
16014
16015             /******************************** NYI *******************************/
16016
16017             case 0xCC:
16018                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
16019
16020             case CEE_ILLEGAL:
16021             case CEE_MACRO_END:
16022
16023             default:
16024                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
16025         }
16026
16027         codeAddr += sz;
16028         prevOpcode = opcode;
16029
16030         prefixFlags = 0;
16031     }
16032
16033     return;
16034 #undef _impResolveToken
16035 }
16036 #ifdef _PREFAST_
16037 #pragma warning(pop)
16038 #endif
16039
16040 // Push a local/argument tree on the operand stack
16041 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
16042 {
16043     tiRetVal.NormaliseForStack();
16044
16045     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
16046     {
16047         tiRetVal.SetUninitialisedObjRef();
16048     }
16049
16050     impPushOnStack(op, tiRetVal);
16051 }
16052
16053 // Load a local/argument on the operand stack
16054 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
16055 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
16056 {
16057     var_types lclTyp;
16058
16059     if (lvaTable[lclNum].lvNormalizeOnLoad())
16060     {
16061         lclTyp = lvaGetRealType(lclNum);
16062     }
16063     else
16064     {
16065         lclTyp = lvaGetActualType(lclNum);
16066     }
16067
16068     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
16069 }
16070
16071 // Load an argument on the operand stack
16072 // Shared by the various CEE_LDARG opcodes
16073 // ilArgNum is the argument index as specified in IL.
16074 // It will be mapped to the correct lvaTable index
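// Editorial note: for instance methods IL argument 0 is the 'this' pointer, and compMapILargNum below
// additionally skips any hidden parameters (for example a return buffer), so the IL index and the
// lvaTable index need not coincide.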
16075 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
16076 {
16077     Verify(ilArgNum < info.compILargsCount, "bad arg num");
16078
16079     if (compIsForInlining())
16080     {
16081         if (ilArgNum >= info.compArgsCount)
16082         {
16083             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
16084             return;
16085         }
16086
16087         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
16088                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
16089     }
16090     else
16091     {
16092         if (ilArgNum >= info.compArgsCount)
16093         {
16094             BADCODE("Bad IL");
16095         }
16096
16097         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
16098
16099         if (lclNum == info.compThisArg)
16100         {
16101             lclNum = lvaArg0Var;
16102         }
16103
16104         impLoadVar(lclNum, offset);
16105     }
16106 }
16107
16108 // Load a local on the operand stack
16109 // Shared by the various CEE_LDLOC opcodes
16110 // ilLclNum is the local index as specified in IL.
16111 // It will be mapped to the correct lvaTable index
16112 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
16113 {
16114     if (tiVerificationNeeded)
16115     {
16116         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
16117         Verify(info.compInitMem, "initLocals not set");
16118     }
16119
16120     if (compIsForInlining())
16121     {
16122         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
16123         {
16124             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
16125             return;
16126         }
16127
16128         // Get the local type
16129         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
16130
16131         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
16132
16133         /* Have we allocated a temp for this local? */
16134
16135         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
16136
16137         // All vars of inlined methods should be !lvNormalizeOnLoad()
16138
16139         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
16140         lclTyp = genActualType(lclTyp);
16141
16142         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
16143     }
16144     else
16145     {
16146         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
16147         {
16148             BADCODE("Bad IL");
16149         }
16150
16151         unsigned lclNum = info.compArgsCount + ilLclNum;
16152
16153         impLoadVar(lclNum, offset);
16154     }
16155 }
16156
16157 #ifdef _TARGET_ARM_
16158 /**************************************************************************************
16159  *
16160  *  When assigning a vararg call src to an HFA lcl dest, mark that we cannot promote the
16161  *  dst struct, because struct promotion would turn it into a float/double variable while
16162  *  the rhs will be an int/long variable. We don't generate code for assigning an int into
16163  *  a float, but nothing would prevent such a tree from being formed. The tree would
16164  *  look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
16165  *
16166  *  tmpNum - the lcl dst variable num that is a struct.
16167  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
16168  *  hClass - the type handle for the struct variable.
16169  *
16170  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
16171  *        however, we could do a codegen of transferring from int to float registers
16172  *        (transfer, not a cast.)
16173  *
16174  */
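// Editorial sketch (not from the original source): an HFA ("homogeneous floating-point aggregate") is a
// struct composed solely of float fields or solely of double fields, which the ARM ABI would normally
// return in floating-point registers; a varargs call, however, returns it in the integer registers,
// which is why the destination struct below must not be promoted.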
16175 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
16176 {
16177     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
16178     {
16179         int       hfaSlots = GetHfaCount(hClass);
16180         var_types hfaType  = GetHfaType(hClass);
16181
16182         // If we have varargs, the importer morphs the method's return type to be "int" irrespective of
16183         // its original struct/float type, because the ABI calls for the return to be in integer registers.
16184         // We don't want struct promotion to replace an expression like this:
16185         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
16186         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
16187         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
16188             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
16189         {
16190             // Make sure this struct type stays as struct so we can receive the call in a struct.
16191             lvaTable[tmpNum].lvIsMultiRegRet = true;
16192         }
16193     }
16194 }
16195 #endif // _TARGET_ARM_
16196
16197 //------------------------------------------------------------------------
16198 // impAssignSmallStructTypeToVar: ensure calls that return small structs whose
16199 //    sizes are not supported integral type sizes return their values via temps.
16200 //
16201 // Arguments:
16202 //     op -- call returning a small struct in a register
16203 //     hClass -- class handle for struct
16204 //
16205 // Returns:
16206 //     Tree with reference to struct local to use as call return value.
16207 //
16208 // Remarks:
16209 //     The call will be spilled into a preceding statement.
16210 //     Currently handles struct returns for 3, 5, 6, and 7 byte structs.
16211
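// Editorial example (hypothetical type): a struct such as { byte r; byte g; byte b; } occupies 3 bytes,
// which is not a supported integral return size, so a call returning it is spilled to a temp here and
// the caller uses the resulting local reference instead of the call node directly.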
16212 GenTree* Compiler::impAssignSmallStructTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
16213 {
16214     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for small struct return."));
16215     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
16216     GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
16217
16218     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of small struct returns.
16219     ret->gtFlags |= GTF_DONT_CSE;
16220
16221     return ret;
16222 }
16223
16224 #if FEATURE_MULTIREG_RET
16225 //------------------------------------------------------------------------
16226 // impAssignMultiRegTypeToVar: ensure calls that return structs in multiple
16227 //    registers return their values via suitable temps.
16228 //
16229 // Arguments:
16230 //     op -- call returning a struct in registers
16231 //     hClass -- class handle for struct
16232 //
16233 // Returns:
16234 //     Tree with reference to struct local to use as call return value.
16235
16236 GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
16237 {
16238     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
16239     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
16240     GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
16241
16242     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
16243     ret->gtFlags |= GTF_DONT_CSE;
16244
16245     assert(IsMultiRegReturnedType(hClass));
16246
16247     // Mark the var so that fields are not promoted and stay together.
16248     lvaTable[tmpNum].lvIsMultiRegRet = true;
16249
16250     return ret;
16251 }
16252 #endif // FEATURE_MULTIREG_RET
16253
16254 // Do the import for a return instruction.
16255 // Returns false if inlining was aborted.
16256 // opcode can be ret, or call in the case of a tail.call
16257 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
16258 {
16259     if (tiVerificationNeeded)
16260     {
16261         verVerifyThisPtrInitialised();
16262
16263         unsigned expectedStack = 0;
16264         if (info.compRetType != TYP_VOID)
16265         {
16266             typeInfo tiVal = impStackTop().seTypeInfo;
16267             typeInfo tiDeclared =
16268                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
16269
16270             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
16271
16272             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
16273             expectedStack = 1;
16274         }
16275         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
16276     }
16277
16278 #ifdef DEBUG
16279     // If we are importing an inlinee and have GC ref locals we always
16280     // need to have a spill temp for the return value.  This temp
16281     // should have been set up in advance, over in fgFindBasicBlocks.
16282     if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
16283     {
16284         assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
16285     }
16286 #endif // DEBUG
16287
16288     GenTree*             op2       = nullptr;
16289     GenTree*             op1       = nullptr;
16290     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
16291
16292     if (info.compRetType != TYP_VOID)
16293     {
16294         StackEntry se = impPopStack();
16295         retClsHnd     = se.seTypeInfo.GetClassHandle();
16296         op2           = se.val;
16297
16298         if (!compIsForInlining())
16299         {
16300             impBashVarAddrsToI(op2);
16301             op2 = impImplicitIorI4Cast(op2, info.compRetType);
16302             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
16303             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
16304                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
16305                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
16306                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
16307                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
16308
16309 #ifdef DEBUG
16310             if (opts.compGcChecks && info.compRetType == TYP_REF)
16311             {
16312                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
16313                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
16314                 // one-return BB.
16315
16316                 assert(op2->gtType == TYP_REF);
16317
16318                 // confirm that the argument is a GC pointer (for debugging (GC stress))
16319                 GenTreeArgList* args = gtNewArgList(op2);
16320                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
16321
16322                 if (verbose)
16323                 {
16324                     printf("\ncompGcChecks tree:\n");
16325                     gtDispTree(op2);
16326                 }
16327             }
16328 #endif
16329         }
16330         else
16331         {
16332             // inlinee's stack should be empty now.
16333             assert(verCurrentState.esStackDepth == 0);
16334
16335 #ifdef DEBUG
16336             if (verbose)
16337             {
16338                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
16339                 gtDispTree(op2);
16340             }
16341 #endif
16342
16343             // Make sure the type matches the original call.
16344
16345             var_types returnType       = genActualType(op2->gtType);
16346             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
16347             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
16348             {
16349                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
16350             }
16351
16352             if (returnType != originalCallType)
16353             {
16354                 // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa
16355                 if (((returnType == TYP_BYREF) && (originalCallType == TYP_I_IMPL)) ||
16356                     ((returnType == TYP_I_IMPL) && (originalCallType == TYP_BYREF)))
16357                 {
16358                     JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType),
16359                             varTypeName(originalCallType));
16360                 }
16361                 else
16362                 {
16363                     JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType),
16364                             varTypeName(originalCallType));
16365                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
16366                     return false;
16367                 }
16368             }
16369
16370             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
16371             // expression. At this point, retExpr could already be set if there are multiple
16372             // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
16373             // the other blocks already set it. If there is only a single return block,
16374             // retExpr shouldn't be set. However, this is not true if we reimport a block
16375             // with a return. In that case, retExpr will be set, then the block will be
16376             // reimported, but retExpr won't get cleared as part of setting the block to
16377             // be reimported. The reimported retExpr value should be the same, so even if
16378             // we don't unconditionally overwrite it, it shouldn't matter.
16379             if (info.compRetNativeType != TYP_STRUCT)
16380             {
16381                 // compRetNativeType is not TYP_STRUCT.
16382                 // This implies it could be either a scalar type or SIMD vector type or
16383                 // a struct type that can be normalized to a scalar type.
16384
16385                 if (varTypeIsStruct(info.compRetType))
16386                 {
16387                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
16388                     // adjust the type away from struct to integral
16389                     // and no normalizing
16390                     op2 = impFixupStructReturnType(op2, retClsHnd);
16391                 }
16392                 else
16393                 {
16394                     // Do we have to normalize?
16395                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
16396                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
16397                         fgCastNeeded(op2, fncRealRetType))
16398                     {
16399                         // Small-typed return values are normalized by the callee
16400                         op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
16401                     }
16402                 }
16403
16404                 if (fgNeedReturnSpillTemp())
16405                 {
16406                     assert(info.compRetNativeType != TYP_VOID &&
16407                            (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
16408
16409                     // If this method returns a ref type, track the actual types seen
16410                     // in the returns.
16411                     if (info.compRetType == TYP_REF)
16412                     {
16413                         bool                 isExact      = false;
16414                         bool                 isNonNull    = false;
16415                         CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
16416
16417                         if (impInlineInfo->retExpr == nullptr)
16418                         {
16419                             // This is the first return, so best known type is the type
16420                             // of this return value.
16421                             impInlineInfo->retExprClassHnd        = returnClsHnd;
16422                             impInlineInfo->retExprClassHndIsExact = isExact;
16423                         }
16424                         else if (impInlineInfo->retExprClassHnd != returnClsHnd)
16425                         {
16426                             // This return site type differs from earlier seen sites,
16427                             // so reset the info and we'll fall back to using the method's
16428                             // declared return type for the return spill temp.
16429                             impInlineInfo->retExprClassHnd        = nullptr;
16430                             impInlineInfo->retExprClassHndIsExact = false;
16431                         }
16432                     }
16433
16434                     // This is a bit of a workaround...
16435                     // If we are inlining a call that returns a struct, where the actual "native" return type is
16436                     // not a struct (for example, the struct is composed of exactly one int, and the native
16437                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
16438                     //      fgNeedReturnSpillTemp() == true, and lvaInlineeReturnSpillTemp is the index of a local var that is set
16439                     // to the *native* return type), and at least one of the return blocks is the result of
16440                     // a call, then we have a problem. The situation is like this (from a failed test case):
16441                     //
16442                     // inliner:
16443                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
16444                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
16445                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
16446                     //
16447                     // inlinee:
16448                     //      ...
16449                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
16450                     //      ret
16451                     //      ...
16452                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
16453                     //      object&, class System.Func`1<!!0>)
16454                     //      ret
16455                     //
16456                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
16457                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
16458                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
16459                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
16460                     //
16461                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
16462                     // native return type, which is what it will be set to eventually. We generate the
16463                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
16464                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
16465
16466                     bool restoreType = false;
16467                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
16468                     {
16469                         noway_assert(op2->TypeGet() == TYP_STRUCT);
16470                         op2->gtType = info.compRetNativeType;
16471                         restoreType = true;
16472                     }
16473
16474                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
16475                                      (unsigned)CHECK_SPILL_ALL);
16476
16477                     GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
16478
16479                     if (restoreType)
16480                     {
16481                         op2->gtType = TYP_STRUCT; // restore it to what it was
16482                     }
16483
16484                     op2 = tmpOp2;
16485
16486 #ifdef DEBUG
16487                     if (impInlineInfo->retExpr)
16488                     {
16489                         // Some other block(s) have seen the CEE_RET first.
16490                         // Better they spilled to the same temp.
16491                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
16492                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
16493                     }
16494 #endif
16495                 }
16496
16497 #ifdef DEBUG
16498                 if (verbose)
16499                 {
16500                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
16501                     gtDispTree(op2);
16502                 }
16503 #endif
16504
16505                 // Report the return expression
16506                 impInlineInfo->retExpr = op2;
16507             }
16508             else
16509             {
16510                 // compRetNativeType is TYP_STRUCT.
16511                 // This implies a struct return via a RetBuf arg or a multi-reg struct return.
16512
16513                 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
16514
16515                 // Assign the inlinee return into a spill temp.
16516                 // spill temp only exists if there are multiple return points
16517                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
16518                 {
16519                     // in this case we have to insert multiple struct copies to the temp
16520                     // and the retexpr is just the temp.
16521                     assert(info.compRetNativeType != TYP_VOID);
16522                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
16523
16524                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
16525                                      (unsigned)CHECK_SPILL_ALL);
16526                 }
16527
16528 #if defined(_TARGET_ARM_) || defined(UNIX_AMD64_ABI)
16529 #if defined(_TARGET_ARM_)
16530                 // TODO-ARM64-NYI: HFA
16531                 // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
16532                 // next ifdefs could be refactored in a single method with the ifdef inside.
16533                 if (IsHfa(retClsHnd))
16534                 {
16535 // Same as !IsHfa but just don't bother with impAssignStructPtr.
16536 #else  // defined(UNIX_AMD64_ABI)
16537                 ReturnTypeDesc retTypeDesc;
16538                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16539                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16540
16541                 if (retRegCount != 0)
16542                 {
16543                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
16544                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
16545                     // max allowed.)
16546                     assert(retRegCount == MAX_RET_REG_COUNT);
16547                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
16548                     CLANG_FORMAT_COMMENT_ANCHOR;
16549 #endif // defined(UNIX_AMD64_ABI)
16550
16551                     if (fgNeedReturnSpillTemp())
16552                     {
16553                         if (!impInlineInfo->retExpr)
16554                         {
16555 #if defined(_TARGET_ARM_)
16556                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
16557 #else  // defined(UNIX_AMD64_ABI)
16558                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16559                             impInlineInfo->retExpr =
16560                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16561 #endif // defined(UNIX_AMD64_ABI)
16562                         }
16563                     }
16564                     else
16565                     {
16566                         impInlineInfo->retExpr = op2;
16567                     }
16568                 }
16569                 else
16570 #elif defined(_TARGET_ARM64_)
16571                 ReturnTypeDesc retTypeDesc;
16572                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16573                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16574
16575                 if (retRegCount != 0)
16576                 {
16577                     assert(!iciCall->HasRetBufArg());
16578                     assert(retRegCount >= 2);
16579                     if (fgNeedReturnSpillTemp())
16580                     {
16581                         if (!impInlineInfo->retExpr)
16582                         {
16583                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16584                             impInlineInfo->retExpr =
16585                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16586                         }
16587                     }
16588                     else
16589                     {
16590                         impInlineInfo->retExpr = op2;
16591                     }
16592                 }
16593                 else
16594 #endif // defined(_TARGET_ARM64_)
16595                 {
16596                     assert(iciCall->HasRetBufArg());
16597                     GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
16598                     // spill temp only exists if there are multiple return points
16599                     if (fgNeedReturnSpillTemp())
16600                     {
16601                         // if this is the first return we have seen set the retExpr
16602                         if (!impInlineInfo->retExpr)
16603                         {
16604                             impInlineInfo->retExpr =
16605                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
16606                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
16607                         }
16608                     }
16609                     else
16610                     {
16611                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16612                     }
16613                 }
16614             }
16615         }
16616     }
16617
16618     if (compIsForInlining())
16619     {
16620         return true;
16621     }
16622
16623     if (info.compRetType == TYP_VOID)
16624     {
16625         // return void
16626         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16627     }
16628     else if (info.compRetBuffArg != BAD_VAR_NUM)
16629     {
16630         // Assign value to return buff (first param)
16631         GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
16632
16633         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16634         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16635
16636         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
16637         CLANG_FORMAT_COMMENT_ANCHOR;
16638
16639 #if defined(_TARGET_AMD64_)
16640
16641         // The x64 (System V and Win64) calling convention requires us to
16642         // return the implicit return buffer explicitly (in RAX).
16643         // Change the return type to be BYREF.
16644         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16645 #else  // !defined(_TARGET_AMD64_)
16646         // On non-AMD64 targets the profiler hook requires us to return the implicit RetBuf explicitly (in RAX).
16647         // In that case the return type of the function is changed to BYREF.
16648         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
16649         if (compIsProfilerHookNeeded())
16650         {
16651             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16652         }
16653         else
16654         {
16655             // return void
16656             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16657         }
16658 #endif // !defined(_TARGET_AMD64_)
16659     }
16660     else if (varTypeIsStruct(info.compRetType))
16661     {
16662 #if !FEATURE_MULTIREG_RET
16663         // For both ARM architectures the HFA native types are maintained as structs.
16664         // Also on System V AMD64 the multireg structs returns are also left as structs.
16665         noway_assert(info.compRetNativeType != TYP_STRUCT);
16666 #endif
16667         op2 = impFixupStructReturnType(op2, retClsHnd);
16668         // return op2
16669         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
16670     }
16671     else
16672     {
16673         // return op2
16674         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
16675     }
16676
16677     // We must have imported a tailcall and jumped to RET
16678     if (prefixFlags & PREFIX_TAILCALL)
16679     {
16680 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
16681         // Jit64 compat:
16682         // This cannot be asserted on Amd64 since we permit the following IL pattern:
16683         //      tail.call
16684         //      pop
16685         //      ret
16686         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
16687 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
16688
16689         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
16690
16691         // impImportCall() would have already appended TYP_VOID calls
16692         if (info.compRetType == TYP_VOID)
16693         {
16694             return true;
16695         }
16696     }
16697
16698     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16699 #ifdef DEBUG
16700     // Remember at which BC offset the tree was finished
16701     impNoteLastILoffs();
16702 #endif
16703     return true;
16704 }
16705
16706 /*****************************************************************************
16707  *  Mark the block as unimported.
16708  *  Note that the caller is responsible for calling impImportBlockPending(),
16709  *  with the appropriate stack-state
16710  */
16711
16712 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
16713 {
16714 #ifdef DEBUG
16715     if (verbose && (block->bbFlags & BBF_IMPORTED))
16716     {
16717         printf("\n" FMT_BB " will be reimported\n", block->bbNum);
16718     }
16719 #endif
16720
16721     block->bbFlags &= ~BBF_IMPORTED;
16722 }
16723
16724 /*****************************************************************************
16725  *  Mark the successors of the given block as unimported.
16726  *  Note that the caller is responsible for calling impImportBlockPending()
16727  *  for all the successors, with the appropriate stack-state.
16728  */
16729
16730 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
16731 {
16732     const unsigned numSuccs = block->NumSucc();
16733     for (unsigned i = 0; i < numSuccs; i++)
16734     {
16735         impReimportMarkBlock(block->GetSucc(i));
16736     }
16737 }
16738
16739 /*****************************************************************************
16740  *
16741  *  Filter wrapper that handles only the verification exception code,
16742  *  letting any other exception continue its search.
16743  */
16744
16745 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
16746 {
16747     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
16748     {
16749         return EXCEPTION_EXECUTE_HANDLER;
16750     }
16751
16752     return EXCEPTION_CONTINUE_SEARCH;
16753 }
16754
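/*****************************************************************************
 *  Queue the EH handlers that guard 'block' for importing, giving each the
 *  verification stack state it starts with (empty, or holding just the
 *  caught exception object). When 'isTryStart' is true the block begins a
 *  try region, and the handlers/filters of that region and of all enclosing
 *  regions are queued; otherwise only fault handlers are re-queued, and only
 *  while we are tracking the constructor 'this'-init state. The current
 *  stack state is saved and restored around the walk.
 */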
16755 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
16756 {
16757     assert(block->hasTryIndex());
16758     assert(!compIsForInlining());
16759
16760     unsigned  tryIndex = block->getTryIndex();
16761     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
16762
16763     if (isTryStart)
16764     {
16765         assert(block->bbFlags & BBF_TRY_BEG);
16766
16767         // The Stack must be empty
16768         //
16769         if (block->bbStkDepth != 0)
16770         {
16771             BADCODE("Evaluation stack must be empty on entry into a try block");
16772         }
16773     }
16774
16775     // Save the stack contents; we'll need to restore it later.
16776     //
16777     SavedStack blockState;
16778     impSaveStackState(&blockState, false);
16779
16780     while (HBtab != nullptr)
16781     {
16782         if (isTryStart)
16783         {
16784             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
16785             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
16786             //
16787             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
16788             {
16789                 // We trigger an invalid program exception here unless we have a try/fault region.
16790                 //
16791                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
16792                 {
16793                     BADCODE(
16794                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
16795                 }
16796                 else
16797                 {
16798                     // Allow a try/fault region to proceed.
16799                     assert(HBtab->HasFaultHandler());
16800                 }
16801             }
16802
16803             /* Recursively process the handler block */
16804             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
16805
16806             //  Construct the proper verification stack state
16807             //   either empty or one that contains just
16808             //   the Exception Object that we are dealing with
16809             //
16810             verCurrentState.esStackDepth = 0;
16811
16812             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
16813             {
16814                 CORINFO_CLASS_HANDLE clsHnd;
16815
16816                 if (HBtab->HasFilter())
16817                 {
16818                     clsHnd = impGetObjectClass();
16819                 }
16820                 else
16821                 {
16822                     CORINFO_RESOLVED_TOKEN resolvedToken;
16823
16824                     resolvedToken.tokenContext = impTokenLookupContextHandle;
16825                     resolvedToken.tokenScope   = info.compScopeHnd;
16826                     resolvedToken.token        = HBtab->ebdTyp;
16827                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
16828                     info.compCompHnd->resolveToken(&resolvedToken);
16829
16830                     clsHnd = resolvedToken.hClass;
16831                 }
16832
16833                 // push the catch arg on the stack, spilling to a temp if necessary
16834                 // Note: can update HBtab->ebdHndBeg!
16835                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
16836             }
16837
16838             // Queue up the handler for importing
16839             //
16840             impImportBlockPending(hndBegBB);
16841
16842             if (HBtab->HasFilter())
16843             {
16844                 /* @VERIFICATION : Ideally the end of filter state should get
16845                    propagated to the catch handler; this is an incompleteness,
16846                    but is not a security/compliance issue, since the only
16847                    interesting state is the 'thisInit' state.
16848                    */
16849
16850                 verCurrentState.esStackDepth = 0;
16851
16852                 BasicBlock* filterBB = HBtab->ebdFilter;
16853
16854                 // push the catch arg on the stack, spilling to a temp if necessary
16855                 // Note: can update HBtab->ebdFilter!
16856                 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
16857                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
16858
16859                 impImportBlockPending(filterBB);
16860             }
16861         }
16862         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
16863         {
16864             /* Recursively process the handler block */
16865
16866             verCurrentState.esStackDepth = 0;
16867
16868             // Queue up the fault handler for importing
16869             //
16870             impImportBlockPending(HBtab->ebdHndBeg);
16871         }
16872
16873         // Now process our enclosing try index (if any)
16874         //
16875         tryIndex = HBtab->ebdEnclosingTryIndex;
16876         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
16877         {
16878             HBtab = nullptr;
16879         }
16880         else
16881         {
16882             HBtab = ehGetDsc(tryIndex);
16883         }
16884     }
16885
16886     // Restore the stack contents
16887     impRestoreStackState(&blockState);
16888 }
16889
16890 //***************************************************************
16891 // Import the instructions for the given basic block. Perform verification,
16892 // throwing an exception on failure. Push any successor blocks that are
16893 // enabled for the first time, or whose verification pre-state is changed.
16894
16895 #ifdef _PREFAST_
16896 #pragma warning(push)
16897 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
16898 #endif
16899 void Compiler::impImportBlock(BasicBlock* block)
16900 {
16901     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
16902     // handle them specially. In particular, there is no IL to import for them, but we do need
16903     // to mark them as imported and put their successors on the pending import list.
16904     if (block->bbFlags & BBF_INTERNAL)
16905     {
16906         JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
16907         block->bbFlags |= BBF_IMPORTED;
16908
16909         const unsigned numSuccs = block->NumSucc();
16910         for (unsigned i = 0; i < numSuccs; i++)
16911         {
16912             impImportBlockPending(block->GetSucc(i));
16913         }
16914
16915         return;
16916     }
16917
16918     bool markImport;
16919
16920     assert(block);
16921
16922     /* Make the block globally available */
16923
16924     compCurBB = block;
16925
16926 #ifdef DEBUG
16927     /* Initialize the debug variables */
16928     impCurOpcName = "unknown";
16929     impCurOpcOffs = block->bbCodeOffs;
16930 #endif
16931
16932     /* Set the current stack state to the merged result */
16933     verResetCurrentState(block, &verCurrentState);
16934
16935     /* Now walk the code and import the IL into GenTrees */
16936
16937     struct FilterVerificationExceptionsParam
16938     {
16939         Compiler*   pThis;
16940         BasicBlock* block;
16941     };
16942     FilterVerificationExceptionsParam param;
16943
16944     param.pThis = this;
16945     param.block = block;
16946
16947     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
16948     {
16949         /* @VERIFICATION : For now, the only state propagation from try
16950            to its handler is "thisInit" state (stack is empty at start of try).
16951            In general, for state that we track in verification, we need to
16952            model the possibility that an exception might happen at any IL
16953            instruction, so we really need to merge all states that obtain
16954            between IL instructions in a try block into the start states of
16955            all handlers.
16956
16957            However, we do not allow the 'this' pointer to be uninitialized when
16958            entering most kinds of try regions (only try/fault are allowed to have
16959            an uninitialized this pointer on entry to the try).
16960
16961            Fortunately, the stack is thrown away when an exception
16962            leads to a handler, so we don't have to worry about that.
16963            We DO, however, have to worry about the "thisInit" state.
16964            But only for the try/fault case.
16965
16966            The only allowed transition is from TIS_Uninit to TIS_Init.
16967
16968            So for a try/fault region, for the fault handler block
16969            we will merge the start state of the try begin
16970            and the post-state of each block that is part of this try region.
16971         */
16972
16973         // merge the start state of the try begin
16974         //
16975         if (pParam->block->bbFlags & BBF_TRY_BEG)
16976         {
16977             pParam->pThis->impVerifyEHBlock(pParam->block, true);
16978         }
16979
16980         pParam->pThis->impImportBlockCode(pParam->block);
16981
16982         // As discussed above:
16983         // merge the post-state of each block that is part of this try region
16984         //
16985         if (pParam->block->hasTryIndex())
16986         {
16987             pParam->pThis->impVerifyEHBlock(pParam->block, false);
16988         }
16989     }
16990     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
16991     {
16992         verHandleVerificationFailure(block DEBUGARG(false));
16993     }
16994     PAL_ENDTRY
16995
16996     if (compDonotInline())
16997     {
16998         return;
16999     }
17000
17001     assert(!compDonotInline());
17002
17003     markImport = false;
17004
17005 SPILLSTACK:
17006
17007     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
17008     bool        reimportSpillClique = false;
17009     BasicBlock* tgtBlock            = nullptr;
17010
17011     /* If the stack is non-empty, we might have to spill its contents */
17012
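    // Any values still on the IL stack at this point are spilled to temps that are
    // shared by the block's spill clique, so each successor can reload them with
    // types that agree across all of its predecessors.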
17013     if (verCurrentState.esStackDepth != 0)
17014     {
17015         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
17016                                   // on the stack, its lifetime is hard to determine; simply
17017                                   // don't reuse such temps.
17018
17019         GenTree* addStmt = nullptr;
17020
17021         /* Do the successors of 'block' have any other predecessors?
17022            We do not want to do some of the optimizations related to multiRef
17023            if we can reimport blocks */
17024
17025         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
17026
17027         switch (block->bbJumpKind)
17028         {
17029             case BBJ_COND:
17030
17031                 /* Temporarily remove the 'jtrue' from the end of the tree list */
17032
17033                 assert(impTreeLast);
17034                 assert(impTreeLast->gtOper == GT_STMT);
17035                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
17036
17037                 addStmt     = impTreeLast;
17038                 impTreeLast = impTreeLast->gtPrev;
17039
17040                 /* Note if the next block has more than one ancestor */
17041
17042                 multRef |= block->bbNext->bbRefs;
17043
17044                 /* Does the next block have temps assigned? */
17045
17046                 baseTmp  = block->bbNext->bbStkTempsIn;
17047                 tgtBlock = block->bbNext;
17048
17049                 if (baseTmp != NO_BASE_TMP)
17050                 {
17051                     break;
17052                 }
17053
17054                 /* Try the target of the jump then */
17055
17056                 multRef |= block->bbJumpDest->bbRefs;
17057                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
17058                 tgtBlock = block->bbJumpDest;
17059                 break;
17060
17061             case BBJ_ALWAYS:
17062                 multRef |= block->bbJumpDest->bbRefs;
17063                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
17064                 tgtBlock = block->bbJumpDest;
17065                 break;
17066
17067             case BBJ_NONE:
17068                 multRef |= block->bbNext->bbRefs;
17069                 baseTmp  = block->bbNext->bbStkTempsIn;
17070                 tgtBlock = block->bbNext;
17071                 break;
17072
17073             case BBJ_SWITCH:
17074
17075                 BasicBlock** jmpTab;
17076                 unsigned     jmpCnt;
17077
17078                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
17079
17080                 assert(impTreeLast);
17081                 assert(impTreeLast->gtOper == GT_STMT);
17082                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
17083
17084                 addStmt     = impTreeLast;
17085                 impTreeLast = impTreeLast->gtPrev;
17086
17087                 jmpCnt = block->bbJumpSwt->bbsCount;
17088                 jmpTab = block->bbJumpSwt->bbsDstTab;
17089
17090                 do
17091                 {
17092                     tgtBlock = (*jmpTab);
17093
17094                     multRef |= tgtBlock->bbRefs;
17095
17096                     // Thanks to spill cliques, we should have assigned all or none
17097                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
17098                     baseTmp = tgtBlock->bbStkTempsIn;
17099                     if (multRef > 1)
17100                     {
17101                         break;
17102                     }
17103                 } while (++jmpTab, --jmpCnt);
17104
17105                 break;
17106
17107             case BBJ_CALLFINALLY:
17108             case BBJ_EHCATCHRET:
17109             case BBJ_RETURN:
17110             case BBJ_EHFINALLYRET:
17111             case BBJ_EHFILTERRET:
17112             case BBJ_THROW:
17113                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
17114                 break;
17115
17116             default:
17117                 noway_assert(!"Unexpected bbJumpKind");
17118                 break;
17119         }
17120
17121         assert(multRef >= 1);
17122
17123         /* Do we have a base temp number? */
17124
17125         bool newTemps = (baseTmp == NO_BASE_TMP);
17126
17127         if (newTemps)
17128         {
17129             /* Grab enough temps for the whole stack */
17130             baseTmp = impGetSpillTmpBase(block);
17131         }
17132
17133         /* Spill all stack entries into temps */
17134         unsigned level, tempNum;
17135
17136         JITDUMP("\nSpilling stack entries into temps\n");
17137         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
17138         {
17139             GenTree* tree = verCurrentState.esStack[level].val;
17140
17141             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
17142                the other. This should merge to a byref in unverifiable code.
17143                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
17144                successor would be imported assuming there was a TYP_I_IMPL on
17145                the stack. Thus the value would not get GC-tracked. Hence,
17146                change the temp to TYP_BYREF and reimport the successors.
17147                Note: We should only allow this in unverifiable code.
17148             */
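            // A hypothetical unverifiable IL fragment of the kind described above:
            //
            //          brtrue.s  USE_BYREF
            //          ldc.i4.0              // this path pushes an int32 0
            //          br.s      MERGE
            //   USE_BYREF:
            //          ldloca.s  0           // this path pushes a byref
            //   MERGE:
            //          ...                   // one stack slot: int on one edge, byref on the other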
17149             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
17150             {
17151                 lvaTable[tempNum].lvType = TYP_BYREF;
17152                 impReimportMarkSuccessors(block);
17153                 markImport = true;
17154             }
17155
17156 #ifdef _TARGET_64BIT_
17157             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
17158             {
17159                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
17160                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
17161                 {
17162                     // Merge the current state into the entry state of block;
17163                     // the call to verMergeEntryStates must have changed
17164                     // the entry state of the block by merging the int local var
17165                     // and the native-int stack entry.
17166                     bool changed = false;
17167                     if (verMergeEntryStates(tgtBlock, &changed))
17168                     {
17169                         impRetypeEntryStateTemps(tgtBlock);
17170                         impReimportBlockPending(tgtBlock);
17171                         assert(changed);
17172                     }
17173                     else
17174                     {
17175                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
17176                         break;
17177                     }
17178                 }
17179
17180                 // Some other block in the spill clique set this to "int", but now we have "native int".
17181                 // Change the type and go back to re-import any blocks that used the wrong type.
17182                 lvaTable[tempNum].lvType = TYP_I_IMPL;
17183                 reimportSpillClique      = true;
17184             }
17185             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
17186             {
17187                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
17188                 // Insert a sign-extension to "native int" so we match the clique.
17189                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
17190             }
17191
17192             // Consider the case where one branch left a 'byref' on the stack and the other leaves
17193             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
17194             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
17195             // behavior instead of asserting and then generating bad code (where we save/restore the
17196             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
17197             // imported already, we need to change the type of the local and reimport the spill clique.
17198             // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
17199             // the 'byref' size.
17200             if (!tiVerificationNeeded)
17201             {
17202                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
17203                 {
17204                     // Some other block in the spill clique set this to "int", but now we have "byref".
17205                     // Change the type and go back to re-import any blocks that used the wrong type.
17206                     lvaTable[tempNum].lvType = TYP_BYREF;
17207                     reimportSpillClique      = true;
17208                 }
17209                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
17210                 {
17211                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
17212                     // Insert a sign-extension to "native int" so we match the clique size.
17213                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
17214                 }
17215             }
17216 #endif // _TARGET_64BIT_
17217
17218             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
17219             {
17220                 // Some other block in the spill clique set this to "float", but now we have "double".
17221                 // Change the type and go back to re-import any blocks that used the wrong type.
17222                 lvaTable[tempNum].lvType = TYP_DOUBLE;
17223                 reimportSpillClique      = true;
17224             }
17225             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
17226             {
17227                 // Spill clique has decided this should be "double", but this block only pushes a "float".
17228                 // Insert a cast to "double" so we match the clique.
17229                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
17230             }
17231
17232             /* If addStmt has a reference to tempNum (can only happen if we
17233                are spilling to the temps already used by a previous block),
17234                we need to spill addStmt */
17235
17236             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
17237             {
17238                 GenTree* addTree = addStmt->gtStmt.gtStmtExpr;
17239
17240                 if (addTree->gtOper == GT_JTRUE)
17241                 {
17242                     GenTree* relOp = addTree->gtOp.gtOp1;
17243                     assert(relOp->OperIsCompare());
17244
17245                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
17246
17247                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
17248                     {
17249                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
17250                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
17251                         type              = genActualType(lvaTable[temp].TypeGet());
17252                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
17253                     }
17254
17255                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
17256                     {
17257                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
17258                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
17259                         type              = genActualType(lvaTable[temp].TypeGet());
17260                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
17261                     }
17262                 }
17263                 else
17264                 {
17265                     assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
17266
17267                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
17268                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
17269                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
17270                 }
17271             }
17272
17273             /* Spill the stack entry, and replace with the temp */
17274
17275             if (!impSpillStackEntry(level, tempNum
17276 #ifdef DEBUG
17277                                     ,
17278                                     true, "Spill Stack Entry"
17279 #endif
17280                                     ))
17281             {
17282                 if (markImport)
17283                 {
17284                     BADCODE("bad stack state");
17285                 }
17286
17287                 // Oops. Something went wrong when spilling. Bad code.
17288                 verHandleVerificationFailure(block DEBUGARG(true));
17289
17290                 goto SPILLSTACK;
17291             }
17292         }
17293
17294         /* Put back the 'jtrue'/'switch' if we removed it earlier */
17295
17296         if (addStmt)
17297         {
17298             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
17299         }
17300     }
17301
17302     // Some of the append/spill logic works on compCurBB
17303
17304     assert(compCurBB == block);
17305
17306     /* Save the tree list in the block */
17307     impEndTreeList(block);
17308
17309     // impEndTreeList sets BBF_IMPORTED on the block
17310     // We do *NOT* want to set it later than this because
17311     // impReimportSpillClique might clear it if this block is both a
17312     // predecessor and successor in the current spill clique
17313     assert(block->bbFlags & BBF_IMPORTED);
17314
17315     // If we had an int/native int, or float/double collision, we need to re-import
17316     if (reimportSpillClique)
17317     {
17318         // This will re-import all the successors of block (as well as each of their predecessors)
17319         impReimportSpillClique(block);
17320
17321         // For blocks that haven't been imported yet, we still need to mark them as pending import.
17322         const unsigned numSuccs = block->NumSucc();
17323         for (unsigned i = 0; i < numSuccs; i++)
17324         {
17325             BasicBlock* succ = block->GetSucc(i);
17326             if ((succ->bbFlags & BBF_IMPORTED) == 0)
17327             {
17328                 impImportBlockPending(succ);
17329             }
17330         }
17331     }
17332     else // the normal case
17333     {
17334         // otherwise just import the successors of block
17335
17336         /* Does this block jump to any other blocks? */
17337         const unsigned numSuccs = block->NumSucc();
17338         for (unsigned i = 0; i < numSuccs; i++)
17339         {
17340             impImportBlockPending(block->GetSucc(i));
17341         }
17342     }
17343 }
17344 #ifdef _PREFAST_
17345 #pragma warning(pop)
17346 #endif
17347
17348 /*****************************************************************************/
17349 //
17350 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
17351 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
17352 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
17353 // (its "pre-state").
17354
17355 void Compiler::impImportBlockPending(BasicBlock* block)
17356 {
17357 #ifdef DEBUG
17358     if (verbose)
17359     {
17360         printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum);
17361     }
17362 #endif
17363
17364     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
17365     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
17366     // (When we're doing verification, we always attempt the merge to detect verification errors.)
17367
17368     // If the block has not been imported, add to pending set.
17369     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
17370
17371     // Initialize bbEntryState just the first time we try to add this block to the pending list.
17372     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set;
17373     // we use NULL to indicate the 'common' state to avoid memory allocation.
17374     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
17375         (impGetPendingBlockMember(block) == 0))
17376     {
17377         verInitBBEntryState(block, &verCurrentState);
17378         assert(block->bbStkDepth == 0);
17379         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
17380         assert(addToPending);
17381         assert(impGetPendingBlockMember(block) == 0);
17382     }
17383     else
17384     {
17385         // The stack should have the same height on entry to the block from all its predecessors.
17386         if (block->bbStkDepth != verCurrentState.esStackDepth)
17387         {
17388 #ifdef DEBUG
17389             char buffer[400];
17390             sprintf_s(buffer, sizeof(buffer),
17391                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
17392                       "Previous depth was %d, current depth is %d",
17393                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
17394                       verCurrentState.esStackDepth);
17395             buffer[400 - 1] = 0;
17396             NO_WAY(buffer);
17397 #else
17398             NO_WAY("Block entered with different stack depths");
17399 #endif
17400         }
17401
17402         // Additionally, if we need to verify, merge the verification state.
17403         if (tiVerificationNeeded)
17404         {
17405             // Merge the current state into the entry state of block; if this does not change the entry state
17406             // by merging, do not add the block to the pending-list.
17407             bool changed = false;
17408             if (!verMergeEntryStates(block, &changed))
17409             {
17410                 block->bbFlags |= BBF_FAILED_VERIFICATION;
17411                 addToPending = true; // We will pop it off, and check the flag set above.
17412             }
17413             else if (changed)
17414             {
17415                 addToPending = true;
17416
17417                 JITDUMP("Adding " FMT_BB " to pending set due to new merge result\n", block->bbNum);
17418             }
17419         }
17420
17421         if (!addToPending)
17422         {
17423             return;
17424         }
17425
17426         if (block->bbStkDepth > 0)
17427         {
17428             // We need to fix the types of any spill temps that might have changed:
17429             //   int->native int, float->double, int->byref, etc.
17430             impRetypeEntryStateTemps(block);
17431         }
17432
17433         // OK, we must add to the pending list, if it's not already in it.
17434         if (impGetPendingBlockMember(block) != 0)
17435         {
17436             return;
17437         }
17438     }
17439
17440     // Get an entry to add to the pending list
17441
17442     PendingDsc* dsc;
17443
17444     if (impPendingFree)
17445     {
17446         // We can reuse one of the freed up dscs.
17447         dsc            = impPendingFree;
17448         impPendingFree = dsc->pdNext;
17449     }
17450     else
17451     {
17452         // We have to create a new dsc
17453         dsc = new (this, CMK_Unknown) PendingDsc;
17454     }
17455
17456     dsc->pdBB                 = block;
17457     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
17458     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
17459
17460     // Save the stack trees for later
17461
17462     if (verCurrentState.esStackDepth)
17463     {
17464         impSaveStackState(&dsc->pdSavedStack, false);
17465     }
17466
17467     // Add the entry to the pending list
17468
17469     dsc->pdNext    = impPendingList;
17470     impPendingList = dsc;
17471     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17472
17473     // Various assertions require us to now consider the block as not imported (at least for
17474     // the final time...)
17475     block->bbFlags &= ~BBF_IMPORTED;
17476
17477 #ifdef DEBUG
17478     if (verbose && 0)
17479     {
17480         printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
17481     }
17482 #endif
17483 }
17484
17485 /*****************************************************************************/
17486 //
17487 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
17488 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
17489 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
17490
17491 void Compiler::impReimportBlockPending(BasicBlock* block)
17492 {
17493     JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum);
17494
17495     assert(block->bbFlags & BBF_IMPORTED);
17496
17497     // OK, we must add to the pending list, if it's not already in it.
17498     if (impGetPendingBlockMember(block) != 0)
17499     {
17500         return;
17501     }
17502
17503     // Get an entry to add to the pending list
17504
17505     PendingDsc* dsc;
17506
17507     if (impPendingFree)
17508     {
17509         // We can reuse one of the freed up dscs.
17510         dsc            = impPendingFree;
17511         impPendingFree = dsc->pdNext;
17512     }
17513     else
17514     {
17515         // We have to create a new dsc
17516         dsc = new (this, CMK_ImpStack) PendingDsc;
17517     }
17518
17519     dsc->pdBB = block;
17520
17521     if (block->bbEntryState)
17522     {
17523         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
17524         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
17525         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
17526     }
17527     else
17528     {
17529         dsc->pdThisPtrInit        = TIS_Bottom;
17530         dsc->pdSavedStack.ssDepth = 0;
17531         dsc->pdSavedStack.ssTrees = nullptr;
17532     }
17533
17534     // Add the entry to the pending list
17535
17536     dsc->pdNext    = impPendingList;
17537     impPendingList = dsc;
17538     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17539
17540     // Various assertions require us to now consider the block as not imported (at least for
17541     // the final time...)
17542     block->bbFlags &= ~BBF_IMPORTED;
17543
17544 #ifdef DEBUG
17545     if (verbose && 0)
17546     {
17547         printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
17548     }
17549 #endif
17550 }
17551
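// BlockListNode allocation: nodes released via FreeBlockListNode are kept on a
// per-compiler free list and reused here before falling back to the
// CMK_BasicBlock arena allocator.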
17552 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
17553 {
17554     if (comp->impBlockListNodeFreeList == nullptr)
17555     {
17556         return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1);
17557     }
17558     else
17559     {
17560         BlockListNode* res             = comp->impBlockListNodeFreeList;
17561         comp->impBlockListNodeFreeList = res->m_next;
17562         return res;
17563     }
17564 }
17565
17566 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
17567 {
17568     node->m_next             = impBlockListNodeFreeList;
17569     impBlockListNodeFreeList = node;
17570 }
17571
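// Walk the spill clique that 'block' belongs to as a predecessor, invoking
// 'callback' for every member found. The clique is grown to a fixed point by
// alternately adding the successors of known predecessor members and the
// (cheap) predecessors of known successor members.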
17572 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
17573 {
17574     bool toDo = true;
17575
17576     noway_assert(!fgComputePredsDone);
17577     if (!fgCheapPredsValid)
17578     {
17579         fgComputeCheapPreds();
17580     }
17581
17582     BlockListNode* succCliqueToDo = nullptr;
17583     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
17584     while (toDo)
17585     {
17586         toDo = false;
17587         // Look at the successors of every member of the predecessor to-do list.
17588         while (predCliqueToDo != nullptr)
17589         {
17590             BlockListNode* node = predCliqueToDo;
17591             predCliqueToDo      = node->m_next;
17592             BasicBlock* blk     = node->m_blk;
17593             FreeBlockListNode(node);
17594
17595             const unsigned numSuccs = blk->NumSucc();
17596             for (unsigned succNum = 0; succNum < numSuccs; succNum++)
17597             {
17598                 BasicBlock* succ = blk->GetSucc(succNum);
17599                 // If it's not already in the clique, add it, and also add it
17600                 // as a member of the successor "toDo" set.
17601                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
17602                 {
17603                     callback->Visit(SpillCliqueSucc, succ);
17604                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
17605                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
17606                     toDo           = true;
17607                 }
17608             }
17609         }
17610         // Look at the predecessors of every member of the successor to-do list.
17611         while (succCliqueToDo != nullptr)
17612         {
17613             BlockListNode* node = succCliqueToDo;
17614             succCliqueToDo      = node->m_next;
17615             BasicBlock* blk     = node->m_blk;
17616             FreeBlockListNode(node);
17617
17618             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
17619             {
17620                 BasicBlock* predBlock = pred->block;
17621                 // If it's not already in the clique, add it, and also add it
17622                 // as a member of the predecessor "toDo" set.
17623                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
17624                 {
17625                     callback->Visit(SpillCliquePred, predBlock);
17626                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
17627                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
17628                     toDo           = true;
17629                 }
17630             }
17631         }
17632     }
17633
17634     // If this fails, it means we didn't walk the spill clique properly and somehow managed
17635     // to miss walking back to include the predecessor we started from.
17636     // The most likely cause: missing or out-of-date bbPreds.
17637     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
17638 }
17639
17640 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17641 {
17642     if (predOrSucc == SpillCliqueSucc)
17643     {
17644         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
17645         blk->bbStkTempsIn = m_baseTmp;
17646     }
17647     else
17648     {
17649         assert(predOrSucc == SpillCliquePred);
17650         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
17651         blk->bbStkTempsOut = m_baseTmp;
17652     }
17653 }
17654
17655 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17656 {
17657     // For Preds we could be a little smarter and just find the existing store
17658     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
17659     // just re-import the whole block (just like we do for successors)
17660
17661     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
17662     {
17663         // If we haven't imported this block and we're not going to (because it isn't on
17664         // the pending list) then just ignore it for now.
17665
17666         // This block has either never been imported (EntryState == NULL) or it failed
17667         // verification. Neither state requires us to force it to be imported now.
17668         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
17669         return;
17670     }
17671
17672     // For successors we have a valid verCurrentState, so just mark them for reimport
17673     // the 'normal' way
17674     // Unlike predecessors, we *DO* need to reimport the current block because the
17675     // initial import had the wrong entry state types.
17676     // Similarly, blocks that are currently on the pending list still need to call
17677     // impImportBlockPending to fixup their entry state.
17678     if (predOrSucc == SpillCliqueSucc)
17679     {
17680         m_pComp->impReimportMarkBlock(blk);
17681
17682         // Set the current stack state to that of the blk->bbEntryState
17683         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
17684         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
17685
17686         m_pComp->impImportBlockPending(blk);
17687     }
17688     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
17689     {
17690         // As described above, we are only visiting predecessors so they can
17691         // add the appropriate casts. Since we have already done that for the current
17692         // block, it does not need to be reimported.
17693         // Nor do we need to reimport blocks that are still pending, but not yet
17694         // imported.
17695         //
17696         // For predecessors, we have no state to seed the EntryState, so we just have
17697         // to assume the existing one is correct.
17698         // If the block is also a successor, it will get the EntryState properly
17699         // updated when it is visited as a successor in the above "if" block.
17700         assert(predOrSucc == SpillCliquePred);
17701         m_pComp->impReimportBlockPending(blk);
17702     }
17703 }
17704
17705 // Re-type the incoming lclVar nodes to match the varDsc.
17706 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
17707 {
17708     if (blk->bbEntryState != nullptr)
17709     {
17710         EntryState* es = blk->bbEntryState;
17711         for (unsigned level = 0; level < es->esStackDepth; level++)
17712         {
17713             GenTree* tree = es->esStack[level].val;
17714             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
17715             {
17716                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
17717                 noway_assert(lclNum < lvaCount);
17718                 LclVarDsc* varDsc              = lvaTable + lclNum;
17719                 es->esStack[level].val->gtType = varDsc->TypeGet();
17720             }
17721         }
17722     }
17723 }
17724
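// Return the base temp number used to spill the stack at the end of 'block'.
// If the block was already assigned one (as a spill-clique predecessor), reuse
// it; otherwise grab enough temps for the current stack and propagate the new
// base to every member of the block's spill clique.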
17725 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
17726 {
17727     if (block->bbStkTempsOut != NO_BASE_TMP)
17728     {
17729         return block->bbStkTempsOut;
17730     }
17731
17732 #ifdef DEBUG
17733     if (verbose)
17734     {
17735         printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum);
17736     }
17737 #endif // DEBUG
17738
17739     // Otherwise, choose one, and propagate to all members of the spill clique.
17740     // Grab enough temps for the whole stack.
17741     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
17742     SetSpillTempsBase callback(baseTmp);
17743
17744     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
17745     // to one spill clique, and similarly can only be the successor to one spill clique.
17746     impWalkSpillCliqueFromPred(block, &callback);
17747
17748     return baseTmp;
17749 }
17750
17751 void Compiler::impReimportSpillClique(BasicBlock* block)
17752 {
17753 #ifdef DEBUG
17754     if (verbose)
17755     {
17756         printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum);
17757     }
17758 #endif // DEBUG
17759
17760     // If we get here, it is because this block is already part of a spill clique
17761     // and one predecessor had an outgoing live stack slot of type int, and this
17762     // block has an outgoing live stack slot of type native int.
17763     // We need to reset these before traversal because they have already been set
17764     // by the previous walk to determine all the members of the spill clique.
17765     impInlineRoot()->impSpillCliquePredMembers.Reset();
17766     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
17767
17768     ReimportSpillClique callback(this);
17769
17770     impWalkSpillCliqueFromPred(block, &callback);
17771 }
17772
17773 // Set the pre-state of "block" (which should not have a pre-state allocated) to
17774 // a copy of "srcState", cloning tree pointers as required.
17775 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
17776 {
17777     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
17778     {
17779         block->bbEntryState = nullptr;
17780         return;
17781     }
17782
17783     block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1);
17784
17785     // block->bbEntryState.esRefcount = 1;
17786
17787     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
17788     block->bbEntryState->thisInitialized = TIS_Bottom;
17789
17790     if (srcState->esStackDepth > 0)
17791     {
17792         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
17793         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
17794
17795         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
17796         for (unsigned level = 0; level < srcState->esStackDepth; level++)
17797         {
17798             GenTree* tree                           = srcState->esStack[level].val;
17799             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
17800         }
17801     }
17802
17803     if (verTrackObjCtorInitState)
17804     {
17805         verSetThisInit(block, srcState->thisInitialized);
17806     }
17807
17808     return;
17809 }
17810
17811 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
17812 {
17813     assert(tis != TIS_Bottom); // Precondition.
17814     if (block->bbEntryState == nullptr)
17815     {
17816         block->bbEntryState = new (this, CMK_Unknown) EntryState();
17817     }
17818
17819     block->bbEntryState->thisInitialized = tis;
17820 }
17821
17822 /*
17823  * Resets the current state to the state at the start of the basic block
17824  */
17825 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
17826 {
17827
17828     if (block->bbEntryState == nullptr)
17829     {
17830         destState->esStackDepth    = 0;
17831         destState->thisInitialized = TIS_Bottom;
17832         return;
17833     }
17834
17835     destState->esStackDepth = block->bbEntryState->esStackDepth;
17836
17837     if (destState->esStackDepth > 0)
17838     {
17839         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
17840
17841         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
17842     }
17843
17844     destState->thisInitialized = block->bbThisOnEntry();
17845
17846     return;
17847 }
17848
17849 ThisInitState BasicBlock::bbThisOnEntry()
17850 {
17851     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
17852 }
17853
17854 unsigned BasicBlock::bbStackDepthOnEntry()
17855 {
17856     return (bbEntryState ? bbEntryState->esStackDepth : 0);
17857 }
17858
17859 void BasicBlock::bbSetStack(void* stackBuffer)
17860 {
17861     assert(bbEntryState);
17862     assert(stackBuffer);
17863     bbEntryState->esStack = (StackEntry*)stackBuffer;
17864 }
17865
17866 StackEntry* BasicBlock::bbStackOnEntry()
17867 {
17868     assert(bbEntryState);
17869     return bbEntryState->esStack;
17870 }
17871
17872 void Compiler::verInitCurrentState()
17873 {
17874     verTrackObjCtorInitState        = FALSE;
17875     verCurrentState.thisInitialized = TIS_Bottom;
17876
17877     if (tiVerificationNeeded)
17878     {
17879         // Track this ptr initialization
17880         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
17881         {
17882             verTrackObjCtorInitState        = TRUE;
17883             verCurrentState.thisInitialized = TIS_Uninit;
17884         }
17885     }
17886
17887     // initialize stack info
17888
17889     verCurrentState.esStackDepth = 0;
17890     assert(verCurrentState.esStack != nullptr);
17891
17892     // copy current state to entry state of first BB
17893     verInitBBEntryState(fgFirstBB, &verCurrentState);
17894 }
17895
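// Return the root compiler instance of the inlining tree: 'this' when we are
// not compiling an inlinee, otherwise the compiler for the root method.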
17896 Compiler* Compiler::impInlineRoot()
17897 {
17898     if (impInlineInfo == nullptr)
17899     {
17900         return this;
17901     }
17902     else
17903     {
17904         return impInlineInfo->InlineRoot;
17905     }
17906 }
17907
17908 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
17909 {
17910     if (predOrSucc == SpillCliquePred)
17911     {
17912         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
17913     }
17914     else
17915     {
17916         assert(predOrSucc == SpillCliqueSucc);
17917         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
17918     }
17919 }
17920
17921 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
17922 {
17923     if (predOrSucc == SpillCliquePred)
17924     {
17925         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
17926     }
17927     else
17928     {
17929         assert(predOrSucc == SpillCliqueSucc);
17930         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
17931     }
17932 }
17933
17934 /*****************************************************************************
17935  *
17936  *  Convert the instrs ("import") into our internal format (trees). The
17937  *  basic flowgraph has already been constructed and is passed in.
17938  */
17939
17940 void Compiler::impImport(BasicBlock* method)
17941 {
17942 #ifdef DEBUG
17943     if (verbose)
17944     {
17945         printf("*************** In impImport() for %s\n", info.compFullName);
17946     }
17947 #endif
17948
17949     Compiler* inlineRoot = impInlineRoot();
17950
17951     if (info.compMaxStack <= SMALL_STACK_SIZE)
17952     {
17953         impStkSize = SMALL_STACK_SIZE;
17954     }
17955     else
17956     {
17957         impStkSize = info.compMaxStack;
17958     }
17959
17960     if (this == inlineRoot)
17961     {
17962         // Allocate the stack contents
17963         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17964     }
17965     else
17966     {
17967         // This is the inlinee compiler; steal the stack from the inliner compiler
17968         // (after ensuring that it is large enough).
17969         if (inlineRoot->impStkSize < impStkSize)
17970         {
17971             inlineRoot->impStkSize              = impStkSize;
17972             inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17973         }
17974
17975         verCurrentState.esStack = inlineRoot->verCurrentState.esStack;
17976     }
17977
17978     // initialize the entry state at start of method
17979     verInitCurrentState();
17980
17981     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
17982     if (this == inlineRoot) // These are only used on the root of the inlining tree.
17983     {
17984         // We have initialized these previously, but to size 0.  Make them larger.
17985         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
17986         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
17987         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
17988     }
17989     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
17990     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
17991     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
17992     impBlockListNodeFreeList = nullptr;
17993
17994 #ifdef DEBUG
17995     impLastILoffsStmt   = nullptr;
17996     impNestedStackSpill = false;
17997 #endif
17998     impBoxTemp = BAD_VAR_NUM;
17999
18000     impPendingList = impPendingFree = nullptr;
18001
18002     /* Add the entry-point to the worker-list */
18003
18004     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
18005     // from EH normalization.
18006     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
18007     // out.
18008     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
18009     {
18010         // Treat these as imported.
18011         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
18012         JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", method->bbNum);
18013         method->bbFlags |= BBF_IMPORTED;
18014     }
18015
18016     impImportBlockPending(method);
18017
18018     /* Import blocks in the worker-list until there are no more */
18019
18020     while (impPendingList)
18021     {
18022         /* Remove the entry at the front of the list */
18023
18024         PendingDsc* dsc = impPendingList;
18025         impPendingList  = impPendingList->pdNext;
18026         impSetPendingBlockMember(dsc->pdBB, 0);
18027
18028         /* Restore the stack state */
18029
18030         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
18031         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
18032         if (verCurrentState.esStackDepth)
18033         {
18034             impRestoreStackState(&dsc->pdSavedStack);
18035         }
18036
18037         /* Add the entry to the free list for reuse */
18038
18039         dsc->pdNext    = impPendingFree;
18040         impPendingFree = dsc;
18041
18042         /* Now import the block */
18043
18044         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
18045         {
18046
18047 #ifdef _TARGET_64BIT_
18048             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
18049             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
18050             // method for further explanation on why we raise this exception instead of making the jitted
18051             // code throw the verification exception during execution.
18052             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
18053             {
18054                 BADCODE("Basic block marked as not verifiable");
18055             }
18056             else
18057 #endif // _TARGET_64BIT_
18058             {
18059                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
18060                 impEndTreeList(dsc->pdBB);
18061             }
18062         }
18063         else
18064         {
18065             impImportBlock(dsc->pdBB);
18066
18067             if (compDonotInline())
18068             {
18069                 return;
18070             }
18071             if (compIsForImportOnly() && !tiVerificationNeeded)
18072             {
18073                 return;
18074             }
18075         }
18076     }
18077
18078 #ifdef DEBUG
18079     if (verbose && info.compXcptnsCount)
18080     {
18081         printf("\nAfter impImport() added block for try,catch,finally");
18082         fgDispBasicBlocks();
18083         printf("\n");
18084     }
18085
18086     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
18087     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
18088     {
18089         block->bbFlags &= ~BBF_VISITED;
18090     }
18091 #endif
18092
18093     assert(!compIsForInlining() || !tiVerificationNeeded);
18094 }
18095
18096 // Checks if a typeinfo (usually stored in the type stack) is a struct.
18097 // The invariant here is that if it's not a ref or a method and has a class handle
18098 // it's a valuetype
18099 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
18100 {
18101     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
18102     {
18103         return true;
18104     }
18105     else
18106     {
18107         return false;
18108     }
18109 }
18110
18111 /*****************************************************************************
18112  *  Check to see if the tree is the address of a local or
18113  *  the address of a field in a local.
18114  *
18115  *  *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
18116  *
18117  */
18118
18119 BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
18120 {
18121     if (tree->gtOper != GT_ADDR)
18122     {
18123         return FALSE;
18124     }
18125
18126     GenTree* op = tree->gtOp.gtOp1;
18127     while (op->gtOper == GT_FIELD)
18128     {
18129         op = op->gtField.gtFldObj;
18130         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
18131         {
18132             op = op->gtOp.gtOp1;
18133         }
18134         else
18135         {
18136             return FALSE;
18137         }
18138     }
18139
18140     if (op->gtOper == GT_LCL_VAR)
18141     {
18142         *lclVarTreeOut = op;
18143         return TRUE;
18144     }
18145     else
18146     {
18147         return FALSE;
18148     }
18149 }
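// Illustrative shapes handled by impIsAddressInLocal (a sketch derived from the walk
// above; tree dumps are simplified for exposition):
//
//   GT_ADDR(GT_LCL_VAR V02)                        -> TRUE,  *lclVarTreeOut = V02
//   GT_ADDR(GT_FIELD(GT_ADDR(GT_LCL_VAR V02)))     -> TRUE,  *lclVarTreeOut = V02
//   GT_ADDR(GT_FIELD(<null>))  // static field     -> FALSE
//   GT_LCL_VAR V02             // no GT_ADDR root  -> FALSE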
18150
18151 //------------------------------------------------------------------------
18152 // impMakeDiscretionaryInlineObservations: make observations that help
18153 // determine the profitability of a discretionary inline
18154 //
18155 // Arguments:
18156 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
18157 //    inlineResult -- InlineResult accumulating information about this inline
18158 //
18159 // Notes:
18160 //    If inlining or prejitting the root, this method also makes
18161 //    various observations about the method that factor into inline
18162 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
18163
18164 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
18165 {
18166     assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
18167            pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
18168            );
18169
18170     // If we're really inlining, we should just have one result in play.
18171     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
18172
18173     // If this is a "forceinline" method, the JIT probably shouldn't have gone
18174     // to the trouble of estimating the native code size. Even if it did, it
18175     // shouldn't be relying on the result of this method.
18176     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
18177
18178     // Note if the caller contains NEWOBJ or NEWARR.
18179     Compiler* rootCompiler = impInlineRoot();
18180
18181     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
18182     {
18183         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
18184     }
18185
18186     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
18187     {
18188         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
18189     }
18190
18191     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
18192     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
18193
18194     if (isSpecialMethod)
18195     {
18196         if (calleeIsStatic)
18197         {
18198             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
18199         }
18200         else
18201         {
18202             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
18203         }
18204     }
18205     else if (!calleeIsStatic)
18206     {
18207         // Callee is an instance method.
18208         //
18209         // Check if the callee has the same 'this' as the root.
18210         if (pInlineInfo != nullptr)
18211         {
18212             GenTree* thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
18213             assert(thisArg);
18214             bool isSameThis = impIsThis(thisArg);
18215             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
18216         }
18217     }
18218
18219     // Note if the callee's class is a promotable struct
18220     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
18221     {
18222         assert(structPromotionHelper != nullptr);
18223         if (structPromotionHelper->CanPromoteStructType(info.compClassHnd))
18224         {
18225             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
18226         }
18227     }
18228
18229 #ifdef FEATURE_SIMD
18230
18231     // Note if this method has SIMD args or a SIMD return value
18232     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
18233     {
18234         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
18235     }
18236
18237 #endif // FEATURE_SIMD
18238
18239     // Roughly classify callsite frequency.
18240     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
18241
18242     // If this is a prejit root, or a maximally hot block...
18243     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
18244     {
18245         frequency = InlineCallsiteFrequency::HOT;
18246     }
18247     // No training data.  Look for loop-like things.
18248     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
18249     // However, give it to things nearby.
18250     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
18251              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
18252     {
18253         frequency = InlineCallsiteFrequency::LOOP;
18254     }
18255     else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
18256     {
18257         frequency = InlineCallsiteFrequency::WARM;
18258     }
18259     // Now modify the multiplier based on where we're called from.
18260     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
18261     {
18262         frequency = InlineCallsiteFrequency::RARE;
18263     }
18264     else
18265     {
18266         frequency = InlineCallsiteFrequency::BORING;
18267     }
18268
18269     // Also capture the block weight of the call site.  In the prejit
18270     // root case, assume there's some hot call site for this method.
18271     unsigned weight = 0;
18272
18273     if (pInlineInfo != nullptr)
18274     {
18275         weight = pInlineInfo->iciBlock->bbWeight;
18276     }
18277     else
18278     {
18279         weight = BB_MAX_WEIGHT;
18280     }
18281
18282     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
18283     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
18284 }
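// Readability summary of the callsite frequency classification above (a restatement of
// the checks in impMakeDiscretionaryInlineObservations, not an additional code path):
//
//   prejit root, or block weight >= BB_MAX_WEIGHT                        -> HOT
//   block has BBF_BACKWARD_JUMP and callee is not the IL caller itself   -> LOOP
//   block has profile data with weight > BB_ZERO_WEIGHT                  -> WARM
//   block is run rarely, or the callee is a class constructor (cctor)    -> RARE
//   otherwise                                                            -> BORING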
18285
18286 /*****************************************************************************
18287  This method makes the STATIC inlining decision based on the IL code.
18288  It should not make any inlining decision based on the context.
18289  If forceInline is true, then the inlining decision should not depend on
18290  performance heuristics (code size, etc.).
18291  */
18292
18293 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
18294                               CORINFO_METHOD_INFO*  methInfo,
18295                               bool                  forceInline,
18296                               InlineResult*         inlineResult)
18297 {
18298     unsigned codeSize = methInfo->ILCodeSize;
18299
18300     // We shouldn't have made up our minds yet...
18301     assert(!inlineResult->IsDecided());
18302
18303     if (methInfo->EHcount)
18304     {
18305         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
18306         return;
18307     }
18308
18309     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
18310     {
18311         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
18312         return;
18313     }
18314
18315     // For now we don't inline varargs (import code can't handle it)
18316
18317     if (methInfo->args.isVarArg())
18318     {
18319         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
18320         return;
18321     }
18322
18323     // Reject if it has too many locals.
18324     // This is currently an implementation limit due to fixed-size arrays in the
18325     // inline info, rather than a performance heuristic.
18326
18327     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
18328
18329     if (methInfo->locals.numArgs > MAX_INL_LCLS)
18330     {
18331         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
18332         return;
18333     }
18334
18335     // Make sure there aren't too many arguments.
18336     // This is currently an implementation limit due to fixed-size arrays in the
18337     // inline info, rather than a performance heuristic.
18338
18339     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
18340
18341     if (methInfo->args.numArgs > MAX_INL_ARGS)
18342     {
18343         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
18344         return;
18345     }
18346
18347     // Note force inline state
18348
18349     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
18350
18351     // Note IL code size
18352
18353     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
18354
18355     if (inlineResult->IsFailure())
18356     {
18357         return;
18358     }
18359
18360     // Make sure maxstack is not too big
18361
18362     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
18363
18364     if (inlineResult->IsFailure())
18365     {
18366         return;
18367     }
18368 }
18369
18370 /*****************************************************************************
18371  */
18372
18373 void Compiler::impCheckCanInline(GenTree*               call,
18374                                  CORINFO_METHOD_HANDLE  fncHandle,
18375                                  unsigned               methAttr,
18376                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
18377                                  InlineCandidateInfo**  ppInlineCandidateInfo,
18378                                  InlineResult*          inlineResult)
18379 {
18380     // Either EE or JIT might throw exceptions below.
18381     // If that happens, just don't inline the method.
18382
18383     struct Param
18384     {
18385         Compiler*              pThis;
18386         GenTree*               call;
18387         CORINFO_METHOD_HANDLE  fncHandle;
18388         unsigned               methAttr;
18389         CORINFO_CONTEXT_HANDLE exactContextHnd;
18390         InlineResult*          result;
18391         InlineCandidateInfo**  ppInlineCandidateInfo;
18392     } param;
18393     memset(&param, 0, sizeof(param));
18394
18395     param.pThis                 = this;
18396     param.call                  = call;
18397     param.fncHandle             = fncHandle;
18398     param.methAttr              = methAttr;
18399     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
18400     param.result                = inlineResult;
18401     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
18402
18403     bool success = eeRunWithErrorTrap<Param>(
18404         [](Param* pParam) {
18405             DWORD                  dwRestrictions = 0;
18406             CorInfoInitClassResult initClassResult;
18407
18408 #ifdef DEBUG
18409             const char* methodName;
18410             const char* className;
18411             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
18412
18413             if (JitConfig.JitNoInline())
18414             {
18415                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
18416                 goto _exit;
18417             }
18418 #endif
18419
18420             /* Try to get the code address/size for the method */
18421
18422             CORINFO_METHOD_INFO methInfo;
18423             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
18424             {
18425                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
18426                 goto _exit;
18427             }
18428
18429             bool forceInline;
18430             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
18431
18432             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
18433
18434             if (pParam->result->IsFailure())
18435             {
18436                 assert(pParam->result->IsNever());
18437                 goto _exit;
18438             }
18439
18440             // Speculatively check if initClass() can be done.
18441             // If it can be done, we will try to inline the method. If inlining
18442             // succeeds, then we will do the non-speculative initClass() and commit it.
18443             // If this speculative call to initClass() fails, there is no point
18444             // trying to inline this method.
18445             initClassResult =
18446                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
18447                                                            pParam->exactContextHnd /* context */,
18448                                                            TRUE /* speculative */);
18449
18450             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
18451             {
18452                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
18453                 goto _exit;
18454             }
18455
18456             // Give the EE the final say in whether to inline or not.
18457             // This should be last since, for verifiable code, this can be expensive.
18458
18459             /* VM Inline check also ensures that the method is verifiable if needed */
18460             CorInfoInline vmResult;
18461             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
18462                                                                   &dwRestrictions);
18463
18464             if (vmResult == INLINE_FAIL)
18465             {
18466                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
18467             }
18468             else if (vmResult == INLINE_NEVER)
18469             {
18470                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
18471             }
18472
18473             if (pParam->result->IsFailure())
18474             {
18475                 // Make sure not to report this one.  It was already reported by the VM.
18476                 pParam->result->SetReported();
18477                 goto _exit;
18478             }
18479
18480             // check for unsupported inlining restrictions
18481             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
18482
18483             if (dwRestrictions & INLINE_SAME_THIS)
18484             {
18485                 GenTree* thisArg = pParam->call->gtCall.gtCallObjp;
18486                 assert(thisArg);
18487
18488                 if (!pParam->pThis->impIsThis(thisArg))
18489                 {
18490                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
18491                     goto _exit;
18492                 }
18493             }
18494
18495             /* Get the method properties */
18496
18497             CORINFO_CLASS_HANDLE clsHandle;
18498             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
18499             unsigned clsAttr;
18500             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
18501
18502             /* Get the return type */
18503
18504             var_types fncRetType;
18505             fncRetType = pParam->call->TypeGet();
18506
18507 #ifdef DEBUG
18508             var_types fncRealRetType;
18509             fncRealRetType = JITtype2varType(methInfo.args.retType);
18510
18511             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
18512                    // <BUGNUM> VSW 288602 </BUGNUM>
18513                    // In case of IJW, we allow to assign a native pointer to a BYREF.
18514                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
18515                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
18516 #endif
18517
18518             //
18519             // Allocate an InlineCandidateInfo structure
18520             //
18521             InlineCandidateInfo* pInfo;
18522             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
18523
18524             pInfo->dwRestrictions       = dwRestrictions;
18525             pInfo->methInfo             = methInfo;
18526             pInfo->methAttr             = pParam->methAttr;
18527             pInfo->clsHandle            = clsHandle;
18528             pInfo->clsAttr              = clsAttr;
18529             pInfo->fncRetType           = fncRetType;
18530             pInfo->exactContextHnd      = pParam->exactContextHnd;
18531             pInfo->ilCallerHandle       = pParam->pThis->info.compMethodHnd;
18532             pInfo->initClassResult      = initClassResult;
18533             pInfo->preexistingSpillTemp = BAD_VAR_NUM;
18534
18535             *(pParam->ppInlineCandidateInfo) = pInfo;
18536
18537         _exit:;
18538         },
18539         &param);
18540     if (!success)
18541     {
18542         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
18543     }
18544 }
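// Note on the shape used above: candidate assessment has to survive exceptions thrown
// across the EE interface, so the work runs inside eeRunWithErrorTrap with its inputs
// and outputs packed into a local Param struct. A minimal sketch of the same pattern
// (hypothetical fields, for exposition only):
//
//   struct Param
//   {
//       Compiler*     pThis;
//       InlineResult* result;   // out: observations noted by the worker
//   } param = {this, inlineResult};
//
//   bool success = eeRunWithErrorTrap<Param>([](Param* p) { /* calls that may throw */ }, &param);
//   if (!success)
//   {
//       param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
//   }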
18545
18546 //------------------------------------------------------------------------
18547 // impInlineRecordArgInfo: record information about an inline candidate argument
18548 //
18549 // Arguments:
18550 //   pInlineInfo - inline info for the inline candidate
18551 //   curArgVal - tree for the caller actual argument value
18552 //   argNum - logical index of this argument
18553 //   inlineResult - result of ongoing inline evaluation
18554 //
18555 // Notes:
18556 //
18557 //   Checks for various inline blocking conditions and makes notes in
18558 //   the inline info arg table about the properties of the actual. These
18559 //   the inline info arg table about the properties of the actual argument. These
18560 //   properties are used later by impInlineFetchArg to determine how best to
18561
18562 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
18563                                       GenTree*      curArgVal,
18564                                       unsigned      argNum,
18565                                       InlineResult* inlineResult)
18566 {
18567     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
18568
18569     if (curArgVal->gtOper == GT_MKREFANY)
18570     {
18571         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
18572         return;
18573     }
18574
18575     inlCurArgInfo->argNode = curArgVal;
18576
18577     GenTree* lclVarTree;
18578     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
18579     {
18580         inlCurArgInfo->argIsByRefToStructLocal = true;
18581 #ifdef FEATURE_SIMD
18582         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
18583         {
18584             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
18585         }
18586 #endif // FEATURE_SIMD
18587     }
18588
18589     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
18590     {
18591         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
18592         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
18593     }
18594
18595     if (curArgVal->gtOper == GT_LCL_VAR)
18596     {
18597         inlCurArgInfo->argIsLclVar = true;
18598
18599         /* Remember the "original" argument number */
18600         curArgVal->gtLclVar.gtLclILoffs = argNum;
18601     }
18602
18603     if ((curArgVal->OperKind() & GTK_CONST) ||
18604         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
18605     {
18606         inlCurArgInfo->argIsInvariant = true;
18607         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
18608         {
18609             // Abort inlining at this call site
18610             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
18611             return;
18612         }
18613     }
18614
18615     // If the arg is a local that is address-taken, we can't safely
18616     // directly substitute it into the inlinee.
18617     //
18618     // Previously we'd accomplish this by setting "argHasLdargaOp" but
18619     // that has a stronger meaning: that the arg value can change in
18620 // the method body. Using that flag would prevent type propagation,
18621 // which is actually safe in this case.
18622     //
18623     // Instead mark the arg as having a caller local ref.
18624     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
18625     {
18626         inlCurArgInfo->argHasCallerLocalRef = true;
18627     }
18628
18629 #ifdef DEBUG
18630     if (verbose)
18631     {
18632         if (inlCurArgInfo->argIsThis)
18633         {
18634             printf("thisArg:");
18635         }
18636         else
18637         {
18638             printf("\nArgument #%u:", argNum);
18639         }
18640         if (inlCurArgInfo->argIsLclVar)
18641         {
18642             printf(" is a local var");
18643         }
18644         if (inlCurArgInfo->argIsInvariant)
18645         {
18646             printf(" is a constant");
18647         }
18648         if (inlCurArgInfo->argHasGlobRef)
18649         {
18650             printf(" has global refs");
18651         }
18652         if (inlCurArgInfo->argHasCallerLocalRef)
18653         {
18654             printf(" has caller local ref");
18655         }
18656         if (inlCurArgInfo->argHasSideEff)
18657         {
18658             printf(" has side effects");
18659         }
18660         if (inlCurArgInfo->argHasLdargaOp)
18661         {
18662             printf(" has ldarga effect");
18663         }
18664         if (inlCurArgInfo->argHasStargOp)
18665         {
18666             printf(" has starg effect");
18667         }
18668         if (inlCurArgInfo->argIsByRefToStructLocal)
18669         {
18670             printf(" is byref to a struct local");
18671         }
18672
18673         printf("\n");
18674         gtDispTree(curArgVal);
18675         printf("\n");
18676     }
18677 #endif
18678 }
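// Illustrative examples of the flags recorded above (assumed caller trees, simplified
// for exposition):
//
//   GT_CNS_INT 42                                     -> argIsInvariant
//   GT_LCL_VAR V03                                    -> argIsLclVar
//   GT_ADDR(GT_LCL_VAR V05), V05 a struct local       -> argIsByRefToStructLocal (and argIsInvariant)
//   GT_CALL C::SideEffect()                           -> argHasSideEff
//   GT_CNS_INT 0 passed as 'this'                     -> fatal CALLSITE_ARG_HAS_NULL_THIS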
18679
18680 //------------------------------------------------------------------------
18681 // impInlineInitVars: setup inline information for inlinee args and locals
18682 //
18683 // Arguments:
18684 //    pInlineInfo - inline info for the inline candidate
18685 //
18686 // Notes:
18687 //    This method primarily adds caller-supplied info to the inlArgInfo
18688 //    and sets up the lclVarInfo table.
18689 //
18690 //    For args, the inlArgInfo records properties of the actual argument
18691 //    including the tree node that produces the arg value. This node is
18692 //    usually the tree node present at the call, but may also differ in
18693 //    various ways:
18694 //    - when the call arg is a GT_RET_EXPR, we search back through the ret
18695 //      expr chain for the actual node. Note this will either be the original
18696 //      call (which will be a failed inline by this point), or the return
18697 //      expression from some set of inlines.
18698 //    - when argument type casting is needed the necessary casts are added
18699 //      around the argument node.
18700 //    - if an argument can be simplified by folding then the node here is the
18701 //      folded value.
18702 //
18703 //   The method may make observations that lead to marking this candidate as
18704 //   a failed inline. If this happens the initialization is abandoned immediately
18705 //   to try and reduce the jit time cost for a failed inline.
18706
18707 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
18708 {
18709     assert(!compIsForInlining());
18710
18711     GenTree*             call         = pInlineInfo->iciCall;
18712     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
18713     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
18714     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
18715     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
18716     InlineResult*        inlineResult = pInlineInfo->inlineResult;
18717
18718     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
18719
18720     /* init the argument struct */
18721
18722     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
18723
18724     /* Get hold of the 'this' pointer and the argument list proper */
18725
18726     GenTree* thisArg = call->gtCall.gtCallObjp;
18727     GenTree* argList = call->gtCall.gtCallArgs;
18728     unsigned argCnt  = 0; // Count of the arguments
18729
18730     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
18731
18732     if (thisArg)
18733     {
18734         inlArgInfo[0].argIsThis = true;
18735         GenTree* actualThisArg  = thisArg->gtRetExprVal();
18736         impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, inlineResult);
18737
18738         if (inlineResult->IsFailure())
18739         {
18740             return;
18741         }
18742
18743         /* Increment the argument count */
18744         argCnt++;
18745     }
18746
18747     /* Record some information about each of the arguments */
18748     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
18749
18750 #if USER_ARGS_COME_LAST
18751     unsigned typeCtxtArg = thisArg ? 1 : 0;
18752 #else  // USER_ARGS_COME_LAST
18753     unsigned typeCtxtArg = methInfo->args.totalILArgs();
18754 #endif // USER_ARGS_COME_LAST
18755
18756     for (GenTree* argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
18757     {
18758         if (argTmp == argList && hasRetBuffArg)
18759         {
18760             continue;
18761         }
18762
18763         // Ignore the type context argument
18764         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
18765         {
18766             pInlineInfo->typeContextArg = typeCtxtArg;
18767             typeCtxtArg                 = 0xFFFFFFFF;
18768             continue;
18769         }
18770
18771         assert(argTmp->gtOper == GT_LIST);
18772         GenTree* arg       = argTmp->gtOp.gtOp1;
18773         GenTree* actualArg = arg->gtRetExprVal();
18774         impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
18775
18776         if (inlineResult->IsFailure())
18777         {
18778             return;
18779         }
18780
18781         /* Increment the argument count */
18782         argCnt++;
18783     }
18784
18785     /* Make sure we got the arg number right */
18786     assert(argCnt == methInfo->args.totalILArgs());
18787
18788 #ifdef FEATURE_SIMD
18789     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
18790 #endif // FEATURE_SIMD
18791
18792     /* We have typeless opcodes, get type information from the signature */
18793
18794     if (thisArg)
18795     {
18796         var_types sigType;
18797
18798         if (clsAttr & CORINFO_FLG_VALUECLASS)
18799         {
18800             sigType = TYP_BYREF;
18801         }
18802         else
18803         {
18804             sigType = TYP_REF;
18805         }
18806
18807         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
18808         lclVarInfo[0].lclHasLdlocaOp = false;
18809
18810 #ifdef FEATURE_SIMD
18811         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
18812         // the inlining multiplier) for anything in that assembly.
18813         // But we only need to normalize it if it is a TYP_STRUCT
18814         // (which we need to do even if we have already set foundSIMDType).
18815         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
18816         {
18817             if (sigType == TYP_STRUCT)
18818             {
18819                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
18820             }
18821             foundSIMDType = true;
18822         }
18823 #endif // FEATURE_SIMD
18824         lclVarInfo[0].lclTypeInfo = sigType;
18825
18826         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
18827                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
18828                 (clsAttr & CORINFO_FLG_VALUECLASS)));
18829
18830         if (genActualType(thisArg->gtType) != genActualType(sigType))
18831         {
18832             if (sigType == TYP_REF)
18833             {
18834                 /* The argument cannot be bashed into a ref (see bug 750871) */
18835                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
18836                 return;
18837             }
18838
18839             /* This can only happen with byrefs <-> ints/shorts */
18840
18841             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
18842             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
18843
18844             if (sigType == TYP_BYREF)
18845             {
18846                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18847             }
18848             else if (thisArg->gtType == TYP_BYREF)
18849             {
18850                 assert(sigType == TYP_I_IMPL);
18851
18852                 /* If possible change the BYREF to an int */
18853                 if (thisArg->IsVarAddr())
18854                 {
18855                     thisArg->gtType              = TYP_I_IMPL;
18856                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18857                 }
18858                 else
18859                 {
18860                     /* Arguments 'int <- byref' cannot be bashed */
18861                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18862                     return;
18863                 }
18864             }
18865         }
18866     }
18867
18868     /* Init the types of the arguments and make sure the types
18869      * from the trees match the types in the signature */
18870
18871     CORINFO_ARG_LIST_HANDLE argLst;
18872     argLst = methInfo->args.args;
18873
18874     unsigned i;
18875     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
18876     {
18877         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
18878
18879         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
18880
18881 #ifdef FEATURE_SIMD
18882         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
18883         {
18884             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
18885             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
18886             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
18887             foundSIMDType = true;
18888             if (sigType == TYP_STRUCT)
18889             {
18890                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
18891                 sigType              = structType;
18892             }
18893         }
18894 #endif // FEATURE_SIMD
18895
18896         lclVarInfo[i].lclTypeInfo    = sigType;
18897         lclVarInfo[i].lclHasLdlocaOp = false;
18898
18899         /* Does the tree type match the signature type? */
18900
18901         GenTree* inlArgNode = inlArgInfo[i].argNode;
18902
18903         if (sigType != inlArgNode->gtType)
18904         {
18905             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
18906                but in bad IL cases with caller-callee signature mismatches we can see other types.
18907                Intentionally reject cases with mismatches so the jit is more flexible when
18908                encountering bad IL. */
18909
18910             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
18911                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
18912                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
18913
18914             if (!isPlausibleTypeMatch)
18915             {
18916                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
18917                 return;
18918             }
18919
18920             /* Is it a narrowing or widening cast?
18921              * Widening casts are ok since the value computed is already
18922              * normalized to an int (on the IL stack) */
18923
18924             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
18925             {
18926                 if (sigType == TYP_BYREF)
18927                 {
18928                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18929                 }
18930                 else if (inlArgNode->gtType == TYP_BYREF)
18931                 {
18932                     assert(varTypeIsIntOrI(sigType));
18933
18934                     /* If possible bash the BYREF to an int */
18935                     if (inlArgNode->IsVarAddr())
18936                     {
18937                         inlArgNode->gtType           = TYP_I_IMPL;
18938                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18939                     }
18940                     else
18941                     {
18942                         /* Arguments 'int <- byref' cannot be changed */
18943                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18944                         return;
18945                     }
18946                 }
18947                 else if (genTypeSize(sigType) < EA_PTRSIZE)
18948                 {
18949                     /* Narrowing cast */
18950
18951                     if (inlArgNode->gtOper == GT_LCL_VAR &&
18952                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
18953                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
18954                     {
18955                         /* We don't need to insert a cast here as the variable
18956                            was assigned a normalized value of the right type */
18957
18958                         continue;
18959                     }
18960
18961                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
18962
18963                     inlArgInfo[i].argIsLclVar = false;
18964
18965                     /* Try to fold the node in case we have constant arguments */
18966
18967                     if (inlArgInfo[i].argIsInvariant)
18968                     {
18969                         inlArgNode            = gtFoldExprConst(inlArgNode);
18970                         inlArgInfo[i].argNode = inlArgNode;
18971                         assert(inlArgNode->OperIsConst());
18972                     }
18973                 }
18974 #ifdef _TARGET_64BIT_
18975                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
18976                 {
18977                     // This should only happen for int -> native int widening
18978                     inlArgNode = inlArgInfo[i].argNode =
18979                         gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
18980
18981                     inlArgInfo[i].argIsLclVar = false;
18982
18983                     /* Try to fold the node in case we have constant arguments */
18984
18985                     if (inlArgInfo[i].argIsInvariant)
18986                     {
18987                         inlArgNode            = gtFoldExprConst(inlArgNode);
18988                         inlArgInfo[i].argNode = inlArgNode;
18989                         assert(inlArgNode->OperIsConst());
18990                     }
18991                 }
18992 #endif // _TARGET_64BIT_
18993             }
18994         }
18995     }
18996
18997     /* Init the types of the local variables */
18998
18999     CORINFO_ARG_LIST_HANDLE localsSig;
19000     localsSig = methInfo->locals.args;
19001
19002     for (i = 0; i < methInfo->locals.numArgs; i++)
19003     {
19004         bool      isPinned;
19005         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
19006
19007         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
19008         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
19009         lclVarInfo[i + argCnt].lclTypeInfo    = type;
19010
19011         if (varTypeIsGC(type))
19012         {
19013             pInlineInfo->numberOfGcRefLocals++;
19014         }
19015
19016         if (isPinned)
19017         {
19018             // Pinned locals may cause inlines to fail.
19019             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
19020             if (inlineResult->IsFailure())
19021             {
19022                 return;
19023             }
19024         }
19025
19026         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
19027
19028         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
19029         // out on the inline.
19030         if (type == TYP_STRUCT)
19031         {
19032             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
19033             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
19034             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
19035             {
19036                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
19037                 if (inlineResult->IsFailure())
19038                 {
19039                     return;
19040                 }
19041
19042                 // Do further notification in the case where the call site is rare; some policies do
19043                 // not track the relative hotness of call sites for "always" inline cases.
19044                 if (pInlineInfo->iciBlock->isRunRarely())
19045                 {
19046                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
19047                     if (inlineResult->IsFailure())
19048                     {
19049
19050                         return;
19051                     }
19052                 }
19053             }
19054         }
19055
19056         localsSig = info.compCompHnd->getArgNext(localsSig);
19057
19058 #ifdef FEATURE_SIMD
19059         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
19060         {
19061             foundSIMDType = true;
19062             if (featureSIMD && type == TYP_STRUCT)
19063             {
19064                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
19065                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
19066             }
19067         }
19068 #endif // FEATURE_SIMD
19069     }
19070
19071 #ifdef FEATURE_SIMD
19072     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
19073     {
19074         foundSIMDType = true;
19075     }
19076     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
19077 #endif // FEATURE_SIMD
19078 }
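// Layout of the tables populated above, for reference:
//
//   inlArgInfo[0 .. argCnt-1]                   -- actual argument info; slot 0 is 'this' when present
//   lclVarInfo[0 .. argCnt-1]                   -- signature types for those arguments
//   lclVarInfo[argCnt .. argCnt+numLocals-1]    -- types of the inlinee's IL locals (pinned, GC, etc.)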
19079
19080 //------------------------------------------------------------------------
19081 // impInlineFetchLocal: get a local var that represents an inlinee local
19082 //
19083 // Arguments:
19084 //    lclNum -- number of the inlinee local
19085 //    reason -- debug string describing purpose of the local var
19086 //
19087 // Returns:
19088 //    Number of the local to use
19089 //
19090 // Notes:
19091 //    This method is invoked only for locals actually used in the
19092 //    inlinee body.
19093 //
19094 //    Allocates a new temp if necessary, and copies key properties
19095 //    over from the inlinee local var info.
19096
19097 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
19098 {
19099     assert(compIsForInlining());
19100
19101     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
19102
19103     if (tmpNum == BAD_VAR_NUM)
19104     {
19105         const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
19106         const var_types      lclTyp       = inlineeLocal.lclTypeInfo;
19107
19108         // The lifetime of this local might span multiple BBs.
19109         // So it is a long lifetime local.
19110         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
19111
19112         // Copy over key info
19113         lvaTable[tmpNum].lvType                 = lclTyp;
19114         lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
19115         lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
19116         lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
19117         lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
19118
19119         // Copy over class handle for ref types. Note this may be a
19120         // shared type -- someday perhaps we can get the exact
19121         // signature and pass in a more precise type.
19122         if (lclTyp == TYP_REF)
19123         {
19124             assert(lvaTable[tmpNum].lvSingleDef == 0);
19125
19126             lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp;
19127             if (lvaTable[tmpNum].lvSingleDef)
19128             {
19129                 JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
19130             }
19131
19132             lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
19133         }
19134
19135         if (inlineeLocal.lclVerTypeInfo.IsStruct())
19136         {
19137             if (varTypeIsStruct(lclTyp))
19138             {
19139                 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
19140             }
19141             else
19142             {
19143                 // This is a wrapped primitive.  Make sure the verstate knows that
19144                 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
19145             }
19146         }
19147
19148 #ifdef DEBUG
19149         // Sanity check that we're properly prepared for gc ref locals.
19150         if (varTypeIsGC(lclTyp))
19151         {
19152             // Since there are gc locals we should have seen them earlier
19153             // and if there was a return value, set up the spill temp.
19154             assert(impInlineInfo->HasGcRefLocals());
19155             assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
19156         }
19157         else
19158         {
19159             // Make sure all pinned locals count as gc refs.
19160             assert(!inlineeLocal.lclIsPinned);
19161         }
19162 #endif // DEBUG
19163     }
19164
19165     return tmpNum;
19166 }
19167
19168 //------------------------------------------------------------------------
19169 // impInlineFetchArg: return tree node for argument value in an inlinee
19170 //
19171 // Arguments:
19172 //    lclNum -- argument number in inlinee IL
19173 //    inlArgInfo -- argument info for inlinee
19174 //    lclVarInfo -- var info for inlinee
19175 //
19176 // Returns:
19177 //    Tree for the argument's value. Often an inlinee-scoped temp
19178 //    GT_LCL_VAR but can be other tree kinds, if the argument
19179 //    expression from the caller can be directly substituted into the
19180 //    inlinee body.
19181 //
19182 // Notes:
19183 //    Must be used only for arguments -- use impInlineFetchLocal for
19184 //    inlinee locals.
19185 //
19186 //    Direct substitution is performed when the formal argument cannot
19187 //    change value in the inlinee body (no starg or ldarga), and the
19188 //    actual argument expression's value cannot be changed if it is
19189 //    substituted it into the inlinee body.
19190 //    substituted into the inlinee body.
19191 //    Even if an inlinee-scoped temp is returned here, it may later be
19192 //    "bashed" to a caller-supplied tree when arguments are actually
19193 //    passed (see fgInlinePrependStatements). Bashing can happen if
19194 //    the argument ends up being single use and other conditions are
19195 //    met. So the contents of the tree returned here may not end up
19196 //    being the ones ultimately used for the argument.
19197 //
19198 //    This method will side effect inlArgInfo. It should only be called
19199 //    for actual uses of the argument in the inlinee.
19200
19201 GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
19202 {
19203     // Cache the relevant arg and lcl info for this argument.
19204     // We will modify argInfo but not lclVarInfo.
19205     InlArgInfo&          argInfo          = inlArgInfo[lclNum];
19206     const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
19207     const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
19208     const var_types      lclTyp           = lclInfo.lclTypeInfo;
19209     GenTree*             op1              = nullptr;
19210
19211     if (argInfo.argIsInvariant && !argCanBeModified)
19212     {
19213         // Directly substitute constants or addresses of locals
19214         //
19215         // Clone the constant. Note that we cannot directly use
19216         // argNode in the trees even if !argInfo.argIsUsed as this
19217         // would introduce aliasing between inlArgInfo[].argNode and
19218         // impInlineExpr. Then gtFoldExpr() could change it, causing
19219         // further references to the argument working off of the
19220         // bashed copy.
19221         op1 = gtCloneExpr(argInfo.argNode);
19222         PREFIX_ASSUME(op1 != nullptr);
19223         argInfo.argTmpNum = BAD_VAR_NUM;
19224
19225         // We may need to retype to ensure we match the callee's view of the type.
19226         // Otherwise callee-pass throughs of arguments can create return type
19227         // mismatches that block inlining.
19228         //
19229         // Note argument type mismatches that prevent inlining should
19230         // have been caught in impInlineInitVars.
19231         if (op1->TypeGet() != lclTyp)
19232         {
19233             op1->gtType = genActualType(lclTyp);
19234         }
19235     }
19236     else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
19237     {
19238         // Directly substitute unaliased caller locals for args that cannot be modified
19239         //
19240         // Use the caller-supplied node if this is the first use.
19241         op1               = argInfo.argNode;
19242         argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
19243
19244         // Use an equivalent copy if this is the second or subsequent
19245         // use, or if we need to retype.
19246         //
19247         // Note argument type mismatches that prevent inlining should
19248         // have been caught in impInlineInitVars.
19249         if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
19250         {
19251             assert(op1->gtOper == GT_LCL_VAR);
19252             assert(lclNum == op1->gtLclVar.gtLclILoffs);
19253
19254             var_types newTyp = lclTyp;
19255
19256             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
19257             {
19258                 newTyp = genActualType(lclTyp);
19259             }
19260
19261             // Create a new lcl var node - remember the argument lclNum
19262             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
19263         }
19264     }
19265     else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
19266     {
19267         /* Argument is a by-ref address to a struct, a normed struct, or its field.
19268            In these cases, don't spill the byref to a local, simply clone the tree and use it.
19269            This way we will increase the chance for this byref to be optimized away by
19270            a subsequent "dereference" operation.
19271
19272            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
19273            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
19274            For example, if the caller is:
19275                 ldloca.s   V_1  // V_1 is a local struct
19276                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
19277            and the callee being inlined has:
19278                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
19279                     ldarga.s   ptrToInts
19280                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
19281            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
19282            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
19283         */
19284         assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
19285         op1 = gtCloneExpr(argInfo.argNode);
19286     }
19287     else
19288     {
19289         /* Argument is a complex expression - it must be evaluated into a temp */
19290
19291         if (argInfo.argHasTmp)
19292         {
19293             assert(argInfo.argIsUsed);
19294             assert(argInfo.argTmpNum < lvaCount);
19295
19296             /* Create a new lcl var node - remember the argument lclNum */
19297             op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
19298
19299             /* This is the second or later use of this argument,
19300             so we have to use the temp (instead of the actual arg) */
19301             argInfo.argBashTmpNode = nullptr;
19302         }
19303         else
19304         {
19305             /* First time use */
19306             assert(!argInfo.argIsUsed);
19307
19308             /* Reserve a temp for the expression.
19309             * Use a large size node as we may change it later */
19310
19311             const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
19312
19313             lvaTable[tmpNum].lvType = lclTyp;
19314
19315             // For ref types, determine the type of the temp.
19316             if (lclTyp == TYP_REF)
19317             {
19318                 if (!argCanBeModified)
19319                 {
19320                     // If the arg can't be modified in the method
19321                     // body, use the type of the value, if
19322                     // known. Otherwise, use the declared type.
19323                     assert(lvaTable[tmpNum].lvSingleDef == 0);
19324                     lvaTable[tmpNum].lvSingleDef = 1;
19325                     JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
19326                     lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
19327                 }
19328                 else
19329                 {
19330                     // Arg might be modified, use the declared type of
19331                     // the argument.
19332                     lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
19333                 }
19334             }
19335
19336             assert(lvaTable[tmpNum].lvAddrExposed == 0);
19337             if (argInfo.argHasLdargaOp)
19338             {
19339                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
19340             }
19341
19342             if (lclInfo.lclVerTypeInfo.IsStruct())
19343             {
19344                 if (varTypeIsStruct(lclTyp))
19345                 {
19346                     lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
19347                     if (info.compIsVarArgs)
19348                     {
19349                         lvaSetStructUsedAsVarArg(tmpNum);
19350                     }
19351                 }
19352                 else
19353                 {
19354                     // This is a wrapped primitive.  Make sure the verstate knows that
19355                     lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
19356                 }
19357             }
19358
19359             argInfo.argHasTmp = true;
19360             argInfo.argTmpNum = tmpNum;
19361
19362             // If we require strict exception order, then arguments must
19363             // be evaluated in sequence before the body of the inlined method.
19364             // So we need to evaluate them to a temp.
19365             // Also, if arguments have global or local references, we need to
19366             // evaluate them to a temp before the inlined body as the
19367             // inlined body may be modifying the global ref.
19368             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
19369             // if it is a struct, because it requires some additional handling.
19370
19371             if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
19372                 !argInfo.argHasCallerLocalRef)
19373             {
19374                 /* Get a *LARGE* LCL_VAR node */
19375                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
19376
19377                 /* Record op1 as the very first use of this argument.
19378                 If there are no further uses of the arg, we may be
19379                 able to use the actual arg node instead of the temp.
19380                 If we do see any further uses, we will clear this. */
19381                 argInfo.argBashTmpNode = op1;
19382             }
19383             else
19384             {
19385                 /* Get a small LCL_VAR node */
19386                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
19387                 /* No bashing of this argument */
19388                 argInfo.argBashTmpNode = nullptr;
19389             }
19390         }
19391     }
19392
19393     // Mark this argument as used.
19394     argInfo.argIsUsed = true;
19395
19396     return op1;
19397 }
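// Illustrative use of the substitution choices above (hypothetical IL, in the same spirit
// as the Dev11 example earlier in this function):
//
//   caller:  ldc.i4.7
//            call   int32 C::AddOne(int32)
//   callee:  ldarg.0
//            ldc.i4.1
//            add
//            ret
//
// The actual argument is a constant and the callee has no starg/ldarga on arg 0, so the
// argIsInvariant path returns a clone of the constant and the inlined body can fold to 8.
// Had the actual argument carried side effects (e.g. another call), the final 'else' path
// would instead evaluate it into a temp ahead of the inlined body.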
19398
19399 /******************************************************************************
19400  Is this the original "this" argument to the call being inlined?
19401
19402  Note that we do not inline methods with "starg 0", and so we do not need to
19403  worry about it.
19404 */
19405
19406 BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
19407 {
19408     assert(compIsForInlining());
19409     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
19410 }
19411
19412 //-----------------------------------------------------------------------------
19413 // This function checks if a dereference in the inlinee can guarantee that
19414 // the "this" is non-NULL.
19415 // If we haven't hit a branch or a side effect, and we are dereferencing
19416 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
19417 // then we can avoid a separate null pointer check.
19418 //
19419 // "additionalTreesToBeEvaluatedBefore"
19420 // is the set of pending trees that have not yet been added to the statement list,
19421 // and which have been removed from verCurrentState.esStack[]
19422
19423 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree*    additionalTreesToBeEvaluatedBefore,
19424                                                                   GenTree*    variableBeingDereferenced,
19425                                                                   InlArgInfo* inlArgInfo)
19426 {
19427     assert(compIsForInlining());
19428     assert(opts.OptEnabled(CLFLG_INLINING));
19429
19430     BasicBlock* block = compCurBB;
19431
19432     GenTree* stmt;
19433     GenTree* expr;
19434
19435     if (block != fgFirstBB)
19436     {
19437         return FALSE;
19438     }
19439
19440     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
19441     {
19442         return FALSE;
19443     }
19444
19445     if (additionalTreesToBeEvaluatedBefore &&
19446         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
19447     {
19448         return FALSE;
19449     }
19450
19451     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
19452     {
19453         expr = stmt->gtStmt.gtStmtExpr;
19454
19455         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
19456         {
19457             return FALSE;
19458         }
19459     }
19460
19461     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
19462     {
19463         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
19464         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
19465         {
19466             return FALSE;
19467         }
19468     }
19469
19470     return TRUE;
19471 }
19472
19473 //------------------------------------------------------------------------
19474 // impMarkInlineCandidate: determine if this call can be subsequently inlined
19475 //
19476 // Arguments:
19477 //    callNode -- call under scrutiny
19478 //    exactContextHnd -- context handle for inlining
19479 //    exactContextNeedsRuntimeLookup -- true if context required runtime lookup
19480 //    callInfo -- call info from VM
19481 //
19482 // Notes:
19483 //    If callNode is an inline candidate, this method sets the flag
19484 //    GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
19485 //    filled in the associated InlineCandidateInfo.
19486 //
19487 //    If callNode is not an inline candidate, and the reason is
19488 //    something that is inherent to the method being called, the
19489 //    method may be marked as "noinline" to short-circuit any
19490 //    future assessments of calls to this method.
19491
19492 void Compiler::impMarkInlineCandidate(GenTree*               callNode,
19493                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
19494                                       bool                   exactContextNeedsRuntimeLookup,
19495                                       CORINFO_CALL_INFO*     callInfo)
19496 {
19497     // Let the strategy know there's another call
19498     impInlineRoot()->m_inlineStrategy->NoteCall();
19499
19500     if (!opts.OptEnabled(CLFLG_INLINING))
19501     {
19502         /* XXX Mon 8/18/2008
19503          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
19504          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
19505          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
19506          * figure out why we did not set MAXOPT for this compile.
19507          */
19508         assert(!compIsForInlining());
19509         return;
19510     }
19511
19512     if (compIsForImportOnly())
19513     {
19514         // Don't bother creating the inline candidate during verification.
19515         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
19516         // that leads to the creation of multiple instances of Compiler.
19517         return;
19518     }
19519
19520     GenTreeCall* call = callNode->AsCall();
19521     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
19522
19523     // Don't inline if not optimizing root method
19524     if (opts.compDbgCode)
19525     {
19526         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
19527         return;
19528     }
19529
19530     // Don't inline if inlining into root method is disabled.
19531     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
19532     {
19533         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
19534         return;
19535     }
19536
19537     // Inlining candidate determination needs to honor only the IL tail prefix.
19538     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
19539     if (call->IsTailPrefixedCall())
19540     {
19541         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
19542         return;
19543     }
19544
19545     // Tail recursion elimination takes precedence over inlining.
19546     // TODO: We may want to do some of the additional checks from fgMorphCall
19547     // here to reduce the chance of rejecting an inline for a call that will not
19548     // actually be optimized as a fast tail call or turned into a loop.
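    // (A directly recursive call in tail position can later be turned into a loop
    // during morph, which is generally preferable to inlining one level of the
    // recursion; hence the bail-out below.)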
19549     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
19550     {
19551         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
19552         return;
19553     }
19554
19555     if (call->IsVirtual())
19556     {
19557         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
19558         return;
19559     }
19560
19561     /* Ignore helper calls */
19562
19563     if (call->gtCallType == CT_HELPER)
19564     {
19565         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
19566         return;
19567     }
19568
19569     /* Ignore indirect calls */
19570     if (call->gtCallType == CT_INDIRECT)
19571     {
19572         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
19573         return;
19574     }
19575
19576     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
19577      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
19578      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
19579
19580     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
19581     unsigned              methAttr;
19582
19583     // Reuse method flags from the original callInfo if possible
19584     if (fncHandle == callInfo->hMethod)
19585     {
19586         methAttr = callInfo->methodFlags;
19587     }
19588     else
19589     {
19590         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
19591     }
19592
19593 #ifdef DEBUG
19594     if (compStressCompile(STRESS_FORCE_INLINE, 0))
19595     {
19596         methAttr |= CORINFO_FLG_FORCEINLINE;
19597     }
19598 #endif
19599
19600     // Check for COMPlus_AggressiveInlining
19601     if (compDoAggressiveInlining)
19602     {
19603         methAttr |= CORINFO_FLG_FORCEINLINE;
19604     }
19605
19606     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
19607     {
19608         /* Don't bother inlining call sites that are in the catch handler or filter regions */
19609         if (bbInCatchHandlerILRange(compCurBB))
19610         {
19611 #ifdef DEBUG
19612             if (verbose)
19613             {
19614                 printf("\nWill not inline blocks that are in the catch handler region\n");
19615             }
19616
19617 #endif
19618
19619             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
19620             return;
19621         }
19622
19623         if (bbInFilterILRange(compCurBB))
19624         {
19625 #ifdef DEBUG
19626             if (verbose)
19627             {
19628                 printf("\nWill not inline blocks that are in the filter region\n");
19629             }
19630 #endif
19631
19632             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
19633             return;
19634         }
19635     }
19636
19637     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
19638
19639     if (opts.compNeedSecurityCheck)
19640     {
19641         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
19642         return;
19643     }
19644
19645     /* Check if we tried to inline this method before */
19646
19647     if (methAttr & CORINFO_FLG_DONT_INLINE)
19648     {
19649         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
19650         return;
19651     }
19652
19653     /* Cannot inline synchronized methods */
19654
19655     if (methAttr & CORINFO_FLG_SYNCH)
19656     {
19657         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
19658         return;
19659     }
19660
19661     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
19662
19663     if (methAttr & CORINFO_FLG_SECURITYCHECK)
19664     {
19665         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
19666         return;
19667     }
19668
19669     /* Check legality of PInvoke callsite (for inlining of marshalling code) */
19670
19671     if (methAttr & CORINFO_FLG_PINVOKE)
19672     {
19673         // See comment in impCheckForPInvokeCall
19674         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
19675         if (!impCanPInvokeInlineCallSite(block))
19676         {
19677             inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH);
19678             return;
19679         }
19680     }
19681
19682     InlineCandidateInfo* inlineCandidateInfo = nullptr;
19683     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
19684
19685     if (inlineResult.IsFailure())
19686     {
19687         return;
19688     }
19689
19690     // The old value should be NULL
19691     assert(call->gtInlineCandidateInfo == nullptr);
19692
19693     // The new value should not be NULL.
19694     assert(inlineCandidateInfo != nullptr);
19695     inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
19696
19697     call->gtInlineCandidateInfo = inlineCandidateInfo;
19698
19699     // Mark the call node as inline candidate.
19700     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
19701
19702     // Let the strategy know there's another candidate.
19703     impInlineRoot()->m_inlineStrategy->NoteCandidate();
19704
19705     // Since we're not actually inlining yet, and this call site is
19706     // still just an inline candidate, there's nothing to report.
19707     inlineResult.SetReported();
19708 }
19709
19710 /******************************************************************************/
19711 // Returns true if the given intrinsic will be implemented by target-specific
19712 // instructions
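// For example, on x64 a call to System.Math.Sqrt can be emitted as a single SSE2
// sqrtsd instruction instead of a managed or helper call (illustrative; the exact
// instruction chosen depends on the target and on codegen).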
19713
19714 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
19715 {
19716 #if defined(_TARGET_XARCH_)
19717     switch (intrinsicId)
19718     {
19719         // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
19720         // instructions to directly compute round/ceiling/floor.
19721         //
19722         // TODO: Because the x86 backend only targets SSE for floating-point code,
19723         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
19724         //       implemented those intrinsics as x87 instructions). If this poses
19725         //       a CQ problem, it may be necessary to change the implementation of
19726         //       the helper calls to decrease call overhead or switch back to the
19727         //       x87 instructions. This is tracked by #7097.
19728         case CORINFO_INTRINSIC_Sqrt:
19729         case CORINFO_INTRINSIC_Abs:
19730             return true;
19731
19732         case CORINFO_INTRINSIC_Round:
19733         case CORINFO_INTRINSIC_Ceiling:
19734         case CORINFO_INTRINSIC_Floor:
19735             return compSupports(InstructionSet_SSE41);
19736
19737         default:
19738             return false;
19739     }
19740 #elif defined(_TARGET_ARM64_)
19741     switch (intrinsicId)
19742     {
19743         case CORINFO_INTRINSIC_Sqrt:
19744         case CORINFO_INTRINSIC_Abs:
19745         case CORINFO_INTRINSIC_Round:
19746         case CORINFO_INTRINSIC_Floor:
19747         case CORINFO_INTRINSIC_Ceiling:
19748             return true;
19749
19750         default:
19751             return false;
19752     }
19753 #elif defined(_TARGET_ARM_)
19754     switch (intrinsicId)
19755     {
19756         case CORINFO_INTRINSIC_Sqrt:
19757         case CORINFO_INTRINSIC_Abs:
19758         case CORINFO_INTRINSIC_Round:
19759             return true;
19760
19761         default:
19762             return false;
19763     }
19764 #else
19765     // TODO: This portion of the logic is not implemented for other architectures.
19766     // The reason for returning true is that on all other architectures the only
19767     // intrinsics enabled are target intrinsics.
19768     return true;
19769 #endif
19770 }
19771
19772 /******************************************************************************/
19773 // Returns true if the given intrinsic will be implemented by calling System.Math
19774 // methods.
19775
19776 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
19777 {
19778     // Currently, if a math intrinsic is not implemented by target-specific
19779     // instructions, it will be implemented by a System.Math call. In the
19780     // future, if we turn to implementing some of them with helper calls,
19781     // this predicate needs to be revisited.
19782     return !IsTargetIntrinsic(intrinsicId);
19783 }
19784
19785 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
19786 {
19787     switch (intrinsicId)
19788     {
19789         case CORINFO_INTRINSIC_Sin:
19790         case CORINFO_INTRINSIC_Cbrt:
19791         case CORINFO_INTRINSIC_Sqrt:
19792         case CORINFO_INTRINSIC_Abs:
19793         case CORINFO_INTRINSIC_Cos:
19794         case CORINFO_INTRINSIC_Round:
19795         case CORINFO_INTRINSIC_Cosh:
19796         case CORINFO_INTRINSIC_Sinh:
19797         case CORINFO_INTRINSIC_Tan:
19798         case CORINFO_INTRINSIC_Tanh:
19799         case CORINFO_INTRINSIC_Asin:
19800         case CORINFO_INTRINSIC_Asinh:
19801         case CORINFO_INTRINSIC_Acos:
19802         case CORINFO_INTRINSIC_Acosh:
19803         case CORINFO_INTRINSIC_Atan:
19804         case CORINFO_INTRINSIC_Atan2:
19805         case CORINFO_INTRINSIC_Atanh:
19806         case CORINFO_INTRINSIC_Log10:
19807         case CORINFO_INTRINSIC_Pow:
19808         case CORINFO_INTRINSIC_Exp:
19809         case CORINFO_INTRINSIC_Ceiling:
19810         case CORINFO_INTRINSIC_Floor:
19811             return true;
19812         default:
19813             return false;
19814     }
19815 }
19816
19817 bool Compiler::IsMathIntrinsic(GenTree* tree)
19818 {
19819     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
19820 }
19821
19822 //------------------------------------------------------------------------
19823 // impDevirtualizeCall: Attempt to change a virtual vtable call into a
19824 //   normal call
19825 //
19826 // Arguments:
19827 //     call -- the call node to examine/modify
19828 //     method   -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
19829 //     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
19830 //     contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
19831 //     exactContextHandle -- [OUT] updated context handle iff call devirtualized
19832 //
19833 // Notes:
19834 //     Virtual calls in IL will always "invoke" the base class method.
19835 //
19836 //     This transformation looks for evidence that the type of 'this'
19837 //     in the call is exactly known, is a final class or would invoke
19838 //     a final method, and if that and other safety checks pan out,
19839 //     modifies the call and the call info to create a direct call.
19840 //
19841 //     This transformation is initially done in the importer and not
19842 //     in some subsequent optimization pass because we want it to be
19843 //     upstream of inline candidate identification.
19844 //
19845 //     However, later phases may supply improved type information that
19846 //     can enable further devirtualization. We currently reinvoke this
19847 //     code after inlining, if the return value of the inlined call is
19848 //     the 'this obj' of a subsequent virtual call.
19849 //
19850 //     If devirtualization succeeds and the call's this object is the
19851 //     result of a box, the jit will ask the EE for the unboxed entry
19852 //     point. If this exists, the jit will see if it can rework the box
19853 //     to instead make a local copy. If that is doable, the call is
19854 //     updated to invoke the unboxed entry on the local copy.
19855 //
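// For example (an illustrative sketch, not from the original comment):
//     sealed class Widget : IShape { public int Area() => 42; }
//     IShape s = new Widget();
//     s.Area();   // IL: callvirt IShape::Area
// Because Widget is sealed and the type of 's' is known exactly at this call site,
// the interface call can be rewritten as a direct call to Widget::Area, which also
// makes it a potential inline candidate.
//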
19856 void Compiler::impDevirtualizeCall(GenTreeCall*            call,
19857                                    CORINFO_METHOD_HANDLE*  method,
19858                                    unsigned*               methodFlags,
19859                                    CORINFO_CONTEXT_HANDLE* contextHandle,
19860                                    CORINFO_CONTEXT_HANDLE* exactContextHandle)
19861 {
19862     assert(call != nullptr);
19863     assert(method != nullptr);
19864     assert(methodFlags != nullptr);
19865     assert(contextHandle != nullptr);
19866
19867     // This should be a virtual vtable or virtual stub call.
19868     assert(call->IsVirtual());
19869
19870     // Bail if not optimizing
19871     if (opts.MinOpts())
19872     {
19873         return;
19874     }
19875
19876     // Bail if debuggable codegen
19877     if (opts.compDbgCode)
19878     {
19879         return;
19880     }
19881
19882 #if defined(DEBUG)
19883     // Bail if devirt is disabled.
19884     if (JitConfig.JitEnableDevirtualization() == 0)
19885     {
19886         return;
19887     }
19888
19889     const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
19890 #endif // DEBUG
19891
19892     // Fetch information about the virtual method we're calling.
19893     CORINFO_METHOD_HANDLE baseMethod        = *method;
19894     unsigned              baseMethodAttribs = *methodFlags;
19895
19896     if (baseMethodAttribs == 0)
19897     {
19898         // For late devirt we may not have method attributes, so fetch them.
19899         baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19900     }
19901     else
19902     {
19903 #if defined(DEBUG)
19904         // Validate that callInfo has up to date method flags
19905         const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19906
19907         // All the base method attributes should agree, save that
19908         // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
19909         // because of concurrent jitting activity.
19910         //
19911         // Note we don't look at this particular flag bit below, and
19912         // later on (if we do try and inline) we will rediscover why
19913         // the method can't be inlined, so there's no danger here in
19914         // seeing this particular flag bit in different states between
19915         // the cached and fresh values.
19916         if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
19917         {
19918             assert(!"mismatched method attributes");
19919         }
19920 #endif // DEBUG
19921     }
19922
19923     // In R2R mode, we might see virtual stub calls to
19924     // non-virtuals. For instance, cases where the non-virtual method
19925     // is in a different assembly but is called via CALLVIRT. For
19926     // version resilience we must allow for the fact that the method
19927     // might become virtual in some update.
19928     //
19929     // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
19930     // regular call+nullcheck upstream, so we won't reach this
19931     // point.
19932     if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
19933     {
19934         assert(call->IsVirtualStub());
19935         assert(opts.IsReadyToRun());
19936         JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
19937         return;
19938     }
19939
19940     // See what we know about the type of 'this' in the call.
19941     GenTree*             thisObj       = call->gtCallObjp->gtEffectiveVal(false);
19942     GenTree*             actualThisObj = nullptr;
19943     bool                 isExact       = false;
19944     bool                 objIsNonNull  = false;
19945     CORINFO_CLASS_HANDLE objClass      = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
19946
19947     // See if we have special knowledge that can get us a type or a better type.
19948     if ((objClass == nullptr) || !isExact)
19949     {
19950         // Walk back through any return expression placeholders
19951         actualThisObj = thisObj->gtRetExprVal();
19952
19953         // See if we landed on a call to a special intrinsic method
19954         if (actualThisObj->IsCall())
19955         {
19956             GenTreeCall* thisObjCall = actualThisObj->AsCall();
19957             if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
19958             {
19959                 assert(thisObjCall->gtCallType == CT_USER_FUNC);
19960                 CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
19961                 CORINFO_CLASS_HANDLE  specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
19962                 if (specialObjClass != nullptr)
19963                 {
19964                     objClass     = specialObjClass;
19965                     isExact      = true;
19966                     objIsNonNull = true;
19967                 }
19968             }
19969         }
19970     }
19971
19972     // Bail if we know nothing.
19973     if (objClass == nullptr)
19974     {
19975         JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
19976         return;
19977     }
19978
19979     // Fetch information about the class that introduced the virtual method.
19980     CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
19981     const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
19982
19983 #if !defined(FEATURE_CORECLR)
19984     // If base class is not beforefieldinit then devirtualizing may
19985     // cause us to miss a base class init trigger. Spec says we don't
19986     // need a trigger for ref class callvirts, but desktop seems to
19987     // have one anyway. So defer.
19988     if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
19989     {
19990         JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
19991         return;
19992     }
19993 #endif // FEATURE_CORECLR
19994
19995     // Is the call an interface call?
19996     const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
19997
19998     // If the objClass is sealed (final), then we may be able to devirtualize.
19999     const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
20000     const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
20001
20002 #if defined(DEBUG)
20003     const char* callKind       = isInterface ? "interface" : "virtual";
20004     const char* objClassNote   = "[?]";
20005     const char* objClassName   = "?objClass";
20006     const char* baseClassName  = "?baseClass";
20007     const char* baseMethodName = "?baseMethod";
20008
20009     if (verbose || doPrint)
20010     {
20011         objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
20012         objClassName   = info.compCompHnd->getClassName(objClass);
20013         baseClassName  = info.compCompHnd->getClassName(baseClass);
20014         baseMethodName = eeGetMethodName(baseMethod, nullptr);
20015
20016         if (verbose)
20017         {
20018             printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
20019                    "    class for 'this' is %s%s (attrib %08x)\n"
20020                    "    base method is %s::%s\n",
20021                    callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
20022         }
20023     }
20024 #endif // defined(DEBUG)
20025
20026     // Bail if obj class is an interface.
20027     // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
20028     //   IL_021d:  ldloc.0
20029     //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
20030     if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
20031     {
20032         JITDUMP("--- obj class is interface, sorry\n");
20033         return;
20034     }
20035
20036     if (isInterface)
20037     {
20038         assert(call->IsVirtualStub());
20039         JITDUMP("--- base class is interface\n");
20040     }
20041
20042     // Fetch the method that would be called based on the declared type of 'this'
20043     CORINFO_CONTEXT_HANDLE ownerType     = *contextHandle;
20044     CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
20045
20046     // If we failed to get a handle, we can't devirtualize.  This can
20047     // happen when prejitting, if the devirtualization crosses
20048     // servicing bubble boundaries.
20049     if (derivedMethod == nullptr)
20050     {
20051         JITDUMP("--- no derived method, sorry\n");
20052         return;
20053     }
20054
20055     // Fetch method attributes to see if method is marked final.
20056     DWORD      derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
20057     const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
20058
20059 #if defined(DEBUG)
20060     const char* derivedClassName  = "?derivedClass";
20061     const char* derivedMethodName = "?derivedMethod";
20062
20063     const char* note = "speculative";
20064     if (isExact)
20065     {
20066         note = "exact";
20067     }
20068     else if (objClassIsFinal)
20069     {
20070         note = "final class";
20071     }
20072     else if (derivedMethodIsFinal)
20073     {
20074         note = "final method";
20075     }
20076
20077     if (verbose || doPrint)
20078     {
20079         derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
20080         if (verbose)
20081         {
20082             printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
20083             gtDispTree(call);
20084         }
20085     }
20086 #endif // defined(DEBUG)
20087
20088     if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
20089     {
20090         // Type is not exact, and neither the class nor the method is final.
20091         //
20092         // We could speculatively devirtualize, but there's no
20093         // reason to believe the derived method is the one that
20094         // is likely to be invoked.
20095         //
20096         // If there's currently no further overriding (that is, at
20097         // the time of jitting, objClass has no subclasses that
20098         // override this method), then perhaps we'd be willing to
20099         // make a bet...?
20100         JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
20101         return;
20102     }
20103
20104     // For interface calls we must have an exact type or final class.
20105     if (isInterface && !isExact && !objClassIsFinal)
20106     {
20107         JITDUMP("    Class not final or exact for interface, no devirtualization\n");
20108         return;
20109     }
20110
20111     JITDUMP("    %s; can devirtualize\n", note);
20112
20113     // Make the updates.
20114     call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
20115     call->gtFlags &= ~GTF_CALL_VIRT_STUB;
20116     call->gtCallMethHnd = derivedMethod;
20117     call->gtCallType    = CT_USER_FUNC;
20118     call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED;
20119
20120     // Virtual calls include an implicit null check, which we may
20121     // now need to make explicit.
20122     if (!objIsNonNull)
20123     {
20124         call->gtFlags |= GTF_CALL_NULLCHECK;
20125     }
20126
20127     // Clear the inline candidate info (may be non-null since
20128     // it's a union field used for other things by virtual
20129     // stubs)
20130     call->gtInlineCandidateInfo = nullptr;
20131
20132 #if defined(DEBUG)
20133     if (verbose)
20134     {
20135         printf("... after devirt...\n");
20136         gtDispTree(call);
20137     }
20138
20139     if (doPrint)
20140     {
20141         printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
20142                baseMethodName, derivedClassName, derivedMethodName, note);
20143     }
20144 #endif // defined(DEBUG)
20145
20146     // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
20147     if (thisObj->IsBoxedValue())
20148     {
20149         JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
20150
20151         // Note for some shared methods the unboxed entry point requires an extra parameter.
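        // (Roughly: when the unboxed entry is code shared across instantiations, it
        // needs the exact method table passed explicitly so it can recover the
        // instantiation.)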
20152         bool                  requiresInstMethodTableArg = false;
20153         CORINFO_METHOD_HANDLE unboxedEntryMethod =
20154             info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
20155
20156         if (unboxedEntryMethod != nullptr)
20157         {
20158             // Since the call is the only consumer of the box, we know the box can't escape
20159             // since it is being passed an interior pointer.
20160             //
20161             // So, revise the box to simply create a local copy, use the address of that copy
20162             // as the this pointer, and update the entry point to the unboxed entry.
20163             //
20164             // Ideally, we then inline the boxed method, and if it turns out not to modify
20165             // the copy, we can undo the copy too.
20166             if (requiresInstMethodTableArg)
20167             {
20168                 // Perform a trial box removal and ask for the type handle tree.
20169                 JITDUMP("Unboxed entry needs method table arg...\n");
20170                 GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);
20171
20172                 if (methodTableArg != nullptr)
20173                 {
20174                     // If that worked, turn the box into a copy to a local var
20175                     JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
20176                     GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
20177
20178                     if (localCopyThis != nullptr)
20179                     {
20180                         // Pass the local var as this and the type handle as a new arg
20181                         JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table arg\n");
20182                         call->gtCallObjp = localCopyThis;
20183                         call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
20184
20185                         // Prepend for R2L arg passing or empty L2R passing
20186                         if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
20187                         {
20188                             call->gtCallArgs = gtNewListNode(methodTableArg, call->gtCallArgs);
20189                         }
20190                         // Append for non-empty L2R
20191                         else
20192                         {
20193                             GenTreeArgList* beforeArg = call->gtCallArgs;
20194                             while (beforeArg->Rest() != nullptr)
20195                             {
20196                                 beforeArg = beforeArg->Rest();
20197                             }
20198
20199                             beforeArg->Rest() = gtNewListNode(methodTableArg, nullptr);
20200                         }
20201
20202                         call->gtCallMethHnd = unboxedEntryMethod;
20203                         derivedMethod       = unboxedEntryMethod;
20204
20205                         // Method attributes will differ because unboxed entry point is shared
20206                         const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
20207                         JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
20208                                 unboxedMethodAttribs);
20209                         derivedMethodAttribs = unboxedMethodAttribs;
20210                     }
20211                     else
20212                     {
20213                         JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
20214                     }
20215                 }
20216                 else
20217                 {
20218                     JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
20219                 }
20220             }
20221             else
20222             {
20223                 JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
20224                 GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
20225
20226                 if (localCopyThis != nullptr)
20227                 {
20228                     JITDUMP("Success! invoking unboxed entry point on local copy\n");
20229                     call->gtCallObjp    = localCopyThis;
20230                     call->gtCallMethHnd = unboxedEntryMethod;
20231                     call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
20232                     derivedMethod = unboxedEntryMethod;
20233                 }
20234                 else
20235                 {
20236                     JITDUMP("Sorry, failed to undo the box\n");
20237                 }
20238             }
20239         }
20240         else
20241         {
20242             // Many of the low-level methods on value classes won't have unboxed entries,
20243             // as they need access to the type of the object.
20244             //
20245             // Note this may be a cue for us to stack allocate the boxed object, since
20246             // we probably know that these objects don't escape.
20247             JITDUMP("Sorry, failed to find unboxed entry point\n");
20248         }
20249     }
20250
20251     // Fetch the class that introduced the derived method.
20252     //
20253     // Note this may not equal objClass, if there is a
20254     // final method that objClass inherits.
20255     CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
20256
20257     // Need to update call info too. This is fragile
20258     // but hopefully the derived method conforms to
20259     // the base in most other ways.
20260     *method        = derivedMethod;
20261     *methodFlags   = derivedMethodAttribs;
20262     *contextHandle = MAKE_METHODCONTEXT(derivedMethod);
20263
20264     // Update context handle.
20265     if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
20266     {
20267         *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
20268     }
20269
20270 #ifdef FEATURE_READYTORUN_COMPILER
20271     if (opts.IsReadyToRun())
20272     {
20273         // For R2R, getCallInfo triggers bookkeeping on the zap
20274         // side so we need to call it here.
20275         //
20276         // First, cons up a suitable resolved token.
20277         CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
20278
20279         derivedResolvedToken.tokenScope   = info.compScopeHnd;
20280         derivedResolvedToken.tokenContext = *contextHandle;
20281         derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
20282         derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
20283         derivedResolvedToken.hClass       = derivedClass;
20284         derivedResolvedToken.hMethod      = derivedMethod;
20285
20286         // Look up the new call info.
20287         CORINFO_CALL_INFO derivedCallInfo;
20288         eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
20289
20290         // Update the call.
20291         call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
20292         call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
20293         call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
20294     }
20295 #endif // FEATURE_READYTORUN_COMPILER
20296 }
20297
20298 //------------------------------------------------------------------------
20299 // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
20300 //   to an intrinsic returns an exact type
20301 //
20302 // Arguments:
20303 //     methodHnd -- handle for the special intrinsic method
20304 //
20305 // Returns:
20306 //     Exact class handle returned by the intrinsic call, if known.
20307 //     Nullptr if not known, or not likely to lead to beneficial optimization.
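//
// Notes:
//     For instance (illustrative): for EqualityComparer<int>.Default the runtime can
//     report the concrete comparer class it will hand back, which lets a later
//     devirtualization pass turn Default.Equals(x, y) into a direct, inlineable call.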
20308
20309 CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
20310 {
20311     JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
20312
20313     CORINFO_CLASS_HANDLE result = nullptr;
20314
20315     // See what intrinsic we have...
20316     const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
20317     switch (ni)
20318     {
20319         case NI_System_Collections_Generic_EqualityComparer_get_Default:
20320         {
20321             // Expect one class generic parameter; figure out which it is.
20322             CORINFO_SIG_INFO sig;
20323             info.compCompHnd->getMethodSig(methodHnd, &sig);
20324             assert(sig.sigInst.classInstCount == 1);
20325             CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
20326             assert(typeHnd != nullptr);
20327
20328             // Lookup can be incorrect when we have __Canon, as it won't appear
20329             // to implement any interface types.
20330             //
20331             // And if we do not have a final type, devirt & inlining is
20332             // unlikely to result in much simplification.
20333             //
20334             // We can use CORINFO_FLG_FINAL to screen out both of these cases.
20335             const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
20336             const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
20337
20338             if (isFinalType)
20339             {
20340                 result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
20341                 JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
20342                         result != nullptr ? eeGetClassName(result) : "unknown");
20343             }
20344             else
20345             {
20346                 JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
20347             }
20348
20349             break;
20350         }
20351
20352         default:
20353         {
20354             JITDUMP("This special intrinsic not handled, sorry...\n");
20355             break;
20356         }
20357     }
20358
20359     return result;
20360 }
20361
20362 //------------------------------------------------------------------------
20363 // impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it.
20364 //
20365 // Arguments:
20366 //    token - init value for the allocated token.
20367 //
20368 // Return Value:
20369 //    pointer to token into jit-allocated memory.
20370 CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
20371 {
20372     CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1);
20373     *memory                        = token;
20374     return memory;
20375 }
20376
20377 //------------------------------------------------------------------------
20378 // SpillRetExprHelper: iterate through arguments tree and spill ret_expr to local variables.
20379 //
20380 class SpillRetExprHelper
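// GT_RET_EXPR nodes are placeholders for the return values of calls that are still
// inline candidates; they are resolved once inlining succeeds or fails. Because such
// a placeholder cannot be safely cloned, any ret_expr reachable from the call's
// arguments is spilled to a temp first (see addFatPointerCandidate below).
//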
20381 {
20382 public:
20383     SpillRetExprHelper(Compiler* comp) : comp(comp)
20384     {
20385     }
20386
20387     void StoreRetExprResultsInArgs(GenTreeCall* call)
20388     {
20389         GenTreeArgList** pArgs = &call->gtCallArgs;
20390         if (*pArgs != nullptr)
20391         {
20392             comp->fgWalkTreePre((GenTree**)pArgs, SpillRetExprVisitor, this);
20393         }
20394
20395         GenTree** pThisArg = &call->gtCallObjp;
20396         if (*pThisArg != nullptr)
20397         {
20398             comp->fgWalkTreePre(pThisArg, SpillRetExprVisitor, this);
20399         }
20400     }
20401
20402 private:
20403     static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
20404     {
20405         assert((pTree != nullptr) && (*pTree != nullptr));
20406         GenTree* tree = *pTree;
20407         if ((tree->gtFlags & GTF_CALL) == 0)
20408         {
20409             // Trees with ret_expr are marked as GTF_CALL.
20410             return Compiler::WALK_SKIP_SUBTREES;
20411         }
20412         if (tree->OperGet() == GT_RET_EXPR)
20413         {
20414             SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
20415             walker->StoreRetExprAsLocalVar(pTree);
20416         }
20417         return Compiler::WALK_CONTINUE;
20418     }
20419
20420     void StoreRetExprAsLocalVar(GenTree** pRetExpr)
20421     {
20422         GenTree* retExpr = *pRetExpr;
20423         assert(retExpr->OperGet() == GT_RET_EXPR);
20424         JITDUMP("Store return expression %u as a local var.\n", retExpr->gtTreeID);
20425         unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
20426         comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
20427         *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
20428     }
20429
20430 private:
20431     Compiler* comp;
20432 };
20433
20434 //------------------------------------------------------------------------
20435 // addFatPointerCandidate: mark the call and the enclosing method as having a fat pointer candidate.
20436 //                         Spill any ret_expr in the call node, because ret_exprs can't be cloned.
20437 //
20438 // Arguments:
20439 //    call - fat calli candidate
20440 //
20441 void Compiler::addFatPointerCandidate(GenTreeCall* call)
20442 {
20443     setMethodHasFatPointer();
20444     call->SetFatPointerCandidate();
20445     SpillRetExprHelper helper(this);
20446     helper.StoreRetExprResultsInArgs(call);
20447 }