src/jit/importer.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
148 inline void Compiler::impPushNullObjRefOnStack()
149 {
150     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
151 }
152
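// Illustrative sketch (not part of the original source): a typical caller of impPushOnStack,
// e.g. when importing CEE_LDC_I4, pushes a freshly built constant node together with its
// verification type (the value 42 here is hypothetical):
//
//     impPushOnStack(gtNewIconNode(42), typeInfo(TI_INT));
//
// impPushNullObjRefOnStack above is the same pattern specialized for the null object reference.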
153 // This method gets called when we run into unverifiable code
154 // (and we are verifying the method)
155
156 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
157                                                           DEBUGARG(unsigned line))
158 {
159     // Remember that the code is not verifiable
160     // Note that the method may yet pass canSkipMethodVerification(),
161     // and so the presence of unverifiable code may not be an issue.
162     tiIsVerifiableCode = FALSE;
163
164 #ifdef DEBUG
165     const char* tail = strrchr(file, '\\');
166     if (tail)
167     {
168         file = tail + 1;
169     }
170
171     if (JitConfig.JitBreakOnUnsafeCode())
172     {
173         assert(!"Unsafe code detected");
174     }
175 #endif
176
177     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
178             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
179
180     if (verNeedsVerification() || compIsForImportOnly())
181     {
182         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
183                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
184         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
185     }
186 }
187
188 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
189                                                                     DEBUGARG(unsigned line))
190 {
191     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
192             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
193
194 #ifdef DEBUG
195     //    BreakIfDebuggerPresent();
196     if (getBreakOnBadCode())
197     {
198         assert(!"Typechecking error");
199     }
200 #endif
201
202     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
203     UNREACHABLE();
204 }
205
206 // Helper function that tells us whether the IL instruction at the given code address
207 // consumes an address at the top of the stack. We use it to avoid marking locals as
208 // address-taken (lvAddrTaken) when it isn't necessary.
209 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
210 {
211     assert(!compIsForInlining());
212
213     OPCODE opcode;
214
215     opcode = (OPCODE)getU1LittleEndian(codeAddr);
216
217     switch (opcode)
218     {
219         // case CEE_LDFLDA: We're leaving this one out because if you have a sequence
220         // like
221         //
222         //          ldloca.0
223         //          ldflda whatever
224         //
225         // on a primitive-like struct, you end up after morphing with the address of a local
226         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
227         // for structs that contain other structs, which isn't a case we handle very
228         // well right now for other reasons.
229
230         case CEE_LDFLD:
231         {
232             // We won't collapse small fields. This is probably not the right place to have this
233             // check, but we're only using the function for this purpose, and it is easy to factor
234             // out if we need to do so.
235
236             CORINFO_RESOLVED_TOKEN resolvedToken;
237             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
238
239             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField));
240
241             // Preserve 'small' int types
242             if (!varTypeIsSmall(lclTyp))
243             {
244                 lclTyp = genActualType(lclTyp);
245             }
246
247             if (varTypeIsSmall(lclTyp))
248             {
249                 return false;
250             }
251
252             return true;
253         }
254         default:
255             break;
256     }
257
258     return false;
259 }
260
261 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
262 {
263     pResolvedToken->tokenContext = impTokenLookupContextHandle;
264     pResolvedToken->tokenScope   = info.compScopeHnd;
265     pResolvedToken->token        = getU4LittleEndian(addr);
266     pResolvedToken->tokenType    = kind;
267
268     if (!tiVerificationNeeded)
269     {
270         info.compCompHnd->resolveToken(pResolvedToken);
271     }
272     else
273     {
274         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
275     }
276 }
277
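// Illustrative sketch (not part of the original source): impResolveToken reads the 4-byte
// metadata token that follows an opcode and resolves it through the EE (or verifies it when
// tiVerificationNeeded). A caller importing a field opcode would look roughly like:
//
//     CORINFO_RESOLVED_TOKEN resolvedToken;
//     impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
//     // resolvedToken.hField now identifies the field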
278 /*****************************************************************************
279  *
280  *  Pop one tree from the stack.
281  */
282
283 StackEntry Compiler::impPopStack()
284 {
285     if (verCurrentState.esStackDepth == 0)
286     {
287         BADCODE("stack underflow");
288     }
289
290 #ifdef DEBUG
291 #if VERBOSE_VERIFY
292     if (VERBOSE && tiVerificationNeeded)
293     {
294         JITDUMP("\n");
295         printf(TI_DUMP_PADDING);
296         printf("About to pop from the stack: ");
297         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
298         ti.Dump();
299     }
300 #endif // VERBOSE_VERIFY
301 #endif // DEBUG
302
303     return verCurrentState.esStack[--verCurrentState.esStackDepth];
304 }
305
306 /*****************************************************************************
307  *
308  *  Peek at the n'th (0-based) tree from the top of the stack.
309  */
310
311 StackEntry& Compiler::impStackTop(unsigned n)
312 {
313     if (verCurrentState.esStackDepth <= n)
314     {
315         BADCODE("stack underflow");
316     }
317
318     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
319 }
320
321 unsigned Compiler::impStackHeight()
322 {
323     return verCurrentState.esStackDepth;
324 }
325
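// Illustrative sketch (not part of the original source): importing a binary opcode such as
// CEE_ADD pops its two operands and pushes the result, along the lines of:
//
//     GenTree* op2 = impPopStack().val;
//     GenTree* op1 = impPopStack().val;
//     impPushOnStack(gtNewOperNode(GT_ADD, op1->TypeGet(), op1, op2), typeInfo());
//
// impStackTop(n) peeks at the n'th entry without changing the stack depth.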
326 /*****************************************************************************
327  *  Some of the trees are spilled in a special way. When unspilling them, or
328  *  making a copy, they need to be handled accordingly. This function
329  *  enumerates the operators that are possible after spilling.
330  */
331
332 #ifdef DEBUG // only used in asserts
333 static bool impValidSpilledStackEntry(GenTree* tree)
334 {
335     if (tree->gtOper == GT_LCL_VAR)
336     {
337         return true;
338     }
339
340     if (tree->OperIsConst())
341     {
342         return true;
343     }
344
345     return false;
346 }
347 #endif
348
349 /*****************************************************************************
350  *
351  *  The following logic is used to save/restore stack contents.
352  *  If 'copy' is true, then we make a copy of the trees on the stack. These
353  *  have to all be cloneable/spilled values.
354  */
355
356 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
357 {
358     savePtr->ssDepth = verCurrentState.esStackDepth;
359
360     if (verCurrentState.esStackDepth)
361     {
362         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
363         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
364
365         if (copy)
366         {
367             StackEntry* table = savePtr->ssTrees;
368
369             /* Make a fresh copy of all the stack entries */
370
371             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
372             {
373                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
374                 GenTree* tree     = verCurrentState.esStack[level].val;
375
376                 assert(impValidSpilledStackEntry(tree));
377
378                 switch (tree->gtOper)
379                 {
380                     case GT_CNS_INT:
381                     case GT_CNS_LNG:
382                     case GT_CNS_DBL:
383                     case GT_CNS_STR:
384                     case GT_LCL_VAR:
385                         table->val = gtCloneExpr(tree);
386                         break;
387
388                     default:
389                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
390                         break;
391                 }
392             }
393         }
394         else
395         {
396             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
397         }
398     }
399 }
400
401 void Compiler::impRestoreStackState(SavedStack* savePtr)
402 {
403     verCurrentState.esStackDepth = savePtr->ssDepth;
404
405     if (verCurrentState.esStackDepth)
406     {
407         memcpy(verCurrentState.esStack, savePtr->ssTrees,
408                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
409     }
410 }
411
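// Illustrative sketch (not part of the original source): save/restore is used when the importer
// needs to evaluate something against a temporary view of the stack and then put it back:
//
//     SavedStack blockState;
//     impSaveStackState(&blockState, true); // 'true' clones the (cloneable/spilled) entries
//     // ... import speculatively ...
//     impRestoreStackState(&blockState);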
412 /*****************************************************************************
413  *
414  *  Get the tree list started for a new basic block.
415  */
416 inline void Compiler::impBeginTreeList()
417 {
418     assert(impTreeList == nullptr && impTreeLast == nullptr);
419
420     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
421 }
422
423 /*****************************************************************************
424  *
425  *  Store the given start and end stmt in the given basic block. This is
426  *  mostly called by impEndTreeList(BasicBlock *block). It is called
427  *  directly only for handling CEE_LEAVEs out of finally-protected try's.
428  */
429
430 inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
431 {
432     assert(firstStmt->gtOper == GT_STMT);
433     assert(lastStmt->gtOper == GT_STMT);
434
435     /* Make the list circular, so that we can easily walk it backwards */
436
437     firstStmt->gtPrev = lastStmt;
438
439     /* Store the tree list in the basic block */
440
441     block->bbTreeList = firstStmt;
442
443     /* The block should not already be marked as imported */
444     assert((block->bbFlags & BBF_IMPORTED) == 0);
445
446     block->bbFlags |= BBF_IMPORTED;
447 }
448
449 /*****************************************************************************
450  *
451  *  Store the current tree list in the given basic block.
452  */
453
454 inline void Compiler::impEndTreeList(BasicBlock* block)
455 {
456     assert(impTreeList->gtOper == GT_BEG_STMTS);
457
458     GenTree* firstTree = impTreeList->gtNext;
459
460     if (!firstTree)
461     {
462         /* The block should not already be marked as imported */
463         assert((block->bbFlags & BBF_IMPORTED) == 0);
464
465         // Empty block. Just mark it as imported
466         block->bbFlags |= BBF_IMPORTED;
467     }
468     else
469     {
470         // Ignore the GT_BEG_STMTS
471         assert(firstTree->gtPrev == impTreeList);
472
473         impEndTreeList(block, firstTree, impTreeLast);
474     }
475
476 #ifdef DEBUG
477     if (impLastILoffsStmt != nullptr)
478     {
479         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
480         impLastILoffsStmt                          = nullptr;
481     }
482
483     impTreeList = impTreeLast = nullptr;
484 #endif
485 }
486
487 /*****************************************************************************
488  *
489  *  Check that storing the given tree doesn't mess up the semantic order. Note
490  *  that this has only limited value as we can only check [0..chkLevel).
491  */
492
493 inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
494 {
495 #ifndef DEBUG
496     return;
497 #else
498     assert(stmt->gtOper == GT_STMT);
499
500     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
501     {
502         chkLevel = verCurrentState.esStackDepth;
503     }
504
505     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
506     {
507         return;
508     }
509
510     GenTree* tree = stmt->gtStmt.gtStmtExpr;
511
512     // Calls can only be appended if there are no trees with GTF_GLOB_EFFECT on the stack
513
514     if (tree->gtFlags & GTF_CALL)
515     {
516         for (unsigned level = 0; level < chkLevel; level++)
517         {
518             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
519         }
520     }
521
522     if (tree->gtOper == GT_ASG)
523     {
524         // For an assignment to a local variable, all references of that
525         // variable have to be spilled. If it is aliased, all calls and
526         // indirect accesses have to be spilled
527
528         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
529         {
530             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
531             for (unsigned level = 0; level < chkLevel; level++)
532             {
533                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
534                 assert(!lvaTable[lclNum].lvAddrExposed ||
535                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
536             }
537         }
538
539         // If the access may be to global memory, all side effects have to be spilled.
540
541         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
542         {
543             for (unsigned level = 0; level < chkLevel; level++)
544             {
545                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
546             }
547         }
548     }
549 #endif
550 }
551
552 /*****************************************************************************
553  *
554  *  Append the given GT_STMT node to the current block's tree list.
555  *  [0..chkLevel) is the portion of the stack which we will check for
556  *    interference with stmt and spill if needed.
557  */
558
559 inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
560 {
561     assert(stmt->gtOper == GT_STMT);
562     noway_assert(impTreeLast != nullptr);
563
564     /* If the statement being appended has any side-effects, check the stack
565        to see if anything needs to be spilled to preserve correct ordering. */
566
567     GenTree* expr  = stmt->gtStmt.gtStmtExpr;
568     unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
569
570     // Assignments to (unaliased) locals don't count as side-effects, since
571     // we handle them specially using impSpillLclRefs(). Temp locals should
572     // be fine too.
573
574     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
575         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
576     {
577         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
578         assert(flags == (op2Flags | GTF_ASG));
579         flags = op2Flags;
580     }
581
582     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
583     {
584         chkLevel = verCurrentState.esStackDepth;
585     }
586
587     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
588     {
589         assert(chkLevel <= verCurrentState.esStackDepth);
590
591         if (flags)
592         {
593             // If there is a call, we have to spill global refs
594             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
595
596             if (expr->gtOper == GT_ASG)
597             {
598                 GenTree* lhs = expr->gtGetOp1();
599                 // If we are assigning to a global ref, we have to spill global refs on stack.
600                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
601                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
602                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
603                 if (!expr->OperIsBlkOp())
604                 {
605                     // If we are assigning to a global ref, we have to spill global refs on stack
606                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
607                     {
608                         spillGlobEffects = true;
609                     }
610                 }
611                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
612                          ((lhs->OperGet() == GT_LCL_VAR) &&
613                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
614                 {
615                     spillGlobEffects = true;
616                 }
617             }
618
619             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
620         }
621         else
622         {
623             impSpillSpecialSideEff();
624         }
625     }
626
627     impAppendStmtCheck(stmt, chkLevel);
628
629     /* Point 'prev' at the previous node, so that we can walk backwards */
630
631     stmt->gtPrev = impTreeLast;
632
633     /* Append the expression statement to the list */
634
635     impTreeLast->gtNext = stmt;
636     impTreeLast         = stmt;
637
638 #ifdef FEATURE_SIMD
639     impMarkContiguousSIMDFieldAssignments(stmt);
640 #endif
641
642     /* Once we set impCurStmtOffs in an appended tree, we are ready to
643        report the following offsets. So reset impCurStmtOffs */
644
645     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
646     {
647         impCurStmtOffsSet(BAD_IL_OFFSET);
648     }
649
650 #ifdef DEBUG
651     if (impLastILoffsStmt == nullptr)
652     {
653         impLastILoffsStmt = stmt;
654     }
655
656     if (verbose)
657     {
658         printf("\n\n");
659         gtDispTree(stmt);
660     }
661 #endif
662 }
663
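// Illustrative sketch (not part of the original source): the chkLevel argument controls how much
// of the evaluation stack is checked for interference (and possibly spilled) before appending.
// The two common patterns at call sites in this file are:
//
//     impAppendTree(tree, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);  // check the whole stack
//     impAppendTree(tree, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs); // caller knows there is no interference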
664 /*****************************************************************************
665  *
666  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
667  */
668
669 inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
670 {
671     assert(stmt->gtOper == GT_STMT);
672     assert(stmtBefore->gtOper == GT_STMT);
673
674     GenTree* stmtPrev  = stmtBefore->gtPrev;
675     stmt->gtPrev       = stmtPrev;
676     stmt->gtNext       = stmtBefore;
677     stmtPrev->gtNext   = stmt;
678     stmtBefore->gtPrev = stmt;
679 }
680
681 /*****************************************************************************
682  *
683  *  Append the given expression tree to the current block's tree list.
684  *  Return the newly created statement.
685  */
686
687 GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
688 {
689     assert(tree);
690
691     /* Allocate an 'expression statement' node */
692
693     GenTree* expr = gtNewStmt(tree, offset);
694
695     /* Append the statement to the current block's stmt list */
696
697     impAppendStmt(expr, chkLevel);
698
699     return expr;
700 }
701
702 /*****************************************************************************
703  *
704  *  Insert the given expression tree before GT_STMT "stmtBefore"
705  */
706
707 void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
708 {
709     assert(stmtBefore->gtOper == GT_STMT);
710
711     /* Allocate an 'expression statement' node */
712
713     GenTree* expr = gtNewStmt(tree, offset);
714
715     /* Append the statement to the current block's stmt list */
716
717     impInsertStmtBefore(expr, stmtBefore);
718 }
719
720 /*****************************************************************************
721  *
722  *  Append an assignment of the given value to a temp to the current tree list.
723  *  curLevel is the stack level for which the spill to the temp is being done.
724  */
725
726 void Compiler::impAssignTempGen(unsigned    tmp,
727                                 GenTree*    val,
728                                 unsigned    curLevel,
729                                 GenTree**   pAfterStmt, /* = NULL */
730                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
731                                 BasicBlock* block       /* = NULL */
732                                 )
733 {
734     GenTree* asg = gtNewTempAssign(tmp, val);
735
736     if (!asg->IsNothingNode())
737     {
738         if (pAfterStmt)
739         {
740             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
741             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
742         }
743         else
744         {
745             impAppendTree(asg, curLevel, impCurStmtOffs);
746         }
747     }
748 }
749
750 /*****************************************************************************
751  * same as above, but handle the valueclass case too
752  */
753
754 void Compiler::impAssignTempGen(unsigned             tmpNum,
755                                 GenTree*             val,
756                                 CORINFO_CLASS_HANDLE structType,
757                                 unsigned             curLevel,
758                                 GenTree**            pAfterStmt, /* = NULL */
759                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
760                                 BasicBlock*          block       /* = NULL */
761                                 )
762 {
763     GenTree* asg;
764
765     if (varTypeIsStruct(val))
766     {
767         assert(tmpNum < lvaCount);
768         assert(structType != NO_CLASS_HANDLE);
769
770         // If the method is non-verifiable, the assert is not necessarily true,
771         // so at least ignore it when verification is turned on, since any block
772         // that tries to use the temp would have failed verification.
773         var_types varType = lvaTable[tmpNum].lvType;
774         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
775         lvaSetStruct(tmpNum, structType, false);
776
777         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
778         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
779         // that has been passed in for the value being assigned to the temp, in which case we
780         // need to set 'val' to that same type.
781         // Note also that if we always normalized the types of any node that might be a struct
782         // type, this would not be necessary - but that requires additional JIT/EE interface
783         // calls that may not actually be required - e.g. if we only access a field of a struct.
784
785         val->gtType = lvaTable[tmpNum].lvType;
786
787         GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
788         asg          = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, ilOffset, block);
789     }
790     else
791     {
792         asg = gtNewTempAssign(tmpNum, val);
793     }
794
795     if (!asg->IsNothingNode())
796     {
797         if (pAfterStmt)
798         {
799             GenTree* asgStmt = gtNewStmt(asg, ilOffset);
800             *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
801         }
802         else
803         {
804             impAppendTree(asg, curLevel, impCurStmtOffs);
805         }
806     }
807 }
808
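// Illustrative sketch (not part of the original source): spilling a struct value off the stack
// into a fresh temp typically pairs lvaGrabTemp with the struct-aware overload above:
//
//     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("spilled struct value"));
//     impAssignTempGen(tmpNum, structVal, structHnd, (unsigned)CHECK_SPILL_ALL);
//
// impGetStructAddr later in this file uses exactly this pattern to materialize a struct address.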
809 /*****************************************************************************
810  *
811  *  Pop the given number of values from the stack and return a list node with
812  *  their values.
813  *  The 'prefixTree' argument may optionally contain an argument
814  *  list that is prepended to the list returned from this function.
815  *
816  *  The notion of prepended is a bit misleading in that the list is backwards
817  *  from the way I would expect: The first element popped is at the end of
818  *  the returned list, and prefixTree is 'before' that, meaning closer to
819  *  the end of the list.  To get to prefixTree, you have to walk to the
820  *  end of the list.
821  *
822  *  For ARG_ORDER_R2L, prefixTree is only used to insert extra arguments; in
823  *  that case we reverse its meaning so that the returned list has a reversed
824  *  prefixTree at its head.
825  */
826
827 GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
828 {
829     assert(sig == nullptr || count == sig->numArgs);
830
831     CORINFO_CLASS_HANDLE structType;
832     GenTreeArgList*      treeList;
833
834     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
835     {
836         treeList = nullptr;
837     }
838     else
839     { // ARG_ORDER_L2R
840         treeList = prefixTree;
841     }
842
843     while (count--)
844     {
845         StackEntry se   = impPopStack();
846         typeInfo   ti   = se.seTypeInfo;
847         GenTree*   temp = se.val;
848
849         if (varTypeIsStruct(temp))
850         {
851             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
852             assert(ti.IsType(TI_STRUCT));
853             structType = ti.GetClassHandleForValueClass();
854 #ifdef DEBUG
855             if (verbose)
856             {
857                 printf("Calling impNormStructVal on:\n");
858                 gtDispTree(temp);
859             }
860 #endif
861             temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
862 #ifdef DEBUG
863             if (verbose)
864             {
865                 printf("resulting tree:\n");
866                 gtDispTree(temp);
867             }
868 #endif
869         }
870
871         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
872         treeList = gtNewListNode(temp, treeList);
873     }
874
875     if (sig != nullptr)
876     {
877         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
878             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
879         {
880             // Make sure that all valuetypes (including enums) that we push are loaded.
881             // This is to guarantee that if a GC is triggered from the prestub of this method,
882             // all valuetypes in the method signature are already loaded.
883             // We need to be able to find the size of the valuetypes, but we cannot
884             // do a class-load from within GC.
885             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
886         }
887
888         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
889         CORINFO_CLASS_HANDLE    argClass;
890         CORINFO_CLASS_HANDLE    argRealClass;
891         GenTreeArgList*         args;
892
893         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
894         {
895             PREFIX_ASSUME(args != nullptr);
896
897             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
898
899             // insert implied casts (from float to double or double to float)
900
901             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
902             {
903                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), false, TYP_DOUBLE);
904             }
905             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
906             {
907                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), false, TYP_FLOAT);
908             }
909
910             // insert any widening or narrowing casts for backwards compatibility
911
912             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
913
914             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
915                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
916             {
917                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
918                 // but it stopped working in Whidbey when we started passing simple valuetypes as their underlying
919                 // primitive types.
920                 // We will try to adjust for this case here to avoid breaking customers code (see VSW 485789 for
921                 // details).
922                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
923                 {
924                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
925                 }
926
927                 // Make sure that all valuetypes (including enums) that we push are loaded.
928                 // This is to guarantee that if a GC is triggered from the prestub of this method,
929                 // all valuetypes in the method signature are already loaded.
930                 // We need to be able to find the size of the valuetypes, but we cannot
931                 // do a class-load from within GC.
932                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
933             }
934
935             argLst = info.compCompHnd->getArgNext(argLst);
936         }
937     }
938
939     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
940     {
941         // Prepend the prefixTree
942
943         // Simple in-place reversal to place treeList
944         // at the end of a reversed prefixTree
945         while (prefixTree != nullptr)
946         {
947             GenTreeArgList* next = prefixTree->Rest();
948             prefixTree->Rest()   = treeList;
949             treeList             = prefixTree;
950             prefixTree           = next;
951         }
952     }
953     return treeList;
954 }
955
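// Illustrative sketch (not part of the original source): a call site with N stack arguments pops
// them into an argument list, optionally prepending extra (e.g. hidden) arguments:
//
//     GenTreeArgList* args = impPopList(sig->numArgs, sig);            // plain case
//     GenTreeArgList* args = impPopList(sig->numArgs, sig, extraArgs); // with a prefix list
//
// On ARG_ORDER_R2L targets the prefix list is reversed in place so that it ends up at the head
// of the returned list, as described in the header comment above.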
956 /*****************************************************************************
957  *
958  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
959  *  The first "skipReverseCount" items are not reversed.
960  */
961
962 GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
963
964 {
965     assert(skipReverseCount <= count);
966
967     GenTreeArgList* list = impPopList(count, sig);
968
969     // reverse the list
970     if (list == nullptr || skipReverseCount == count)
971     {
972         return list;
973     }
974
975     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
976     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
977
978     if (skipReverseCount == 0)
979     {
980         ptr = list;
981     }
982     else
983     {
984         lastSkipNode = list;
985         // Get to the first node that needs to be reversed
986         for (unsigned i = 0; i < skipReverseCount - 1; i++)
987         {
988             lastSkipNode = lastSkipNode->Rest();
989         }
990
991         PREFIX_ASSUME(lastSkipNode != nullptr);
992         ptr = lastSkipNode->Rest();
993     }
994
995     GenTreeArgList* reversedList = nullptr;
996
997     do
998     {
999         GenTreeArgList* tmp = ptr->Rest();
1000         ptr->Rest()         = reversedList;
1001         reversedList        = ptr;
1002         ptr                 = tmp;
1003     } while (ptr != nullptr);
1004
1005     if (skipReverseCount)
1006     {
1007         lastSkipNode->Rest() = reversedList;
1008         return list;
1009     }
1010     else
1011     {
1012         return reversedList;
1013     }
1014 }
1015
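// Illustrative sketch (not part of the original source): a hypothetical unmanaged (CDECL-style)
// call that must keep its first popped argument in place while reversing the rest would use:
//
//     GenTreeArgList* args = impPopRevList(sig->numArgs, sig, /* skipReverseCount */ 1);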
1016 //------------------------------------------------------------------------
1017 // impAssignStruct: Assign (copy) the structure from 'src' to 'dest'.
1018 //
1019 // Arguments:
1020 //    dest         - destination of the assignment
1021 //    src          - source of the assignment
1022 //    structHnd    - handle representing the struct type
1023 //    curLevel     - stack level for which a spill may be being done
1024 //    pAfterStmt   - statement to insert any additional statements after
1025 //    ilOffset     - il offset for new statements
1026 //    block        - block to insert any additional statements in
1027 //
1028 // Return Value:
1029 //    The tree that should be appended to the statement list that represents the assignment.
1030 //
1031 // Notes:
1032 //    Temp assignments may be appended to impTreeList if spilling is necessary.
1033
1034 GenTree* Compiler::impAssignStruct(GenTree*             dest,
1035                                    GenTree*             src,
1036                                    CORINFO_CLASS_HANDLE structHnd,
1037                                    unsigned             curLevel,
1038                                    GenTree**            pAfterStmt, /* = nullptr */
1039                                    IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
1040                                    BasicBlock*          block       /* = nullptr */
1041                                    )
1042 {
1043     assert(varTypeIsStruct(dest));
1044
1045     if (ilOffset == BAD_IL_OFFSET)
1046     {
1047         ilOffset = impCurStmtOffs;
1048     }
1049
1050     while (dest->gtOper == GT_COMMA)
1051     {
1052         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1053
1054         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1055         if (pAfterStmt)
1056         {
1057             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, ilOffset));
1058         }
1059         else
1060         {
1061             impAppendTree(dest->gtOp.gtOp1, curLevel, ilOffset); // do the side effect
1062         }
1063
1064         // set dest to the second thing
1065         dest = dest->gtOp.gtOp2;
1066     }
1067
1068     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1069            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1070
1071     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1072         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1073     {
1074         // Make this a NOP
1075         return gtNewNothingNode();
1076     }
1077
1078     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1079     // or re-creating a Blk node if it is.
1080     GenTree* destAddr;
1081
1082     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1083     {
1084         destAddr = dest->gtOp.gtOp1;
1085     }
1086     else
1087     {
1088         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1089     }
1090
1091     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, ilOffset, block));
1092 }
1093
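// Illustrative sketch (not part of the original source): the usual flow for a struct copy is to
// build the assignment and then append it at the current spill level:
//
//     GenTree* asg = impAssignStruct(dest, src, structHnd, (unsigned)CHECK_SPILL_ALL);
//     impAppendTree(asg, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
//
// As the header comment notes, temp assignments may already have been appended by the call itself.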
1094 //------------------------------------------------------------------------
1095 // impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'.
1096 //
1097 // Arguments:
1098 //    destAddr     - address of the destination of the assignment
1099 //    src          - source of the assignment
1100 //    structHnd    - handle representing the struct type
1101 //    curLevel     - stack level for which a spill may be being done
1102 //    pAfterStmt   - statement to insert any additional statements after
1103 //    ilOffset     - il offset for new statements
1104 //    block        - block to insert any additional statements in
1105 //
1106 // Return Value:
1107 //    The tree that should be appended to the statement list that represents the assignment.
1108 //
1109 // Notes:
1110 //    Temp assignments may be appended to impTreeList if spilling is necessary.
1111
1112 GenTree* Compiler::impAssignStructPtr(GenTree*             destAddr,
1113                                       GenTree*             src,
1114                                       CORINFO_CLASS_HANDLE structHnd,
1115                                       unsigned             curLevel,
1116                                       GenTree**            pAfterStmt, /* = NULL */
1117                                       IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
1118                                       BasicBlock*          block       /* = NULL */
1119                                       )
1120 {
1121     var_types destType;
1122     GenTree*  dest      = nullptr;
1123     unsigned  destFlags = 0;
1124
1125     if (ilOffset == BAD_IL_OFFSET)
1126     {
1127         ilOffset = impCurStmtOffs;
1128     }
1129
1130 #if defined(UNIX_AMD64_ABI)
1131     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1132     // TODO-ARM-BUG: Does ARM need this?
1133     // TODO-ARM64-BUG: Does ARM64 need this?
1134     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1135            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1136            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1137            (src->TypeGet() != TYP_STRUCT &&
1138             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1139 #else  // !defined(UNIX_AMD64_ABI)
1140     assert(varTypeIsStruct(src));
1141
1142     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1143            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1144            src->gtOper == GT_COMMA ||
1145            (src->TypeGet() != TYP_STRUCT &&
1146             (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1147 #endif // !defined(UNIX_AMD64_ABI)
1148     if (destAddr->OperGet() == GT_ADDR)
1149     {
1150         GenTree* destNode = destAddr->gtGetOp1();
1151         // If the actual destination is a local, or already a block node, or is a node that
1152         // will be morphed, don't insert an OBJ(ADDR).
1153         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk() ||
1154             ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet())))
1155         {
1156             dest = destNode;
1157         }
1158         destType = destNode->TypeGet();
1159     }
1160     else
1161     {
1162         destType = src->TypeGet();
1163     }
1164
1165     var_types asgType = src->TypeGet();
1166
1167     if (src->gtOper == GT_CALL)
1168     {
1169         if (src->AsCall()->TreatAsHasRetBufArg(this))
1170         {
1171             // Case of call returning a struct via hidden retbuf arg
1172
1173             // insert the return value buffer into the argument list as first byref parameter
1174             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1175
1176             // now returns void, not a struct
1177             src->gtType = TYP_VOID;
1178
1179             // return the morphed call node
1180             return src;
1181         }
1182         else
1183         {
1184             // Case of call returning a struct in one or more registers.
1185
1186             var_types returnType = (var_types)src->gtCall.gtReturnType;
1187
1188             // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1189             src->gtType = genActualType(returnType);
1190
1191             // First we try to change this to "LclVar/LclFld = call"
1192             //
1193             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1194             {
1195                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1196                 // That is, the IR will be of the form lclVar = call for multi-reg return
1197                 //
1198                 GenTree* lcl = destAddr->gtOp.gtOp1;
1199                 if (src->AsCall()->HasMultiRegRetVal())
1200                 {
1201                     // Mark the struct LclVar as used in a MultiReg return context
1202                     //  which currently makes it non promotable.
1203                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1204                     // handle multireg returns.
1205                     lcl->gtFlags |= GTF_DONT_CSE;
1206                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1207                 }
1208                 else // The call result is not a multireg return
1209                 {
1210                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1211                     lcl->ChangeOper(GT_LCL_FLD);
1212                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1213                     lcl->gtType = src->gtType;
1214                     asgType     = src->gtType;
1215                 }
1216
1217                 dest = lcl;
1218
1219 #if defined(_TARGET_ARM_)
1220                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1221                 // but that method has not been updated to include ARM.
1222                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1223                 lcl->gtFlags |= GTF_DONT_CSE;
1224 #elif defined(UNIX_AMD64_ABI)
1225                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1226                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1227
1228                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1229                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1230                 // handle multireg returns.
1231                 // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
1232                 // non-multireg returns.
1233                 lcl->gtFlags |= GTF_DONT_CSE;
1234                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1235 #endif
1236             }
1237             else // we don't have a GT_ADDR of a GT_LCL_VAR
1238             {
1239                 // !!! The destination could be on stack. !!!
1240                 // This flag will let us choose the correct write barrier.
1241                 asgType   = returnType;
1242                 destFlags = GTF_IND_TGTANYWHERE;
1243             }
1244         }
1245     }
1246     else if (src->gtOper == GT_RET_EXPR)
1247     {
1248         GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1249         noway_assert(call->gtOper == GT_CALL);
1250
1251         if (call->HasRetBufArg())
1252         {
1253             // insert the return value buffer into the argument list as first byref parameter
1254             call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1255
1256             // now returns void, not a struct
1257             src->gtType  = TYP_VOID;
1258             call->gtType = TYP_VOID;
1259
1260             // We have already appended the write to 'dest' to the GT_CALL's args,
1261             // so now we just return an empty node (pruning the GT_RET_EXPR).
1262             return src;
1263         }
1264         else
1265         {
1266             // Case of inline method returning a struct in one or more registers.
1267             //
1268             var_types returnType = (var_types)call->gtReturnType;
1269
1270             // We won't need a return buffer
1271             asgType      = returnType;
1272             src->gtType  = genActualType(returnType);
1273             call->gtType = src->gtType;
1274
1275             // If we've changed the type, and it no longer matches a local destination,
1276             // we must use an indirection.
1277             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1278             {
1279                 dest = nullptr;
1280             }
1281
1282             // !!! The destination could be on stack. !!!
1283             // This flag will let us choose the correct write barrier.
1284             destFlags = GTF_IND_TGTANYWHERE;
1285         }
1286     }
1287     else if (src->OperIsBlk())
1288     {
1289         asgType = impNormStructType(structHnd);
1290         if (src->gtOper == GT_OBJ)
1291         {
1292             assert(src->gtObj.gtClass == structHnd);
1293         }
1294     }
1295     else if (src->gtOper == GT_INDEX)
1296     {
1297         asgType = impNormStructType(structHnd);
1298         assert(src->gtIndex.gtStructElemClass == structHnd);
1299     }
1300     else if (src->gtOper == GT_MKREFANY)
1301     {
1302         // Since we are assigning the result of a GT_MKREFANY,
1303         // "destAddr" must point to a refany.
1304
1305         GenTree* destAddrClone;
1306         destAddr =
1307             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1308
1309         assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0);
1310         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1311         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1312         GenTree*       ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1313         GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
1314         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1315         GenTree* typeSlot =
1316             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1317
1318         // append the assign of the pointer value
1319         GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1320         if (pAfterStmt)
1321         {
1322             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, ilOffset));
1323         }
1324         else
1325         {
1326             impAppendTree(asg, curLevel, ilOffset);
1327         }
1328
1329         // return the assign of the type value, to be appended
1330         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1331     }
1332     else if (src->gtOper == GT_COMMA)
1333     {
1334         // The second thing is the struct or its address.
1335         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1336         if (pAfterStmt)
1337         {
1338             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, ilOffset));
1339         }
1340         else
1341         {
1342             impAppendTree(src->gtOp.gtOp1, curLevel, ilOffset); // do the side effect
1343         }
1344
1345         // Evaluate the second thing using recursion.
1346         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, ilOffset, block);
1347     }
1348     else if (src->IsLocal())
1349     {
1350         asgType = src->TypeGet();
1351     }
1352     else if (asgType == TYP_STRUCT)
1353     {
1354         asgType     = impNormStructType(structHnd);
1355         src->gtType = asgType;
1356     }
1357     if (dest == nullptr)
1358     {
1359         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1360         // if this is a known struct type.
1361         if (asgType == TYP_STRUCT)
1362         {
1363             dest = gtNewObjNode(structHnd, destAddr);
1364             gtSetObjGcInfo(dest->AsObj());
1365             // Although an obj as a call argument was always assumed to be a globRef
1366             // (which is itself overly conservative), that is not true of the operands
1367             // of a block assignment.
1368             dest->gtFlags &= ~GTF_GLOB_REF;
1369             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1370         }
1371         else if (varTypeIsStruct(asgType))
1372         {
1373             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1374         }
1375         else
1376         {
1377             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1378         }
1379     }
1380     else
1381     {
1382         dest->gtType = asgType;
1383     }
1384
1385     dest->gtFlags |= destFlags;
1386     destFlags = dest->gtFlags;
1387
1388     // return an assignment node, to be appended
1389     GenTree* asgNode = gtNewAssignNode(dest, src);
1390     gtBlockOpInit(asgNode, dest, src, false);
1391
1392     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1393     // of assignments.
1394     if ((destFlags & GTF_DONT_CSE) == 0)
1395     {
1396         dest->gtFlags &= ~(GTF_DONT_CSE);
1397     }
1398     return asgNode;
1399 }
1400
1401 /*****************************************************************************
1402    Given a struct value and the class handle for that struct, return
1403    an expression for the address of that struct value.
1404
1405    willDeref - true if the caller guarantees it will dereference the pointer.
1406 */
1407
1408 GenTree* Compiler::impGetStructAddr(GenTree*             structVal,
1409                                     CORINFO_CLASS_HANDLE structHnd,
1410                                     unsigned             curLevel,
1411                                     bool                 willDeref)
1412 {
1413     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1414
1415     var_types type = structVal->TypeGet();
1416
1417     genTreeOps oper = structVal->gtOper;
1418
1419     if (oper == GT_OBJ && willDeref)
1420     {
1421         assert(structVal->gtObj.gtClass == structHnd);
1422         return (structVal->gtObj.Addr());
1423     }
1424     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
1425              structVal->OperIsSimdHWIntrinsic())
1426     {
1427         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1428
1429         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1430
1431         // The 'return value' is now the temp itself
1432
1433         type          = genActualType(lvaTable[tmpNum].TypeGet());
1434         GenTree* temp = gtNewLclvNode(tmpNum, type);
1435         temp          = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1436         return temp;
1437     }
1438     else if (oper == GT_COMMA)
1439     {
1440         assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1441
1442         GenTree* oldTreeLast  = impTreeLast;
1443         structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1444         structVal->gtType     = TYP_BYREF;
1445
1446         if (oldTreeLast != impTreeLast)
1447         {
1448             // Some temp assignment statement was placed on the statement list
1449             // for Op2, but that would be out of order with op1, so we need to
1450             // spill op1 onto the statement list after whatever was last
1451             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1452             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1453             structVal->gtOp.gtOp1 = gtNewNothingNode();
1454         }
1455
1456         return (structVal);
1457     }
1458
1459     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1460 }
1461
1462 //------------------------------------------------------------------------
1463 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1464 //                    and optionally determine the GC layout of the struct.
1465 //
1466 // Arguments:
1467 //    structHnd       - The class handle for the struct type of interest.
1468 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1469 //                      into which the gcLayout will be written.
1470 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1471 //                      which will be set to the number of GC fields in the struct.
1472 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1473 //                      type, set to the SIMD base type
1474 //
1475 // Return Value:
1476 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1477 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1478 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1479 //
1480 // Assumptions:
1481 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1482 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1483 //
1484 // Notes:
1485 //    Normalizing the type involves examining the struct type to determine if it should
1486 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1487 //    for full enregistration, e.g. TYP_SIMD16.
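     //    For example, a 16-byte struct that maps to a recognized SIMD type (such as
     //    System.Numerics.Vector4) is normalized from TYP_STRUCT to TYP_SIMD16 here,
     //    provided featureSIMD is enabled and the struct may not contain GC pointers.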
1488
1489 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1490                                       BYTE*                gcLayout,
1491                                       unsigned*            pNumGCVars,
1492                                       var_types*           pSimdBaseType)
1493 {
1494     assert(structHnd != NO_CLASS_HANDLE);
1495
1496     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1497     var_types   structType  = TYP_STRUCT;
1498
1499     // On CoreCLR the check for GC pointers includes a "may", to account for the special
1500     // ByRef-like structs such as Span<T>. The "CONTAINS_STACK_PTR" flag is the relevant bit:
1501     // when it is set, the struct contains a ByRef that could be either a GC pointer or a
1502     // native pointer.
1503     const bool mayContainGCPtrs =
1504         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1505
1506 #ifdef FEATURE_SIMD
1507     // Check to see if this is a SIMD type.
1508     if (featureSIMD && !mayContainGCPtrs)
1509     {
1510         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1511
1512         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1513         {
1514             unsigned int sizeBytes;
1515             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1516             if (simdBaseType != TYP_UNKNOWN)
1517             {
1518                 assert(sizeBytes == originalSize);
1519                 structType = getSIMDTypeForSize(sizeBytes);
1520                 if (pSimdBaseType != nullptr)
1521                 {
1522                     *pSimdBaseType = simdBaseType;
1523                 }
1524                 // Also indicate that we use floating point registers.
1525                 compFloatingPointUsed = true;
1526             }
1527         }
1528     }
1529 #endif // FEATURE_SIMD
1530
1531     // Fetch GC layout info if requested
1532     if (gcLayout != nullptr)
1533     {
1534         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1535
1536         // Verify that the quick test up above via the class attributes gave a
1537         // safe view of the type's GCness.
1538         //
1539         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1540         // does not report any gc fields.
1541
1542         assert(mayContainGCPtrs || (numGCVars == 0));
1543
1544         if (pNumGCVars != nullptr)
1545         {
1546             *pNumGCVars = numGCVars;
1547         }
1548     }
1549     else
1550     {
1551         // Can't safely ask for number of GC pointers without also
1552         // asking for layout.
1553         assert(pNumGCVars == nullptr);
1554     }
1555
1556     return structType;
1557 }
1558
1559 //****************************************************************************
1560 //  Given a TYP_STRUCT value 'structVal', make sure it is 'canonical'; that is,
1561 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1562 //
1563 GenTree* Compiler::impNormStructVal(GenTree*             structVal,
1564                                     CORINFO_CLASS_HANDLE structHnd,
1565                                     unsigned             curLevel,
1566                                     bool                 forceNormalization /*=false*/)
1567 {
1568     assert(forceNormalization || varTypeIsStruct(structVal));
1569     assert(structHnd != NO_CLASS_HANDLE);
1570     var_types structType = structVal->TypeGet();
1571     bool      makeTemp   = false;
1572     if (structType == TYP_STRUCT)
1573     {
1574         structType = impNormStructType(structHnd);
1575     }
1576     bool                 alreadyNormalized = false;
1577     GenTreeLclVarCommon* structLcl         = nullptr;
1578
1579     genTreeOps oper = structVal->OperGet();
1580     switch (oper)
1581     {
1582         // GT_RETURN and GT_MKREFANY don't capture the handle.
1583         case GT_RETURN:
1584             break;
1585         case GT_MKREFANY:
1586             alreadyNormalized = true;
1587             break;
1588
1589         case GT_CALL:
1590             structVal->gtCall.gtRetClsHnd = structHnd;
1591             makeTemp                      = true;
1592             break;
1593
1594         case GT_RET_EXPR:
1595             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1596             makeTemp                         = true;
1597             break;
1598
1599         case GT_ARGPLACE:
1600             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1601             break;
1602
1603         case GT_INDEX:
1604             // This will be transformed to an OBJ later.
1605             alreadyNormalized                    = true;
1606             structVal->gtIndex.gtStructElemClass = structHnd;
1607             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1608             break;
1609
1610         case GT_FIELD:
1611             // Wrap it in a GT_OBJ.
1612             structVal->gtType = structType;
1613             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1614             break;
1615
1616         case GT_LCL_VAR:
1617         case GT_LCL_FLD:
1618             structLcl = structVal->AsLclVarCommon();
1619             // Wrap it in a GT_OBJ.
1620             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1621             __fallthrough;
1622
1623         case GT_OBJ:
1624         case GT_BLK:
1625         case GT_DYN_BLK:
1626         case GT_ASG:
1627             // These should already have the appropriate type.
1628             assert(structVal->gtType == structType);
1629             alreadyNormalized = true;
1630             break;
1631
1632         case GT_IND:
1633             assert(structVal->gtType == structType);
1634             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1635             alreadyNormalized = true;
1636             break;
1637
1638 #ifdef FEATURE_SIMD
1639         case GT_SIMD:
1640             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1641             break;
1642 #endif // FEATURE_SIMD
1643 #ifdef FEATURE_HW_INTRINSICS
1644         case GT_HWIntrinsic:
1645             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1646             break;
1647 #endif
1648
1649         case GT_COMMA:
1650         {
1651             // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node.
1652             GenTree* blockNode = structVal->gtOp.gtOp2;
1653             assert(blockNode->gtType == structType);
1654
1655             // Is this GT_COMMA(op1, GT_COMMA())?
1656             GenTree* parent = structVal;
1657             if (blockNode->OperGet() == GT_COMMA)
1658             {
1659                 // Find the last node in the comma chain.
1660                 do
1661                 {
1662                     assert(blockNode->gtType == structType);
1663                     parent    = blockNode;
1664                     blockNode = blockNode->gtOp.gtOp2;
1665                 } while (blockNode->OperGet() == GT_COMMA);
1666             }
1667
1668             if (blockNode->OperGet() == GT_FIELD)
1669             {
1670                 // If we have a GT_FIELD then wrap it in a GT_OBJ.
1671                 blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
1672             }
1673
1674 #ifdef FEATURE_SIMD
1675             if (blockNode->OperIsSIMDorSimdHWintrinsic())
1676             {
1677                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1678                 alreadyNormalized  = true;
1679             }
1680             else
1681 #endif
1682             {
1683                 noway_assert(blockNode->OperIsBlk());
1684
1685                 // Sink the GT_COMMA below the blockNode addr.
1686                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1687                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1688                 //
1689                 // In case of a chained GT_COMMA case, we sink the last
1690                 // GT_COMMA below the blockNode addr.
1691                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1692                 assert(blockNodeAddr->gtType == TYP_BYREF);
1693                 GenTree* commaNode    = parent;
1694                 commaNode->gtType     = TYP_BYREF;
1695                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1696                 blockNode->gtOp.gtOp1 = commaNode;
1697                 if (parent == structVal)
1698                 {
1699                     structVal = blockNode;
1700                 }
1701                 alreadyNormalized = true;
1702             }
1703         }
1704         break;
1705
1706         default:
1707             noway_assert(!"Unexpected node in impNormStructVal()");
1708             break;
1709     }
1710     structVal->gtType  = structType;
1711     GenTree* structObj = structVal;
1712
1713     if (!alreadyNormalized || forceNormalization)
1714     {
1715         if (makeTemp)
1716         {
1717             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1718
1719             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1720
1721             // The structVal is now the temp itself
1722
1723             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1724             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1725             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1726         }
1727         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1728         {
1729             // Wrap it in a GT_OBJ
1730             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1731         }
1732     }
1733
1734     if (structLcl != nullptr)
1735     {
1736         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1737         // so we don't set GTF_EXCEPT here.
1738         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1739         {
1740             structObj->gtFlags &= ~GTF_GLOB_REF;
1741         }
1742     }
1743     else
1744     {
1745         // In general an OBJ is an indirection and could raise an exception.
1746         structObj->gtFlags |= GTF_EXCEPT;
1747     }
1748     return (structObj);
1749 }
1750
1751 /******************************************************************************/
1752 // Given a type token, generate code that will evaluate to the correct
1753 // handle representation of that token (type handle, field handle, or method handle)
1754 //
1755 // For most cases, the handle is determined at compile-time, and the code
1756 // generated is simply an embedded handle.
1757 //
1758 // Run-time lookup is required if the enclosing method is shared between instantiations
1759 // and the token refers to formal type parameters whose instantiation is not known
1760 // at compile-time.
1761 //
1762 GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1763                                     BOOL*                   pRuntimeLookup /* = NULL */,
1764                                     BOOL                    mustRestoreHandle /* = FALSE */,
1765                                     BOOL                    importParent /* = FALSE */)
1766 {
1767     assert(!fgGlobalMorph);
1768
1769     CORINFO_GENERICHANDLE_RESULT embedInfo;
1770     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1771
1772     if (pRuntimeLookup)
1773     {
1774         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1775     }
1776
1777     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1778     {
1779         switch (embedInfo.handleType)
1780         {
1781             case CORINFO_HANDLETYPE_CLASS:
1782                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1783                 break;
1784
1785             case CORINFO_HANDLETYPE_METHOD:
1786                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1787                 break;
1788
1789             case CORINFO_HANDLETYPE_FIELD:
1790                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1791                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1792                 break;
1793
1794             default:
1795                 break;
1796         }
1797     }
1798
1799     // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
1800     GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1801                                       embedInfo.compileTimeHandle);
1802
1803     // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
1804     if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
1805     {
1806         result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
1807     }
1808
1809     return result;
1810 }
1811
1812 GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1813                                    CORINFO_LOOKUP*         pLookup,
1814                                    unsigned                handleFlags,
1815                                    void*                   compileTimeHandle)
1816 {
1817     if (!pLookup->lookupKind.needsRuntimeLookup)
1818     {
1819         // No runtime lookup is required.
1820         // Access is direct or memory-indirect (of a fixed address) reference
1821
1822         CORINFO_GENERIC_HANDLE handle       = nullptr;
1823         void*                  pIndirection = nullptr;
1824         assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
1825
1826         if (pLookup->constLookup.accessType == IAT_VALUE)
1827         {
1828             handle = pLookup->constLookup.handle;
1829         }
1830         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1831         {
1832             pIndirection = pLookup->constLookup.addr;
1833         }
1834         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1835     }
1836     else if (compIsForInlining())
1837     {
1838         // Don't import runtime lookups when inlining
1839         // Inlining has to be aborted in such a case
1840         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1841         return nullptr;
1842     }
1843     else
1844     {
1845         // Need to use dictionary-based access which depends on the typeContext
1846         // which is only available at runtime, not at compile-time.
1847
1848         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1849     }
1850 }
1851
1852 #ifdef FEATURE_READYTORUN_COMPILER
1853 GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1854                                              unsigned              handleFlags,
1855                                              void*                 compileTimeHandle)
1856 {
1857     CORINFO_GENERIC_HANDLE handle       = nullptr;
1858     void*                  pIndirection = nullptr;
1859     assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);
1860
1861     if (pLookup->accessType == IAT_VALUE)
1862     {
1863         handle = pLookup->handle;
1864     }
1865     else if (pLookup->accessType == IAT_PVALUE)
1866     {
1867         pIndirection = pLookup->addr;
1868     }
1869     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1870 }
1871
1872 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1873     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1874     CorInfoHelpFunc         helper,
1875     var_types               type,
1876     GenTreeArgList*         args /* =NULL*/,
1877     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1878 {
1879     CORINFO_CONST_LOOKUP lookup;
1880     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1881     {
1882         return nullptr;
1883     }
1884
1885     GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
1886
1887     op1->setEntryPoint(lookup);
1888
1889     return op1;
1890 }
1891 #endif
1892
1893 GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1894 {
1895     GenTree* op1 = nullptr;
1896
1897     switch (pCallInfo->kind)
1898     {
1899         case CORINFO_CALL:
1900             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1901
1902 #ifdef FEATURE_READYTORUN_COMPILER
1903             if (opts.IsReadyToRun())
1904             {
1905                 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1906             }
1907             else
1908             {
1909                 op1->gtFptrVal.gtEntryPoint.addr       = nullptr;
1910                 op1->gtFptrVal.gtEntryPoint.accessType = IAT_VALUE;
1911             }
1912 #endif
1913             break;
1914
1915         case CORINFO_CALL_CODE_POINTER:
1916             if (compIsForInlining())
1917             {
1918                 // Don't import runtime lookups when inlining
1919                 // Inlining has to be aborted in such a case
1920                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1921                 return nullptr;
1922             }
1923
1924             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1925             break;
1926
1927         default:
1928             noway_assert(!"unknown call kind");
1929             break;
1930     }
1931
1932     return op1;
1933 }
1934
1935 //------------------------------------------------------------------------
1936 // getRuntimeContextTree: find pointer to context for runtime lookup.
1937 //
1938 // Arguments:
1939 //    kind - lookup kind.
1940 //
1941 // Return Value:
1942 //    Return GenTree pointer to generic shared context.
1943 //
1944 // Notes:
1945 //    Reports the use of the generic context.
1946
1947 GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1948 {
1949     GenTree* ctxTree = nullptr;
1950
1951     // Collectible types require that, for shared generic code, we report any use of the generic
1952     // context parameter. (This is a conservative approach; in some cases, particularly when the
1953     // context parameter is 'this', we could avoid the eager reporting logic.)
1954     lvaGenericsContextUseCount++;
1955
1956     if (kind == CORINFO_LOOKUP_THISOBJ)
1957     {
1958         // this Object
1959         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1960
1961         // Vtable pointer of this object
1962         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1963         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1964         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1965     }
1966     else
1967     {
1968         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1969
1970         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1971     }
1972     return ctxTree;
1973 }
1974
1975 /*****************************************************************************/
1976 /* Import a dictionary lookup to access a handle in code shared between
1977    generic instantiations.
1978    The lookup depends on the typeContext which is only available at
1979    runtime, and not at compile-time.
1980    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1981    The cases are:
1982
1983    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1984       instantiation-specific handle, and the tokens to lookup the handle.
1985    2. pLookup->indirections != CORINFO_USEHELPER :
1986       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1987           to get the handle.
1988       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1989           If it is non-NULL, it is the handle required. Else, call a helper
1990           to lookup the handle.
1991  */
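     //
     // For the common case (2b), the importer ends up producing, roughly (a sketch of
     // the shape, not the exact IR):
     //
     //     handle = *slotPtr;
     //     if (handle == nullptr)
     //         handle = HELPER(genericContext, signatureToken);
     //     ...use handle...
     //
     // i.e. a QMARK/COLON that calls the runtime helper only when the dictionary slot
     // has not been populated yet.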
1992
1993 GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1994                                           CORINFO_LOOKUP*         pLookup,
1995                                           void*                   compileTimeHandle)
1996 {
1997
1998     // This method can only be called from the importer instance of the Compiler.
1999     // In other words, it cannot be called on the Compiler instance created for an inlinee.
2000     assert(!compIsForInlining());
2001
2002     GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
2003
2004     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
2005     // It's available only via the run-time helper function
2006     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
2007     {
2008 #ifdef FEATURE_READYTORUN_COMPILER
2009         if (opts.IsReadyToRun())
2010         {
2011             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
2012                                              gtNewArgList(ctxTree), &pLookup->lookupKind);
2013         }
2014 #endif
2015         GenTree* argNode =
2016             gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2017         GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2018
2019         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2020     }
2021
2022     // Slot pointer
2023     GenTree* slotPtrTree = ctxTree;
2024
2025     if (pRuntimeLookup->testForNull)
2026     {
2027         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2028                                    nullptr DEBUGARG("impRuntimeLookup slot"));
2029     }
2030
2031     GenTree* indOffTree = nullptr;
2032
2033     // Apply the repeated indirections
2034     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2035     {
2036         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2037         {
2038             indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2039                                       nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
2040         }
2041
2042         if (i != 0)
2043         {
2044             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2045             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2046             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2047         }
2048
2049         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2050         {
2051             slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
2052         }
2053
2054         if (pRuntimeLookup->offsets[i] != 0)
2055         {
2056             slotPtrTree =
2057                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2058         }
2059     }
2060
2061     // No null test required
2062     if (!pRuntimeLookup->testForNull)
2063     {
2064         if (pRuntimeLookup->indirections == 0)
2065         {
2066             return slotPtrTree;
2067         }
2068
2069         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2070         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2071
2072         if (!pRuntimeLookup->testForFixup)
2073         {
2074             return slotPtrTree;
2075         }
2076
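             // testForFixup: the slot may still hold a tagged fixup pointer. The expansion
             // below is, roughly:
             //
             //     slot = *slotPtr;
             //     if ((slot & 1) != 0)
             //         slot = *(slot - 1); // strip the tag and indirect once more
             //     return slot;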
2077         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2078
2079         unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2080         impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2081
2082         GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2083         // downcast the pointer to a TYP_INT on 64-bit targets
2084         slot = impImplicitIorI4Cast(slot, TYP_INT);
2085         // Use a GT_AND to check for the lowest bit and indirect if it is set
2086         GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2087         GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2088
2089         // slot = GT_IND(slot - 1)
2090         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2091         GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2092         GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2093         indir->gtFlags |= GTF_IND_NONFAULTING;
2094         indir->gtFlags |= GTF_IND_INVARIANT;
2095
2096         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2097         GenTree* asg   = gtNewAssignNode(slot, indir);
2098         GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2099         GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2100         impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2101
2102         return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2103     }
2104
2105     assert(pRuntimeLookup->indirections != 0);
2106
2107     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2108
2109     // Extract the handle
2110     GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2111     handle->gtFlags |= GTF_IND_NONFAULTING;
2112
2113     GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2114                                        nullptr DEBUGARG("impRuntimeLookup typehandle"));
2115
2116     // Call to helper
2117     GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2118
2119     GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2120     GenTree*        helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2121
2122     // Check for null and possibly call helper
2123     GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2124
2125     GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2126                                                        gtNewNothingNode(), // do nothing if nonnull
2127                                                        helperCall);
2128
2129     GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2130
2131     unsigned tmp;
2132     if (handleCopy->IsLocal())
2133     {
2134         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2135     }
2136     else
2137     {
2138         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2139     }
2140
2141     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2142     return gtNewLclvNode(tmp, TYP_I_IMPL);
2143 }
2144
2145 /******************************************************************************
2146  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2147  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2148  *     else, grab a new temp.
2149  *  For structs (which can be pushed on the stack using obj, etc.),
2150  *  special handling is needed.
2151  */
2152
2153 struct RecursiveGuard
2154 {
2155 public:
2156     RecursiveGuard()
2157     {
2158         m_pAddress = nullptr;
2159     }
2160
2161     ~RecursiveGuard()
2162     {
2163         if (m_pAddress)
2164         {
2165             *m_pAddress = false;
2166         }
2167     }
2168
2169     void Init(bool* pAddress, bool bInitialize)
2170     {
2171         assert(pAddress && *pAddress == false && "Recursive guard violation");
2172         m_pAddress = pAddress;
2173
2174         if (bInitialize)
2175         {
2176             *m_pAddress = true;
2177         }
2178     }
2179
2180 protected:
2181     bool* m_pAddress;
2182 };
2183
2184 bool Compiler::impSpillStackEntry(unsigned level,
2185                                   unsigned tnum
2186 #ifdef DEBUG
2187                                   ,
2188                                   bool        bAssertOnRecursion,
2189                                   const char* reason
2190 #endif
2191                                   )
2192 {
2193
2194 #ifdef DEBUG
2195     RecursiveGuard guard;
2196     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2197 #endif
2198
2199     GenTree* tree = verCurrentState.esStack[level].val;
2200
2201     /* Allocate a temp if we haven't been asked to use a particular one */
2202
2203     if (tiVerificationNeeded)
2204     {
2205         // Ignore bad temp requests (they will happen with bad code and will be
2206         // caught when importing the destination block)
2207         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2208         {
2209             return false;
2210         }
2211     }
2212     else
2213     {
2214         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2215         {
2216             return false;
2217         }
2218     }
2219
2220     bool isNewTemp = false;
2221
2222     if (tnum == BAD_VAR_NUM)
2223     {
2224         tnum      = lvaGrabTemp(true DEBUGARG(reason));
2225         isNewTemp = true;
2226     }
2227     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2228     {
2229         // if verification is needed and tnum's type is incompatible with
2230         // the type on the stack, we grab a new temp. This is safe since
2231         // we will throw a verification exception in the dest block.
2232
2233         var_types valTyp = tree->TypeGet();
2234         var_types dstTyp = lvaTable[tnum].TypeGet();
2235
2236         // if the two types are different, we return. This will only happen with bad code and will
2237         // be caught when importing the destination block. We still allow int/byref and float/double differences.
2238         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2239             !(
2240 #ifndef _TARGET_64BIT_
2241                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2242 #endif // !_TARGET_64BIT_
2243                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2244         {
2245             if (verNeedsVerification())
2246             {
2247                 return false;
2248             }
2249         }
2250     }
2251
2252     /* Assign the spilled entry to the temp */
2253     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2254
2255     // If temp is newly introduced and a ref type, grab what type info we can.
2256     if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2257     {
2258         assert(lvaTable[tnum].lvSingleDef == 0);
2259         lvaTable[tnum].lvSingleDef = 1;
2260         JITDUMP("Marked V%02u as a single def temp\n", tnum);
2261         CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2262         lvaSetClass(tnum, tree, stkHnd);
2263
2264         // If we're assigning a GT_RET_EXPR, note the temp over on the call,
2265         // so the inliner can use it in case it needs a return spill temp.
2266         if (tree->OperGet() == GT_RET_EXPR)
2267         {
2268             JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum);
2269             GenTree*             call = tree->gtRetExpr.gtInlineCandidate;
2270             InlineCandidateInfo* ici  = call->gtCall.gtInlineCandidateInfo;
2271             ici->preexistingSpillTemp = tnum;
2272         }
2273     }
2274
2275     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2276     var_types type                     = genActualType(lvaTable[tnum].TypeGet());
2277     GenTree*  temp                     = gtNewLclvNode(tnum, type);
2278     verCurrentState.esStack[level].val = temp;
2279
2280     return true;
2281 }
2282
2283 /*****************************************************************************
2284  *
2285  *  Ensure that the stack has only spilled values
2286  */
2287
2288 void Compiler::impSpillStackEnsure(bool spillLeaves)
2289 {
2290     assert(!spillLeaves || opts.compDbgCode);
2291
2292     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2293     {
2294         GenTree* tree = verCurrentState.esStack[level].val;
2295
2296         if (!spillLeaves && tree->OperIsLeaf())
2297         {
2298             continue;
2299         }
2300
2301         // Temps introduced by the importer itself don't need to be spilled
2302
2303         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2304
2305         if (isTempLcl)
2306         {
2307             continue;
2308         }
2309
2310         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2311     }
2312 }
2313
2314 void Compiler::impSpillEvalStack()
2315 {
2316     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2317     {
2318         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2319     }
2320 }
2321
2322 /*****************************************************************************
2323  *
2324  *  If the stack contains any trees with side effects in them, assign those
2325  *  trees to temps and append the assignments to the statement list.
2326  *  On return the stack is guaranteed to be empty.
2327  */
2328
2329 inline void Compiler::impEvalSideEffects()
2330 {
2331     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2332     verCurrentState.esStackDepth = 0;
2333 }
2334
2335 /*****************************************************************************
2336  *
2337  *  If the stack contains any trees with side effects in them, assign those
2338  *  trees to temps and replace them on the stack with refs to their temps.
2339  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2340  */
2341
2342 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2343 {
2344     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2345
2346     /* Before we make any appends to the tree list we must spill the
2347      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2348
2349     impSpillSpecialSideEff();
2350
2351     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2352     {
2353         chkLevel = verCurrentState.esStackDepth;
2354     }
2355
2356     assert(chkLevel <= verCurrentState.esStackDepth);
2357
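     // GTF_GLOB_EFFECT is GTF_SIDE_EFFECT plus GTF_GLOB_REF, so when spillGlobEffects
     // is true we also spill trees that merely read global (heap/static) state, not
     // just trees with assignments, calls or possible exceptions.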
2358     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2359
2360     for (unsigned i = 0; i < chkLevel; i++)
2361     {
2362         GenTree* tree = verCurrentState.esStack[i].val;
2363
2364         GenTree* lclVarTree;
2365
2366         if ((tree->gtFlags & spillFlags) != 0 ||
2367             (spillGlobEffects &&                        // Only consider the following when  spillGlobEffects == TRUE
2368              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2369              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2370                                            // lvAddrTaken flag.
2371         {
2372             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2373         }
2374     }
2375 }
2376
2377 /*****************************************************************************
2378  *
2379  *  If the stack contains any trees with special side effects in them, assign
2380  *  those trees to temps and replace them on the stack with refs to their temps.
2381  */
2382
2383 inline void Compiler::impSpillSpecialSideEff()
2384 {
2385     // Only exception objects need to be carefully handled
2386
2387     if (!compCurBB->bbCatchTyp)
2388     {
2389         return;
2390     }
2391
2392     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2393     {
2394         GenTree* tree = verCurrentState.esStack[level].val;
2395         // Make sure if we have an exception object in the sub tree we spill ourselves.
2396         if (gtHasCatchArg(tree))
2397         {
2398             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2399         }
2400     }
2401 }
2402
2403 /*****************************************************************************
2404  *
2405  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2406  */
2407
2408 void Compiler::impSpillValueClasses()
2409 {
2410     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2411     {
2412         GenTree* tree = verCurrentState.esStack[level].val;
2413
2414         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2415         {
2416             // Tree walk was aborted, which means that we found a
2417             // value class on the stack.  Need to spill that
2418             // stack entry.
2419
2420             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2421         }
2422     }
2423 }
2424
2425 /*****************************************************************************
2426  *
2427  *  Callback that checks if a tree node is TYP_STRUCT
2428  */
2429
2430 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
2431 {
2432     fgWalkResult walkResult = WALK_CONTINUE;
2433
2434     if ((*pTree)->gtType == TYP_STRUCT)
2435     {
2436         // Abort the walk and indicate that we found a value class
2437
2438         walkResult = WALK_ABORT;
2439     }
2440
2441     return walkResult;
2442 }
2443
2444 /*****************************************************************************
2445  *
2446  *  If the stack contains any trees with references to local #lclNum, assign
2447  *  those trees to temps and replace their place on the stack with refs to
2448  *  their temps.
2449  */
2450
2451 void Compiler::impSpillLclRefs(ssize_t lclNum)
2452 {
2453     /* Before we make any appends to the tree list we must spill the
2454      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2455
2456     impSpillSpecialSideEff();
2457
2458     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2459     {
2460         GenTree* tree = verCurrentState.esStack[level].val;
2461
2462         /* If the tree may throw an exception, and the block has a handler,
2463            then we need to spill assignments to the local if the local is
2464            live on entry to the handler.
2465            Just spill 'em all without considering the liveness */
2466
2467         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2468
2469         /* Skip the tree if it doesn't have an affected reference,
2470            unless xcptnCaught */
2471
2472         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2473         {
2474             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2475         }
2476     }
2477 }
2478
2479 /*****************************************************************************
2480  *
2481  *  Push catch arg onto the stack.
2482  *  If there are jumps to the beginning of the handler, insert basic block
2483  *  and spill catch arg to a temp. Update the handler block if necessary.
2484  *
2485  *  Returns the basic block of the actual handler.
2486  */
2487
2488 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2489 {
2490     // Do not inject the basic block twice on reimport. This should be
2491     // hit only under JIT stress. See if the block is the one we injected.
2492     // Note that EH canonicalization can inject internal blocks here. We might
2493     // be able to re-use such a block (but we don't, right now).
2494     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2495         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2496     {
2497         GenTree* tree = hndBlk->bbTreeList;
2498
2499         if (tree != nullptr && tree->gtOper == GT_STMT)
2500         {
2501             tree = tree->gtStmt.gtStmtExpr;
2502             assert(tree != nullptr);
2503
2504             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2505                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2506             {
2507                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2508
2509                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2510
2511                 return hndBlk->bbNext;
2512             }
2513         }
2514
2515         // If we get here, it must have been some other kind of internal block. It's possible that
2516         // someone prepended something to our injected block, but that's unlikely.
2517     }
2518
2519     /* Push the exception address value on the stack */
2520     GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2521
2522     /* Mark the node as having a side-effect - i.e. cannot be
2523      * moved around since it is tied to a fixed location (EAX) */
2524     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2525
2526 #if defined(JIT32_GCENCODER)
2527     const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2528 #else
2529     const bool forceInsertNewBlock                                     = compStressCompile(STRESS_CATCH_ARG, 5);
2530 #endif // defined(JIT32_GCENCODER)
2531
2532     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2533     if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2534     {
2535         if (hndBlk->bbRefs == 1)
2536         {
2537             hndBlk->bbRefs++;
2538         }
2539
2540         /* Create extra basic block for the spill */
2541         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2542         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2543         newBlk->setBBWeight(hndBlk->bbWeight);
2544         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2545
2546         /* Account for the new link we are about to create */
2547         hndBlk->bbRefs++;
2548
2549         /* Spill into a temp */
2550         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2551         lvaTable[tempNum].lvType = TYP_REF;
2552         arg                      = gtNewTempAssign(tempNum, arg);
2553
2554         hndBlk->bbStkTempsIn = tempNum;
2555
2556         /* Report the debug info. impImportBlockCode won't treat
2557          * the actual handler as an exception block and thus won't do it for us. */
2558         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2559         {
2560             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2561             arg            = gtNewStmt(arg, impCurStmtOffs);
2562         }
2563
2564         fgInsertStmtAtEnd(newBlk, arg);
2565
2566         arg = gtNewLclvNode(tempNum, TYP_REF);
2567     }
2568
2569     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2570
2571     return hndBlk;
2572 }
2573
2574 /*****************************************************************************
2575  *
2576  *  Given a tree, clone it. *pClone is set to the cloned tree.
2577  *  Returns the original tree if the cloning was easy,
2578  *   else returns the temp to which the tree had to be spilled.
2579  *  If the tree has side-effects, it will be spilled to a temp.
2580  */
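     //
     //  For example (roughly): cloning a side-effect-free LCL_VAR simply returns a second
     //  LCL_VAR node for the same local, whereas cloning a call first appends
     //  "tmp = call(...)" and then returns LCL_VAR(tmp) for both uses.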
2581
2582 GenTree* Compiler::impCloneExpr(GenTree*             tree,
2583                                 GenTree**            pClone,
2584                                 CORINFO_CLASS_HANDLE structHnd,
2585                                 unsigned             curLevel,
2586                                 GenTree** pAfterStmt DEBUGARG(const char* reason))
2587 {
2588     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2589     {
2590         GenTree* clone = gtClone(tree, true);
2591
2592         if (clone)
2593         {
2594             *pClone = clone;
2595             return tree;
2596         }
2597     }
2598
2599     /* Store the operand in a temp and return the temp */
2600
2601     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2602
2603     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2604     // return a struct type. It also may modify the struct type to a more
2605     // specialized type (e.g. a SIMD type).  So we will get the type from
2606     // the lclVar AFTER calling impAssignTempGen().
2607
2608     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2609     var_types type = genActualType(lvaTable[temp].TypeGet());
2610
2611     *pClone = gtNewLclvNode(temp, type);
2612     return gtNewLclvNode(temp, type);
2613 }
2614
2615 /*****************************************************************************
2616  * Remember the IL offset (including stack-empty info) for the trees we will
2617  * generate now.
2618  */
2619
2620 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2621 {
2622     if (compIsForInlining())
2623     {
2624         GenTree* callStmt = impInlineInfo->iciStmt;
2625         assert(callStmt->gtOper == GT_STMT);
2626         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2627     }
2628     else
2629     {
2630         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2631         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2632         impCurStmtOffs    = offs | stkBit;
2633     }
2634 }
2635
2636 /*****************************************************************************
2637  * Returns current IL offset with stack-empty and call-instruction info incorporated
2638  */
2639 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2640 {
2641     if (compIsForInlining())
2642     {
2643         return BAD_IL_OFFSET;
2644     }
2645     else
2646     {
2647         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2648         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2649         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2650         return offs | stkBit | callInstructionBit;
2651     }
2652 }
2653
2654 //------------------------------------------------------------------------
2655 // impCanSpillNow: check whether it is possible to spill all values from the evaluation stack to local variables.
2656 //
2657 // Arguments:
2658 //    prevOpcode - last importer opcode
2659 //
2660 // Return Value:
2661 //    true if it is legal to spill now, false if we might be inside a sequence that we do not want to break up.
2662 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2663 {
2664     // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence.
2665     // Avoid breaking up the sequence so that impInitializeArrayIntrinsic can still succeed.
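         // The sequence it must keep intact is typically (a sketch of the usual IL):
         //     newarr <elemType>; dup; ldtoken <initialization-data field>; call InitializeArray
         // Spilling in the middle would hide the constant field token from the expansion.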
2666     return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
2667 }
2668
2669 /*****************************************************************************
2670  *
2671  *  Remember the instr offset for the statements
2672  *
2673  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2674  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2675  *  as some of the trees corresponding to code up to impCurOpcOffs might
2676  *  still be sitting on the stack.
2677  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2678  *  This should be called when an opcode finally/explicitly causes
2679  *  impAppendTree(tree) to be called (as opposed to being called because of
2680  *  a spill caused by the opcode)
2681  */
2682
2683 #ifdef DEBUG
2684
2685 void Compiler::impNoteLastILoffs()
2686 {
2687     if (impLastILoffsStmt == nullptr)
2688     {
2689         // We should have added a statement for the current basic block
2690         // Is this assert correct?
2691
2692         assert(impTreeLast);
2693         assert(impTreeLast->gtOper == GT_STMT);
2694
2695         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2696     }
2697     else
2698     {
2699         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2700         impLastILoffsStmt                          = nullptr;
2701     }
2702 }
2703
2704 #endif // DEBUG
2705
2706 /*****************************************************************************
2707  * We don't create any GenTree (excluding spills) for a branch.
2708  * For debugging info, we need a placeholder so that we can note
2709  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2710  */
2711
2712 void Compiler::impNoteBranchOffs()
2713 {
2714     if (opts.compDbgCode)
2715     {
2716         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2717     }
2718 }
2719
2720 /*****************************************************************************
2721  * Locate the next stmt boundary for which we need to record info.
2722  * We will have to spill the stack at such boundaries if it is not
2723  * already empty.
2724  * Returns the next stmt boundary (after the start of the block)
2725  */
2726
2727 unsigned Compiler::impInitBlockLineInfo()
2728 {
2729     /* Assume the block does not correspond with any IL offset. This prevents
2730        us from reporting extra offsets. Extra mappings can cause confusing
2731        stepping, especially if the extra mapping is a jump-target, and the
2732        debugger does not ignore extra mappings, but instead rewinds to the
2733        nearest known offset */
2734
2735     impCurStmtOffsSet(BAD_IL_OFFSET);
2736
2737     if (compIsForInlining())
2738     {
2739         return ~0;
2740     }
2741
2742     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2743
2744     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2745     {
2746         impCurStmtOffsSet(blockOffs);
2747     }
2748
2749     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2750     {
2751         impCurStmtOffsSet(blockOffs);
2752     }
2753
2754     /* Always report IL offset 0, or some tests get confused.
2755        Probably a good idea anyway. */
2756
2757     if (blockOffs == 0)
2758     {
2759         impCurStmtOffsSet(blockOffs);
2760     }
2761
2762     if (!info.compStmtOffsetsCount)
2763     {
2764         return ~0;
2765     }
2766
2767     /* Find the lowest explicit stmt boundary within the block */
2768
2769     /* Start looking at an entry that is based on our instr offset */
2770
2771     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2772
2773     if (index >= info.compStmtOffsetsCount)
2774     {
2775         index = info.compStmtOffsetsCount - 1;
2776     }
2777
2778     /* If we've guessed too far, back up */
2779
2780     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2781     {
2782         index--;
2783     }
2784
2785     /* If we guessed short, advance ahead */
2786
2787     while (info.compStmtOffsets[index] < blockOffs)
2788     {
2789         index++;
2790
2791         if (index == info.compStmtOffsetsCount)
2792         {
2793             return info.compStmtOffsetsCount;
2794         }
2795     }
2796
2797     assert(index < info.compStmtOffsetsCount);
2798
2799     if (info.compStmtOffsets[index] == blockOffs)
2800     {
2801         /* There is an explicit boundary for the start of this basic block.
2802            So we will start with bbCodeOffs. Else we will wait until we
2803            get to the next explicit boundary */
2804
2805         impCurStmtOffsSet(blockOffs);
2806
2807         index++;
2808     }
2809
2810     return index;
2811 }
2812
2813 /*****************************************************************************/
2814
2815 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2816 {
2817     switch (opcode)
2818     {
2819         case CEE_CALL:
2820         case CEE_CALLI:
2821         case CEE_CALLVIRT:
2822             return true;
2823
2824         default:
2825             return false;
2826     }
2827 }
2828
2829 /*****************************************************************************/
2830
2831 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2832 {
2833     switch (opcode)
2834     {
2835         case CEE_CALL:
2836         case CEE_CALLI:
2837         case CEE_CALLVIRT:
2838         case CEE_JMP:
2839         case CEE_NEWOBJ:
2840         case CEE_NEWARR:
2841             return true;
2842
2843         default:
2844             return false;
2845     }
2846 }
2847
2848 /*****************************************************************************/
2849
2850 // One might think it is worth caching these values, but results indicate
2851 // that it isn't.
2852 // In addition, caching them causes SuperPMI to be unable to completely
2853 // encapsulate an individual method context.
2854 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2855 {
2856     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2857     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2858     return refAnyClass;
2859 }
2860
2861 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2862 {
2863     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2864     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2865     return typeHandleClass;
2866 }
2867
2868 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2869 {
2870     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2871     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2872     return argIteratorClass;
2873 }
2874
2875 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2876 {
2877     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2878     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2879     return stringClass;
2880 }
2881
2882 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2883 {
2884     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2885     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2886     return objectClass;
2887 }
2888
2889 /*****************************************************************************
2890  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2891  *  set its type to TYP_BYREF when we create it. Only at the point where we
2892  *  use it do we know whether it can be changed to TYP_I_IMPL.
2893  */
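/*
 *  Illustrative example (hypothetical IL, not from the original source): the
 *  address pushed by "ldloca.s 0" is imported as a TYP_BYREF node; when that
 *  address is later used as a native int (e.g. in integer arithmetic or in a
 *  comparison against a native int), impBashVarAddrsToI retypes the node to
 *  TYP_I_IMPL at that use.
 */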
2894
2895 /* static */
2896 void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
2897 {
2898     if (tree1->IsVarAddr())
2899     {
2900         tree1->gtType = TYP_I_IMPL;
2901     }
2902
2903     if (tree2 && tree2->IsVarAddr())
2904     {
2905         tree2->gtType = TYP_I_IMPL;
2906     }
2907 }
2908
2909 /*****************************************************************************
2910  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2911  *  to make that an explicit cast in our trees, so any implicit casts that
2912  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2913  *  turned into explicit casts here.
2914  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2915  */
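/*
 *  Illustrative sketch (hypothetical trees, not from the original source): on a
 *  64-bit target, an operand of type TYP_INT consumed where TYP_I_IMPL is
 *  expected becomes
 *      CAST(long <- int) <intValue>
 *  whereas a GT_CNS_INT (including the 0 pushed by ldnull) is simply retyped to
 *  TYP_I_IMPL without inserting a cast node.
 */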
2916
2917 GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
2918 {
2919     var_types currType   = genActualType(tree->gtType);
2920     var_types wantedType = genActualType(dstTyp);
2921
2922     if (wantedType != currType)
2923     {
2924         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2925         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2926         {
2927             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2928             {
2929                 tree->gtType = TYP_I_IMPL;
2930             }
2931         }
2932 #ifdef _TARGET_64BIT_
2933         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2934         {
2935             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2936             tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
2937         }
2938         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2939         {
2940             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2941             tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
2942         }
2943 #endif // _TARGET_64BIT_
2944     }
2945
2946     return tree;
2947 }
2948
2949 /*****************************************************************************
2950  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2951  *  but we want to make that an explicit cast in our trees, so any implicit casts
2952  *  that exist in the IL are turned into explicit casts here.
2953  */
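/*
 *  Illustrative sketch (hypothetical tree, not from the original source): a
 *  TYP_FLOAT value consumed where TYP_DOUBLE is expected becomes
 *      CAST(double <- float) <floatValue>
 *  and symmetrically for the double-to-float direction.
 */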
2954
2955 GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
2956 {
2957     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2958     {
2959         tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
2960     }
2961
2962     return tree;
2963 }
2964
2965 //------------------------------------------------------------------------
2966 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2967 //    with a GT_COPYBLK node.
2968 //
2969 // Arguments:
2970 //    sig - The InitializeArray signature.
2971 //
2972 // Return Value:
2973 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2974 //    nullptr otherwise.
2975 //
2976 // Notes:
2977 //    The function recognizes the following IL pattern:
2978 //      ldc <length> or a list of ldc <lower bound>/<length>
2979 //      newarr or newobj
2980 //      dup
2981 //      ldtoken <field handle>
2982 //      call InitializeArray
2983 //    The lower bounds need not be constant except when the array rank is 1.
2984 //    The function recognizes all kinds of arrays thus enabling a small runtime
2985 //    such as CoreRT to skip providing an implementation for InitializeArray.
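//
//    Illustrative sketch (hypothetical shape, not a real dump): for a single
//    dimensional int[] with constant initialization data, the call is replaced
//    by a block copy of roughly this form:
//        copy <size> bytes
//          dst: arrayLocal + dataOffset             (start of the array's data)
//          src: indirection of the initData handle  (static initialization blob)
//    so no call to InitializeArray remains in the tree.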
2986
2987 GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2988 {
2989     assert(sig->numArgs == 2);
2990
2991     GenTree* fieldTokenNode = impStackTop(0).val;
2992     GenTree* arrayLocalNode = impStackTop(1).val;
2993
2994     //
2995     // Verify that the field token is known and valid.  Note that it's also
2996     // possible for the token to come from reflection, in which case we cannot do
2997     // the optimization and must therefore revert to calling the helper.  You can
2998     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2999     //
3000
3001     // Check to see if the ldtoken helper call is what we see here.
3002     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
3003         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
3004     {
3005         return nullptr;
3006     }
3007
3008     // Strip helper call away
3009     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
3010
3011     if (fieldTokenNode->gtOper == GT_IND)
3012     {
3013         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
3014     }
3015
3016     // Check for constant
3017     if (fieldTokenNode->gtOper != GT_CNS_INT)
3018     {
3019         return nullptr;
3020     }
3021
3022     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
3023     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
3024     {
3025         return nullptr;
3026     }
3027
3028     //
3029     // We need to get the number of elements in the array and the size of each element.
3030     // We verify that the newarr statement is exactly what we expect it to be.
3031     // If it's not, then we just return nullptr and don't optimize this call.
3032     //
3033
3034     //
3035     // It is possible that we don't have any statements in the block yet.
3036     //
3037     if (impTreeLast->gtOper != GT_STMT)
3038     {
3039         assert(impTreeLast->gtOper == GT_BEG_STMTS);
3040         return nullptr;
3041     }
3042
3043     //
3044     // We start by looking at the last statement, making sure it's an assignment, and
3045     // that the target of the assignment is the array passed to InitializeArray.
3046     //
3047     GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
3048     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
3049         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
3050         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
3051     {
3052         return nullptr;
3053     }
3054
3055     //
3056     // Make sure that the object being assigned is a helper call.
3057     //
3058
3059     GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
3060     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
3061     {
3062         return nullptr;
3063     }
3064
3065     //
3066     // Verify that it is one of the new array helpers.
3067     //
3068
3069     bool isMDArray = false;
3070
3071     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
3072         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
3073         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3074         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3075 #ifdef FEATURE_READYTORUN_COMPILER
3076         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3077         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3078 #endif
3079             )
3080     {
3081         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3082         {
3083             return nullptr;
3084         }
3085
3086         isMDArray = true;
3087     }
3088
3089     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3090
3091     //
3092     // Make sure we found a compile time handle to the array
3093     //
3094
3095     if (!arrayClsHnd)
3096     {
3097         return nullptr;
3098     }
3099
3100     unsigned rank = 0;
3101     S_UINT32 numElements;
3102
3103     if (isMDArray)
3104     {
3105         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3106
3107         if (rank == 0)
3108         {
3109             return nullptr;
3110         }
3111
3112         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3113         assert(tokenArg != nullptr);
3114         GenTreeArgList* numArgsArg = tokenArg->Rest();
3115         assert(numArgsArg != nullptr);
3116         GenTreeArgList* argsArg = numArgsArg->Rest();
3117         assert(argsArg != nullptr);
3118
3119         //
3120         // The number of arguments should be a constant between 1 and 64. The rank can't be 0,
3121         // so at least one length must be present, and the rank can't exceed 32, so there can
3122         // be at most 64 arguments: 32 lengths and 32 lower bounds.
3123         //
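        //
        // Illustrative example (hypothetical): a rank-3 array created with
        // explicit lower bounds passes numArgs = 6, laid out per dimension as
        // (lowerBound0, length0, lowerBound1, length1, lowerBound2, length2),
        // which is the order in which the matching loop below walks the COMMA chain.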
3124
3125         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3126             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3127         {
3128             return nullptr;
3129         }
3130
3131         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3132         bool     lowerBoundsSpecified;
3133
3134         if (numArgs == rank * 2)
3135         {
3136             lowerBoundsSpecified = true;
3137         }
3138         else if (numArgs == rank)
3139         {
3140             lowerBoundsSpecified = false;
3141
3142             //
3143             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3144             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3145             // we get a SDArray as well; see the for loop below.
3146             //
3147
3148             if (rank == 1)
3149             {
3150                 isMDArray = false;
3151             }
3152         }
3153         else
3154         {
3155             return nullptr;
3156         }
3157
3158         //
3159         // The rank is known to be at least 1 so we can start with numElements being 1
3160         // to avoid the need to special case the first dimension.
3161         //
3162
3163         numElements = S_UINT32(1);
3164
3165         struct Match
3166         {
3167             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3168             {
3169                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3170                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3171             }
3172
3173             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3174             {
3175                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3176                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3177                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3178             }
3179
3180             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3181             {
3182                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3183                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3184             }
3185
3186             static bool IsComma(GenTree* tree)
3187             {
3188                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3189             }
3190         };
3191
3192         unsigned argIndex = 0;
3193         GenTree* comma;
3194
3195         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3196         {
3197             if (lowerBoundsSpecified)
3198             {
3199                 //
3200                 // In general lower bounds can be ignored because they're not needed to
3201                 // calculate the total number of elements. But for single dimensional arrays
3202                 // we need to know if the lower bound is 0 because in this case the runtime
3203                 // creates a SDArray and this affects the way the array data offset is calculated.
3204                 //
3205
3206                 if (rank == 1)
3207                 {
3208                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3209                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3210                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3211
3212                     if (lowerBoundNode->IsIntegralConst(0))
3213                     {
3214                         isMDArray = false;
3215                     }
3216                 }
3217
3218                 comma = comma->gtGetOp2();
3219                 argIndex++;
3220             }
3221
3222             GenTree* lengthNodeAssign = comma->gtGetOp1();
3223             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3224             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3225
3226             if (!lengthNode->IsCnsIntOrI())
3227             {
3228                 return nullptr;
3229             }
3230
3231             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3232             argIndex++;
3233         }
3234
3235         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3236
3237         if (argIndex != numArgs)
3238         {
3239             return nullptr;
3240         }
3241     }
3242     else
3243     {
3244         //
3245         // Make sure there are exactly two arguments:  the array class and
3246         // the number of elements.
3247         //
3248
3249         GenTree* arrayLengthNode;
3250
3251         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3252 #ifdef FEATURE_READYTORUN_COMPILER
3253         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3254         {
3255             // Array length is 1st argument for readytorun helper
3256             arrayLengthNode = args->Current();
3257         }
3258         else
3259 #endif
3260         {
3261             // Array length is 2nd argument for regular helper
3262             arrayLengthNode = args->Rest()->Current();
3263         }
3264
3265         //
3266         // Make sure that the number of elements looks valid.
3267         //
3268         if (arrayLengthNode->gtOper != GT_CNS_INT)
3269         {
3270             return nullptr;
3271         }
3272
3273         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3274
3275         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3276         {
3277             return nullptr;
3278         }
3279     }
3280
3281     CORINFO_CLASS_HANDLE elemClsHnd;
3282     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3283
3284     //
3285     // Note that genTypeSize will return zero for non-primitive types, which is exactly
3286     // what we want (size will then be 0, and we will catch this in the conditional below).
3287     // Note that we don't expect this to fail for valid binaries, so we assert in the
3288     // non-verification case (the verification case should not assert but rather correctly
3289     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3290     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3291     // why.
3292     //
3293
3294     S_UINT32 elemSize(genTypeSize(elementType));
3295     S_UINT32 size = elemSize * S_UINT32(numElements);
3296
3297     if (size.IsOverflow())
3298     {
3299         return nullptr;
3300     }
3301
3302     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3303     {
3304         assert(verNeedsVerification());
3305         return nullptr;
3306     }
3307
3308     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3309     if (!initData)
3310     {
3311         return nullptr;
3312     }
3313
3314     //
3315     // At this point we are ready to commit to implementing the InitializeArray
3316     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3317     // return the struct assignment node.
3318     //
3319
3320     impPopStack();
3321     impPopStack();
3322
3323     const unsigned blkSize = size.Value();
3324     unsigned       dataOffset;
3325
3326     if (isMDArray)
3327     {
3328         dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3329     }
3330     else
3331     {
3332         dataOffset = eeGetArrayDataOffset(elementType);
3333     }
3334
3335     GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3336     GenTree* blk = gtNewBlockVal(dst, blkSize);
3337     GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
3338
3339     return gtNewBlkOpNode(blk,     // dst
3340                           src,     // src
3341                           blkSize, // size
3342                           false,   // volatile
3343                           true);   // copyBlock
3344 }
3345
3346 //------------------------------------------------------------------------
3347 // impIntrinsic: possibly expand intrinsic call into alternate IR sequence
3348 //
3349 // Arguments:
3350 //    newobjThis - for constructor calls, the tree for the newly allocated object
3351 //    clsHnd - handle for the intrinsic method's class
3352 //    method - handle for the intrinsic method
3353 //    sig    - signature of the intrinsic method
3354 //    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
3355 //    memberRef - the token for the intrinsic method
3356 //    readonlyCall - true if call has a readonly prefix
3357 //    tailCall - true if call is in tail position
3358 //    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
3359 //       if call is not constrained
3360 //    constraintCallThisTransform -- this transform to apply for a constrained call
3361 //    pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
3362 //       for "traditional" jit intrinsics
3363 //    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
3364 //       that is amenable to special downstream optimization opportunities
3365 //
3366 // Returns:
3367 //    IR tree to use in place of the call, or nullptr if the jit should treat
3368 //    the intrinsic call like a normal call.
3369 //
3370 //    pIntrinsicID set to non-illegal value if the call is recognized as a
3371 //    traditional jit intrinsic, even if the intrinsic is not expanded.
3372 //
3373 //    isSpecial set true if the expansion is subject to special
3374 //    optimizations later in the jit processing
3375 //
3376 // Notes:
3377 //    On success the IR tree may be a call to a different method or an inline
3378 //    sequence. If it is a call, then the intrinsic processing here is responsible
3379 //    for handling all the special cases, as upon return to impImportCall
3380 //    expanded intrinsics bypass most of the normal call processing.
3381 //
3382 //    Intrinsics are generally not recognized in minopts and debug codegen.
3383 //
3384 //    However, certain traditional intrinsics are identified as "must expand"
3385 //    if there is no fallback implementation to invoke; these must be handled
3386 //    in all codegen modes.
3387 //
3388 //    New style intrinsics (where the fallback implementation is in IL) are
3389 //    identified as "must expand" if they are invoked from within their
3390 //    own method bodies.
3391 //
3392
3393 GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
3394                                 CORINFO_CLASS_HANDLE    clsHnd,
3395                                 CORINFO_METHOD_HANDLE   method,
3396                                 CORINFO_SIG_INFO*       sig,
3397                                 unsigned                methodFlags,
3398                                 int                     memberRef,
3399                                 bool                    readonlyCall,
3400                                 bool                    tailCall,
3401                                 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
3402                                 CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
3403                                 CorInfoIntrinsics*      pIntrinsicID,
3404                                 bool*                   isSpecialIntrinsic)
3405 {
3406     assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
3407
3408     bool              mustExpand  = false;
3409     bool              isSpecial   = false;
3410     CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
3411     NamedIntrinsic    ni          = NI_Illegal;
3412
3413     if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
3414     {
3415         intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3416     }
3417
3418     if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
3419     {
3420         // The recursive calls to Jit intrinsics are must-expand by convention.
3421         mustExpand = mustExpand || gtIsRecursiveCall(method);
3422
3423         if (intrinsicID == CORINFO_INTRINSIC_Illegal)
3424         {
3425             ni = lookupNamedIntrinsic(method);
3426
3427 #ifdef FEATURE_HW_INTRINSICS
3428             switch (ni)
3429             {
3430 #if defined(_TARGET_ARM64_)
3431                 case NI_Base_Vector64_AsByte:
3432                 case NI_Base_Vector64_AsInt16:
3433                 case NI_Base_Vector64_AsInt32:
3434                 case NI_Base_Vector64_AsSByte:
3435                 case NI_Base_Vector64_AsSingle:
3436                 case NI_Base_Vector64_AsUInt16:
3437                 case NI_Base_Vector64_AsUInt32:
3438 #endif // _TARGET_ARM64_
3439                 case NI_Base_Vector128_As:
3440                 case NI_Base_Vector128_AsByte:
3441                 case NI_Base_Vector128_AsDouble:
3442                 case NI_Base_Vector128_AsInt16:
3443                 case NI_Base_Vector128_AsInt32:
3444                 case NI_Base_Vector128_AsInt64:
3445                 case NI_Base_Vector128_AsSByte:
3446                 case NI_Base_Vector128_AsSingle:
3447                 case NI_Base_Vector128_AsUInt16:
3448                 case NI_Base_Vector128_AsUInt32:
3449                 case NI_Base_Vector128_AsUInt64:
3450 #if defined(_TARGET_XARCH_)
3451                 case NI_Base_Vector128_Zero:
3452                 case NI_Base_Vector256_As:
3453                 case NI_Base_Vector256_AsByte:
3454                 case NI_Base_Vector256_AsDouble:
3455                 case NI_Base_Vector256_AsInt16:
3456                 case NI_Base_Vector256_AsInt32:
3457                 case NI_Base_Vector256_AsInt64:
3458                 case NI_Base_Vector256_AsSByte:
3459                 case NI_Base_Vector256_AsSingle:
3460                 case NI_Base_Vector256_AsUInt16:
3461                 case NI_Base_Vector256_AsUInt32:
3462                 case NI_Base_Vector256_AsUInt64:
3463                 case NI_Base_Vector256_Zero:
3464 #endif // _TARGET_XARCH_
3465                 {
3466                     return impBaseIntrinsic(ni, method, sig);
3467                 }
3468
3469                 default:
3470                 {
3471                     break;
3472                 }
3473             }
3474
3475             if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END))
3476             {
3477                 GenTree* hwintrinsic = impHWIntrinsic(ni, method, sig, mustExpand);
3478
3479                 if (mustExpand && (hwintrinsic == nullptr))
3480                 {
3481                     return impUnsupportedHWIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand);
3482                 }
3483
3484                 return hwintrinsic;
3485             }
3486 #endif // FEATURE_HW_INTRINSICS
3487         }
3488     }
3489
3490     *pIntrinsicID = intrinsicID;
3491
3492 #ifndef _TARGET_ARM_
3493     genTreeOps interlockedOperator;
3494 #endif
3495
3496     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3497     {
3498         // must be done regardless of DbgCode and MinOpts
3499         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3500     }
3501 #ifdef _TARGET_64BIT_
3502     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3503     {
3504         // must be done regardless of DbgCode and MinOpts
3505         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3506     }
3507 #else
3508     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3509 #endif
3510
3511     GenTree* retNode = nullptr;
3512
3513     // Under debug and minopts, only expand what is required.
3514     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3515     {
3516         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3517         return retNode;
3518     }
3519
3520     var_types callType = JITtype2varType(sig->retType);
3521
3522     /* First do the intrinsics which are always smaller than a call */
3523
3524     switch (intrinsicID)
3525     {
3526         GenTree* op1;
3527         GenTree* op2;
3528
3529         case CORINFO_INTRINSIC_Sin:
3530         case CORINFO_INTRINSIC_Cbrt:
3531         case CORINFO_INTRINSIC_Sqrt:
3532         case CORINFO_INTRINSIC_Abs:
3533         case CORINFO_INTRINSIC_Cos:
3534         case CORINFO_INTRINSIC_Round:
3535         case CORINFO_INTRINSIC_Cosh:
3536         case CORINFO_INTRINSIC_Sinh:
3537         case CORINFO_INTRINSIC_Tan:
3538         case CORINFO_INTRINSIC_Tanh:
3539         case CORINFO_INTRINSIC_Asin:
3540         case CORINFO_INTRINSIC_Asinh:
3541         case CORINFO_INTRINSIC_Acos:
3542         case CORINFO_INTRINSIC_Acosh:
3543         case CORINFO_INTRINSIC_Atan:
3544         case CORINFO_INTRINSIC_Atan2:
3545         case CORINFO_INTRINSIC_Atanh:
3546         case CORINFO_INTRINSIC_Log10:
3547         case CORINFO_INTRINSIC_Pow:
3548         case CORINFO_INTRINSIC_Exp:
3549         case CORINFO_INTRINSIC_Ceiling:
3550         case CORINFO_INTRINSIC_Floor:
3551             retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
3552             break;
3553
3554 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3555         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3556
3557         // Note that CORINFO_INTRINSIC_InterlockedAdd32/64 are not actually used.
3558         // Anyway, we can import them as XADD and leave it to lowering/codegen to perform
3559         // whatever optimizations may arise from the fact that result value is not used.
3560         case CORINFO_INTRINSIC_InterlockedAdd32:
3561         case CORINFO_INTRINSIC_InterlockedXAdd32:
3562             interlockedOperator = GT_XADD;
3563             goto InterlockedBinOpCommon;
3564         case CORINFO_INTRINSIC_InterlockedXchg32:
3565             interlockedOperator = GT_XCHG;
3566             goto InterlockedBinOpCommon;
3567
3568 #ifdef _TARGET_64BIT_
3569         case CORINFO_INTRINSIC_InterlockedAdd64:
3570         case CORINFO_INTRINSIC_InterlockedXAdd64:
3571             interlockedOperator = GT_XADD;
3572             goto InterlockedBinOpCommon;
3573         case CORINFO_INTRINSIC_InterlockedXchg64:
3574             interlockedOperator = GT_XCHG;
3575             goto InterlockedBinOpCommon;
3576 #endif // _TARGET_64BIT_
3577
3578         InterlockedBinOpCommon:
3579             assert(callType != TYP_STRUCT);
3580             assert(sig->numArgs == 2);
3581
3582             op2 = impPopStack().val;
3583             op1 = impPopStack().val;
3584
3585             // This creates:
3586             //   val
3587             // XAdd
3588             //   addr
3589             //     field (for example)
3590             //
3591             // In the case where the first argument is the address of a local, we might
3592             // want to make this *not* make the var address-taken -- but atomic instructions
3593             // on a local are probably pretty useless anyway, so we probably don't care.
3594
3595             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3596             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3597             retNode = op1;
3598             break;
3599 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3600
3601         case CORINFO_INTRINSIC_MemoryBarrier:
3602
3603             assert(sig->numArgs == 0);
3604
3605             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3606             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3607             retNode = op1;
3608             break;
3609
3610 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3611         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3612         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3613 #ifdef _TARGET_64BIT_
3614         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3615 #endif
3616         {
3617             assert(callType != TYP_STRUCT);
3618             assert(sig->numArgs == 3);
3619             GenTree* op3;
3620
3621             op3 = impPopStack().val; // comparand
3622             op2 = impPopStack().val; // value
3623             op1 = impPopStack().val; // location
3624
3625             GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3626
3627             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3628             retNode = node;
3629             break;
3630         }
3631 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3632
3633         case CORINFO_INTRINSIC_StringLength:
3634             op1 = impPopStack().val;
3635             if (!opts.MinOpts() && !opts.compDbgCode)
3636             {
3637                 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen);
3638                 op1                   = arrLen;
3639             }
3640             else
3641             {
3642                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3643                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3644                                     gtNewIconNode(OFFSETOF__CORINFO_String__stringLen, TYP_I_IMPL));
3645                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3646             }
3647
3648             // Getting the length of a null string should throw
3649             op1->gtFlags |= GTF_EXCEPT;
3650
3651             retNode = op1;
3652             break;
3653
3654         case CORINFO_INTRINSIC_StringGetChar:
3655             op2 = impPopStack().val;
3656             op1 = impPopStack().val;
3657             op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
3658             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3659             retNode = op1;
3660             break;
3661
3662         case CORINFO_INTRINSIC_InitializeArray:
3663             retNode = impInitializeArrayIntrinsic(sig);
3664             break;
3665
3666         case CORINFO_INTRINSIC_Array_Address:
3667         case CORINFO_INTRINSIC_Array_Get:
3668         case CORINFO_INTRINSIC_Array_Set:
3669             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3670             break;
3671
3672         case CORINFO_INTRINSIC_GetTypeFromHandle:
3673             op1 = impStackTop(0).val;
3674             CorInfoHelpFunc typeHandleHelper;
3675             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3676                 gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper))
3677             {
3678                 op1 = impPopStack().val;
3679                 // Replace helper with a more specialized helper that returns RuntimeType
3680                 if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE)
3681                 {
3682                     typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
3683                 }
3684                 else
3685                 {
3686                     assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL);
3687                     typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL;
3688                 }
3689                 assert(op1->gtCall.gtCallArgs->gtOp.gtOp2 == nullptr);
3690                 op1         = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->gtCall.gtCallArgs);
3691                 op1->gtType = TYP_REF;
3692                 retNode     = op1;
3693             }
3694             // Call the regular function.
3695             break;
3696
3697         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3698             op1 = impStackTop(0).val;
3699             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3700                 gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall()))
3701             {
3702                 // Old tree
3703                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3704                 //
3705                 // New tree
3706                 // TreeToGetNativeTypeHandle
3707
3708                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3709                 // to that helper.
3710
3711                 op1 = impPopStack().val;
3712
3713                 // Get native TypeHandle argument to old helper
3714                 op1 = op1->gtCall.gtCallArgs;
3715                 assert(op1->OperIsList());
3716                 assert(op1->gtOp.gtOp2 == nullptr);
3717                 op1     = op1->gtOp.gtOp1;
3718                 retNode = op1;
3719             }
3720             // Call the regular function.
3721             break;
3722
3723         case CORINFO_INTRINSIC_Object_GetType:
3724         {
3725             JITDUMP("\n impIntrinsic: call to Object.GetType\n");
3726             op1 = impStackTop(0).val;
3727
3728             // If we're calling GetType on a boxed value, just get the type directly.
3729             if (op1->IsBoxedValue())
3730             {
3731                 JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
3732
3733                 // Try and clean up the box. Obtain the handle we
3734                 // were going to pass to the newobj.
3735                 GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
3736
3737                 if (boxTypeHandle != nullptr)
3738                 {
3739                     // Note we don't need to play the TYP_STRUCT games here like
3740                     // we do for LDTOKEN, since the return value of this operator is Type,
3741                     // not RuntimeTypeHandle.
3742                     impPopStack();
3743                     GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
3744                     GenTree*        runtimeType =
3745                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3746                     retNode = runtimeType;
3747                 }
3748             }
3749
3750             // If we have a constrained callvirt with a "box this" transform
3751             // we know we have a value class and hence an exact type.
3752             //
3753             // If so, instead of boxing and then extracting the type, just
3754             // construct the type directly.
3755             if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
3756                 (constraintCallThisTransform == CORINFO_BOX_THIS))
3757             {
3758                 // Ensure this is one of the simple box cases (in particular, rule out nullables).
3759                 const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
3760                 const bool            isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
3761
3762                 if (isSafeToOptimize)
3763                 {
3764                     JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
3765                     impPopStack();
3766                     GenTree* typeHandleOp =
3767                         impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
3768                     if (typeHandleOp == nullptr)
3769                     {
3770                         assert(compDonotInline());
3771                         return nullptr;
3772                     }
3773                     GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
3774                     GenTree*        runtimeType =
3775                         gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3776                     retNode = runtimeType;
3777                 }
3778             }
3779
3780 #ifdef DEBUG
3781             if (retNode != nullptr)
3782             {
3783                 JITDUMP("Optimized result for call to GetType is\n");
3784                 if (verbose)
3785                 {
3786                     gtDispTree(retNode);
3787                 }
3788             }
3789 #endif
3790
3791             // Else expand as an intrinsic, unless the call is constrained,
3792             // in which case we defer expansion to allow impImportCall do the
3793             // special constraint processing.
3794             if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
3795             {
3796                 JITDUMP("Expanding as special intrinsic\n");
3797                 impPopStack();
3798                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3799
3800                 // Set the CALL flag to indicate that the operator is implemented by a call.
3801                 // Set also the EXCEPTION flag because the native implementation of
3802                 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3803                 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3804                 retNode = op1;
3805                 // Might be further optimizable, so arrange to leave a mark behind
3806                 isSpecial = true;
3807             }
3808
3809             if (retNode == nullptr)
3810             {
3811                 JITDUMP("Leaving as normal call\n");
3812                 // Might be further optimizable, so arrange to leave a mark behind
3813                 isSpecial = true;
3814             }
3815
3816             break;
3817         }
3818
3819         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3820         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3821         // substitution.  The parameter byref will be assigned into the newly allocated object.
3822         case CORINFO_INTRINSIC_ByReference_Ctor:
3823         {
3824             // Remove call to constructor and directly assign the byref passed
3825             // to the call to the first slot of the ByReference struct.
3826             op1                                    = impPopStack().val;
3827             GenTree*             thisptr           = newobjThis;
3828             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3829             GenTree*             field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0);
3830             GenTree*             assign            = gtNewAssignNode(field, op1);
3831             GenTree*             byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3832             assert(byReferenceStruct != nullptr);
3833             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3834             retNode = assign;
3835             break;
3836         }
3837         // Implement ptr value getter for ByReference struct.
3838         case CORINFO_INTRINSIC_ByReference_Value:
3839         {
3840             op1                         = impPopStack().val;
3841             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3842             GenTree*             field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0);
3843             retNode                     = field;
3844             break;
3845         }
3846         case CORINFO_INTRINSIC_Span_GetItem:
3847         case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3848         {
3849             // Have the index and a pointer to the Span<T> 's' on the stack. Expand to:
3850             //
3851             // For Span<T>
3852             //   Comma
3853             //     BoundsCheck(index, s->_length)
3854             //     s->_pointer + index * sizeof(T)
3855             //
3856             // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
3857             //
3858             // Signature should show one class type parameter, which
3859             // we need to examine.
3860             assert(sig->sigInst.classInstCount == 1);
3861             CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3862             const unsigned       elemSize    = info.compCompHnd->getClassSize(spanElemHnd);
3863             assert(elemSize > 0);
3864
3865             const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3866
3867             JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3868                     info.compCompHnd->getClassName(spanElemHnd), elemSize);
3869
3870             GenTree* index          = impPopStack().val;
3871             GenTree* ptrToSpan      = impPopStack().val;
3872             GenTree* indexClone     = nullptr;
3873             GenTree* ptrToSpanClone = nullptr;
3874             assert(varTypeIsIntegral(index));
3875             assert(ptrToSpan->TypeGet() == TYP_BYREF);
3876
3877 #if defined(DEBUG)
3878             if (verbose)
3879             {
3880                 printf("with ptr-to-span\n");
3881                 gtDispTree(ptrToSpan);
3882                 printf("and index\n");
3883                 gtDispTree(index);
3884             }
3885 #endif // defined(DEBUG)
3886
3887             // We need to use both index and ptr-to-span twice, so clone or spill.
3888             index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3889                                  nullptr DEBUGARG("Span.get_Item index"));
3890             ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3891                                      nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3892
3893             // Bounds check
3894             CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
3895             const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3896             GenTree*             length       = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset);
3897             GenTree*             boundsCheck  = new (this, GT_ARR_BOUNDS_CHECK)
3898                 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3899
3900             // Element access
3901             GenTree*             indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3902             GenTree*             sizeofNode  = gtNewIconNode(elemSize);
3903             GenTree*             mulNode     = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3904             CORINFO_FIELD_HANDLE ptrHnd      = info.compCompHnd->getFieldInClass(clsHnd, 0);
3905             const unsigned       ptrOffset   = info.compCompHnd->getFieldOffset(ptrHnd);
3906             GenTree*             data        = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset);
3907             GenTree*             result      = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3908
3909             // Prepare result
3910             var_types resultType = JITtype2varType(sig->retType);
3911             assert(resultType == result->TypeGet());
3912             retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3913
3914             break;
3915         }
3916
3917         case CORINFO_INTRINSIC_GetRawHandle:
3918         {
3919             noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3920             CORINFO_RESOLVED_TOKEN resolvedToken;
3921             resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3922             resolvedToken.tokenScope   = info.compScopeHnd;
3923             resolvedToken.token        = memberRef;
3924             resolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
3925
3926             CORINFO_GENERICHANDLE_RESULT embedInfo;
3927             info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3928
3929             GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3930                                                  embedInfo.compileTimeHandle);
3931             if (rawHandle == nullptr)
3932             {
3933                 return nullptr;
3934             }
3935
3936             noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3937
3938             unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3939             impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3940
3941             GenTree*  lclVar     = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3942             GenTree*  lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3943             var_types resultType = JITtype2varType(sig->retType);
3944             retNode              = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3945
3946             break;
3947         }
3948
3949         case CORINFO_INTRINSIC_TypeEQ:
3950         case CORINFO_INTRINSIC_TypeNEQ:
3951         {
3952             JITDUMP("Importing Type.op_*Equality intrinsic\n");
3953             op1              = impStackTop(1).val;
3954             op2              = impStackTop(0).val;
3955             GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
3956             if (optTree != nullptr)
3957             {
3958                 // Success, clean up the evaluation stack.
3959                 impPopStack();
3960                 impPopStack();
3961
3962                 // See if we can optimize even further, to a handle compare.
3963                 optTree = gtFoldTypeCompare(optTree);
3964
3965                 // See if we can now fold a handle compare to a constant.
3966                 optTree = gtFoldExpr(optTree);
3967
3968                 retNode = optTree;
3969             }
3970             else
3971             {
3972                 // Retry optimizing these later
3973                 isSpecial = true;
3974             }
3975             break;
3976         }
3977
3978         case CORINFO_INTRINSIC_GetCurrentManagedThread:
3979         case CORINFO_INTRINSIC_GetManagedThreadId:
3980         {
3981             // Retry optimizing these during morph
3982             isSpecial = true;
3983             break;
3984         }
3985
3986         default:
3987             /* Unknown intrinsic */
3988             intrinsicID = CORINFO_INTRINSIC_Illegal;
3989             break;
3990     }
3991
3992     // Look for new-style jit intrinsics by name
3993     if (ni != NI_Illegal)
3994     {
3995         assert(retNode == nullptr);
3996         switch (ni)
3997         {
3998             case NI_System_Enum_HasFlag:
3999             {
4000                 GenTree* thisOp  = impStackTop(1).val;
4001                 GenTree* flagOp  = impStackTop(0).val;
4002                 GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
4003
4004                 if (optTree != nullptr)
4005                 {
4006                     // Optimization successful. Pop the stack for real.
4007                     impPopStack();
4008                     impPopStack();
4009                     retNode = optTree;
4010                 }
4011                 else
4012                 {
4013                     // Retry optimizing this during morph.
4014                     isSpecial = true;
4015                 }
4016
4017                 break;
4018             }
4019
4020             case NI_MathF_Round:
4021             case NI_Math_Round:
4022             {
4023                 // Math.Round and MathF.Round used to be a traditional JIT intrinsic. In order
4024                 // to simplify the transition, we will just treat it as if it was still the
4025                 // old intrinsic, CORINFO_INTRINSIC_Round. This should end up flowing properly
4026                 // everywhere else.
4027
4028                 retNode = impMathIntrinsic(method, sig, callType, CORINFO_INTRINSIC_Round, tailCall);
4029                 break;
4030             }
4031
4032             case NI_System_Collections_Generic_EqualityComparer_get_Default:
4033             {
4034                 // Flag for later handling during devirtualization.
4035                 isSpecial = true;
4036                 break;
4037             }
4038
4039             case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness:
4040             {
4041                 assert(sig->numArgs == 1);
4042
4043                 // We expect the return type of the ReverseEndianness routine to match the type of the
4044                 // one and only argument to the method. We use a special instruction for 16-bit
4045                 // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally,
4046                 // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a
4047                 // 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below.
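                // Illustrative example (hypothetical values): ReverseEndianness((ushort)0x1234)
                // yields 0x3412, i.e. GT_BSWAP16 of the popped operand; on x86/x64 that node can
                // be emitted as a 16-bit rotate by 8, as noted above.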
4048
4049                 switch (sig->retType)
4050                 {
4051                     case CorInfoType::CORINFO_TYPE_SHORT:
4052                     case CorInfoType::CORINFO_TYPE_USHORT:
4053                         retNode = gtNewOperNode(GT_BSWAP16, callType, impPopStack().val);
4054                         break;
4055
4056                     case CorInfoType::CORINFO_TYPE_INT:
4057                     case CorInfoType::CORINFO_TYPE_UINT:
4058 #ifdef _TARGET_64BIT_
4059                     case CorInfoType::CORINFO_TYPE_LONG:
4060                     case CorInfoType::CORINFO_TYPE_ULONG:
4061 #endif // _TARGET_64BIT_
4062                         retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val);
4063                         break;
4064
4065                     default:
4066                         // This default case gets hit on 32-bit archs when a call to a 64-bit overload
4067                         // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard
4068                         // of ReverseEndianness is encountered. In that case we'll let the JIT treat this as a standard
4069                         // bswap routines. If the input to the 64-bit function is a constant, then we rely
4070                         // on inlining + constant folding of 32-bit bswaps to effectively constant fold
4071                         // the 64-bit call site.
4072                         break;
4073                 }
4074
4075                 break;
4076             }
4077
4078             default:
4079                 break;
4080         }
4081     }
4082
4083     if (mustExpand && (retNode == nullptr))
4084     {
4085         NO_WAY("JIT must expand the intrinsic!");
4086     }
4087
4088     // Optionally report if this intrinsic is special
4089     // (that is, potentially re-optimizable during morph).
4090     if (isSpecialIntrinsic != nullptr)
4091     {
4092         *isSpecialIntrinsic = isSpecial;
4093     }
4094
4095     return retNode;
4096 }
4097
4098 #ifdef FEATURE_HW_INTRINSICS
4099 //------------------------------------------------------------------------
4100 // impBaseIntrinsic: dispatch intrinsics to their own implementation
4101 //
4102 // Arguments:
4103 //    intrinsic  -- id of the intrinsic function.
4104 //    method     -- method handle of the intrinsic function.
4105 //    sig        -- signature of the intrinsic call
4106 //
4107 // Return Value:
4108 //    the expanded intrinsic.
4109 //
4110 GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig)
4111 {
4112     GenTree* retNode = nullptr;
4113
4114     if (!featureSIMD)
4115     {
4116         return nullptr;
4117     }
4118
4119     unsigned  simdSize = 0;
4120     var_types baseType = getBaseTypeAndSizeOfSIMDType(sig->retTypeClass, &simdSize);
4121     var_types retType  = getSIMDTypeForSize(simdSize);
4122
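    // These helpers are only expanded when the element type of the return vector (and, for instance
    // methods, of the 'this' vector) is a simple arithmetic type; otherwise we return nullptr below
    // and leave the call to be handled like any other call.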
4123     if (sig->hasThis())
4124     {
4125         CORINFO_CLASS_HANDLE thisClass = info.compCompHnd->getArgClass(sig, sig->args);
4126         var_types            thisType  = getBaseTypeOfSIMDType(thisClass);
4127
4128         if (!varTypeIsArithmetic(thisType))
4129         {
4130             return nullptr;
4131         }
4132     }
4133
4134     if (!varTypeIsArithmetic(baseType))
4135     {
4136         return nullptr;
4137     }
4138
4139     switch (intrinsic)
4140     {
4141 #if defined(_TARGET_ARM64_)
4142         case NI_Base_Vector64_AsByte:
4143         case NI_Base_Vector64_AsInt16:
4144         case NI_Base_Vector64_AsInt32:
4145         case NI_Base_Vector64_AsSByte:
4146         case NI_Base_Vector64_AsSingle:
4147         case NI_Base_Vector64_AsUInt16:
4148         case NI_Base_Vector64_AsUInt32:
4149 #endif // _TARGET_ARM64_
4150         case NI_Base_Vector128_As:
4151         case NI_Base_Vector128_AsByte:
4152         case NI_Base_Vector128_AsDouble:
4153         case NI_Base_Vector128_AsInt16:
4154         case NI_Base_Vector128_AsInt32:
4155         case NI_Base_Vector128_AsInt64:
4156         case NI_Base_Vector128_AsSByte:
4157         case NI_Base_Vector128_AsSingle:
4158         case NI_Base_Vector128_AsUInt16:
4159         case NI_Base_Vector128_AsUInt32:
4160         case NI_Base_Vector128_AsUInt64:
4161 #if defined(_TARGET_XARCH_)
4162         case NI_Base_Vector256_As:
4163         case NI_Base_Vector256_AsByte:
4164         case NI_Base_Vector256_AsDouble:
4165         case NI_Base_Vector256_AsInt16:
4166         case NI_Base_Vector256_AsInt32:
4167         case NI_Base_Vector256_AsInt64:
4168         case NI_Base_Vector256_AsSByte:
4169         case NI_Base_Vector256_AsSingle:
4170         case NI_Base_Vector256_AsUInt16:
4171         case NI_Base_Vector256_AsUInt32:
4172         case NI_Base_Vector256_AsUInt64:
4173 #endif // _TARGET_XARCH_
4174         {
4175             // We fold away the cast here, as it only exists to satisfy
4176             // the type system. It is safe to do this here since the retNode type
4177             // and the signature return type are both the same TYP_SIMD.
4178
4179             assert(sig->numArgs == 0);
4180             assert(sig->hasThis());
4181
4182             retNode = impSIMDPopStack(retType, true, sig->retTypeClass);
4183             SetOpLclRelatedToSIMDIntrinsic(retNode);
4184             assert(retNode->gtType == getSIMDTypeForSize(getSIMDTypeSizeInBytes(sig->retTypeSigClass)));
4185             break;
4186         }
4187
4188 #ifdef _TARGET_XARCH_
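        // get_Zero is only expanded when the required instruction set is available (SSE for
        // Vector128, AVX for Vector256); otherwise retNode stays null and the call is not expanded.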
4189         case NI_Base_Vector128_Zero:
4190         {
4191             assert(sig->numArgs == 0);
4192
4193             if (compSupports(InstructionSet_SSE))
4194             {
4195                 retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
4196             }
4197             break;
4198         }
4199
4200         case NI_Base_Vector256_Zero:
4201         {
4202             assert(sig->numArgs == 0);
4203
4204             if (compSupports(InstructionSet_AVX))
4205             {
4206                 retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
4207             }
4208             break;
4209         }
4210 #endif // _TARGET_XARCH_
4211
4212         default:
4213         {
4214             unreached();
4215             break;
4216         }
4217     }
4218
4219     return retNode;
4220 }
4221 #endif // FEATURE_HW_INTRINSICS
4222
4223 GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
4224                                     CORINFO_SIG_INFO*     sig,
4225                                     var_types             callType,
4226                                     CorInfoIntrinsics     intrinsicID,
4227                                     bool                  tailCall)
4228 {
4229     GenTree* op1;
4230     GenTree* op2;
4231
4232     assert(callType != TYP_STRUCT);
4233     assert(IsMathIntrinsic(intrinsicID));
4234
4235     op1 = nullptr;
4236
4237 #if !defined(_TARGET_X86_)
4238     // Intrinsics that are not implemented directly by target instructions will
4239     // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
4240     // don't do this optimization, because
4241     //  a) For back-compatibility reasons on desktop .NET 4.6 / 4.6.1
4242     //  b) It would be a non-trivial task or too late to re-materialize a surviving
4243     //     tail prefixed GT_INTRINSIC as tail call in rationalizer.
4244     if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
4245 #else
4246     // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
4247     // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
4248     // code generation for certain EH constructs.
4249     if (!IsIntrinsicImplementedByUserCall(intrinsicID))
4250 #endif
4251     {
4252         switch (sig->numArgs)
4253         {
4254             case 1:
4255                 op1 = impPopStack().val;
4256
4257                 assert(varTypeIsFloating(op1));
4258
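                // The intrinsic node is typed as callType; if the operand was pushed with a
                // different floating-point type (e.g. float vs. double), insert a cast first.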
4259                 if (op1->TypeGet() != callType)
4260                 {
4261                     op1 = gtNewCastNode(callType, op1, false, callType);
4262                 }
4263
4264                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
4265                 break;
4266
4267             case 2:
4268                 op2 = impPopStack().val;
4269                 op1 = impPopStack().val;
4270
4271                 assert(varTypeIsFloating(op1));
4272                 assert(varTypeIsFloating(op2));
4273
4274                 if (op2->TypeGet() != callType)
4275                 {
4276                     op2 = gtNewCastNode(callType, op2, false, callType);
4277                 }
4278                 if (op1->TypeGet() != callType)
4279                 {
4280                     op1 = gtNewCastNode(callType, op1, false, callType);
4281                 }
4282
4283                 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
4284                 break;
4285
4286             default:
4287                 NO_WAY("Unsupported number of args for Math Intrinsic");
4288         }
4289
4290         if (IsIntrinsicImplementedByUserCall(intrinsicID))
4291         {
4292             op1->gtFlags |= GTF_CALL;
4293         }
4294     }
4295
4296     return op1;
4297 }
4298
4299 //------------------------------------------------------------------------
4300 // lookupNamedIntrinsic: map method to jit named intrinsic value
4301 //
4302 // Arguments:
4303 //    method -- method handle for method
4304 //
4305 // Return Value:
4306 //    Id for the named intrinsic, or Illegal if none.
4307 //
4308 // Notes:
4309 //    method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
4310 //    otherwise it is not a named jit intrinsic.
4311 //
4312
4313 NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
4314 {
4315     NamedIntrinsic result = NI_Illegal;
4316
4317     const char* className     = nullptr;
4318     const char* namespaceName = nullptr;
4319     const char* methodName    = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
4320
4321     if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
4322     {
4323         return result;
4324     }
4325
4326     if (strcmp(namespaceName, "System") == 0)
4327     {
4328         if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
4329         {
4330             result = NI_System_Enum_HasFlag;
4331         }
4332         else if ((strcmp(className, "MathF") == 0) && (strcmp(methodName, "Round") == 0))
4333         {
4334             result = NI_MathF_Round;
4335         }
4336         else if ((strcmp(className, "Math") == 0) && (strcmp(methodName, "Round") == 0))
4337         {
4338             result = NI_Math_Round;
4339         }
4340     }
4341 #if defined(_TARGET_XARCH_) // We currently only support BSWAP on x86
4342     else if (strcmp(namespaceName, "System.Buffers.Binary") == 0)
4343     {
4344         if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0))
4345         {
4346             result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness;
4347         }
4348     }
4349 #endif // defined(_TARGET_XARCH_)
4350     else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
4351     {
4352         if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
4353         {
4354             result = NI_System_Collections_Generic_EqualityComparer_get_Default;
4355         }
4356     }
4357 #ifdef FEATURE_HW_INTRINSICS
4358     else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0)
4359     {
4360         namespaceName += 25;
4361
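        // namespaceName now points at the suffix following "System.Runtime.Intrinsics" (25 chars):
        // an empty suffix selects the Vector64/128/256 helper types handled here, while ".X86" and
        // ".Arm.Arm64" select the platform-specific hardware intrinsic tables further below.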
4362         if (namespaceName[0] == '\0')
4363         {
4364             if (strncmp(className, "Vector", 6) == 0)
4365             {
4366                 className += 6;
4367
4368 #if defined(_TARGET_ARM64_)
4369                 if (strncmp(className, "64", 2) == 0)
4370                 {
4371                     className += 2;
4372
4373                     if (strcmp(className, "`1") == 0)
4374                     {
4375                         if (strncmp(methodName, "As", 2) == 0)
4376                         {
4377                             methodName += 2;
4378
4379                             // Vector64_As, Vector64_AsDouble, Vector64_AsInt64, and Vector64_AsUInt64
4380                             // are not currently supported as they require additional plumbing to be
4381                             // supported by the JIT as TYP_SIMD8.
4382
4383                             if (strcmp(methodName, "Byte") == 0)
4384                             {
4385                                 result = NI_Base_Vector64_AsByte;
4386                             }
4387                             else if (strcmp(methodName, "Int16") == 0)
4388                             {
4389                                 result = NI_Base_Vector64_AsInt16;
4390                             }
4391                             else if (strcmp(methodName, "Int32") == 0)
4392                             {
4393                                 result = NI_Base_Vector64_AsInt32;
4394                             }
4395                             else if (strcmp(methodName, "SByte") == 0)
4396                             {
4397                                 result = NI_Base_Vector64_AsSByte;
4398                             }
4399                             else if (strcmp(methodName, "Single") == 0)
4400                             {
4401                                 result = NI_Base_Vector64_AsSingle;
4402                             }
4403                             else if (strcmp(methodName, "UInt16") == 0)
4404                             {
4405                                 result = NI_Base_Vector64_AsUInt16;
4406                             }
4407                             else if (strcmp(methodName, "UInt32") == 0)
4408                             {
4409                                 result = NI_Base_Vector64_AsUInt32;
4410                             }
4411                         }
4412                     }
4413                 }
4414                 else
4415 #endif // _TARGET_ARM64_
4416                     if (strncmp(className, "128", 3) == 0)
4417                 {
4418                     className += 3;
4419
4420                     if (strcmp(className, "`1") == 0)
4421                     {
4422                         if (strncmp(methodName, "As", 2) == 0)
4423                         {
4424                             methodName += 2;
4425
4426                             if (strcmp(methodName, "`1") == 0)
4427                             {
4428                                 result = NI_Base_Vector128_As;
4429                             }
4430                             else if (strcmp(methodName, "Byte") == 0)
4431                             {
4432                                 result = NI_Base_Vector128_AsByte;
4433                             }
4434                             else if (strcmp(methodName, "Double") == 0)
4435                             {
4436                                 result = NI_Base_Vector128_AsDouble;
4437                             }
4438                             else if (strcmp(methodName, "Int16") == 0)
4439                             {
4440                                 result = NI_Base_Vector128_AsInt16;
4441                             }
4442                             else if (strcmp(methodName, "Int32") == 0)
4443                             {
4444                                 result = NI_Base_Vector128_AsInt32;
4445                             }
4446                             else if (strcmp(methodName, "Int64") == 0)
4447                             {
4448                                 result = NI_Base_Vector128_AsInt64;
4449                             }
4450                             else if (strcmp(methodName, "SByte") == 0)
4451                             {
4452                                 result = NI_Base_Vector128_AsSByte;
4453                             }
4454                             else if (strcmp(methodName, "Single") == 0)
4455                             {
4456                                 result = NI_Base_Vector128_AsSingle;
4457                             }
4458                             else if (strcmp(methodName, "UInt16") == 0)
4459                             {
4460                                 result = NI_Base_Vector128_AsUInt16;
4461                             }
4462                             else if (strcmp(methodName, "UInt32") == 0)
4463                             {
4464                                 result = NI_Base_Vector128_AsUInt32;
4465                             }
4466                             else if (strcmp(methodName, "UInt64") == 0)
4467                             {
4468                                 result = NI_Base_Vector128_AsUInt64;
4469                             }
4470                         }
4471 #if defined(_TARGET_XARCH_)
4472                         else if (strcmp(methodName, "get_Zero") == 0)
4473                         {
4474                             result = NI_Base_Vector128_Zero;
4475                         }
4476 #endif // _TARGET_XARCH_
4477                     }
4478                 }
4479 #if defined(_TARGET_XARCH_)
4480                 else if (strncmp(className, "256", 3) == 0)
4481                 {
4482                     className += 3;
4483
4484                     if (strcmp(className, "`1") == 0)
4485                     {
4486                         if (strncmp(methodName, "As", 2) == 0)
4487                         {
4488                             methodName += 2;
4489
4490                             if (strcmp(methodName, "`1") == 0)
4491                             {
4492                                 result = NI_Base_Vector256_As;
4493                             }
4494                             else if (strcmp(methodName, "Byte") == 0)
4495                             {
4496                                 result = NI_Base_Vector256_AsByte;
4497                             }
4498                             else if (strcmp(methodName, "Double") == 0)
4499                             {
4500                                 result = NI_Base_Vector256_AsDouble;
4501                             }
4502                             else if (strcmp(methodName, "Int16") == 0)
4503                             {
4504                                 result = NI_Base_Vector256_AsInt16;
4505                             }
4506                             else if (strcmp(methodName, "Int32") == 0)
4507                             {
4508                                 result = NI_Base_Vector256_AsInt32;
4509                             }
4510                             else if (strcmp(methodName, "Int64") == 0)
4511                             {
4512                                 result = NI_Base_Vector256_AsInt64;
4513                             }
4514                             else if (strcmp(methodName, "SByte") == 0)
4515                             {
4516                                 result = NI_Base_Vector256_AsSByte;
4517                             }
4518                             else if (strcmp(methodName, "Single") == 0)
4519                             {
4520                                 result = NI_Base_Vector256_AsSingle;
4521                             }
4522                             else if (strcmp(methodName, "UInt16") == 0)
4523                             {
4524                                 result = NI_Base_Vector256_AsUInt16;
4525                             }
4526                             else if (strcmp(methodName, "UInt32") == 0)
4527                             {
4528                                 result = NI_Base_Vector256_AsUInt32;
4529                             }
4530                             else if (strcmp(methodName, "UInt64") == 0)
4531                             {
4532                                 result = NI_Base_Vector256_AsUInt64;
4533                             }
4534                         }
4535                         else if (strcmp(methodName, "get_Zero") == 0)
4536                         {
4537                             result = NI_Base_Vector256_Zero;
4538                         }
4539                     }
4540                 }
4541 #endif // _TARGET_XARCH_
4542             }
4543         }
4544 #if defined(_TARGET_XARCH_)
4545         else if (strcmp(namespaceName, ".X86") == 0)
4546         {
4547             result = HWIntrinsicInfo::lookupId(className, methodName);
4548         }
4549 #elif defined(_TARGET_ARM64_)
4550         else if (strcmp(namespaceName, ".Arm.Arm64") == 0)
4551         {
4552             result = lookupHWIntrinsic(className, methodName);
4553         }
4554 #else // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4555 #error Unsupported platform
4556 #endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4557     }
4558 #endif // FEATURE_HW_INTRINSICS
4559
4560     return result;
4561 }
4562
4563 /*****************************************************************************/
4564
4565 GenTree* Compiler::impArrayAccessIntrinsic(
4566     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
4567 {
4568     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
4569        the following, as it generates fatter code.
4570     */
4571
4572     if (compCodeOpt() == SMALL_CODE)
4573     {
4574         return nullptr;
4575     }
4576
4577     /* These intrinsics generate fatter (but faster) code and are only
4578        done if we don't need SMALL_CODE */
4579
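    // For the Set accessor the last argument is the value being stored, so the number of index
    // arguments (the array rank) is numArgs - 1; for Get and Address every argument is an index.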
4580     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
4581
4582     // The rank 1 case is special because it has to handle two array formats;
4583     // we will simply not handle that case.
4584     if (rank > GT_ARR_MAX_RANK || rank <= 1)
4585     {
4586         return nullptr;
4587     }
4588
4589     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
4590     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
4591
4592     // For the ref case, we will only be able to inline if the types match
4593     // (the verifier checks for this; we don't care about the nonverified case) and the
4594     // type is final (so we don't need to do the cast).
4595     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
4596     {
4597         // Get the call site signature
4598         CORINFO_SIG_INFO LocalSig;
4599         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
4600         assert(LocalSig.hasThis());
4601
4602         CORINFO_CLASS_HANDLE actualElemClsHnd;
4603
4604         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4605         {
4606             // Fetch the last argument, the one that indicates the type we are setting.
4607             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
4608             for (unsigned r = 0; r < rank; r++)
4609             {
4610                 argType = info.compCompHnd->getArgNext(argType);
4611             }
4612
4613             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
4614             actualElemClsHnd = argInfo.GetClassHandle();
4615         }
4616         else
4617         {
4618             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
4619
4620             // Fetch the return type
4621             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
4622             assert(retInfo.IsByRef());
4623             actualElemClsHnd = retInfo.GetClassHandle();
4624         }
4625
4626         // if it's not final, we can't do the optimization
4627         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
4628         {
4629             return nullptr;
4630         }
4631     }
4632
4633     unsigned arrayElemSize;
4634     if (elemType == TYP_STRUCT)
4635     {
4636         assert(arrElemClsHnd);
4637
4638         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
4639     }
4640     else
4641     {
4642         arrayElemSize = genTypeSize(elemType);
4643     }
4644
4645     if ((unsigned char)arrayElemSize != arrayElemSize)
4646     {
4647         // arrayElemSize would be truncated as an unsigned char.
4648         // This means the array element is too large. Don't do the optimization.
4649         return nullptr;
4650     }
4651
4652     GenTree* val = nullptr;
4653
4654     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4655     {
4656         // Assignment of a struct is more work, and there are more gets than sets.
4657         if (elemType == TYP_STRUCT)
4658         {
4659             return nullptr;
4660         }
4661
4662         val = impPopStack().val;
4663         assert(genActualType(elemType) == genActualType(val->gtType) ||
4664                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
4665                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
4666                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
4667     }
4668
4669     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
4670
4671     GenTree* inds[GT_ARR_MAX_RANK];
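    // The index expressions were pushed left to right, so pop them in reverse order to store
    // them back into source order.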
4672     for (unsigned k = rank; k > 0; k--)
4673     {
4674         inds[k - 1] = impPopStack().val;
4675     }
4676
4677     GenTree* arr = impPopStack().val;
4678     assert(arr->gtType == TYP_REF);
4679
4680     GenTree* arrElem =
4681         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
4682                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
4683
4684     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
4685     {
4686         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
4687     }
4688
4689     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4690     {
4691         assert(val != nullptr);
4692         return gtNewAssignNode(arrElem, val);
4693     }
4694     else
4695     {
4696         return arrElem;
4697     }
4698 }
4699
4700 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
4701 {
4702     unsigned i;
4703
4704     // do some basic checks first
4705     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
4706     {
4707         return FALSE;
4708     }
4709
4710     if (verCurrentState.esStackDepth > 0)
4711     {
4712         // merge stack types
4713         StackEntry* parentStack = block->bbStackOnEntry();
4714         StackEntry* childStack  = verCurrentState.esStack;
4715
4716         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
4717         {
4718             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
4719             {
4720                 return FALSE;
4721             }
4722         }
4723     }
4724
4725     // merge initialization status of this ptr
4726
4727     if (verTrackObjCtorInitState)
4728     {
4729         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
4730         assert(verCurrentState.thisInitialized != TIS_Bottom);
4731
4732         // If the successor block's thisInit state is unknown, copy it from the current state.
4733         if (block->bbThisOnEntry() == TIS_Bottom)
4734         {
4735             *changed = true;
4736             verSetThisInit(block, verCurrentState.thisInitialized);
4737         }
4738         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
4739         {
4740             if (block->bbThisOnEntry() != TIS_Top)
4741             {
4742                 *changed = true;
4743                 verSetThisInit(block, TIS_Top);
4744
4745                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
4746                 {
4747                     // The block is bad. Control can flow through the block to any handler that catches the
4748                     // verification exception, but the importer ignores bad blocks and therefore won't model
4749                     // this flow in the normal way. To complete the merge into the bad block, the new state
4750                     // needs to be manually pushed to the handlers that may be reached after the verification
4751                     // exception occurs.
4752                     //
4753                     // Usually, the new state was already propagated to the relevant handlers while processing
4754                     // the predecessors of the bad block. The exception is when the bad block is at the start
4755                     // of a try region, meaning it is protected by additional handlers that do not protect its
4756                     // predecessors.
4757                     //
4758                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
4759                     {
4760                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
4761                         // recursive calls back into this code path (if successors of the current bad block are
4762                         // also bad blocks).
4763                         //
4764                         ThisInitState origTIS           = verCurrentState.thisInitialized;
4765                         verCurrentState.thisInitialized = TIS_Top;
4766                         impVerifyEHBlock(block, true);
4767                         verCurrentState.thisInitialized = origTIS;
4768                     }
4769                 }
4770             }
4771         }
4772     }
4773     else
4774     {
4775         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4776     }
4777
4778     return TRUE;
4779 }
4780
4781 /*****************************************************************************
4782  * 'logMsg' is true if a log message needs to be logged, false if the caller has
4783  *   already logged it (presumably in a more detailed fashion than done here).
4784  * 'bVerificationException' is true for a verification exception, false for a
4785  *   "call unauthorized by host" exception.
4786  */
4787
4788 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4789 {
4790     block->bbJumpKind = BBJ_THROW;
4791     block->bbFlags |= BBF_FAILED_VERIFICATION;
4792
4793     impCurStmtOffsSet(block->bbCodeOffs);
4794
4795 #ifdef DEBUG
4796     // we need this since BeginTreeList asserts otherwise
4797     impTreeList = impTreeLast = nullptr;
4798     block->bbFlags &= ~BBF_IMPORTED;
4799
4800     if (logMsg)
4801     {
4802         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4803                 block->bbCodeOffs, block->bbCodeOffsEnd));
4804         if (verbose)
4805         {
4806             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4807         }
4808     }
4809
4810     if (JitConfig.DebugBreakOnVerificationFailure())
4811     {
4812         DebugBreak();
4813     }
4814 #endif
4815
4816     impBeginTreeList();
4817
4818     // if the stack is non-empty evaluate all the side-effects
4819     if (verCurrentState.esStackDepth > 0)
4820     {
4821         impEvalSideEffects();
4822     }
4823     assert(verCurrentState.esStackDepth == 0);
4824
4825     GenTree* op1 =
4826         gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4827     // verCurrentState.esStackDepth = 0;
4828     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4829
4830     // The inliner is not able to handle methods that require a throw block, so
4831     // make sure this method never gets inlined.
4832     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4833 }
4834
4835 /*****************************************************************************
4836  *
4837  */
4838 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4839
4840 {
4841     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4842     // slightly different mechanism in which it calls the JIT to perform IL verification:
4843     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4844     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4845     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4846     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
4847     // up the exception; instead it embeds a throw inside the offending basic block and lets the
4848     // jitted method fail at runtime.
4849     //
4850     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4851     // with the IMPORT_ONLY and IL Verification flag set) because this call won't actually generate code;
4852     // it just tries to find out whether to fail this method before even actually jitting it.  So, if
4853     // we detect these two conditions, instead of generating a throw statement inside the offending
4854     // basic block, we immediately fail to JIT and notify the VM so that the IsVerifiable() predicate
4855     // returns false and RyuJIT behaves the same way JIT64 does.
4856     //
4857     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4858     // RyuJIT for the time being until we completely replace JIT64.
4859     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4860
4861     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4862     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
4863     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4864     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4865     // be turned off during importation).
4866     CLANG_FORMAT_COMMENT_ANCHOR;
4867
4868 #ifdef _TARGET_64BIT_
4869
4870 #ifdef DEBUG
4871     bool canSkipVerificationResult =
4872         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4873     assert(tiVerificationNeeded || canSkipVerificationResult);
4874 #endif // DEBUG
4875
4876     // Add the non verifiable flag to the compiler
4877     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4878     {
4879         tiIsVerifiableCode = FALSE;
4880     }
4881 #endif //_TARGET_64BIT_
4882     verResetCurrentState(block, &verCurrentState);
4883     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4884
4885 #ifdef DEBUG
4886     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4887 #endif                   // DEBUG
4888 }
4889
4890 /******************************************************************************/
4891 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4892 {
4893     assert(ciType < CORINFO_TYPE_COUNT);
4894
4895     typeInfo tiResult;
4896     switch (ciType)
4897     {
4898         case CORINFO_TYPE_STRING:
4899         case CORINFO_TYPE_CLASS:
4900             tiResult = verMakeTypeInfo(clsHnd);
4901             if (!tiResult.IsType(TI_REF))
4902             { // type must be consistent with element type
4903                 return typeInfo();
4904             }
4905             break;
4906
4907 #ifdef _TARGET_64BIT_
4908         case CORINFO_TYPE_NATIVEINT:
4909         case CORINFO_TYPE_NATIVEUINT:
4910             if (clsHnd)
4911             {
4912                 // If we have more precise information, use it
4913                 return verMakeTypeInfo(clsHnd);
4914             }
4915             else
4916             {
4917                 return typeInfo::nativeInt();
4918             }
4919             break;
4920 #endif // _TARGET_64BIT_
4921
4922         case CORINFO_TYPE_VALUECLASS:
4923         case CORINFO_TYPE_REFANY:
4924             tiResult = verMakeTypeInfo(clsHnd);
4925             // type must be consistent with element type;
4926             if (!tiResult.IsValueClass())
4927             {
4928                 return typeInfo();
4929             }
4930             break;
4931         case CORINFO_TYPE_VAR:
4932             return verMakeTypeInfo(clsHnd);
4933
4934         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4935         case CORINFO_TYPE_VOID:
4936             return typeInfo();
4937             break;
4938
4939         case CORINFO_TYPE_BYREF:
4940         {
4941             CORINFO_CLASS_HANDLE childClassHandle;
4942             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4943             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4944         }
4945         break;
4946
4947         default:
4948             if (clsHnd)
4949             { // If we have more precise information, use it
4950                 return typeInfo(TI_STRUCT, clsHnd);
4951             }
4952             else
4953             {
4954                 return typeInfo(JITtype2tiType(ciType));
4955             }
4956     }
4957     return tiResult;
4958 }
4959
4960 /******************************************************************************/
4961
4962 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4963 {
4964     if (clsHnd == nullptr)
4965     {
4966         return typeInfo();
4967     }
4968
4969     // Byrefs should only occur in method and local signatures, which are accessed
4970     // using ICorClassInfo and ICorClassInfo.getChildType.
4971     // So findClass() and getClassAttribs() should not be called for byrefs
4972
4973     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4974     {
4975         assert(!"Did findClass() return a Byref?");
4976         return typeInfo();
4977     }
4978
4979     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4980
4981     if (attribs & CORINFO_FLG_VALUECLASS)
4982     {
4983         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4984
4985         // Metadata validation should ensure that CORINFO_TYPE_BYREF does
4986         // not occur here, so we may want to change this to an assert instead.
4987         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4988         {
4989             return typeInfo();
4990         }
4991
4992 #ifdef _TARGET_64BIT_
4993         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4994         {
4995             return typeInfo::nativeInt();
4996         }
4997 #endif // _TARGET_64BIT_
4998
4999         if (t != CORINFO_TYPE_UNDEF)
5000         {
5001             return (typeInfo(JITtype2tiType(t)));
5002         }
5003         else if (bashStructToRef)
5004         {
5005             return (typeInfo(TI_REF, clsHnd));
5006         }
5007         else
5008         {
5009             return (typeInfo(TI_STRUCT, clsHnd));
5010         }
5011     }
5012     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
5013     {
5014         // See comment in _typeInfo.h for why we do it this way.
5015         return (typeInfo(TI_REF, clsHnd, true));
5016     }
5017     else
5018     {
5019         return (typeInfo(TI_REF, clsHnd));
5020     }
5021 }
5022
5023 /******************************************************************************/
5024 BOOL Compiler::verIsSDArray(typeInfo ti)
5025 {
5026     if (ti.IsNullObjRef())
5027     { // nulls are SD arrays
5028         return TRUE;
5029     }
5030
5031     if (!ti.IsType(TI_REF))
5032     {
5033         return FALSE;
5034     }
5035
5036     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
5037     {
5038         return FALSE;
5039     }
5040     return TRUE;
5041 }
5042
5043 /******************************************************************************/
5044 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
5045 /* Returns an error type if anything goes wrong */
5046
5047 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
5048 {
5049     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
5050
5051     if (!verIsSDArray(arrayObjectType))
5052     {
5053         return typeInfo();
5054     }
5055
5056     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
5057     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
5058
5059     return verMakeTypeInfo(ciType, childClassHandle);
5060 }
5061
5062 /*****************************************************************************
5063  */
5064 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
5065 {
5066     CORINFO_CLASS_HANDLE classHandle;
5067     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
5068
5069     var_types type = JITtype2varType(ciType);
5070     if (varTypeIsGC(type))
5071     {
5072         // For efficiency, getArgType only returns something in classHandle for
5073         // value types.  For other types that have additional type info, you
5074         // have to call back explicitly
5075         classHandle = info.compCompHnd->getArgClass(sig, args);
5076         if (!classHandle)
5077         {
5078             NO_WAY("Could not figure out Class specified in argument or local signature");
5079         }
5080     }
5081
5082     return verMakeTypeInfo(ciType, classHandle);
5083 }
5084
5085 /*****************************************************************************/
5086
5087 // This does the expensive check to figure out whether the method
5088 // needs to be verified. It is called only when we fail verification,
5089 // just before throwing the verification exception.
5090
5091 BOOL Compiler::verNeedsVerification()
5092 {
5093     // If we have previously determined that verification is NOT needed
5094     // (for example in Compiler::compCompile), that means verification is really not needed.
5095     // Return the same decision we made before.
5096     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
5097
5098     if (!tiVerificationNeeded)
5099     {
5100         return tiVerificationNeeded;
5101     }
5102
5103     assert(tiVerificationNeeded);
5104
5105     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
5106     // obtain the answer.
5107     CorInfoCanSkipVerificationResult canSkipVerificationResult =
5108         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
5109
5110     // canSkipVerification will return one of the following three values:
5111     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
5112     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
5113     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
5114     //     but need to insert a callout to the VM to ask during runtime
5115     //     whether to skip verification or not.
5116
5117     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
5118     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
5119     {
5120         tiRuntimeCalloutNeeded = true;
5121     }
5122
5123     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
5124     {
5125         // Dev10 706080 - Testers don't like the assert, so just silence it
5126         // by not using the macros that invoke debugAssert.
5127         badCode();
5128     }
5129
5130     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
5131     // The following line means we will NOT do jit time verification if canSkipVerification
5132     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
5133     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
5134     return tiVerificationNeeded;
5135 }
5136
5137 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
5138 {
5139     if (ti.IsByRef())
5140     {
5141         return TRUE;
5142     }
5143     if (!ti.IsType(TI_STRUCT))
5144     {
5145         return FALSE;
5146     }
5147     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
5148 }
5149
5150 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
5151 {
5152     if (ti.IsPermanentHomeByRef())
5153     {
5154         return TRUE;
5155     }
5156     else
5157     {
5158         return FALSE;
5159     }
5160 }
5161
5162 BOOL Compiler::verIsBoxable(const typeInfo& ti)
5163 {
5164     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
5165             || ti.IsUnboxedGenericTypeVar() ||
5166             (ti.IsType(TI_STRUCT) &&
5167              // exclude byreflike structs
5168              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
5169 }
5170
5171 // Is it a boxed value type?
5172 bool Compiler::verIsBoxedValueType(typeInfo ti)
5173 {
5174     if (ti.GetType() == TI_REF)
5175     {
5176         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
5177         return !!eeIsValueClass(clsHnd);
5178     }
5179     else
5180     {
5181         return false;
5182     }
5183 }
5184
5185 /*****************************************************************************
5186  *
5187  *  Check if a TailCall is legal.
5188  */
5189
5190 bool Compiler::verCheckTailCallConstraint(
5191     OPCODE                  opcode,
5192     CORINFO_RESOLVED_TOKEN* pResolvedToken,
5193     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
5194     bool                    speculative                // If true, won't throw if verification fails. Instead it will
5195                                                        // return false to the caller.
5196                                                        // If false, it will throw.
5197     )
5198 {
5199     DWORD            mflags;
5200     CORINFO_SIG_INFO sig;
5201     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
5202                                    // this counter is used to keep track of how many items have been
5203                                    // virtually popped
5204
5205     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
5206     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
5207     unsigned              methodClassFlgs = 0;
5208
5209     assert(impOpcodeIsCallOpcode(opcode));
5210
5211     if (compIsForInlining())
5212     {
5213         return false;
5214     }
5215
5216     // for calli, VerifyOrReturn that this is not a virtual method
5217     if (opcode == CEE_CALLI)
5218     {
5219         /* Get the call sig */
5220         eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5221
5222         // We don't know the target method, so we have to infer the flags, or
5223         // assume the worst-case.
5224         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
5225     }
5226     else
5227     {
5228         methodHnd = pResolvedToken->hMethod;
5229
5230         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
5231
5232         // When verifying generic code we pair the method handle with its
5233         // owning class to get the exact method signature.
5234         methodClassHnd = pResolvedToken->hClass;
5235         assert(methodClassHnd);
5236
5237         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
5238
5239         // opcode specific check
5240         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
5241     }
5242
5243     // We must have got the methodClassHnd if opcode is not CEE_CALLI
5244     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
5245
5246     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
5247     {
5248         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5249     }
5250
5251     // check compatibility of the arguments
5252     unsigned int argCount;
5253     argCount = sig.numArgs;
5254     CORINFO_ARG_LIST_HANDLE args;
5255     args = sig.args;
5256     while (argCount--)
5257     {
5258         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
5259
5260         // check that the argument is not a byref for tailcalls
5261         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
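        // For example, a "tail." prefixed call to a method taking a 'ref int' or a byref-like
        // struct (such as Span<T>) argument is flagged as an illegal tail call here.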
5262
5263         // For unsafe code, we might have parameters containing a pointer to a stack location.
5264         // Disallow the tailcall for this kind.
5265         CORINFO_CLASS_HANDLE classHandle;
5266         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
5267         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
5268
5269         args = info.compCompHnd->getArgNext(args);
5270     }
5271
5272     // update popCount
5273     popCount += sig.numArgs;
5274
5275     // check for 'this' which is on non-static methods, not called via NEWOBJ
5276     if (!(mflags & CORINFO_FLG_STATIC))
5277     {
5278         // Always update the popCount.
5279         // This is crucial for the stack calculation to be correct.
5280         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5281         popCount++;
5282
5283         if (opcode == CEE_CALLI)
5284         {
5285             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
5286             // on the stack.
5287             if (tiThis.IsValueClass())
5288             {
5289                 tiThis.MakeByRef();
5290             }
5291             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
5292         }
5293         else
5294         {
5295             // Check type compatibility of the this argument
5296             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
5297             if (tiDeclaredThis.IsValueClass())
5298             {
5299                 tiDeclaredThis.MakeByRef();
5300             }
5301
5302             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
5303         }
5304     }
5305
5306     // Tail calls on constrained calls should be illegal too:
5307     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
5308     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
5309
5310     // Get the exact view of the signature for an array method
5311     if (sig.retType != CORINFO_TYPE_VOID)
5312     {
5313         if (methodClassFlgs & CORINFO_FLG_ARRAY)
5314         {
5315             assert(opcode != CEE_CALLI);
5316             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5317         }
5318     }
5319
5320     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
5321     typeInfo tiCallerRetType =
5322         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
5323
5324     // void return type gets morphed into the error type, so we have to treat it specially here
5325     if (sig.retType == CORINFO_TYPE_VOID)
5326     {
5327         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
5328                                   speculative);
5329     }
5330     else
5331     {
5332         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
5333                                                    NormaliseForStack(tiCallerRetType), true),
5334                                   "tailcall return mismatch", speculative);
5335     }
5336
5337     // for tailcall, stack must be empty
5338     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
5339
5340     return true; // Yes, tailcall is legal
5341 }
5342
5343 /*****************************************************************************
5344  *
5345  *  Checks the IL verification rules for the call
5346  */
5347
5348 void Compiler::verVerifyCall(OPCODE                  opcode,
5349                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
5350                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5351                              bool                    tailCall,
5352                              bool                    readonlyCall,
5353                              const BYTE*             delegateCreateStart,
5354                              const BYTE*             codeAddr,
5355                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
5356 {
5357     DWORD             mflags;
5358     CORINFO_SIG_INFO* sig      = nullptr;
5359     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
5360                                     // this counter is used to keep track of how many items have been
5361                                     // virtually popped
5362
5363     // for calli, VerifyOrReturn that this is not a virtual method
5364     if (opcode == CEE_CALLI)
5365     {
5366         Verify(false, "Calli not verifiable");
5367         return;
5368     }
5369
5370     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
5371     mflags = callInfo->verMethodFlags;
5372
5373     sig = &callInfo->verSig;
5374
5375     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
5376     {
5377         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
5378     }
5379
5380     // opcode specific check
5381     unsigned methodClassFlgs = callInfo->classFlags;
5382     switch (opcode)
5383     {
5384         case CEE_CALLVIRT:
5385             // cannot do callvirt on valuetypes
5386             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
5387             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
5388             break;
5389
5390         case CEE_NEWOBJ:
5391         {
5392             assert(!tailCall); // Importer should not allow this
5393             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
5394                            "newobj must be on instance");
5395
5396             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
5397             {
5398                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
5399                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
5400                 typeInfo tiDeclaredFtn =
5401                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
5402                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
5403
5404                 assert(popCount == 0);
5405                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
5406                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
5407
5408                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
5409                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
5410                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
5411                                "delegate object type mismatch");
5412
5413                 CORINFO_CLASS_HANDLE objTypeHandle =
5414                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
5415
5416                 // the method signature must be compatible with the delegate's invoke method
5417
5418                 // check that for virtual functions, the type of the object used to get the
5419                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
5420                 // since this is a bit of work to determine in general, we pattern match stylized
5421                 // code sequences
5422
5423                 // the delegate creation code check, which used to be done later, is now done here
5424                 // so we can read delegateMethodRef directly from
5425                 // the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
5426                 // we then use it in our call to isCompatibleDelegate().
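                // As an illustration, the stylized sequences expected here are the standard verifiable
                // delegate-creation patterns, roughly:
                //     ldftn     <method>         followed by  newobj <delegate ctor>   (non-virtual target)
                //     dup; ldvirtftn <method>    followed by  newobj <delegate ctor>   (virtual target)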
5427
5428                 mdMemberRef delegateMethodRef = mdMemberRefNil;
5429                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
5430                                "must create delegates with certain IL");
5431
5432                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
5433                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
5434                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
5435                 delegateResolvedToken.token        = delegateMethodRef;
5436                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
5437                 info.compCompHnd->resolveToken(&delegateResolvedToken);
5438
5439                 CORINFO_CALL_INFO delegateCallInfo;
5440                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
5441                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
5442
5443                 BOOL isOpenDelegate = FALSE;
5444                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
5445                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
5446                                                                       &isOpenDelegate),
5447                                "function incompatible with delegate");
5448
5449                 // check the constraints on the target method
5450                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
5451                                "delegate target has unsatisfied class constraints");
5452                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
5453                                                                             tiActualFtn.GetMethod()),
5454                                "delegate target has unsatisfied method constraints");
5455
5456                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
5457                 // for additional verification rules for delegates
5458                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
5459                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
5460                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5461                 {
5462
5463                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
5464 #ifdef DEBUG
5465                         && StrictCheckForNonVirtualCallToVirtualMethod()
5466 #endif
5467                             )
5468                     {
5469                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5470                         {
5471                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
5472                                                verIsBoxedValueType(tiActualObj),
5473                                            "The 'this' parameter to the call must be either the calling method's "
5474                                            "'this' parameter or "
5475                                            "a boxed value type.");
5476                         }
5477                     }
5478                 }
5479
5480                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
5481                 {
5482                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
5483
5484                     Verify(targetIsStatic || !isOpenDelegate,
5485                            "Unverifiable creation of an open instance delegate for a protected member.");
5486
5487                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
5488                                                                 ? info.compClassHnd
5489                                                                 : tiActualObj.GetClassHandleForObjRef();
5490
5491                     // In the case of protected methods, it is a requirement that the 'this'
5492                     // pointer be a subclass of the current context.  Perform this check.
5493                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5494                            "Accessing protected method through wrong type.");
5495                 }
5496                 goto DONE_ARGS;
5497             }
5498         }
5499         // fall thru to default checks
5500         default:
5501             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
5502     }
5503     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
5504                    "can only newobj a delegate constructor");
5505
5506     // check compatibility of the arguments
5507     unsigned int argCount;
5508     argCount = sig->numArgs;
5509     CORINFO_ARG_LIST_HANDLE args;
5510     args = sig->args;
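    // IL pushes call arguments left to right, so the first signature argument is the deepest
    // of the argument entries on the stack; impStackTop(popCount + argCount) walks the stack
    // from that entry upward while 'args' walks the signature forward, keeping the two in step.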
5511     while (argCount--)
5512     {
5513         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
5514
5515         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
5516         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
5517
5518         args = info.compCompHnd->getArgNext(args);
5519     }
5520
5521 DONE_ARGS:
5522
5523     // update popCount
5524     popCount += sig->numArgs;
5525
5526     // check for the 'this' pointer, which is present for non-static methods not called via NEWOBJ
5527     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
5528     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
5529     {
5530         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5531         popCount++;
5532
5533         // If it is null, we assume we can access it (since it will AV shortly)
5534         // If it is anything but a reference class, there is no hierarchy, so
5535         // again, we don't need the precise instance class to compute 'protected' access
5536         if (tiThis.IsType(TI_REF))
5537         {
5538             instanceClassHnd = tiThis.GetClassHandleForObjRef();
5539         }
5540
5541         // Check type compatibility of the this argument
5542         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
5543         if (tiDeclaredThis.IsValueClass())
5544         {
5545             tiDeclaredThis.MakeByRef();
5546         }
5547
5548         // If this is a call to the base class .ctor, set thisPtr Init for
5549         // this block.
5550         if (mflags & CORINFO_FLG_CONSTRUCTOR)
5551         {
5552             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
5553                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
5554             {
5555                 assert(verCurrentState.thisInitialized !=
5556                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
5557                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
5558                                "Call to base class constructor when 'this' is possibly initialized");
5559                 // Otherwise, 'this' is now initialized.
5560                 verCurrentState.thisInitialized = TIS_Init;
5561                 tiThis.SetInitialisedObjRef();
5562             }
5563             else
5564             {
5565                 // We allow direct calls to value type constructors
5566                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
5567                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
5568                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
5569                                "Bad call to a constructor");
5570             }
5571         }
5572
5573         if (pConstrainedResolvedToken != nullptr)
5574         {
5575             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
5576
5577             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
5578
5579             // We just dereference this and test for equality
5580             tiThis.DereferenceByRef();
5581             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
5582                            "this type mismatch with constrained type operand");
5583
5584             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
5585             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
5586         }
5587
5588         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
5589         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
5590         {
5591             tiDeclaredThis.SetIsReadonlyByRef();
5592         }
5593
5594         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
5595
5596         if (tiThis.IsByRef())
5597         {
5598             // Find the actual type where the method exists (as opposed to what is declared
5599             // in the metadata). This is to prevent passing a byref as the "this" argument
5600             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
5601
5602             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
5603             VerifyOrReturn(eeIsValueClass(actualClassHnd),
5604                            "Call to base type of valuetype (which is never a valuetype)");
5605         }
5606
5607         // Rules for non-virtual call to a non-final virtual method:
5608
5609         // Define:
5610         // The "this" pointer is considered to be "possibly written" if
5611         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
5612         //   (or)
5613         //   2. It has been stored to (STARG.0) anywhere in the method.
5614
5615         // A non-virtual call to a non-final virtual method is only allowed if
5616         //   1. The this pointer passed to the callee is an instance of a boxed value type.
5617         //   (or)
5618         //   2. The this pointer passed to the callee is the current method's this pointer.
5619         //      (and) The current method's this pointer is not "possibly written".
5620
5621         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
5622         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
5623         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
5624         // harder and more error prone.
5625
5626         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
5627 #ifdef DEBUG
5628             && StrictCheckForNonVirtualCallToVirtualMethod()
5629 #endif
5630                 )
5631         {
5632             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5633             {
5634                 VerifyOrReturn(
5635                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
5636                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
5637                     "a boxed value type.");
5638             }
5639         }
5640     }
5641
5642     // check any constraints on the callee's class and type parameters
5643     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
5644                    "method has unsatisfied class constraints");
5645     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
5646                    "method has unsatisfied method constraints");
5647
5648     if (mflags & CORINFO_FLG_PROTECTED)
5649     {
5650         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5651                        "Can't access protected method");
5652     }
5653
5654     // Get the exact view of the signature for an array method
5655     if (sig->retType != CORINFO_TYPE_VOID)
5656     {
5657         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
5658     }
5659
5660     // "readonly." prefixed calls only allowed for the Address operation on arrays.
5661     // The methods supported by array types are under the control of the EE
5662     // so we can trust that only the Address operation returns a byref.
5663     if (readonlyCall)
5664     {
5665         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
5666         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
5667                        "unexpected use of readonly prefix");
5668     }
5669
5670     // Verify the tailcall
5671     if (tailCall)
5672     {
5673         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
5674     }
5675 }
5676
5677 /*****************************************************************************
5678  *  Checks that a delegate creation is done using the following pattern:
5679  *     dup
5680  *     ldvirtftn targetMemberRef
5681  *  OR
5682  *     ldftn targetMemberRef
5683  *
5684  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
5685  *  not in this basic block)
5686  *
5687  *  targetMemberRef is read from the code sequence.
5688  *  targetMemberRef is validated iff verificationNeeded.
5689  */
5690
5691 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
5692                                         const BYTE*  codeAddr,
5693                                         mdMemberRef& targetMemberRef)
5694 {
5695     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5696     {
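        // ldftn is a two-byte (0xFE-prefixed) opcode, so its metadata token immediate starts at offset 2.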
5697         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
5698         return TRUE;
5699     }
5700     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
5701     {
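        // dup is a one-byte opcode followed by the two-byte ldvirtftn opcode, so the token starts at offset 3.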
5702         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
5703         return TRUE;
5704     }
5705
5706     return FALSE;
5707 }
5708
5709 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
5710 {
5711     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
5712     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
5713     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
5714     if (!tiCompatibleWith(value, normPtrVal, true))
5715     {
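        // The condition is known to be false here; calling Verify again reports the verification
        // failure (or just records it, depending on mode) before we note the unsafe cast.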
5716         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
5717         compUnsafeCastUsed = true;
5718     }
5719     return ptrVal;
5720 }
5721
5722 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
5723 {
5724     assert(!instrType.IsStruct());
5725
5726     typeInfo ptrVal;
5727     if (ptr.IsByRef())
5728     {
5729         ptrVal = DereferenceByRef(ptr);
5730         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
5731         {
5732             Verify(false, "bad pointer");
5733             compUnsafeCastUsed = true;
5734         }
5735         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
5736         {
5737             Verify(false, "pointer not consistent with instr");
5738             compUnsafeCastUsed = true;
5739         }
5740     }
5741     else
5742     {
5743         Verify(false, "pointer not byref");
5744         compUnsafeCastUsed = true;
5745     }
5746
5747     return ptrVal;
5748 }
5749
5750 // Verify that the field is used properly.  'tiThis' is NULL for statics,
5751 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
5752 // ld*flda or a st*fld.
5753 // 'enclosingClass' is given if we are accessing a field in some specific type.
5754
5755 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
5756                               const CORINFO_FIELD_INFO& fieldInfo,
5757                               const typeInfo*           tiThis,
5758                               BOOL                      mutator,
5759                               BOOL                      allowPlainStructAsThis)
5760 {
5761     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5762     unsigned             fieldFlags     = fieldInfo.fieldFlags;
5763     CORINFO_CLASS_HANDLE instanceClass =
5764         info.compClassHnd; // for statics, we imagine the instance is the current class.
5765
5766     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5767     if (mutator)
5768     {
5769         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
5770         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5771         {
5772             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5773                        info.compIsStatic == isStaticField,
5774                    "bad use of initonly field (set or address taken)");
5775         }
5776     }
5777
5778     if (tiThis == nullptr)
5779     {
5780         Verify(isStaticField, "used static opcode with non-static field");
5781     }
5782     else
5783     {
5784         typeInfo tThis = *tiThis;
5785
5786         if (allowPlainStructAsThis && tThis.IsValueClass())
5787         {
5788             tThis.MakeByRef();
5789         }
5790
5791         // If it is null, we assume we can access it (since it will AV shortly)
5792         // If it is anything but a reference class, there is no hierarchy, so
5793         // again, we don't need the precise instance class to compute 'protected' access
5794         if (tiThis->IsType(TI_REF))
5795         {
5796             instanceClass = tiThis->GetClassHandleForObjRef();
5797         }
5798
5799         // Note that even if the field is static, we require that the this pointer
5800         // satisfy the same constraints as a non-static field.  This happens to
5801         // be simpler and seems reasonable.
5802         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5803         if (tiDeclaredThis.IsValueClass())
5804         {
5805             tiDeclaredThis.MakeByRef();
5806
5807             // we allow read-only tThis, on any field access (even stores!), because if the
5808             // class implementor wants to prohibit stores he should make the field private.
5809             // we do this by setting the read-only bit on the type we compare tThis to.
5810             tiDeclaredThis.SetIsReadonlyByRef();
5811         }
5812         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5813         {
5814             // Any field access is legal on "uninitialized" this pointers.
5815             // The easiest way to implement this is to simply set the
5816             // initialized bit for the duration of the type check on the
5817             // field access only.  It does not change the state of the "this"
5818             // for the function as a whole. Note that the "tThis" is a copy
5819             // of the original "this" type (*tiThis) passed in.
5820             tThis.SetInitialisedObjRef();
5821         }
5822
5823         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5824     }
5825
5826     // Presently the JIT does not check that we don't store or take the address of init-only fields
5827     // since we cannot guarantee their immutability and it is not a security issue.
5828
5829     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5830     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5831                    "field has unsatisfied class constraints");
5832     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5833     {
5834         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5835                "Accessing protected method through wrong type.");
5836     }
5837 }
5838
5839 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5840 {
5841     if (tiOp1.IsNumberType())
5842     {
5843 #ifdef _TARGET_64BIT_
5844         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5845 #else  // _TARGET_64BIT_
5846         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5847         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5848         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5849         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5850 #endif // !_TARGET_64BIT_
5851     }
5852     else if (tiOp1.IsObjRef())
5853     {
5854         switch (opcode)
5855         {
5856             case CEE_BEQ_S:
5857             case CEE_BEQ:
5858             case CEE_BNE_UN_S:
5859             case CEE_BNE_UN:
5860             case CEE_CEQ:
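            // cgt.un is permitted on object references because it is how comparisons against
            // null (e.g. 'obj != null') are typically encoded.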
5861             case CEE_CGT_UN:
5862                 break;
5863             default:
5864                 Verify(FALSE, "Cond not allowed on object types");
5865         }
5866         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5867     }
5868     else if (tiOp1.IsByRef())
5869     {
5870         Verify(tiOp2.IsByRef(), "Cond type mismatch");
5871     }
5872     else
5873     {
5874         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5875     }
5876 }
5877
5878 void Compiler::verVerifyThisPtrInitialised()
5879 {
5880     if (verTrackObjCtorInitState)
5881     {
5882         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5883     }
5884 }
5885
5886 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5887 {
5888     // Either target == context, in which case we are calling an alternate .ctor,
5889     // or target is the immediate parent of context.
5890
5891     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
5892 }
5893
5894 GenTree* Compiler::impImportLdvirtftn(GenTree*                thisPtr,
5895                                       CORINFO_RESOLVED_TOKEN* pResolvedToken,
5896                                       CORINFO_CALL_INFO*      pCallInfo)
5897 {
5898     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5899     {
5900         NO_WAY("Virtual call to a function added via EnC is not supported");
5901     }
5902
5903     // CoreRT generic virtual method
5904     if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5905     {
5906         GenTree* runtimeMethodHandle = nullptr;
5907         if (pCallInfo->exactContextNeedsRuntimeLookup)
5908         {
5909             runtimeMethodHandle =
5910                 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5911         }
5912         else
5913         {
5914             runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5915         }
5916         return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5917                                    gtNewArgList(thisPtr, runtimeMethodHandle));
5918     }
5919
5920 #ifdef FEATURE_READYTORUN_COMPILER
5921     if (opts.IsReadyToRun())
5922     {
5923         if (!pCallInfo->exactContextNeedsRuntimeLookup)
5924         {
5925             GenTreeCall* call =
5926                 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5927
5928             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5929
5930             return call;
5931         }
5932
5933         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5934         if (IsTargetAbi(CORINFO_CORERT_ABI))
5935         {
5936             GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5937
5938             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5939                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5940         }
5941     }
5942 #endif
5943
5944     // Get the exact descriptor for the static callsite
5945     GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5946     if (exactTypeDesc == nullptr)
5947     { // compDonotInline()
5948         return nullptr;
5949     }
5950
5951     GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
5952     if (exactMethodDesc == nullptr)
5953     { // compDonotInline()
5954         return nullptr;
5955     }
5956
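    // Build the helper argument list by prepending, so the final order is
    // (thisPtr, exactTypeDesc, exactMethodDesc).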
5957     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5958
5959     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5960
5961     helpArgs = gtNewListNode(thisPtr, helpArgs);
5962
5963     // Call helper function.  This gets the target address of the final destination callsite.
5964
5965     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5966 }
5967
5968 //------------------------------------------------------------------------
5969 // impImportAndPushBox: build and import a value-type box
5970 //
5971 // Arguments:
5972 //   pResolvedToken - resolved token from the box operation
5973 //
5974 // Return Value:
5975 //   None.
5976 //
5977 // Side Effects:
5978 //   The value to be boxed is popped from the stack, and a tree for
5979 //   the boxed value is pushed. This method may create upstream
5980 //   statements, spill side effecting trees, and create new temps.
5981 //
5982 //   If importing an inlinee, we may also discover the inline must
5983 //   fail. If so there is no new value pushed on the stack. Callers
5984 //   should use CompDoNotInline after calling this method to see if
5985 //   ongoing importation should be aborted.
5986 //
5987 // Notes:
5988 //   Boxing of ref classes results in the same value as the value on
5989 //   the top of the stack, so is handled inline in impImportBlockCode
5990 //   for the CEE_BOX case. Only value or primitive type boxes make it
5991 //   here.
5992 //
5993 //   Boxing for nullable types is done via a helper call; boxing
5994 //   of other value types is expanded inline or handled via helper
5995 //   call, depending on the jit's codegen mode.
5996 //
5997 //   When the jit is operating in size and time constrained modes,
5998 //   using a helper call here can save jit time and code size. But it
5999 //   also may inhibit cleanup optimizations that could have had an
6000 //   even greater benefit on code size and jit time. An optimal
6001 //   strategy may need to peek ahead and see if it is easy to tell how
6002 //   the box is being used. For now, we defer.
6003
6004 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6005 {
6006     // Spill any special side effects
6007     impSpillSpecialSideEff();
6008
6009     // Get the expression to box from the stack.
6010     GenTree*             op1       = nullptr;
6011     GenTree*             op2       = nullptr;
6012     StackEntry           se        = impPopStack();
6013     CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
6014     GenTree*             exprToBox = se.val;
6015
6016     // Look at what helper we should use.
6017     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
6018
6019     // Determine what expansion to prefer.
6020     //
6021     // In size/time/debuggable constrained modes, the helper call
6022     // expansion for box is generally smaller and is preferred, unless
6023     // the value to box is a struct that comes from a call. In that
6024     // case the call can construct its return value directly into the
6025     // box payload, saving possibly some up-front zeroing.
6026     //
6027     // Currently primitive type boxes always get inline expanded. We may
6028     // want to do the same for small structs if they don't come from
6029     // calls and don't have GC pointers, since explicitly copying such
6030     // structs is cheap.
6031     JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
6032     bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
6033     bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
6034     bool expandInline    = canExpandInline && !optForSize;
6035
6036     if (expandInline)
6037     {
6038         JITDUMP(" inline allocate/copy sequence\n");
6039
6040         // we are doing 'normal' boxing.  This means that we can inline the box operation
6041         // Box(expr) gets morphed into
6042         // temp = new(clsHnd)
6043         // cpobj(temp + TARGET_POINTER_SIZE, expr, clsHnd)
6044         // push temp
6045         // The code paths differ slightly below for structs and primitives because
6046         // "cpobj" differs in these cases.  In one case you get
6047         //    impAssignStructPtr(temp + TARGET_POINTER_SIZE, expr, clsHnd)
6048         // and the other you get
6049         //    *(temp + TARGET_POINTER_SIZE) = expr
6050
6051         if (opts.MinOpts() || opts.compDbgCode)
6052         {
6053             // For minopts/debug code, try and minimize the total number
6054             // of box temps by reusing an existing temp when possible.
6055             if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
6056             {
6057                 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
6058             }
6059         }
6060         else
6061         {
6062             // When optimizing, use a new temp for each box operation
6063             // since we then know the exact class of the box temp.
6064             impBoxTemp                       = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
6065             lvaTable[impBoxTemp].lvType      = TYP_REF;
6066             lvaTable[impBoxTemp].lvSingleDef = 1;
6067             JITDUMP("Marking V%02u as a single def local\n", impBoxTemp);
6068             const bool isExact = true;
6069             lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
6070         }
6071
6072         // The box temp needs to stay in use until this box expression is appended to
6073         // some other node.  We approximate this by keeping it alive until
6074         // the opcode stack becomes empty
6075         impBoxTempInUse = true;
6076
6077 #ifdef FEATURE_READYTORUN_COMPILER
6078         bool usingReadyToRunHelper = false;
6079
6080         if (opts.IsReadyToRun())
6081         {
6082             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
6083             usingReadyToRunHelper = (op1 != nullptr);
6084         }
6085
6086         if (!usingReadyToRunHelper)
6087 #endif
6088         {
6089             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
6090             // and the newfast call with a single call to a dynamic R2R cell that will:
6091             //      1) Load the context
6092             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
6093             //      3) Allocate and return the new object for boxing
6094             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
6095
6096             // Ensure that the value class is restored
6097             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
6098             if (op2 == nullptr)
6099             {
6100                 // We must be backing out of an inline.
6101                 assert(compDonotInline());
6102                 return;
6103             }
6104
6105             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
6106                                     pResolvedToken->hClass, TYP_REF, op2);
6107         }
6108
6109         /* Remember that this basic block contains 'new' of an object, and so does this method */
6110         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
6111         optMethodFlags |= OMF_HAS_NEWOBJ;
6112
6113         GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
6114
6115         GenTree* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6116
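        // The box payload lives just past the method table pointer, so the copy destination is
        // the box temp's address plus TARGET_POINTER_SIZE.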
6117         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
6118         op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
6119         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
6120
6121         if (varTypeIsStruct(exprToBox))
6122         {
6123             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
6124             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
6125         }
6126         else
6127         {
6128             var_types lclTyp = exprToBox->TypeGet();
6129             if (lclTyp == TYP_BYREF)
6130             {
6131                 lclTyp = TYP_I_IMPL;
6132             }
6133             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
6134             if (impIsPrimitive(jitType))
6135             {
6136                 lclTyp = JITtype2varType(jitType);
6137             }
6138             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
6139                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
6140             var_types srcTyp = exprToBox->TypeGet();
6141             var_types dstTyp = lclTyp;
6142
6143             if (srcTyp != dstTyp)
6144             {
6145                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
6146                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
6147                 exprToBox = gtNewCastNode(dstTyp, exprToBox, false, dstTyp);
6148             }
6149             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
6150         }
6151
6152         // Spill eval stack to flush out any pending side effects.
6153         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
6154
6155         // Set up this copy as a second assignment.
6156         GenTree* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6157
6158         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
6159
6160         // Record that this is a "box" node and keep track of the matching parts.
6161         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
6162
6163         // If it is a value class, mark the "box" node.  We can use this information
6164         // to optimise several cases:
6165         //    "box(x) == null" --> false
6166         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
6167         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
6168
6169         op1->gtFlags |= GTF_BOX_VALUE;
6170         assert(op1->IsBoxedValue());
6171         assert(asg->gtOper == GT_ASG);
6172     }
6173     else
6174     {
6175         // Don't optimize, just call the helper and be done with it.
6176         JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
6177         assert(operCls != nullptr);
6178
6179         // Ensure that the value class is restored
6180         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
6181         if (op2 == nullptr)
6182         {
6183             // We must be backing out of an inline.
6184             assert(compDonotInline());
6185             return;
6186         }
6187
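        // The box helper takes the class handle and the address of the value being boxed.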
6188         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
6189         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, args);
6190     }
6191
6192     /* Push the result back on the stack, */
6193     /* even if clsHnd is a value class we want the TI_REF */
6194     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
6195     impPushOnStack(op1, tiRetVal);
6196 }
6197
6198 //------------------------------------------------------------------------
6199 // impImportNewObjArray: Build and import `new` of multi-dimensional array
6200 //
6201 // Arguments:
6202 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6203 //                     by a call to CEEInfo::resolveToken().
6204 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
6205 //                by a call to CEEInfo::getCallInfo().
6206 //
6207 // Assumptions:
6208 //    The multi-dimensional array constructor arguments (array dimensions) are
6209 //    pushed on the IL stack on entry to this method.
6210 //
6211 // Notes:
6212 //    Multi-dimensional array constructors are imported as calls to a JIT
6213 //    helper, not as regular calls.
6214
6215 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
6216 {
6217     GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
6218     if (classHandle == nullptr)
6219     { // compDonotInline()
6220         return;
6221     }
6222
6223     assert(pCallInfo->sig.numArgs);
6224
6225     GenTree*        node;
6226     GenTreeArgList* args;
6227
6228     //
6229     // There are two different JIT helpers that can be used to allocate
6230     // multi-dimensional arrays:
6231     //
6232     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
6233     //      This variant is deprecated. It should be eventually removed.
6234     //
6235     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
6236     //      pointer to block of int32s. This variant is more portable.
6237     //
6238     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
6239     // unconditionally would require a ReadyToRun version bump.
6240     //
6241     CLANG_FORMAT_COMMENT_ANCHOR;
6242
6243     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
6244     {
6245
6246         // Reuse the temp used to pass the array dimensions to avoid bloating
6247         // the stack frame in case there are multiple calls to multi-dim array
6248         // constructors within a single method.
6249         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
6250         {
6251             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
6252             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
6253             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
6254         }
6255
6256         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
6257         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
6258         lvaTable[lvaNewObjArrayArgs].lvExactSize =
6259             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
6260
6261         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
6262         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
6263         // to one allocation at a time.
6264         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
6265
6266         //
6267         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
6268         //  - Array class handle
6269         //  - Number of dimension arguments
6270         //  - Pointer to block of int32 dimensions - address  of lvaNewObjArrayArgs temp.
6271         //
6272
6273         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
6274         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
6275
6276         // Pop the dimension arguments from the stack one at a time and store them
6277         // into lvaNewObjArrayArgs temp.
6278         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
6279         {
6280             GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
6281
6282             GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
6283             dest          = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
6284             dest          = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
6285                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
6286             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
6287
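            // Chain each store in front of the address computation with GT_COMMA so every
            // dimension is written to the block before the helper reads it.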
6288             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
6289         }
6290
6291         args = gtNewArgList(node);
6292
6293         // pass number of arguments to the helper
6294         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
6295
6296         args = gtNewListNode(classHandle, args);
6297
6298         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
6299     }
6300     else
6301     {
6302         //
6303         // The varargs helper needs the type and method handles as the last
6304         // and second-to-last params (this is a cdecl call, so args will be
6305         // pushed in reverse order on the CPU stack)
6306         //
6307
6308         args = gtNewArgList(classHandle);
6309
6310         // pass number of arguments to the helper
6311         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
6312
6313         unsigned argFlags = 0;
6314         args              = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
6315
6316         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
6317
6318         // varargs, so we pop the arguments
6319         node->gtFlags |= GTF_CALL_POP_ARGS;
6320
6321 #ifdef DEBUG
6322         // At the present time we don't track Caller pop arguments
6323         // that have GC references in them
6324         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
6325         {
6326             assert(temp->Current()->gtType != TYP_REF);
6327         }
6328 #endif
6329     }
6330
6331     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6332     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
6333
6334     // Remember that this basic block contains 'new' of a md array
6335     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
6336
6337     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
6338 }
6339
6340 GenTree* Compiler::impTransformThis(GenTree*                thisPtr,
6341                                     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6342                                     CORINFO_THIS_TRANSFORM  transform)
6343 {
6344     switch (transform)
6345     {
6346         case CORINFO_DEREF_THIS:
6347         {
6348             GenTree* obj = thisPtr;
6349
6350             // This does a LDIND on the obj, which should be a byref pointing to a ref
6351             impBashVarAddrsToI(obj);
6352             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
6353             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
6354
6355             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
6356             // ldind could point anywhere, for example a boxed class static int
6357             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
6358
6359             return obj;
6360         }
6361
6362         case CORINFO_BOX_THIS:
6363         {
6364             // Constraint calls where there might be no
6365             // unboxed entry point require us to implement the call via helper.
6366             // These only occur when a possible target of the call
6367             // may have inherited an implementation of an interface
6368             // method from System.Object or System.ValueType.  The EE does not provide us with
6369             // "unboxed" versions of these methods.
6370
6371             GenTree* obj = thisPtr;
6372
6373             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
6374             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
6375             obj->gtFlags |= GTF_EXCEPT;
6376
6377             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
6378             var_types   objType = JITtype2varType(jitTyp);
6379             if (impIsPrimitive(jitTyp))
6380             {
6381                 if (obj->OperIsBlk())
6382                 {
6383                     obj->ChangeOperUnchecked(GT_IND);
6384
6385                     // Obj could point anywhere, for example a boxed class static int
6386                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
6387                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
6388                 }
6389
6390                 obj->gtType = JITtype2varType(jitTyp);
6391                 assert(varTypeIsArithmetic(obj->gtType));
6392             }
6393
6394             // This pushes on the dereferenced byref
6395             // This is then used immediately to box.
6396             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
6397
6398             // This pops off the byref-to-a-value-type remaining on the stack and
6399             // replaces it with a boxed object.
6400             // This is then used as the object to the virtual call immediately below.
6401             impImportAndPushBox(pConstrainedResolvedToken);
6402             if (compDonotInline())
6403             {
6404                 return nullptr;
6405             }
6406
6407             obj = impPopStack().val;
6408             return obj;
6409         }
6410         case CORINFO_NO_THIS_TRANSFORM:
6411         default:
6412             return thisPtr;
6413     }
6414 }
6415
6416 //------------------------------------------------------------------------
6417 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
6418 //
6419 // Return Value:
6420 //    true if PInvoke inlining should be enabled in current method, false otherwise
6421 //
6422 // Notes:
6423 //    Checks a number of ambient conditions where we could pinvoke but choose not to
6424
6425 bool Compiler::impCanPInvokeInline()
6426 {
6427     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
6428            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
6429         ;
6430 }
6431
6432 //------------------------------------------------------------------------
6433 // impCanPInvokeInlineCallSite: basic legality checks using information
6434 // from a call to see if the call qualifies as an inline pinvoke.
6435 //
6436 // Arguments:
6437 //    block      - block containing the call, or for inlinees, block
6438 //                 containing the call being inlined
6439 //
6440 // Return Value:
6441 //    true if this call can legally qualify as an inline pinvoke, false otherwise
6442 //
6443 // Notes:
6444 //    For runtimes that support exception handling interop there are
6445 //    restrictions on using inline pinvoke in handler regions.
6446 //
6447 //    * We have to disable pinvoke inlining inside of filters because
6448 //    in case the main execution (i.e. in the try block) is inside
6449 //    unmanaged code, we cannot reuse the inlined stub (we still need
6450 //    the original state until we are in the catch handler)
6451 //
6452 //    * We disable pinvoke inlining inside handlers since the GSCookie
6453 //    is in the inlined Frame (see
6454 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
6455 //    this would not protect framelets/return-address of handlers.
6456 //
6457 //    These restrictions are currently also in place for CoreCLR but
6458 //    can be relaxed when coreclr/#8459 is addressed.
6459
6460 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
6461 {
6462     if (block->hasHndIndex())
6463     {
6464         return false;
6465     }
6466
6467     // The remaining limitations do not apply to CoreRT
6468     if (IsTargetAbi(CORINFO_CORERT_ABI))
6469     {
6470         return true;
6471     }
6472
6473 #ifdef _TARGET_AMD64_
6474     // On x64, we disable pinvoke inlining inside of try regions.
6475     // Here is the comment from JIT64 explaining why:
6476     //
6477     //   [VSWhidbey: 611015] - because the jitted code links in the
6478     //   Frame (instead of the stub) we rely on the Frame not being
6479     //   'active' until inside the stub.  This normally happens by the
6480     //   stub setting the return address pointer in the Frame object
6481     //   inside the stub.  On a normal return, the return address
6482     //   pointer is zeroed out so the Frame can be safely re-used, but
6483     //   if an exception occurs, nobody zeros out the return address
6484     //   pointer.  Thus if we re-used the Frame object, it would go
6485     //   'active' as soon as we link it into the Frame chain.
6486     //
6487     //   Technically we only need to disable PInvoke inlining if we're
6488     //   in a handler or if we're in a try body with a catch or
6489     //   filter/except where other non-handler code in this method
6490     //   might run and try to re-use the dirty Frame object.
6491     //
6492     //   A desktop test case where this seems to matter is
6493     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
6494     if (block->hasTryIndex())
6495     {
6496         return false;
6497     }
6498 #endif // _TARGET_AMD64_
6499
6500     return true;
6501 }
6502
6503 //------------------------------------------------------------------------
6504 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
6505 // if it can be expressed as an inline pinvoke.
6506 //
6507 // Arguments:
6508 //    call       - tree for the call
6509 //    methHnd    - handle for the method being called (may be null)
6510 //    sig        - signature of the method being called
6511 //    mflags     - method flags for the method being called
6512 //    block      - block containing the call, or for inlinees, block
6513 //                 containing the call being inlined
6514 //
6515 // Notes:
6516 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
6517 //
6518 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
6519 //   call passes a combination of legality and profitability checks.
6520 //
6521 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
6522
6523 void Compiler::impCheckForPInvokeCall(
6524     GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
6525 {
6526     CorInfoUnmanagedCallConv unmanagedCallConv;
6527
6528     // If VM flagged it as Pinvoke, flag the call node accordingly
6529     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
6530     {
6531         call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
6532     }
6533
6534     if (methHnd)
6535     {
6536         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
6537         {
6538             return;
6539         }
6540
6541         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
6542     }
6543     else
6544     {
6545         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
6546         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
6547         {
6548             // Used by the IL Stubs.
6549             callConv = CORINFO_CALLCONV_C;
6550         }
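        // The managed and unmanaged calling convention enum values coincide for C, STDCALL and
        // THISCALL, so the cast below is valid; the asserts guard that assumption.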
6551         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
6552         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
6553         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
6554         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
6555
6556         assert(!call->gtCallCookie);
6557     }
6558
6559     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
6560         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
6561     {
6562         return;
6563     }
6564     optNativeCallCount++;
6565
6566     if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI)))
6567     {
6568         // PInvoke in CoreRT ABI must always be inlined. Non-inlineable CALLI cases have been
6569         // converted to regular method calls earlier using convertPInvokeCalliToCall.
6570
6571         // PInvoke CALLI in IL stubs must be inlined
6572     }
6573     else
6574     {
6575         // Check legality
6576         if (!impCanPInvokeInlineCallSite(block))
6577         {
6578             return;
6579         }
6580
6581         // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive
6582         // inlining in CoreRT. Skip the ambient conditions checks and profitability checks.
6583         if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0)
6584         {
6585             if (!impCanPInvokeInline())
6586             {
6587                 return;
6588             }
6589
6590             // Size-speed tradeoff: don't use inline pinvoke at rarely
6591             // executed call sites.  The non-inline version is more
6592             // compact.
6593             if (block->isRunRarely())
6594             {
6595                 return;
6596             }
6597         }
6598
6599         // The expensive check should be last
6600         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
6601         {
6602             return;
6603         }
6604     }
6605
6606     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
6607
6608     call->gtFlags |= GTF_CALL_UNMANAGED;
6609     info.compCallUnmanaged++;
6610
6611     // The AMD64 convention is the same for native and managed
6612     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
6613     {
6614         call->gtFlags |= GTF_CALL_POP_ARGS;
6615     }
6616
6617     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
6618     {
6619         call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
6620     }
6621 }
6622
6623 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
6624 {
6625     var_types callRetTyp = JITtype2varType(sig->retType);
6626
6627     /* The function pointer is on top of the stack - It may be a
6628      * complex expression. As it is evaluated after the args,
6629      * it may cause registered args to be spilled. Simply spill it.
6630      */
6631
6632     // No spill is needed in the trivial case where the function pointer is already a local.
6633     if (impStackTop().val->gtOper != GT_LCL_VAR)
6634     {
6635         impSpillStackEntry(verCurrentState.esStackDepth - 1,
6636                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
6637     }
6638
6639     /* Get the function pointer */
6640
6641     GenTree* fptr = impPopStack().val;
6642
6643     // The function pointer is typically sized to match the target pointer size
6644     // However, stubgen IL optimization can change LDC.I8 to LDC.I4
6645     // See ILCodeStream::LowerOpcode
6646     assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
6647
6648 #ifdef DEBUG
6649     // This temporary must never be converted to a double in stress mode,
6650     // because that can introduce a call to the cast helper after the
6651     // arguments have already been evaluated.
6652
6653     if (fptr->OperGet() == GT_LCL_VAR)
6654     {
6655         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
6656     }
6657 #endif
6658
6659     /* Create the call node */
6660
6661     GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6662
6663     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6664
6665     return call;
6666 }
6667
6668 /*****************************************************************************/
6669
6670 void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
6671 {
6672     assert(call->gtFlags & GTF_CALL_UNMANAGED);
6673
6674     /* Since we push the arguments in reverse order (i.e. right -> left)
6675      * spill any side effects from the stack
6676      *
6677      * OBS: If there is only one side effect we do not need to spill it
6678      *      thus we have to spill all side-effects except last one
6679      */
6680
6681     unsigned lastLevelWithSideEffects = UINT_MAX;
6682
6683     unsigned argsToReverse = sig->numArgs;
6684
6685     // For "thiscall", the first argument goes in a register. Since its
6686     // order does not need to be changed, we do not need to spill it
6687
6688     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6689     {
6690         assert(argsToReverse);
6691         argsToReverse--;
6692     }
6693
6694 #ifndef _TARGET_X86_
6695     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
6696     argsToReverse = 0;
6697 #endif
6698
6699     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
6700     {
6701         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
6702         {
6703             assert(lastLevelWithSideEffects == UINT_MAX);
6704
6705             impSpillStackEntry(level,
6706                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
6707         }
6708         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
6709         {
6710             if (lastLevelWithSideEffects != UINT_MAX)
6711             {
6712                 /* We had a previous side effect - must spill it */
6713                 impSpillStackEntry(lastLevelWithSideEffects,
6714                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
6715
6716                 /* Record the level for the current side effect in case we will spill it */
6717                 lastLevelWithSideEffects = level;
6718             }
6719             else
6720             {
6721                 /* This is the first side effect encountered - record its level */
6722
6723                 lastLevelWithSideEffects = level;
6724             }
6725         }
6726     }
6727
6728     /* The argument list is now "clean" - no out-of-order side effects
6729      * Pop the argument list in reverse order */
6730
6731     GenTree* args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
6732
6733     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6734     {
6735         GenTree* thisPtr = args->Current();
6736         impBashVarAddrsToI(thisPtr);
6737         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
6738     }
6739
6740     if (args)
6741     {
6742         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6743     }
6744 }
6745
6746 //------------------------------------------------------------------------
6747 // impInitClass: Build a node to initialize the class before accessing the
6748 //               field if necessary
6749 //
6750 // Arguments:
6751 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6752 //                     by a call to CEEInfo::resolveToken().
6753 //
6754 // Return Value: If needed, a pointer to the node that will perform the class
6755 //               initialization.  Otherwise, nullptr.
6756 //
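// Notes:
//    A rough sketch (not the exact IR): when the EE reports CORINFO_INITCLASS_USE_HELPER
//    and a runtime lookup is required, the returned node is a helper call along the
//    lines of CORINFO_HELP_INITCLASS(<class handle>); otherwise the shared cctor helper
//    obtained from fgGetSharedCCtor is used. The caller sequences this node before the
//    field access.
//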
6757
6758 GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6759 {
6760     CorInfoInitClassResult initClassResult =
6761         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
6762
6763     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6764     {
6765         return nullptr;
6766     }
6767     BOOL runtimeLookup;
6768
6769     GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6770
6771     if (node == nullptr)
6772     {
6773         assert(compDonotInline());
6774         return nullptr;
6775     }
6776
6777     if (runtimeLookup)
6778     {
6779         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6780     }
6781     else
6782     {
6783         // Call the shared non-GC static helper, as it's the fastest.
6784         node = fgGetSharedCCtor(pResolvedToken->hClass);
6785     }
6786
6787     return node;
6788 }
6789
6790 GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6791 {
6792     GenTree* op1 = nullptr;
6793
6794     switch (lclTyp)
6795     {
6796         int     ival;
6797         __int64 lval;
6798         double  dval;
6799
6800         case TYP_BOOL:
6801             ival = *((bool*)fldAddr);
6802             goto IVAL_COMMON;
6803
6804         case TYP_BYTE:
6805             ival = *((signed char*)fldAddr);
6806             goto IVAL_COMMON;
6807
6808         case TYP_UBYTE:
6809             ival = *((unsigned char*)fldAddr);
6810             goto IVAL_COMMON;
6811
6812         case TYP_SHORT:
6813             ival = *((short*)fldAddr);
6814             goto IVAL_COMMON;
6815
6816         case TYP_USHORT:
6817             ival = *((unsigned short*)fldAddr);
6818             goto IVAL_COMMON;
6819
6820         case TYP_UINT:
6821         case TYP_INT:
6822             ival = *((int*)fldAddr);
6823         IVAL_COMMON:
6824             op1 = gtNewIconNode(ival);
6825             break;
6826
6827         case TYP_LONG:
6828         case TYP_ULONG:
6829             lval = *((__int64*)fldAddr);
6830             op1  = gtNewLconNode(lval);
6831             break;
6832
6833         case TYP_FLOAT:
6834             dval        = *((float*)fldAddr);
6835             op1         = gtNewDconNode(dval);
6836             op1->gtType = TYP_FLOAT;
6837             break;
6838
6839         case TYP_DOUBLE:
6840             dval = *((double*)fldAddr);
6841             op1  = gtNewDconNode(dval);
6842             break;
6843
6844         default:
6845             assert(!"Unexpected lclTyp");
6846             break;
6847     }
6848
6849     return op1;
6850 }
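
// For example (a minimal sketch): a 'static readonly int' field whose memory currently
// holds 42 is folded by impImportStaticReadOnlyField into a GT_CNS_INT node with value
// 42, and a 'static readonly double' field becomes a GT_CNS_DBL node, so no runtime
// load of the field is emitted.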
6851
6852 GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6853                                               CORINFO_ACCESS_FLAGS    access,
6854                                               CORINFO_FIELD_INFO*     pFieldInfo,
6855                                               var_types               lclTyp)
6856 {
6857     GenTree* op1;
6858
6859     switch (pFieldInfo->fieldAccessor)
6860     {
6861         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6862         {
6863             assert(!compIsForInlining());
6864
6865             // We first call a special helper to get the statics base pointer
6866             op1 = impParentClassTokenToHandle(pResolvedToken);
6867
6868             // compIsForInlining() is false, so we should never get NULL here.
6869             assert(op1 != nullptr);
6870
6871             var_types type = TYP_BYREF;
6872
6873             switch (pFieldInfo->helper)
6874             {
6875                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6876                     type = TYP_I_IMPL;
6877                     break;
6878                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6879                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6880                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6881                     break;
6882                 default:
6883                     assert(!"unknown generic statics helper");
6884                     break;
6885             }
6886
6887             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6888
6889             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6890             op1              = gtNewOperNode(GT_ADD, type, op1,
6891                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6892         }
6893         break;
6894
6895         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6896         {
6897 #ifdef FEATURE_READYTORUN_COMPILER
6898             if (opts.IsReadyToRun())
6899             {
6900                 unsigned callFlags = 0;
6901
6902                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6903                 {
6904                     callFlags |= GTF_CALL_HOISTABLE;
6905                 }
6906
6907                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6908                 op1->gtFlags |= callFlags;
6909
6910                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6911             }
6912             else
6913 #endif
6914             {
6915                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6916             }
6917
6918             {
6919                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6920                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6921                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6922             }
6923             break;
6924         }
6925
6926         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6927         {
6928 #ifdef FEATURE_READYTORUN_COMPILER
6929             noway_assert(opts.IsReadyToRun());
6930             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6931             assert(kind.needsRuntimeLookup);
6932
6933             GenTree*        ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6934             GenTreeArgList* args    = gtNewArgList(ctxTree);
6935
6936             unsigned callFlags = 0;
6937
6938             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6939             {
6940                 callFlags |= GTF_CALL_HOISTABLE;
6941             }
6942             var_types type = TYP_BYREF;
6943             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6944             op1->gtFlags |= callFlags;
6945
6946             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6947             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6948             op1              = gtNewOperNode(GT_ADD, type, op1,
6949                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6950 #else
6951             unreached();
6952 #endif // FEATURE_READYTORUN_COMPILER
6953         }
6954         break;
6955
6956         default:
6957         {
6958             if (!(access & CORINFO_ACCESS_ADDRESS))
6959             {
6960                 // In future, it may be better to just create the right tree here instead of folding it later.
6961                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6962
6963                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6964                 {
6965                     op1->gtFlags |= GTF_FLD_INITCLASS;
6966                 }
6967
6968                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6969                 {
6970                     op1->gtType = TYP_REF; // points at boxed object
6971                     FieldSeqNode* firstElemFldSeq =
6972                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6973                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6974                                         new (this, GT_CNS_INT)
6975                                             GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
6976
6977                     if (varTypeIsStruct(lclTyp))
6978                     {
6979                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
6980                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
6981                     }
6982                     else
6983                     {
6984                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6985                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6986                     }
6987                 }
6988
6989                 return op1;
6990             }
6991             else
6992             {
6993                 void** pFldAddr = nullptr;
6994                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6995
6996                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6997
6998                 /* Create the data member node */
6999                 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
7000                                           fldSeq);
7001
7002                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
7003                 {
7004                     op1->gtFlags |= GTF_ICON_INITCLASS;
7005                 }
7006
7007                 if (pFldAddr != nullptr)
7008                 {
7009                     // There are two cases here: either the static is RVA-based,
7010                     // in which case the type of the FIELD node is not a GC type
7011                     // and the handle to the RVA is a TYP_I_IMPL; or the FIELD node is
7012                     // a GC type and the handle to it is a TYP_BYREF into the GC heap,
7013                     // because handles to statics now go into the large object heap.
7014
7015                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
7016                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
7017                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
7018                 }
7019             }
7020             break;
7021         }
7022     }
7023
7024     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
7025     {
7026         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
7027
7028         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
7029
7030         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
7031                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
7032     }
7033
7034     if (!(access & CORINFO_ACCESS_ADDRESS))
7035     {
7036         if (varTypeIsStruct(lclTyp))
7037         {
7038             // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
7039             op1 = gtNewObjNode(pFieldInfo->structType, op1);
7040         }
7041         else
7042         {
7043             op1 = gtNewOperNode(GT_IND, lclTyp, op1);
7044             op1->gtFlags |= GTF_GLOB_REF;
7045         }
7046     }
7047
7048     return op1;
7049 }
7050
7051 // In general, try to call this before most of the verification work.  Most people expect the access
7052 // exceptions before the verification exceptions; if you do this afterwards, that usually doesn't happen.  It turns
7053 // out that if you can't access something, we also consider you unverifiable for other reasons.
7054 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
7055 {
7056     if (result != CORINFO_ACCESS_ALLOWED)
7057     {
7058         impHandleAccessAllowedInternal(result, helperCall);
7059     }
7060 }
7061
7062 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
7063 {
7064     switch (result)
7065     {
7066         case CORINFO_ACCESS_ALLOWED:
7067             break;
7068         case CORINFO_ACCESS_ILLEGAL:
7069             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
7070             // method is verifiable.  Otherwise, delay the exception to runtime.
7071             if (compIsForImportOnly())
7072             {
7073                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
7074             }
7075             else
7076             {
7077                 impInsertHelperCall(helperCall);
7078             }
7079             break;
7080         case CORINFO_ACCESS_RUNTIME_CHECK:
7081             impInsertHelperCall(helperCall);
7082             break;
7083     }
7084 }
7085
7086 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
7087 {
7088     // Construct the argument list
7089     GenTreeArgList* args = nullptr;
7090     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
7091     for (unsigned i = helperInfo->numArgs; i > 0; --i)
7092     {
7093         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
7094         GenTree*                  currentArg = nullptr;
7095         switch (helperArg.argType)
7096         {
7097             case CORINFO_HELPER_ARG_TYPE_Field:
7098                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
7099                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
7100                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
7101                 break;
7102             case CORINFO_HELPER_ARG_TYPE_Method:
7103                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
7104                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
7105                 break;
7106             case CORINFO_HELPER_ARG_TYPE_Class:
7107                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
7108                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
7109                 break;
7110             case CORINFO_HELPER_ARG_TYPE_Module:
7111                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
7112                 break;
7113             case CORINFO_HELPER_ARG_TYPE_Const:
7114                 currentArg = gtNewIconNode(helperArg.constant);
7115                 break;
7116             default:
7117                 NO_WAY("Illegal helper arg type");
7118         }
7119         args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
7120     }
7121
7122     /* TODO-Review:
7123      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
7124      * Also, consider sticking this in the first basic block.
7125      */
7126     GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
7127     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
7128 }
7129
7130 // Checks whether the return types of the caller and callee are compatible
7131 // enough that the callee can be tail called. Note that here we don't check
7132 // compatibility in the IL Verifier sense, but rather whether the return type
7133 // sizes are equal and the values are returned in the same return register.
7134 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
7135                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
7136                                             var_types            calleeRetType,
7137                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
7138 {
7139     // Note that we can not relax this condition with genActualType() as the
7140     // calling convention dictates that the caller of a function with a small
7141     // typed return value is responsible for normalizing the return val.
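    // For example: if the caller is declared to return 'short' but the callee returns
    // 'int', a tail call would skip the caller's responsibility to narrow the result,
    // so the two are only considered compatible when the declared types match exactly
    // (or one of the size-based checks below applies).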
7142     if (callerRetType == calleeRetType)
7143     {
7144         return true;
7145     }
7146
7147     // If the class handles are the same and not null, the return types are compatible.
7148     if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
7149     {
7150         return true;
7151     }
7152
7153 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
7154     // Jit64 compat:
7155     if (callerRetType == TYP_VOID)
7156     {
7157         // This needs to be allowed to support the following IL pattern that Jit64 allows:
7158         //     tail.call
7159         //     pop
7160         //     ret
7161         //
7162         // Note that the above IL pattern is not valid as per IL verification rules.
7163         // Therefore, only full trust code can take advantage of this pattern.
7164         return true;
7165     }
7166
7167     // These checks return true if the return value type sizes are the same and
7168     // get returned in the same return register i.e. caller doesn't need to normalize
7169     // return value. Some of the tail calls permitted by below checks would have
7170     // been rejected by IL Verifier before we reached here.  Therefore, only full
7171     // trust code can make those tail calls.
7172     unsigned callerRetTypeSize = 0;
7173     unsigned calleeRetTypeSize = 0;
7174     bool     isCallerRetTypMBEnreg =
7175         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true, info.compIsVarArgs);
7176     bool isCalleeRetTypMBEnreg =
7177         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true, info.compIsVarArgs);
7178
7179     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
7180     {
7181         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
7182     }
7183 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
7184
7185     return false;
7186 }
7187
7188 // For prefixFlags
7189 enum
7190 {
7191     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
7192     PREFIX_TAILCALL_IMPLICIT =
7193         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
7194     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
7195     PREFIX_VOLATILE    = 0x00000100,
7196     PREFIX_UNALIGNED   = 0x00001000,
7197     PREFIX_CONSTRAINED = 0x00010000,
7198     PREFIX_READONLY    = 0x00100000
7199 };
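
// For example, a call site is a tail call candidate when either bit is set, i.e.
// '(prefixFlags & PREFIX_TAILCALL) != 0' covers both the explicit ".tail" IL prefix
// and the implicit (opportunistic) case.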
7200
7201 /********************************************************************************
7202  *
7203  * Returns true if the current opcode and the opcodes following it correspond
7204  * to a supported tail call IL pattern.
7205  *
7206  */
7207 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
7208                                       OPCODE      curOpcode,
7209                                       const BYTE* codeAddrOfNextOpcode,
7210                                       const BYTE* codeEnd,
7211                                       bool        isRecursive,
7212                                       bool*       isCallPopAndRet /* = nullptr */)
7213 {
7214     // Bail out if the current opcode is not a call.
7215     if (!impOpcodeIsCallOpcode(curOpcode))
7216     {
7217         return false;
7218     }
7219
7220 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
7221     // If shared ret tail opt is not enabled, we will enable
7222     // it for recursive methods.
7223     if (isRecursive)
7224 #endif
7225     {
7226         // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only
7227         // remaining part of the sequence. Make sure we don't go past the end of the IL, however.
7228         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
7229     }
7230
7231     // Bail out if there is no next opcode after call
7232     if (codeAddrOfNextOpcode >= codeEnd)
7233     {
7234         return false;
7235     }
7236
7237     // Scan the opcodes to look for the following IL patterns if either
7238     //   i) the call is not tail prefixed (i.e. implicit tail call) or
7239     //  ii) if tail prefixed, IL verification is not needed for the method.
7240     //
7241     // Only in the above two cases can we allow the tail call patterns below,
7242     // which violate the ECMA spec.
7243     //
7244     // Pattern1:
7245     //       call
7246     //       nop*
7247     //       ret
7248     //
7249     // Pattern2:
7250     //       call
7251     //       nop*
7252     //       pop
7253     //       nop*
7254     //       ret
7255     int    cntPop = 0;
7256     OPCODE nextOpcode;
7257
7258 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
7259     do
7260     {
7261         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
7262         codeAddrOfNextOpcode += sizeof(__int8);
7263     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
7264              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
7265              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
7266                                                                                          // one pop seen so far.
7267 #else
7268     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
7269 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
7270
7271     if (isCallPopAndRet)
7272     {
7273         // Allow call+pop+ret to be tail call optimized if caller ret type is void
7274         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
7275     }
7276
7277 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
7278     // Jit64 Compat:
7279     // Tail call IL pattern could be either of the following
7280     // 1) call/callvirt/calli + ret
7281     // 2) call/callvirt/calli + pop + ret in a method returning void.
7282     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
7283 #else
7284     return (nextOpcode == CEE_RET) && (cntPop == 0);
7285 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
7286 }
7287
7288 /*****************************************************************************
7289  *
7290  * Determine whether the call could be converted to an implicit tail call
7291  *
7292  */
7293 bool Compiler::impIsImplicitTailCallCandidate(
7294     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
7295 {
7296
7297 #if FEATURE_TAILCALL_OPT
7298     if (!opts.compTailCallOpt)
7299     {
7300         return false;
7301     }
7302
7303     if (opts.compDbgCode || opts.MinOpts())
7304     {
7305         return false;
7306     }
7307
7308     // must not be tail prefixed
7309     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
7310     {
7311         return false;
7312     }
7313
7314 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
7315     // The block containing the call must be marked as BBJ_RETURN.
7316     // We allow shared ret tail call optimization on recursive calls even under
7317     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
7318     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
7319         return false;
7320 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
7321
7322     // must be call+ret or call+pop+ret
7323     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
7324     {
7325         return false;
7326     }
7327
7328     return true;
7329 #else
7330     return false;
7331 #endif // FEATURE_TAILCALL_OPT
7332 }
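
// For example (assuming FEATURE_TAILCALL_OPT and opts.compTailCallOpt): a self-recursive
// "call M(); ret" sequence inside M, with no explicit ".tail" prefix, is reported as an
// implicit tail call candidate by the check above.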
7333
7334 //------------------------------------------------------------------------
7335 // impImportCall: import a call-inspiring opcode
7336 //
7337 // Arguments:
7338 //    opcode                    - opcode that inspires the call
7339 //    pResolvedToken            - resolved token for the call target
7340 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
7341 //    newObjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
7342 //    prefixFlags               - IL prefix flags for the call
7343 //    callInfo                  - EE supplied info for the call
7344 //    rawILOffset               - IL offset of the opcode
7345 //
7346 // Returns:
7347 //    Type of the call's return value.
7348 //    If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
7349 //    However we can't assert for this here yet because there are cases we miss. See issue #13272.
7350 //
7351 //
7352 // Notes:
7353 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
7354 //
7355 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
7356 //    uninitialized object.
7357
7358 #ifdef _PREFAST_
7359 #pragma warning(push)
7360 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
7361 #endif
7362
7363 var_types Compiler::impImportCall(OPCODE                  opcode,
7364                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
7365                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
7366                                   GenTree*                newobjThis,
7367                                   int                     prefixFlags,
7368                                   CORINFO_CALL_INFO*      callInfo,
7369                                   IL_OFFSET               rawILOffset)
7370 {
7371     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
7372
7373     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
7374     var_types              callRetTyp                     = TYP_COUNT;
7375     CORINFO_SIG_INFO*      sig                            = nullptr;
7376     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
7377     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
7378     unsigned               clsFlags                       = 0;
7379     unsigned               mflags                         = 0;
7380     unsigned               argFlags                       = 0;
7381     GenTree*               call                           = nullptr;
7382     GenTreeArgList*        args                           = nullptr;
7383     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
7384     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
7385     bool                   exactContextNeedsRuntimeLookup = false;
7386     bool                   canTailCall                    = true;
7387     const char*            szCanTailCallFailReason        = nullptr;
7388     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
7389     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
7390
7391     CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
7392
7393     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
7394     // do that before tailcalls, but that is probably not the intended
7395     // semantic. So just disallow tailcalls from synchronized methods.
7396     // Also, popping arguments in a varargs function is more work and NYI
7397     // If we have a security object, we have to keep our frame around for callers
7398     // to see any imperative security.
7399     if (info.compFlags & CORINFO_FLG_SYNCH)
7400     {
7401         canTailCall             = false;
7402         szCanTailCallFailReason = "Caller is synchronized";
7403     }
7404 #if !FEATURE_FIXED_OUT_ARGS
7405     else if (info.compIsVarArgs)
7406     {
7407         canTailCall             = false;
7408         szCanTailCallFailReason = "Caller is varargs";
7409     }
7410 #endif // FEATURE_FIXED_OUT_ARGS
7411     else if (opts.compNeedSecurityCheck)
7412     {
7413         canTailCall             = false;
7414         szCanTailCallFailReason = "Caller requires a security check.";
7415     }
7416
7417     // We only need to cast the return value of pinvoke inlined calls that return small types
7418
7419     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
7420     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
7421     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
7422     // the time being that the callee might be compiled by the other JIT and thus the return
7423     // value will need to be widened by us (or not widened at all...)
7424
7425     // ReadyToRun code sticks with default calling convention that does not widen small return types.
7426
7427     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
7428     bool bIntrinsicImported = false;
7429
7430     CORINFO_SIG_INFO calliSig;
7431     GenTreeArgList*  extraArg = nullptr;
7432
7433     /*-------------------------------------------------------------------------
7434      * First create the call node
7435      */
7436
7437     if (opcode == CEE_CALLI)
7438     {
7439         if (IsTargetAbi(CORINFO_CORERT_ABI))
7440         {
7441             // See comment in impCheckForPInvokeCall
7442             BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7443             if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block)))
7444             {
7445                 eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo);
7446                 return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset);
7447             }
7448         }
7449
7450         /* Get the call site sig */
7451         eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig);
7452
7453         callRetTyp = JITtype2varType(calliSig.retType);
7454
7455         call = impImportIndirectCall(&calliSig, ilOffset);
7456
7457         // We don't know the target method, so we have to infer the flags, or
7458         // assume the worst-case.
7459         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
7460
7461 #ifdef DEBUG
7462         if (verbose)
7463         {
7464             unsigned structSize =
7465                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
7466             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7467                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7468         }
7469 #endif
7470         // This should be checked in impImportBlockCode.
7471         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
7472
7473         sig = &calliSig;
7474
7475 #ifdef DEBUG
7476         // We cannot lazily obtain the signature of a CALLI call because it has no method
7477         // handle that we can use, so we need to save its full call signature here.
7478         assert(call->gtCall.callSig == nullptr);
7479         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7480         *call->gtCall.callSig = calliSig;
7481 #endif // DEBUG
7482
7483         if (IsTargetAbi(CORINFO_CORERT_ABI))
7484         {
7485             bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
7486                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
7487                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
7488                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
7489             if (managedCall)
7490             {
7491                 addFatPointerCandidate(call->AsCall());
7492             }
7493         }
7494     }
7495     else // (opcode != CEE_CALLI)
7496     {
7497         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
7498
7499         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
7500         // supply the instantiation parameters necessary to make direct calls to underlying
7501         // shared generic code, rather than calling through instantiating stubs.  If the
7502         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
7503         // must indeed pass an instantiation parameter.
7504
7505         methHnd = callInfo->hMethod;
7506
7507         sig        = &(callInfo->sig);
7508         callRetTyp = JITtype2varType(sig->retType);
7509
7510         mflags = callInfo->methodFlags;
7511
7512 #ifdef DEBUG
7513         if (verbose)
7514         {
7515             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
7516             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7517                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7518         }
7519 #endif
7520         if (compIsForInlining())
7521         {
7522             /* Does this call site have security boundary restrictions? */
7523
7524             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
7525             {
7526                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
7527                 return TYP_UNDEF;
7528             }
7529
7530             /* Does the inlinee need a security check token on the frame */
7531
7532             if (mflags & CORINFO_FLG_SECURITYCHECK)
7533             {
7534                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7535                 return TYP_UNDEF;
7536             }
7537
7538             /* Does the inlinee use StackCrawlMark */
7539
7540             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
7541             {
7542                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
7543                 return TYP_UNDEF;
7544             }
7545
7546             /* For now ignore delegate invoke */
7547
7548             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7549             {
7550                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
7551                 return TYP_UNDEF;
7552             }
7553
7554             /* For now ignore varargs */
7555             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7556             {
7557                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
7558                 return TYP_UNDEF;
7559             }
7560
7561             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7562             {
7563                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
7564                 return TYP_UNDEF;
7565             }
7566
7567             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
7568             {
7569                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
7570                 return TYP_UNDEF;
7571             }
7572         }
7573
7574         clsHnd = pResolvedToken->hClass;
7575
7576         clsFlags = callInfo->classFlags;
7577
7578 #ifdef DEBUG
7579         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
7580
7581         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
7582         // These should be in mscorlib.h, and available through a JIT/EE interface call.
7583         const char* modName;
7584         const char* className;
7585         const char* methodName;
7586         if ((className = eeGetClassName(clsHnd)) != nullptr &&
7587             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
7588             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
7589         {
7590             return impImportJitTestLabelMark(sig->numArgs);
7591         }
7592 #endif // DEBUG
7593
7594         // <NICE> Factor this into getCallInfo </NICE>
7595         bool isSpecialIntrinsic = false;
7596         if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
7597         {
7598             const bool isTail = canTailCall && (tailCall != 0);
7599
7600             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, readonlyCall, isTail,
7601                                 pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID, &isSpecialIntrinsic);
7602
7603             if (compDonotInline())
7604             {
7605                 return TYP_UNDEF;
7606             }
7607
7608             if (call != nullptr)
7609             {
7610                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
7611                        (clsFlags & CORINFO_FLG_FINAL));
7612
7613 #ifdef FEATURE_READYTORUN_COMPILER
7614                 if (call->OperGet() == GT_INTRINSIC)
7615                 {
7616                     if (opts.IsReadyToRun())
7617                     {
7618                         noway_assert(callInfo->kind == CORINFO_CALL);
7619                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
7620                     }
7621                     else
7622                     {
7623                         call->gtIntrinsic.gtEntryPoint.addr       = nullptr;
7624                         call->gtIntrinsic.gtEntryPoint.accessType = IAT_VALUE;
7625                     }
7626                 }
7627 #endif
7628
7629                 bIntrinsicImported = true;
7630                 goto DONE_CALL;
7631             }
7632         }
7633
7634 #ifdef FEATURE_SIMD
7635         if (featureSIMD)
7636         {
7637             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
7638             if (call != nullptr)
7639             {
7640                 bIntrinsicImported = true;
7641                 goto DONE_CALL;
7642             }
7643         }
7644 #endif // FEATURE_SIMD
7645
7646         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
7647         {
7648             NO_WAY("Virtual call to a function added via EnC is not supported");
7649         }
7650
7651         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
7652             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7653             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
7654         {
7655             BADCODE("Bad calling convention");
7656         }
7657
7658         //-------------------------------------------------------------------------
7659         //  Construct the call node
7660         //
7661         // Work out what sort of call we're making.
7662         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
7663
7664         constraintCallThisTransform    = callInfo->thisTransform;
7665         exactContextHnd                = callInfo->contextHandle;
7666         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
7667
7668         // A recursive call is treated as a loop back to the beginning of the method.
7669         if (gtIsRecursiveCall(methHnd))
7670         {
7671 #ifdef DEBUG
7672             if (verbose)
7673             {
7674                 JITDUMP("\nFound recursive call in the method. Mark " FMT_BB " to " FMT_BB
7675                         " as having a backward branch.\n",
7676                         fgFirstBB->bbNum, compCurBB->bbNum);
7677             }
7678 #endif
7679             fgMarkBackwardJump(fgFirstBB, compCurBB);
7680         }
7681
7682         switch (callInfo->kind)
7683         {
7684
7685             case CORINFO_VIRTUALCALL_STUB:
7686             {
7687                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7688                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7689                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
7690                 {
7691
7692                     if (compIsForInlining())
7693                     {
7694                         // Don't import runtime lookups when inlining
7695                         // Inlining has to be aborted in such a case
7696                         /* XXX Fri 3/20/2009
7697                          * By the way, this would never succeed.  If the handle lookup is into the generic
7698                          * dictionary for a candidate, you'll generate different dictionary offsets and the
7699                          * inlined code will crash.
7700                          *
7701                          * To anyone reviewing this code: when could this ever succeed in the future?  It'll
7702                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
7703                          * failing here.
7704                          */
7705                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
7706                         return TYP_UNDEF;
7707                     }
7708
7709                     GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
7710                     assert(!compDonotInline());
7711
7712                     // This is the rough code to set up an indirect stub call
7713                     assert(stubAddr != nullptr);
7714
7715                     // The stubAddr may be a
7716                     // complex expression. As it is evaluated after the args,
7717                     // it may cause registered args to be spilled. Simply spill it.
7718
7719                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
7720                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
7721                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7722
7723                     // Create the actual call node
7724
7725                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7726                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7727
7728                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
7729
7730                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
7731                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7732
7733 #ifdef _TARGET_X86_
7734                     // No tailcalls allowed for these yet...
7735                     canTailCall             = false;
7736                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
7737 #endif
7738                 }
7739                 else
7740                 {
7741                     // OK, the stub is available at compile time.
7742
7743                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7744                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
7745                     call->gtFlags |= GTF_CALL_VIRT_STUB;
7746                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
7747                            callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
7748                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
7749                     {
7750                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
7751                     }
7752                 }
7753
7754 #ifdef FEATURE_READYTORUN_COMPILER
7755                 if (opts.IsReadyToRun())
7756                 {
7757                     // Null check is sometimes needed for ready to run to handle
7758                     // non-virtual <-> virtual changes between versions
7759                     if (callInfo->nullInstanceCheck)
7760                     {
7761                         call->gtFlags |= GTF_CALL_NULLCHECK;
7762                     }
7763                 }
7764 #endif
7765
7766                 break;
7767             }
7768
7769             case CORINFO_VIRTUALCALL_VTABLE:
7770             {
7771                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7772                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7773                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7774                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
7775                 break;
7776             }
7777
7778             case CORINFO_VIRTUALCALL_LDVIRTFTN:
7779             {
7780                 if (compIsForInlining())
7781                 {
7782                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
7783                     return TYP_UNDEF;
7784                 }
7785
7786                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7787                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7788                 // OK, we've been told to call via LDVIRTFTN, so just
7789                 // take the call now....
7790
7791                 args = impPopList(sig->numArgs, sig);
7792
7793                 GenTree* thisPtr = impPopStack().val;
7794                 thisPtr          = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7795                 assert(thisPtr != nullptr);
7796
7797                 // Clone the (possibly transformed) "this" pointer
7798                 GenTree* thisPtrCopy;
7799                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7800                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
7801
7802                 GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7803                 assert(fptr != nullptr);
7804
7805                 thisPtr = nullptr; // can't reuse it
7806
7807                 // Now make an indirect call through the function pointer
7808
7809                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7810                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7811                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7812
7813                 // Create the actual call node
7814
7815                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7816                 call->gtCall.gtCallObjp = thisPtrCopy;
7817                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7818
7819                 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7820                 {
7821                     // CoreRT generic virtual method: need to handle potential fat function pointers
7822                     addFatPointerCandidate(call->AsCall());
7823                 }
7824 #ifdef FEATURE_READYTORUN_COMPILER
7825                 if (opts.IsReadyToRun())
7826                 {
7827                     // Null check is needed for ready to run to handle
7828                     // non-virtual <-> virtual changes between versions
7829                     call->gtFlags |= GTF_CALL_NULLCHECK;
7830                 }
7831 #endif
7832
7833                 // Since we are jumping over some code, check that it's OK to skip that code.
7834                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7835                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7836                 goto DONE;
7837             }
7838
7839             case CORINFO_CALL:
7840             {
7841                 // This is for a non-virtual, non-interface etc. call
7842                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7843
7844                 // We remove the null check for the GetType call intrinsic.
7845                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7846                 // and intrinsics.
7847                 if (callInfo->nullInstanceCheck &&
7848                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7849                 {
7850                     call->gtFlags |= GTF_CALL_NULLCHECK;
7851                 }
7852
7853 #ifdef FEATURE_READYTORUN_COMPILER
7854                 if (opts.IsReadyToRun())
7855                 {
7856                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7857                 }
7858 #endif
7859                 break;
7860             }
7861
7862             case CORINFO_CALL_CODE_POINTER:
7863             {
7864                 // The EE has asked us to call by computing a code pointer and then doing an
7865                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
7866
7867                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7868                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7869
7870                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7871                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7872
7873                 GenTree* fptr =
7874                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7875
7876                 if (compDonotInline())
7877                 {
7878                     return TYP_UNDEF;
7879                 }
7880
7881                 // Now make an indirect call through the function pointer
7882
7883                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7884                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7885                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7886
7887                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7888                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7889                 if (callInfo->nullInstanceCheck)
7890                 {
7891                     call->gtFlags |= GTF_CALL_NULLCHECK;
7892                 }
7893
7894                 break;
7895             }
7896
7897             default:
7898                 assert(!"unknown call kind");
7899                 break;
7900         }
7901
7902         //-------------------------------------------------------------------------
7903         // Set more flags
7904
7905         PREFIX_ASSUME(call != nullptr);
7906
7907         if (mflags & CORINFO_FLG_NOGCCHECK)
7908         {
7909             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7910         }
7911
7912         // Mark the call if it's one of the ones we may later treat as an intrinsic.
7913         if (isSpecialIntrinsic)
7914         {
7915             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7916         }
7917     }
7918     assert(sig);
7919     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7920
7921     /* Some sanity checks */
7922
7923     // CALL_VIRT and NEWOBJ must have a THIS pointer
7924     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7925     // static bit and hasThis are negations of one another
7926     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7927     assert(call != nullptr);
7928
7929     /*-------------------------------------------------------------------------
7930      * Check special-cases etc
7931      */
7932
7933     /* Special case - Check if it is a call to Delegate.Invoke(). */
7934
7935     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7936     {
7937         assert(!compIsForInlining());
7938         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7939         assert(mflags & CORINFO_FLG_FINAL);
7940
7941         /* Set the delegate flag */
7942         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7943
7944         if (callInfo->secureDelegateInvoke)
7945         {
7946             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7947         }
7948
7949         if (opcode == CEE_CALLVIRT)
7950         {
7951             assert(mflags & CORINFO_FLG_FINAL);
7952
7953             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7954             assert(call->gtFlags & GTF_CALL_NULLCHECK);
7955             call->gtFlags &= ~GTF_CALL_NULLCHECK;
7956         }
7957     }
7958
7959     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7960     actualMethodRetTypeSigClass = sig->retTypeSigClass;
7961     if (varTypeIsStruct(callRetTyp))
7962     {
7963         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
7964         call->gtType = callRetTyp;
7965     }
7966
7967 #if !FEATURE_VARARG
7968     /* Check for varargs */
7969     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7970         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7971     {
7972         BADCODE("Varargs not supported.");
7973     }
7974 #endif // !FEATURE_VARARG
7975
7976 #ifdef UNIX_X86_ABI
7977     if (call->gtCall.callSig == nullptr)
7978     {
7979         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7980         *call->gtCall.callSig = *sig;
7981     }
7982 #endif // UNIX_X86_ABI
7983
7984     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7985         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7986     {
7987         assert(!compIsForInlining());
7988
7989         /* Set the right flags */
7990
7991         call->gtFlags |= GTF_CALL_POP_ARGS;
7992         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7993
7994         /* Can't allow tailcall for varargs as it is caller-pop. The caller
7995            will be expecting to pop a certain number of arguments, but if we
7996            tailcall to a function with a different number of arguments, we
7997            are hosed. There are ways around this (caller remembers esp value,
7998            varargs is not caller-pop, etc), but not worth it. */
7999         CLANG_FORMAT_COMMENT_ANCHOR;
8000
8001 #ifdef _TARGET_X86_
8002         if (canTailCall)
8003         {
8004             canTailCall             = false;
8005             szCanTailCallFailReason = "Callee is varargs";
8006         }
8007 #endif
8008
8009         /* Get the total number of arguments - this is already correct
8010          * for CALLI - for methods we have to get it from the call site */
8011
8012         if (opcode != CEE_CALLI)
8013         {
8014 #ifdef DEBUG
8015             unsigned numArgsDef = sig->numArgs;
8016 #endif
8017             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8018
8019 #ifdef DEBUG
8020             // We cannot lazily obtain the signature of a vararg call because using its method
8021             // handle will give us only the declared argument list, not the full argument list.
8022             assert(call->gtCall.callSig == nullptr);
8023             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8024             *call->gtCall.callSig = *sig;
8025 #endif
8026
8027             // For vararg calls we must be sure to load the return type of the
8028             // method actually being called, as well as the return types of the
8029             // specified in the vararg signature. With type equivalency, these types
8030             // may not be the same.
8031             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
8032             {
8033                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
8034                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
8035                     sig->retType != CORINFO_TYPE_VAR)
8036                 {
8037                     // Make sure that all valuetypes (including enums) that we push are loaded.
8038                     // This is to guarantee that if a GC is triggered from the prestub of this method,
8039                     // all valuetypes in the method signature are already loaded.
8040                     // We need to be able to find the size of the valuetypes, but we cannot
8041                     // do a class-load from within GC.
8042                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
8043                 }
8044             }
8045
8046             assert(numArgsDef <= sig->numArgs);
8047         }
8048
8049         /* We will have "cookie" as the last argument but we cannot push
8050          * it on the operand stack because we may overflow, so we append it
8051          * to the arg list next after we pop them */
8052     }
8053
8054     if (mflags & CORINFO_FLG_SECURITYCHECK)
8055     {
8056         assert(!compIsForInlining());
8057
8058         // Need security prolog/epilog callouts when there is
8059         // imperative security in the method. This is to give security a
8060         // chance to do any setup in the prolog and cleanup in the epilog if needed.
8061
8062         if (compIsForInlining())
8063         {
8064             // Cannot handle this if the method being imported is an inlinee by itself.
8065             // Because inlinee method does not have its own frame.
8066
8067             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
8068             return TYP_UNDEF;
8069         }
8070         else
8071         {
8072             tiSecurityCalloutNeeded = true;
8073
8074             // If the current method calls a method which needs a security check,
8075             // (i.e. the method being compiled has imperative security)
8076             // we need to reserve a slot for the security object in
8077             // the current method's stack frame
8078             opts.compNeedSecurityCheck = true;
8079         }
8080     }
8081
8082     //--------------------------- Inline NDirect ------------------------------
8083
8084     // For inline cases we technically should look at both the current
8085     // block and the call site block (or just the latter if we've
8086     // fused the EH trees). However the block-related checks pertain to
8087     // EH and we currently won't inline a method with EH. So for
8088     // inlinees, just checking the call site block is sufficient.
8089     {
8090         // New lexical block here to avoid compilation errors because of GOTOs.
8091         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
8092         impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
8093     }
8094
8095     if (call->gtFlags & GTF_CALL_UNMANAGED)
8096     {
8097         // We set up the unmanaged call by linking the frame, disabling GC, etc
8098         // This needs to be cleaned up on return
8099         if (canTailCall)
8100         {
8101             canTailCall             = false;
8102             szCanTailCallFailReason = "Callee is native";
8103         }
8104
8105         checkForSmallType = true;
8106
8107         impPopArgsForUnmanagedCall(call, sig);
8108
8109         goto DONE;
8110     }
8111     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
8112                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
8113                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
8114                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
8115     {
8116         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
8117         {
8118             // Normally this only happens with inlining.
8119             // However, a generic method (or type) being NGENd into another module
8120             // can run into this issue as well. There's no easy fallback for NGEN,
8121             // so instead we fall back to JIT.
8122             if (compIsForInlining())
8123             {
8124                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
8125             }
8126             else
8127             {
8128                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
8129             }
8130
8131             return TYP_UNDEF;
8132         }
8133
8134         GenTree* cookie = eeGetPInvokeCookie(sig);
8135
8136         // This cookie is required to be either a simple GT_CNS_INT or
8137         // an indirection of a GT_CNS_INT
8138         //
8139         GenTree* cookieConst = cookie;
8140         if (cookie->gtOper == GT_IND)
8141         {
8142             cookieConst = cookie->gtOp.gtOp1;
8143         }
8144         assert(cookieConst->gtOper == GT_CNS_INT);
8145
8146         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
8147         // we won't allow this tree to participate in any CSE logic
8148         //
8149         cookie->gtFlags |= GTF_DONT_CSE;
8150         cookieConst->gtFlags |= GTF_DONT_CSE;
8151
8152         call->gtCall.gtCallCookie = cookie;
8153
8154         if (canTailCall)
8155         {
8156             canTailCall             = false;
8157             szCanTailCallFailReason = "PInvoke calli";
8158         }
8159     }
8160
8161     /*-------------------------------------------------------------------------
8162      * Create the argument list
8163      */
8164
8165     //-------------------------------------------------------------------------
8166     // Special case - for varargs we have an implicit last argument
8167
8168     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
8169     {
8170         assert(!compIsForInlining());
8171
8172         void *varCookie, *pVarCookie;
8173         if (!info.compCompHnd->canGetVarArgsHandle(sig))
8174         {
8175             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
8176             return TYP_UNDEF;
8177         }
8178
8179         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
8180         assert((!varCookie) != (!pVarCookie));
8181         GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
8182
8183         assert(extraArg == nullptr);
8184         extraArg = gtNewArgList(cookie);
8185     }
8186
8187     //-------------------------------------------------------------------------
8188     // Extra arg for shared generic code and array methods
8189     //
8190     // Extra argument containing instantiation information is passed in the
8191     // following circumstances:
8192     // (a) To the "Address" method on array classes; the extra parameter is
8193     //     the array's type handle (a TypeDesc)
8194     // (b) To shared-code instance methods in generic structs; the extra parameter
8195     //     is the struct's type handle (a vtable ptr)
8196     // (c) To shared-code per-instantiation non-generic static methods in generic
8197     //     classes and structs; the extra parameter is the type handle
8198     // (d) To shared-code generic methods; the extra parameter is an
8199     //     exact-instantiation MethodDesc
8200     //
8201     // We also set the exact type context associated with the call so we can
8202     // inline the call correctly later on.
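    // Illustrative note (not from the original source): for case (d), a shared-code generic
    // method such as "static void Print<T>(T value)" is compiled once for all reference-type
    // instantiations; at a call site like Print<string>(s) the caller passes the exact
    // MethodDesc for Print<string> as this hidden extra argument, so the body can recover
    // the precise instantiation at runtime.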
8203
8204     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
8205     {
8206         assert(call->gtCall.gtCallType == CT_USER_FUNC);
8207         if (clsHnd == nullptr)
8208         {
8209             NO_WAY("CALLI on parameterized type");
8210         }
8211
8212         assert(opcode != CEE_CALLI);
8213
8214         GenTree* instParam;
8215         BOOL     runtimeLookup;
8216
8217         // Instantiated generic method
8218         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
8219         {
8220             CORINFO_METHOD_HANDLE exactMethodHandle =
8221                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
8222
8223             if (!exactContextNeedsRuntimeLookup)
8224             {
8225 #ifdef FEATURE_READYTORUN_COMPILER
8226                 if (opts.IsReadyToRun())
8227                 {
8228                     instParam =
8229                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
8230                     if (instParam == nullptr)
8231                     {
8232                         assert(compDonotInline());
8233                         return TYP_UNDEF;
8234                     }
8235                 }
8236                 else
8237 #endif
8238                 {
8239                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
8240                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
8241                 }
8242             }
8243             else
8244             {
8245                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
8246                 if (instParam == nullptr)
8247                 {
8248                     assert(compDonotInline());
8249                     return TYP_UNDEF;
8250                 }
8251             }
8252         }
8253
8254         // otherwise must be an instance method in a generic struct,
8255         // a static method in a generic type, or a runtime-generated array method
8256         else
8257         {
8258             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
8259             CORINFO_CLASS_HANDLE exactClassHandle =
8260                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
8261
8262             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
8263             {
8264                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
8265                 return TYP_UNDEF;
8266             }
8267
8268             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
8269             {
8270                 // We indicate "readonly" to the Address operation by using a null
8271                 // instParam.
8272                 instParam = gtNewIconNode(0, TYP_REF);
8273             }
8274             else if (!exactContextNeedsRuntimeLookup)
8275             {
8276 #ifdef FEATURE_READYTORUN_COMPILER
8277                 if (opts.IsReadyToRun())
8278                 {
8279                     instParam =
8280                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
8281                     if (instParam == nullptr)
8282                     {
8283                         assert(compDonotInline());
8284                         return TYP_UNDEF;
8285                     }
8286                 }
8287                 else
8288 #endif
8289                 {
8290                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
8291                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
8292                 }
8293             }
8294             else
8295             {
8296                 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
8297                 // with which the call was constrained. We embed pConstrainedResolvedToken as the extra argument
8298                 // because pResolvedToken is an interface method and interface types make a poor generic context.
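                // Illustrative example (assumed, not from the original source): for IL like
                //     constrained. !T
                //     callvirt   IComparable::CompareTo
                // inside shared generic code, the constrained type !T identifies the exact
                // instantiation far better than the interface type does, so its handle is
                // used as the instantiating argument.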
8299                 if (pConstrainedResolvedToken)
8300                 {
8301                     instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
8302                                                  FALSE /* importParent */);
8303                 }
8304                 else
8305                 {
8306                     instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
8307                 }
8308
8309                 if (instParam == nullptr)
8310                 {
8311                     assert(compDonotInline());
8312                     return TYP_UNDEF;
8313                 }
8314             }
8315         }
8316
8317         assert(extraArg == nullptr);
8318         extraArg = gtNewArgList(instParam);
8319     }
8320
8321     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
8322     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
8323     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
8324     // exactContextHnd is not currently required when inlining shared generic code into shared
8325     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
8326     // (e.g. anything marked needsRuntimeLookup)
8327     if (exactContextNeedsRuntimeLookup)
8328     {
8329         exactContextHnd = nullptr;
8330     }
8331
8332     if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
8333     {
8334         // Only verifiable cases are supported.
8335         // dup; ldvirtftn; newobj; or ldftn; newobj.
8336         // The IL could contain an unverifiable sequence; in that case the optimization should not be done.
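        // Illustrative note (assumed): "dup; ldvirtftn; newobj" is the sequence typically emitted
        // when creating a delegate over a virtual method (e.g. new Action(obj.VirtualMethod)),
        // while "ldftn; newobj" is used for static or non-virtual targets.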
8337         if (impStackHeight() > 0)
8338         {
8339             typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
8340             if (delegateTypeInfo.IsToken())
8341             {
8342                 ldftnToken = delegateTypeInfo.GetToken();
8343             }
8344         }
8345     }
8346
8347     //-------------------------------------------------------------------------
8348     // The main group of arguments
8349
8350     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
8351
8352     if (args)
8353     {
8354         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
8355     }
8356
8357     //-------------------------------------------------------------------------
8358     // The "this" pointer
8359
8360     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
8361     {
8362         GenTree* obj;
8363
8364         if (opcode == CEE_NEWOBJ)
8365         {
8366             obj = newobjThis;
8367         }
8368         else
8369         {
8370             obj = impPopStack().val;
8371             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
8372             if (compDonotInline())
8373             {
8374                 return TYP_UNDEF;
8375             }
8376         }
8377
8378         // Store the "this" value in the call
8379         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
8380         call->gtCall.gtCallObjp = obj;
8381
8382         // Is this a virtual or interface call?
8383         if (call->gtCall.IsVirtual())
8384         {
8385             // only true object pointers can be virtual
8386             assert(obj->gtType == TYP_REF);
8387
8388             // See if we can devirtualize.
8389             impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
8390                                 &exactContextHnd);
8391         }
8392
8393         if (impIsThis(obj))
8394         {
8395             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
8396         }
8397     }
8398
8399     //-------------------------------------------------------------------------
8400     // The "this" pointer for "newobj"
8401
8402     if (opcode == CEE_NEWOBJ)
8403     {
8404         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
8405         {
8406             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
8407             // This is a 'new' of a variable sized object, where
8408             // the constructor is to return the object.  In this case
8409             // the constructor claims to return VOID but we know it
8410             // actually returns the new object
8411             assert(callRetTyp == TYP_VOID);
8412             callRetTyp   = TYP_REF;
8413             call->gtType = TYP_REF;
8414             impSpillSpecialSideEff();
8415
8416             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
8417         }
8418         else
8419         {
8420             if (clsFlags & CORINFO_FLG_DELEGATE)
8421             {
8422                 // With the new inliner, morph the delegate constructor here in impImportCall.
8423                 // This will allow us to inline the call to the delegate constructor.
8424                 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
8425             }
8426
8427             if (!bIntrinsicImported)
8428             {
8429
8430 #if defined(DEBUG) || defined(INLINE_DATA)
8431
8432                 // Keep track of the raw IL offset of the call
8433                 call->gtCall.gtRawILOffset = rawILOffset;
8434
8435 #endif // defined(DEBUG) || defined(INLINE_DATA)
8436
8437                 // Is it an inline candidate?
8438                 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8439             }
8440
8441             // append the call node.
8442             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8443
8444             // Now push the value of the 'new' onto the stack
8445
8446             // This is a 'new' of a non-variable sized object.
8447             // Append the new node (op1) to the statement list,
8448             // and then push the local holding the value of this
8449             // new instruction on the stack.
8450
8451             if (clsFlags & CORINFO_FLG_VALUECLASS)
8452             {
8453                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
8454
8455                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
8456                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
8457             }
8458             else
8459             {
8460                 if (newobjThis->gtOper == GT_COMMA)
8461                 {
8462                     // In coreclr the callout can be inserted even if verification is disabled
8463                     // so we cannot rely on tiVerificationNeeded alone
8464
8465                     // We must have inserted the callout. Get the real newobj.
8466                     newobjThis = newobjThis->gtOp.gtOp2;
8467                 }
8468
8469                 assert(newobjThis->gtOper == GT_LCL_VAR);
8470                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
8471             }
8472         }
8473         return callRetTyp;
8474     }
8475
8476 DONE:
8477
8478     if (tailCall)
8479     {
8480         // This check cannot be performed for implicit tail calls for the reason
8481         // that impIsImplicitTailCallCandidate() is not checking whether return
8482         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
8483         // As a result it is possible that in the following case, we find that
8484         // the type stack is non-empty if Callee() is considered for implicit
8485         // tail calling.
8486         //      int Caller(..) { .... void Callee(); ret val; ... }
8487         //
8488         // Note that we cannot check return type compatibility before impImportCall()
8489         // as we don't have the required info, or we would need to duplicate some of
8490         // the logic of impImportCall().
8491         //
8492         // For implicit tail calls, we perform this check after return types are
8493         // known to be compatible.
8494         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
8495         {
8496             BADCODE("Stack should be empty after tailcall");
8497         }
8498
8499         // Note that we cannot relax this condition with genActualType() as
8500         // the calling convention dictates that the caller of a function with
8501         // a small-typed return value is responsible for normalizing the return value.
8502
8503         if (canTailCall &&
8504             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
8505                                           callInfo->sig.retTypeClass))
8506         {
8507             canTailCall             = false;
8508             szCanTailCallFailReason = "Return types are not tail call compatible";
8509         }
8510
8511         // Stack empty check for implicit tail calls.
8512         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
8513         {
8514 #ifdef _TARGET_AMD64_
8515             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
8516             // in JIT64, not an InvalidProgramException.
8517             Verify(false, "Stack should be empty after tailcall");
8518 #else  // _TARGET_64BIT_
8519             BADCODE("Stack should be empty after tailcall");
8520 #endif //!_TARGET_64BIT_
8521         }
8522
8523         // assert(compCurBB is not a catch, finally or filter block);
8524         // assert(compCurBB is not a try block protected by a finally block);
8525
8526         // Check for permission to tailcall
8527         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
8528
8529         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
8530
8531         if (canTailCall)
8532         {
8533             // True virtual or indirect calls, shouldn't pass in a callee handle.
8534             CORINFO_METHOD_HANDLE exactCalleeHnd =
8535                 ((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
8536             GenTree* thisArg = call->gtCall.gtCallObjp;
8537
8538             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
8539             {
8540                 canTailCall = true;
8541                 if (explicitTailCall)
8542                 {
8543                     // In case of explicit tail calls, mark it so that it is not considered
8544                     // for in-lining.
8545                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
8546 #ifdef DEBUG
8547                     if (verbose)
8548                     {
8549                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
8550                         printTreeID(call);
8551                         printf("\n");
8552                     }
8553 #endif
8554                 }
8555                 else
8556                 {
8557 #if FEATURE_TAILCALL_OPT
8558                     // Must be an implicit tail call.
8559                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
8560
8561                     // It is possible that a call node is both an inline candidate and marked
8562                     // for opportunistic tail calling.  In-lining happens before morphing of
8563                     // trees.  If in-lining of an in-line candidate gets aborted for whatever
8564                     // reason, it will survive to the morphing stage at which point it will be
8565                     // transformed into a tail call after performing additional checks.
8566
8567                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
8568 #ifdef DEBUG
8569                     if (verbose)
8570                     {
8571                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
8572                         printTreeID(call);
8573                         printf("\n");
8574                     }
8575 #endif
8576
8577 #else //! FEATURE_TAILCALL_OPT
8578                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
8579
8580 #endif // FEATURE_TAILCALL_OPT
8581                 }
8582
8583                 // we can't report success just yet...
8584             }
8585             else
8586             {
8587                 canTailCall = false;
8588 // canTailCall reported its reasons already
8589 #ifdef DEBUG
8590                 if (verbose)
8591                 {
8592                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
8593                     printTreeID(call);
8594                     printf("\n");
8595                 }
8596 #endif
8597             }
8598         }
8599         else
8600         {
8601             // If this assert fires it means that canTailCall was set to false without setting a reason!
8602             assert(szCanTailCallFailReason != nullptr);
8603
8604 #ifdef DEBUG
8605             if (verbose)
8606             {
8607                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
8608                 printTreeID(call);
8609                 printf(": %s\n", szCanTailCallFailReason);
8610             }
8611 #endif
8612             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
8613                                                      szCanTailCallFailReason);
8614         }
8615     }
8616
8617     // Note: we assume that small return types are already normalized by the managed callee
8618     // or by the pinvoke stub for calls to unmanaged code.
8619
8620     if (!bIntrinsicImported)
8621     {
8622         //
8623         // Things needed to be checked when bIntrinsicImported is false.
8624         //
8625
8626         assert(call->gtOper == GT_CALL);
8627         assert(sig != nullptr);
8628
8629         // Tail calls require us to save the call site's sig info so we can obtain an argument
8630         // copying thunk from the EE later on.
8631         if (call->gtCall.callSig == nullptr)
8632         {
8633             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8634             *call->gtCall.callSig = *sig;
8635         }
8636
8637         if (compIsForInlining() && opcode == CEE_CALLVIRT)
8638         {
8639             GenTree* callObj = call->gtCall.gtCallObjp;
8640             assert(callObj != nullptr);
8641
8642             if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
8643                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
8644                                                                    impInlineInfo->inlArgInfo))
8645             {
8646                 impInlineInfo->thisDereferencedFirst = true;
8647             }
8648         }
8649
8650 #if defined(DEBUG) || defined(INLINE_DATA)
8651
8652         // Keep track of the raw IL offset of the call
8653         call->gtCall.gtRawILOffset = rawILOffset;
8654
8655 #endif // defined(DEBUG) || defined(INLINE_DATA)
8656
8657         // Is it an inline candidate?
8658         impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8659     }
8660
8661 DONE_CALL:
8662     // Push or append the result of the call
8663     if (callRetTyp == TYP_VOID)
8664     {
8665         if (opcode == CEE_NEWOBJ)
8666         {
8667             // we actually did push something, so don't spill the thing we just pushed.
8668             assert(verCurrentState.esStackDepth > 0);
8669             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
8670         }
8671         else
8672         {
8673             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8674         }
8675     }
8676     else
8677     {
8678         impSpillSpecialSideEff();
8679
8680         if (clsFlags & CORINFO_FLG_ARRAY)
8681         {
8682             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8683         }
8684
8685         // Find the return type used for verification by interpreting the method signature.
8686         // NB: we are clobbering the already established sig.
8687         if (tiVerificationNeeded)
8688         {
8689             // Actually, we never get the sig for the original method.
8690             sig = &(callInfo->verSig);
8691         }
8692
8693         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
8694         tiRetVal.NormaliseForStack();
8695
8696         // The CEE_READONLY prefix modifies the verification semantics of an Address
8697         // operation on an array type.
8698         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
8699         {
8700             tiRetVal.SetIsReadonlyByRef();
8701         }
8702
8703         if (tiVerificationNeeded)
8704         {
8705             // We assume all calls return permanent home byrefs. If they
8706             // didn't they wouldn't be verifiable. This is also covering
8707             // the Address() helper for multidimensional arrays.
8708             if (tiRetVal.IsByRef())
8709             {
8710                 tiRetVal.SetIsPermanentHomeByRef();
8711             }
8712         }
8713
8714         if (call->IsCall())
8715         {
8716             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
8717
8718             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
8719             if (varTypeIsStruct(callRetTyp))
8720             {
8721                 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
8722             }
8723
8724             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
8725             {
8726                 assert(opts.OptEnabled(CLFLG_INLINING));
8727                 assert(!fatPointerCandidate); // We should not try to inline calli.
8728
8729                 // Make the call its own tree (spill the stack if needed).
8730                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8731
8732                 // TODO: Still using the widened type.
8733                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
8734             }
8735             else
8736             {
8737                 if (fatPointerCandidate)
8738                 {
8739                     // fatPointer candidates should be in statements of the form call() or var = call().
8740                     // This form makes it possible to find statements with fat calls without walking whole trees,
8741                     // and avoids problems with splitting trees.
8742                     assert(!bIntrinsicImported);
8743                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
8744                     if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
8745                     {
8746                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
8747                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
8748                         varDsc->lvVerTypeInfo = tiRetVal;
8749                         impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
8750                         // impAssignTempGen can change src arg list and return type for call that returns struct.
8751                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
8752                         call           = gtNewLclvNode(calliSlot, type);
8753                     }
8754                 }
8755
8756                 // For non-candidates we must also spill, since we
8757                 // might have locals live on the eval stack that this
8758                 // call can modify.
8759                 //
8760                 // Suppress this for certain well-known call targets
8761                 // that we know won't modify locals, eg calls that are
8762                 // recognized in gtCanOptimizeTypeEquality. Otherwise
8763                 // we may break key fragile pattern matches later on.
8764                 bool spillStack = true;
8765                 if (call->IsCall())
8766                 {
8767                     GenTreeCall* callNode = call->AsCall();
8768                     if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) ||
8769                                                                 gtIsTypeHandleToRuntimeTypeHandleHelper(callNode)))
8770                     {
8771                         spillStack = false;
8772                     }
8773                     else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8774                     {
8775                         spillStack = false;
8776                     }
8777                 }
8778
8779                 if (spillStack)
8780                 {
8781                     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8782                 }
8783             }
8784         }
8785
8786         if (!bIntrinsicImported)
8787         {
8788             //-------------------------------------------------------------------------
8789             //
8790             /* If the call is of a small type and the callee is managed, the callee will normalize the result
8791                 before returning.
8792                 However, we need to normalize small type values returned by unmanaged
8793                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8794                 if we use the shorter inlined pinvoke stub. */
8795
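            // Sketch of the effect (illustrative): an unmanaged callee returning e.g. an 8-bit
            // value leaves only the low byte of the return register defined, so the cast below
            // re-widens the small return type to its full TYP_INT stack representation.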
8796             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8797             {
8798                 call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
8799             }
8800         }
8801
8802         impPushOnStack(call, tiRetVal);
8803     }
8804
8805     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
8806     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8807     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8808     //  callInfoCache.uncacheCallInfo();
8809
8810     return callRetTyp;
8811 }
8812 #ifdef _PREFAST_
8813 #pragma warning(pop)
8814 #endif
8815
8816 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8817 {
8818     CorInfoType corType = methInfo->args.retType;
8819
8820     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8821     {
8822         // We have some kind of STRUCT being returned
8823
8824         structPassingKind howToReturnStruct = SPK_Unknown;
8825
8826         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8827
8828         if (howToReturnStruct == SPK_ByReference)
8829         {
8830             return true;
8831         }
8832     }
8833
8834     return false;
8835 }
8836
8837 #ifdef DEBUG
8838 //
8839 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8840 {
8841     TestLabelAndNum tlAndN;
8842     if (numArgs == 2)
8843     {
8844         tlAndN.m_num  = 0;
8845         StackEntry se = impPopStack();
8846         assert(se.seTypeInfo.GetType() == TI_INT);
8847         GenTree* val = se.val;
8848         assert(val->IsCnsIntOrI());
8849         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8850     }
8851     else if (numArgs == 3)
8852     {
8853         StackEntry se = impPopStack();
8854         assert(se.seTypeInfo.GetType() == TI_INT);
8855         GenTree* val = se.val;
8856         assert(val->IsCnsIntOrI());
8857         tlAndN.m_num = val->AsIntConCommon()->IconValue();
8858         se           = impPopStack();
8859         assert(se.seTypeInfo.GetType() == TI_INT);
8860         val = se.val;
8861         assert(val->IsCnsIntOrI());
8862         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8863     }
8864     else
8865     {
8866         assert(false);
8867     }
8868
8869     StackEntry expSe = impPopStack();
8870     GenTree*   node  = expSe.val;
8871
8872     // There are a small number of special cases, where we actually put the annotation on a subnode.
8873     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8874     {
8875         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8876         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8877         // offset within the static field block whose address is returned by the helper call.
8878         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
8879         GenTree* helperCall = nullptr;
8880         assert(node->OperGet() == GT_IND);
8881         tlAndN.m_num -= 100;
8882         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8883         GetNodeTestData()->Remove(node);
8884     }
8885     else
8886     {
8887         GetNodeTestData()->Set(node, tlAndN);
8888     }
8889
8890     impPushOnStack(node, expSe.seTypeInfo);
8891     return node->TypeGet();
8892 }
8893 #endif // DEBUG
8894
8895 //-----------------------------------------------------------------------------------
8896 //  impFixupCallStructReturn: For a call node that returns a struct type either
8897 //  adjust the return type to an enregisterable type, or set the flag to indicate
8898 //  struct return via retbuf arg.
8899 //
8900 //  Arguments:
8901 //    call       -  GT_CALL GenTree node
8902 //    retClsHnd  -  Class handle of return type of the call
8903 //
8904 //  Return Value:
8905 //    Returns new GenTree node after fixing struct return of call node
8906 //
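//  Notes:
//    Illustrative examples (not exhaustive): a struct that wraps a single int can be retyped so
//    the call simply returns TYP_INT; a multi-reg struct return is forced into "tmp = call" form
//    via impAssignMultiRegTypeToVar; and a struct returned via a hidden buffer gets the
//    GTF_CALL_M_RETBUFFARG flag instead.
//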
8907 GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8908 {
8909     if (!varTypeIsStruct(call))
8910     {
8911         return call;
8912     }
8913
8914     call->gtRetClsHnd = retClsHnd;
8915
8916 #if FEATURE_MULTIREG_RET
8917     // Initialize Return type descriptor of call node
8918     ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8919     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8920 #endif // FEATURE_MULTIREG_RET
8921
8922 #ifdef UNIX_AMD64_ABI
8923
8924     // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
8925     assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8926
8927     // The return type will remain as the incoming struct type unless normalized to a
8928     // single eightbyte return type below.
8929     call->gtReturnType = call->gtType;
8930
8931     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8932     if (retRegCount != 0)
8933     {
8934         if (retRegCount == 1)
8935         {
8936             // See if the struct size is smaller than the return
8937             // type size...
8938             if (retTypeDesc->IsEnclosingType())
8939             {
8940                 // If we know for sure this call will remain a call,
8941                 // retype and return value via a suitable temp.
8942                 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8943                 {
8944                     call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8945                     return impAssignSmallStructTypeToVar(call, retClsHnd);
8946                 }
8947             }
8948             else
8949             {
8950                 // Return type is same size as struct, so we can
8951                 // simply retype the call.
8952                 call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8953             }
8954         }
8955         else
8956         {
8957             // must be a struct returned in two registers
8958             assert(retRegCount == 2);
8959
8960             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8961             {
8962                 // Force a call returning multi-reg struct to be always of the IR form
8963                 //   tmp = call
8964                 //
8965                 // No need to assign a multi-reg struct to a local var if:
8966                 //  - It is a tail call or
8967                 //  - The call is marked for in-lining later
8968                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8969             }
8970         }
8971     }
8972     else
8973     {
8974         // struct not returned in registers, i.e. returned via hidden retbuf arg.
8975         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8976     }
8977
8978 #else // not UNIX_AMD64_ABI
8979
8980     // Check for TYP_STRUCT type that wraps a primitive type
8981     // Such structs are returned using a single register
8982     // and we change the return type on those calls here.
8983     //
8984     structPassingKind howToReturnStruct;
8985     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8986
8987     if (howToReturnStruct == SPK_ByReference)
8988     {
8989         assert(returnType == TYP_UNKNOWN);
8990         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8991     }
8992     else
8993     {
8994         assert(returnType != TYP_UNKNOWN);
8995
8996         // See if the struct size is smaller than the return
8997         // type size...
8998         if (howToReturnStruct == SPK_EnclosingType)
8999         {
9000             // If we know for sure this call will remain a call,
9001             // retype and return value via a suitable temp.
9002             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
9003             {
9004                 call->gtReturnType = returnType;
9005                 return impAssignSmallStructTypeToVar(call, retClsHnd);
9006             }
9007         }
9008         else
9009         {
9010             // Return type is same size as struct, so we can
9011             // simply retype the call.
9012             call->gtReturnType = returnType;
9013         }
9014
9015         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
9016         if ((returnType == TYP_LONG) && (compLongUsed == false))
9017         {
9018             compLongUsed = true;
9019         }
9020         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
9021         {
9022             compFloatingPointUsed = true;
9023         }
9024
9025 #if FEATURE_MULTIREG_RET
9026         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
9027         assert(retRegCount != 0);
9028
9029         if (retRegCount >= 2)
9030         {
9031             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
9032             {
9033                 // Force a call returning multi-reg struct to be always of the IR form
9034                 //   tmp = call
9035                 //
9036                 // No need to assign a multi-reg struct to a local var if:
9037                 //  - It is a tail call or
9038                 //  - The call is marked for in-lining later
9039                 return impAssignMultiRegTypeToVar(call, retClsHnd);
9040             }
9041         }
9042 #endif // FEATURE_MULTIREG_RET
9043     }
9044
9045 #endif // not UNIX_AMD64_ABI
9046
9047     return call;
9048 }
9049
9050 /*****************************************************************************
9051    For struct return values, re-type the operand in the case where the ABI
9052    does not use a struct return buffer
9053    Note that this method is only called for !_TARGET_X86_
9054  */
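// Illustrative note (assumed): for example, a GT_RETURN of a struct local whose layout fits the
// native return register is handled by retyping the node (a struct-typed GT_LCL_VAR becomes a
// GT_LCL_FLD of the native return type), while multi-reg/HFA returns are instead assigned to a
// temp via impAssignMultiRegTypeToVar.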
9055
9056 GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
9057 {
9058     assert(varTypeIsStruct(info.compRetType));
9059     assert(info.compRetBuffArg == BAD_VAR_NUM);
9060
9061     JITDUMP("\nimpFixupStructReturnType: retyping\n");
9062     DISPTREE(op);
9063
9064 #if defined(_TARGET_XARCH_)
9065
9066 #ifdef UNIX_AMD64_ABI
9067     // No VarArgs for CoreCLR on x64 Unix
9068     assert(!info.compIsVarArgs);
9069
9070     // Is method returning a multi-reg struct?
9071     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
9072     {
9073         // In case of multi-reg struct return, we force IR to be one of the following:
9074         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
9075         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
9076
9077         if (op->gtOper == GT_LCL_VAR)
9078         {
9079             // Make sure that this struct stays in memory and doesn't get promoted.
9080             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
9081             lvaTable[lclNum].lvIsMultiRegRet = true;
9082
9083             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9084             op->gtFlags |= GTF_DONT_CSE;
9085
9086             return op;
9087         }
9088
9089         if (op->gtOper == GT_CALL)
9090         {
9091             return op;
9092         }
9093
9094         return impAssignMultiRegTypeToVar(op, retClsHnd);
9095     }
9096 #else  // !UNIX_AMD64_ABI
9097     assert(info.compRetNativeType != TYP_STRUCT);
9098 #endif // !UNIX_AMD64_ABI
9099
9100 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
9101
9102     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
9103     {
9104         if (op->gtOper == GT_LCL_VAR)
9105         {
9106             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
9107             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
9108             // Make sure this struct type stays as struct so that we can return it as an HFA
9109             lvaTable[lclNum].lvIsMultiRegRet = true;
9110
9111             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9112             op->gtFlags |= GTF_DONT_CSE;
9113
9114             return op;
9115         }
9116
9117         if (op->gtOper == GT_CALL)
9118         {
9119             if (op->gtCall.IsVarargs())
9120             {
9121                 // We cannot tail call because control needs to return to fixup the calling
9122                 // convention for result return.
9123                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
9124                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
9125             }
9126             else
9127             {
9128                 return op;
9129             }
9130         }
9131         return impAssignMultiRegTypeToVar(op, retClsHnd);
9132     }
9133
9134 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
9135
9136     // Is method returning a multi-reg struct?
9137     if (IsMultiRegReturnedType(retClsHnd))
9138     {
9139         if (op->gtOper == GT_LCL_VAR)
9140         {
9141             // This LCL_VAR stays as a TYP_STRUCT
9142             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
9143
9144             // Make sure this struct type is not struct promoted
9145             lvaTable[lclNum].lvIsMultiRegRet = true;
9146
9147             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9148             op->gtFlags |= GTF_DONT_CSE;
9149
9150             return op;
9151         }
9152
9153         if (op->gtOper == GT_CALL)
9154         {
9155             if (op->gtCall.IsVarargs())
9156             {
9157                 // We cannot tail call because control needs to return to fixup the calling
9158                 // convention for result return.
9159                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
9160                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
9161             }
9162             else
9163             {
9164                 return op;
9165             }
9166         }
9167         return impAssignMultiRegTypeToVar(op, retClsHnd);
9168     }
9169
9170 #endif //  FEATURE_MULTIREG_RET && FEATURE_HFA
9171
9172 REDO_RETURN_NODE:
9173     // adjust the type away from struct to integral
9174     // and no normalizing
9175     if (op->gtOper == GT_LCL_VAR)
9176     {
9177         // It is possible that we now have a lclVar of scalar type.
9178         // If so, don't transform it to GT_LCL_FLD.
9179         if (varTypeIsStruct(lvaTable[op->AsLclVar()->gtLclNum].lvType))
9180         {
9181             op->ChangeOper(GT_LCL_FLD);
9182         }
9183     }
9184     else if (op->gtOper == GT_OBJ)
9185     {
9186         GenTree* op1 = op->AsObj()->Addr();
9187
9188         // We will fold away OBJ/ADDR
9189         // except for OBJ/ADDR/INDEX
9190         //     as the array type influences the array element's offset
9191         //     Later in this method we change op->gtType to info.compRetNativeType
9192         //     This is not correct when op is a GT_INDEX as the starting offset
9193         //     for the array elements 'elemOffs' is different for an array of
9194         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
9195         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
9196         //
9197         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
9198         {
9199             // Change '*(&X)' to 'X' and see if we can do better
9200             op = op1->gtOp.gtOp1;
9201             goto REDO_RETURN_NODE;
9202         }
9203         op->gtObj.gtClass = NO_CLASS_HANDLE;
9204         op->ChangeOperUnchecked(GT_IND);
9205         op->gtFlags |= GTF_IND_TGTANYWHERE;
9206     }
9207     else if (op->gtOper == GT_CALL)
9208     {
9209         if (op->AsCall()->TreatAsHasRetBufArg(this))
9210         {
9211             // This must be one of those 'special' helpers that don't
9212             // really have a return buffer, but instead use it as a way
9213             // to keep the trees cleaner with fewer address-taken temps.
9214             //
9215             // Well now we have to materialize the return buffer as
9216             // an address-taken temp. Then we can return the temp.
9217             //
9218             // NOTE: this code assumes that since the call directly
9219             // feeds the return, then the call must be returning the
9220             // same structure/class/type.
9221             //
9222             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
9223
9224             // No need to spill anything as we're about to return.
9225             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
9226
9227             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
9228             // jump directly to a GT_LCL_FLD.
9229             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
9230             op->ChangeOper(GT_LCL_FLD);
9231         }
9232         else
9233         {
9234             // Don't change the gtType of the call just yet, it will get changed later.
9235             return op;
9236         }
9237     }
9238 #if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_ARM64_)
9239     else if ((op->gtOper == GT_HWIntrinsic) && varTypeIsSIMD(op->gtType))
9240     {
9241         // TODO-ARM64-FIXME Implement ARM64 ABI for Short Vectors properly
9242         // assert(op->gtType == info.compRetNativeType)
9243         if (op->gtType != info.compRetNativeType)
9244         {
9245             // Insert a register move to keep target type of SIMD intrinsic intact
9246             op = gtNewScalarHWIntrinsicNode(info.compRetNativeType, op, NI_ARM64_NONE_MOV);
9247         }
9248     }
9249 #endif
9250     else if (op->gtOper == GT_COMMA)
9251     {
9252         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
9253     }
9254
9255     op->gtType = info.compRetNativeType;
9256
9257     JITDUMP("\nimpFixupStructReturnType: result of retyping is\n");
9258     DISPTREE(op);
9259
9260     return op;
9261 }
9262
9263 /*****************************************************************************
9264    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
9265    finally-protected try. We find the finally blocks protecting the current
9266    offset (in order) by walking over the complete exception table and
9267    finding enclosing clauses. This assumes that the table is sorted.
9268    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
9269
9270    If we are leaving a catch handler, we need to attach the
9271    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
9272
9273    After this function, the BBJ_LEAVE block has been converted to a different type.
9274  */
9275
9276 #if !FEATURE_EH_FUNCLETS
9277
9278 void Compiler::impImportLeave(BasicBlock* block)
9279 {
9280 #ifdef DEBUG
9281     if (verbose)
9282     {
9283         printf("\nBefore import CEE_LEAVE:\n");
9284         fgDispBasicBlocks();
9285         fgDispHandlerTab();
9286     }
9287 #endif // DEBUG
9288
9289     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9290     unsigned    blkAddr         = block->bbCodeOffs;
9291     BasicBlock* leaveTarget     = block->bbJumpDest;
9292     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
9293
9294     // LEAVE clears the stack: spill side effects and set the stack depth to 0
9295
9296     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9297     verCurrentState.esStackDepth = 0;
9298
9299     assert(block->bbJumpKind == BBJ_LEAVE);
9300     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
9301
9302     BasicBlock* step         = DUMMY_INIT(NULL);
9303     unsigned    encFinallies = 0; // Number of enclosing finallies.
9304     GenTree*    endCatches   = NULL;
9305     GenTree*    endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
9306
9307     unsigned  XTnum;
9308     EHblkDsc* HBtab;
9309
9310     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9311     {
9312         // Grab the handler offsets
9313
9314         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9315         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9316         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9317         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9318
9319         /* Is this a catch-handler we are CEE_LEAVEing out of?
9320          * If so, we need to call CORINFO_HELP_ENDCATCH.
9321          */
9322
9323         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9324         {
9325             // Can't CEE_LEAVE out of a finally/fault handler
9326             if (HBtab->HasFinallyOrFaultHandler())
9327                 BADCODE("leave out of fault/finally block");
9328
9329             // Create the call to CORINFO_HELP_ENDCATCH
9330             GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
9331
9332             // Make a list of all the currently pending endCatches
9333             if (endCatches)
9334                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
9335             else
9336                 endCatches = endCatch;
9337
9338 #ifdef DEBUG
9339             if (verbose)
9340             {
9341                 printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to "
9342                        "CORINFO_HELP_ENDCATCH\n",
9343                        block->bbNum, XTnum);
9344             }
9345 #endif
9346         }
9347         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9348                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9349         {
9350             /* This is a finally-protected try we are jumping out of */
9351
9352             /* If there are any pending endCatches, and we have already
9353                jumped out of a finally-protected try, then the endCatches
9354                have to be put in a block in an outer try for async
9355                exceptions to work correctly.
9356                Else, just append them to the original block */
9357
9358             BasicBlock* callBlock;
9359
9360             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
9361
9362             if (encFinallies == 0)
9363             {
9364                 assert(step == DUMMY_INIT(NULL));
9365                 callBlock             = block;
9366                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9367
9368                 if (endCatches)
9369                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9370
9371 #ifdef DEBUG
9372                 if (verbose)
9373                 {
9374                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
9375                            "block %s\n",
9376                            callBlock->dspToString());
9377                 }
9378 #endif
9379             }
9380             else
9381             {
9382                 assert(step != DUMMY_INIT(NULL));
9383
9384                 /* Calling the finally block */
9385                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
9386                 assert(step->bbJumpKind == BBJ_ALWAYS);
9387                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9388                                               // finally in the chain)
9389                 step->bbJumpDest->bbRefs++;
9390
9391                 /* The new block will inherit this block's weight */
9392                 callBlock->setBBWeight(block->bbWeight);
9393                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
9394
9395 #ifdef DEBUG
9396                 if (verbose)
9397                 {
9398                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
9399                            callBlock->dspToString());
9400                 }
9401 #endif
9402
9403                 GenTree* lastStmt;
9404
9405                 if (endCatches)
9406                 {
9407                     lastStmt         = gtNewStmt(endCatches);
9408                     endLFin->gtNext  = lastStmt;
9409                     lastStmt->gtPrev = endLFin;
9410                 }
9411                 else
9412                 {
9413                     lastStmt = endLFin;
9414                 }
9415
9416                 // note that this sets BBF_IMPORTED on the block
9417                 impEndTreeList(callBlock, endLFin, lastStmt);
9418             }
9419
9420             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9421             /* The new block will inherit this block's weight */
9422             step->setBBWeight(block->bbWeight);
9423             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9424
9425 #ifdef DEBUG
9426             if (verbose)
9427             {
9428                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
9429                        step->dspToString());
9430             }
9431 #endif
9432
9433             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
9434             assert(finallyNesting <= compHndBBtabCount);
9435
9436             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9437             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
9438             endLFin               = gtNewStmt(endLFin);
9439             endCatches            = NULL;
9440
9441             encFinallies++;
9442
9443             invalidatePreds = true;
9444         }
9445     }
9446
9447     /* Append any remaining endCatches, if any */
9448
9449     assert(!encFinallies == !endLFin);
9450
9451     if (encFinallies == 0)
9452     {
9453         assert(step == DUMMY_INIT(NULL));
9454         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9455
9456         if (endCatches)
9457             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9458
9459 #ifdef DEBUG
9460         if (verbose)
9461         {
9462             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
9463                    "block %s\n",
9464                    block->dspToString());
9465         }
9466 #endif
9467     }
9468     else
9469     {
9470         // If leaveTarget is the start of another try block, we want to make sure that
9471         // we do not insert finalStep into that try block. Hence, we find the enclosing
9472         // try block.
9473         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
9474
9475         // Insert a new BB either in the try region indicated by tryIndex or
9476         // the handler region indicated by leaveTarget->bbHndIndex,
9477         // depending on which is the inner region.
9478         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
9479         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
9480         step->bbJumpDest = finalStep;
9481
9482         /* The new block will inherit this block's weight */
9483         finalStep->setBBWeight(block->bbWeight);
9484         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
9485
9486 #ifdef DEBUG
9487         if (verbose)
9488         {
9489             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
9490                    finalStep->dspToString());
9491         }
9492 #endif
9493
9494         GenTree* lastStmt;
9495
9496         if (endCatches)
9497         {
9498             lastStmt         = gtNewStmt(endCatches);
9499             endLFin->gtNext  = lastStmt;
9500             lastStmt->gtPrev = endLFin;
9501         }
9502         else
9503         {
9504             lastStmt = endLFin;
9505         }
9506
9507         impEndTreeList(finalStep, endLFin, lastStmt);
9508
9509         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9510
9511         // Queue up the jump target for importing
9512
9513         impImportBlockPending(leaveTarget);
9514
9515         invalidatePreds = true;
9516     }
9517
9518     if (invalidatePreds && fgComputePredsDone)
9519     {
9520         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9521         fgRemovePreds();
9522     }
9523
9524 #ifdef DEBUG
9525     fgVerifyHandlerTab();
9526
9527     if (verbose)
9528     {
9529         printf("\nAfter import CEE_LEAVE:\n");
9530         fgDispBasicBlocks();
9531         fgDispHandlerTab();
9532     }
9533 #endif // DEBUG
9534 }
9535
9536 #else // FEATURE_EH_FUNCLETS
9537
9538 void Compiler::impImportLeave(BasicBlock* block)
9539 {
9540 #ifdef DEBUG
9541     if (verbose)
9542     {
9543         printf("\nBefore import CEE_LEAVE in " FMT_BB " (targeting " FMT_BB "):\n", block->bbNum,
9544                block->bbJumpDest->bbNum);
9545         fgDispBasicBlocks();
9546         fgDispHandlerTab();
9547     }
9548 #endif // DEBUG
9549
9550     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9551     unsigned    blkAddr         = block->bbCodeOffs;
9552     BasicBlock* leaveTarget     = block->bbJumpDest;
9553     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
9554
9555     // LEAVE clears the evaluation stack: spill any side effects and set the stack depth to 0
9556
9557     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9558     verCurrentState.esStackDepth = 0;
9559
9560     assert(block->bbJumpKind == BBJ_LEAVE);
9561     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
9562
9563     BasicBlock* step = nullptr;
9564
9565     enum StepType
9566     {
9567         // No step type; step == nullptr.
9568         ST_None,
9569
9570         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
9571         // That is, is step->bbJumpDest where a finally will return to?
9572         ST_FinallyReturn,
9573
9574         // The step block is a catch return.
9575         ST_Catch,
9576
9577         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
9578         ST_Try
9579     };
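    // Illustrative walk-through (a sketch, not from the original comments): when a leave exits a
    // catch that is nested in a finally-protected try, which is itself nested in a catch-protected
    // try, stepType starts as ST_Catch (the BBJ_EHCATCHRET), becomes ST_FinallyReturn once the
    // BBJ_CALLFINALLY/BBJ_ALWAYS pair for the finally is created, and becomes ST_Try if an extra
    // step block has to be placed inside the enclosing catch-protected try.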
9580     StepType stepType = ST_None;
9581
9582     unsigned  XTnum;
9583     EHblkDsc* HBtab;
9584
9585     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9586     {
9587         // Grab the handler offsets
9588
9589         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9590         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9591         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9592         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9593
9594         /* Is this a catch-handler we are CEE_LEAVEing out of?
9595          */
9596
9597         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9598         {
9599             // Can't CEE_LEAVE out of a finally/fault handler
9600             if (HBtab->HasFinallyOrFaultHandler())
9601             {
9602                 BADCODE("leave out of fault/finally block");
9603             }
9604
9605             /* We are jumping out of a catch */
9606
9607             if (step == nullptr)
9608             {
9609                 step             = block;
9610                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
9611                 stepType         = ST_Catch;
9612
9613 #ifdef DEBUG
9614                 if (verbose)
9615                 {
9616                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB
9617                            " to BBJ_EHCATCHRET "
9618                            "block\n",
9619                            XTnum, step->bbNum);
9620                 }
9621 #endif
9622             }
9623             else
9624             {
9625                 BasicBlock* exitBlock;
9626
9627                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
9628                  * scope */
9629                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
9630
9631                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9632                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
9633                                               // exit) returns to this block
9634                 step->bbJumpDest->bbRefs++;
9635
9636 #if defined(_TARGET_ARM_)
9637                 if (stepType == ST_FinallyReturn)
9638                 {
9639                     assert(step->bbJumpKind == BBJ_ALWAYS);
9640                     // Mark the target of a finally return
9641                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9642                 }
9643 #endif // defined(_TARGET_ARM_)
9644
9645                 /* The new block will inherit this block's weight */
9646                 exitBlock->setBBWeight(block->bbWeight);
9647                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9648
9649                 /* This exit block is the new step */
9650                 step     = exitBlock;
9651                 stepType = ST_Catch;
9652
9653                 invalidatePreds = true;
9654
9655 #ifdef DEBUG
9656                 if (verbose)
9657                 {
9658                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n",
9659                            XTnum, exitBlock->bbNum);
9660                 }
9661 #endif
9662             }
9663         }
9664         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9665                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9666         {
9667             /* We are jumping out of a finally-protected try */
9668
9669             BasicBlock* callBlock;
9670
9671             if (step == nullptr)
9672             {
9673 #if FEATURE_EH_CALLFINALLY_THUNKS
9674
9675                 // Put the call to the finally in the enclosing region.
9676                 unsigned callFinallyTryIndex =
9677                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9678                 unsigned callFinallyHndIndex =
9679                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9680                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
9681
9682                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
9683                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
9684                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
9685                 // next block, and flow optimizations will remove it.
9686                 block->bbJumpKind = BBJ_ALWAYS;
9687                 block->bbJumpDest = callBlock;
9688                 block->bbJumpDest->bbRefs++;
9689
9690                 /* The new block will inherit this block's weight */
9691                 callBlock->setBBWeight(block->bbWeight);
9692                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9693
9694 #ifdef DEBUG
9695                 if (verbose)
9696                 {
9697                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
9698                            " to "
9699                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n",
9700                            XTnum, block->bbNum, callBlock->bbNum);
9701                 }
9702 #endif
9703
9704 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9705
9706                 callBlock             = block;
9707                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9708
9709 #ifdef DEBUG
9710                 if (verbose)
9711                 {
9712                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
9713                            " to "
9714                            "BBJ_CALLFINALLY block\n",
9715                            XTnum, callBlock->bbNum);
9716                 }
9717 #endif
9718
9719 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9720             }
9721             else
9722             {
9723                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
9724                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
9725                 // a 'finally'), or the step block is the return from a catch.
9726                 //
9727                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
9728                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
9729                 // automatically re-raise the exception, using the return address of the catch (that is, the target
9730                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
9731                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
9732                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
9733                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
9734                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
9735                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
9736                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
9737                 // stack walks.)
9738
9739                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9740
9741 #if FEATURE_EH_CALLFINALLY_THUNKS
9742                 if (step->bbJumpKind == BBJ_EHCATCHRET)
9743                 {
9744                     // Need to create another step block in the 'try' region that will actually branch to the
9745                     // call-to-finally thunk.
9746                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9747                     step->bbJumpDest  = step2;
9748                     step->bbJumpDest->bbRefs++;
9749                     step2->setBBWeight(block->bbWeight);
9750                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9751
9752 #ifdef DEBUG
9753                     if (verbose)
9754                     {
9755                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
9756                                "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n",
9757                                XTnum, step->bbNum, step2->bbNum);
9758                     }
9759 #endif
9760
9761                     step = step2;
9762                     assert(stepType == ST_Catch); // Leave it as catch type for now.
9763                 }
9764 #endif // FEATURE_EH_CALLFINALLY_THUNKS
9765
9766 #if FEATURE_EH_CALLFINALLY_THUNKS
9767                 unsigned callFinallyTryIndex =
9768                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9769                 unsigned callFinallyHndIndex =
9770                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9771 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
9772                 unsigned callFinallyTryIndex = XTnum + 1;
9773                 unsigned callFinallyHndIndex = 0; // don't care
9774 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9775
9776                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
9777                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9778                                               // finally in the chain)
9779                 step->bbJumpDest->bbRefs++;
9780
9781 #if defined(_TARGET_ARM_)
9782                 if (stepType == ST_FinallyReturn)
9783                 {
9784                     assert(step->bbJumpKind == BBJ_ALWAYS);
9785                     // Mark the target of a finally return
9786                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9787                 }
9788 #endif // defined(_TARGET_ARM_)
9789
9790                 /* The new block will inherit this block's weight */
9791                 callBlock->setBBWeight(block->bbWeight);
9792                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9793
9794 #ifdef DEBUG
9795                 if (verbose)
9796                 {
9797                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY "
9798                            "block " FMT_BB "\n",
9799                            XTnum, callBlock->bbNum);
9800                 }
9801 #endif
9802             }
9803
9804             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9805             stepType = ST_FinallyReturn;
9806
9807             /* The new block will inherit this block's weight */
9808             step->setBBWeight(block->bbWeight);
9809             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9810
9811 #ifdef DEBUG
9812             if (verbose)
9813             {
9814                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9815                        "block " FMT_BB "\n",
9816                        XTnum, step->bbNum);
9817             }
9818 #endif
9819
9820             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9821
9822             invalidatePreds = true;
9823         }
9824         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9825                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9826         {
9827             // We are jumping out of a catch-protected try.
9828             //
9829             // If we are returning from a call to a finally, then we must have a step block within a try
9830             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
9831             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9832             // and invoke the appropriate catch.
9833             //
9834             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9835             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9836             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9837             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9838             // address of the catch return as the new exception address. That is, the re-raised exception appears to
9839             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9840             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
9841             // For example:
9842             //
9843             // try {
9844             //    try {
9845             //       // something here raises ThreadAbortException
9846             //       LEAVE LABEL_1; // no need to stop at LABEL_2
9847             //    } catch (Exception) {
9848             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9849             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9850             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9851             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9852             //       // need to do this transformation if the current EH block is a try/catch that catches
9853             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
9854             //       // information, so currently we do it for all catch types.
9855             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
9856             //    }
9857             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9858             // } catch (ThreadAbortException) {
9859             // }
9860             // LABEL_1:
9861             //
9862             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
9863             // compiler.
9864
9865             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9866             {
9867                 BasicBlock* catchStep;
9868
9869                 assert(step);
9870
9871                 if (stepType == ST_FinallyReturn)
9872                 {
9873                     assert(step->bbJumpKind == BBJ_ALWAYS);
9874                 }
9875                 else
9876                 {
9877                     assert(stepType == ST_Catch);
9878                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
9879                 }
9880
9881                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9882                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9883                 step->bbJumpDest = catchStep;
9884                 step->bbJumpDest->bbRefs++;
9885
9886 #if defined(_TARGET_ARM_)
9887                 if (stepType == ST_FinallyReturn)
9888                 {
9889                     // Mark the target of a finally return
9890                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9891                 }
9892 #endif // defined(_TARGET_ARM_)
9893
9894                 /* The new block will inherit this block's weight */
9895                 catchStep->setBBWeight(block->bbWeight);
9896                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9897
9898 #ifdef DEBUG
9899                 if (verbose)
9900                 {
9901                     if (stepType == ST_FinallyReturn)
9902                     {
9903                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9904                                "BBJ_ALWAYS block " FMT_BB "\n",
9905                                XTnum, catchStep->bbNum);
9906                     }
9907                     else
9908                     {
9909                         assert(stepType == ST_Catch);
9910                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9911                                "BBJ_ALWAYS block " FMT_BB "\n",
9912                                XTnum, catchStep->bbNum);
9913                     }
9914                 }
9915 #endif // DEBUG
9916
9917                 /* This block is the new step */
9918                 step     = catchStep;
9919                 stepType = ST_Try;
9920
9921                 invalidatePreds = true;
9922             }
9923         }
9924     }
9925
9926     if (step == nullptr)
9927     {
9928         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9929
9930 #ifdef DEBUG
9931         if (verbose)
9932         {
9933             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9934                    "block " FMT_BB " to BBJ_ALWAYS\n",
9935                    block->bbNum);
9936         }
9937 #endif
9938     }
9939     else
9940     {
9941         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9942
9943 #if defined(_TARGET_ARM_)
9944         if (stepType == ST_FinallyReturn)
9945         {
9946             assert(step->bbJumpKind == BBJ_ALWAYS);
9947             // Mark the target of a finally return
9948             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9949         }
9950 #endif // defined(_TARGET_ARM_)
9951
9952 #ifdef DEBUG
9953         if (verbose)
9954         {
9955             printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum);
9956         }
9957 #endif
9958
9959         // Queue up the jump target for importing
9960
9961         impImportBlockPending(leaveTarget);
9962     }
9963
9964     if (invalidatePreds && fgComputePredsDone)
9965     {
9966         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9967         fgRemovePreds();
9968     }
9969
9970 #ifdef DEBUG
9971     fgVerifyHandlerTab();
9972
9973     if (verbose)
9974     {
9975         printf("\nAfter import CEE_LEAVE:\n");
9976         fgDispBasicBlocks();
9977         fgDispHandlerTab();
9978     }
9979 #endif // DEBUG
9980 }
9981
9982 #endif // FEATURE_EH_FUNCLETS
9983
9984 /*****************************************************************************/
9985 // This is called when reimporting a leave block. It resets the JumpKind,
9986 // JumpDest, and bbNext to the original values
9987 // JumpDest, and bbNext to their original values.
9988 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9989 {
9990 #if FEATURE_EH_FUNCLETS
9991     // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
9992     // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY.   Say for some reason we reimport B0,
9993     // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
9994     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
9995     // only predecessor are also considered orphans and attempted to be deleted.
9996     //
9997     //  try  {
9998     //     ....
9999     //     try
10000     //     {
10001     //         ....
10002     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
10003     //     } finally { }
10004     //  } finally { }
10005     //  OUTSIDE:
10006     //
10007     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to the block
10008     // that a finally would branch to (and such a block is marked as a finally target). Block B1 branches to the step block.
10009     // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed.  To
10010     // work around this, we will duplicate B0 (call it B0Dup) before resetting it. B0Dup is marked as BBJ_CALLFINALLY and
10011     // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
10012     // will be treated as pair and handled correctly.
10013     if (block->bbJumpKind == BBJ_CALLFINALLY)
10014     {
10015         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
10016         dupBlock->bbFlags    = block->bbFlags;
10017         dupBlock->bbJumpDest = block->bbJumpDest;
10018         dupBlock->copyEHRegion(block);
10019         dupBlock->bbCatchTyp = block->bbCatchTyp;
10020
10021         // Mark this block as
10022         //  a) not referenced by any other block to make sure that it gets deleted
10023         //  b) weight zero
10024         //  c) prevented from being imported
10025         //  d) as internal
10026         //  e) as rarely run
10027         dupBlock->bbRefs   = 0;
10028         dupBlock->bbWeight = 0;
10029         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
10030
10031         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
10032         // will be next to each other.
10033         fgInsertBBafter(block, dupBlock);
10034
10035 #ifdef DEBUG
10036         if (verbose)
10037         {
10038             printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum);
10039         }
10040 #endif
10041     }
10042 #endif // FEATURE_EH_FUNCLETS
10043
10044     block->bbJumpKind = BBJ_LEAVE;
10045     fgInitBBLookup();
10046     block->bbJumpDest = fgLookupBB(jmpAddr);
10047
10048     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported,
10049     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
10050     // reason we don't want to remove the block at this point is that if we call
10051     // fgInitBBLookup() again we will get it wrong, as the BBJ_ALWAYS block won't be
10052     // added and the linked-list length will be different from fgBBcount.
10053 }
10054
10055 /*****************************************************************************/
10056 // Get the first non-prefix opcode. Used for verification of valid combinations
10057 // of prefixes and actual opcodes.
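// For example (illustrative only): for the IL byte sequence "volatile. unaligned. 1 ldind.i4" this
// returns CEE_LDIND_I4, after skipping over both prefixes and the unaligned. operand; if the
// stream ends while only prefixes have been seen, CEE_ILLEGAL is returned.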
10058
10059 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
10060 {
10061     while (codeAddr < codeEndp)
10062     {
10063         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10064         codeAddr += sizeof(__int8);
10065
10066         if (opcode == CEE_PREFIX1)
10067         {
10068             if (codeAddr >= codeEndp)
10069             {
10070                 break;
10071             }
10072             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10073             codeAddr += sizeof(__int8);
10074         }
10075
10076         switch (opcode)
10077         {
10078             case CEE_UNALIGNED:
10079             case CEE_VOLATILE:
10080             case CEE_TAILCALL:
10081             case CEE_CONSTRAINED:
10082             case CEE_READONLY:
10083                 break;
10084             default:
10085                 return opcode;
10086         }
10087
10088         codeAddr += opcodeSizes[opcode];
10089     }
10090
10091     return CEE_ILLEGAL;
10092 }
10093
10094 /*****************************************************************************/
10095 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
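// For example (illustrative only): "volatile. ldsfld" is accepted, whereas "unaligned. ldsfld" and
// "volatile. add" fall through to the BADCODE call below, since ldsfld is only legal with the
// volatile. prefix and add is not a memory-access opcode at all.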
10096
10097 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
10098 {
10099     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
10100
10101     if (!(
10102             // The opcodes for all the ldind and stind instructions happen to be contiguous, except for stind.i.
10103             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
10104             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
10105             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
10106             // volatile. prefix is allowed with the ldsfld and stsfld
10107             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
10108     {
10109         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
10110     }
10111 }
10112
10113 /*****************************************************************************/
10114
10115 #ifdef DEBUG
10116
10117 #undef RETURN // undef contracts RETURN macro
10118
10119 enum controlFlow_t
10120 {
10121     NEXT,
10122     CALL,
10123     RETURN,
10124     THROW,
10125     BRANCH,
10126     COND_BRANCH,
10127     BREAK,
10128     PHI,
10129     META,
10130 };
10131
10132 const static controlFlow_t controlFlow[] = {
10133 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
10134 #include "opcode.def"
10135 #undef OPDEF
10136 };
10137
10138 #endif // DEBUG
10139
10140 /*****************************************************************************
10141  *  Determine the result type of an arithmetic operation
10142  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
10143  */
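// Rough summary of the byref cases handled below (an informal sketch, not exhaustive):
//
//     byref - byref          => native int (TYP_I_IMPL)
//     byref - [native] int   => byref
//     [native] int - byref   => native int (see the VSW 318822 note below)
//     byref + [native] int   => byref      (byref + byref is not allowed)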
10144 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
10145 {
10146     var_types type = TYP_UNDEF;
10147     GenTree*  op1  = *pOp1;
10148     GenTree*  op2  = *pOp2;
10149
10150     // Arithmetic operations are generally only allowed with
10151     // primitive types, but certain operations are allowed
10152     // with byrefs
10153
10154     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
10155     {
10156         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
10157         {
10158             // byref1-byref2 => gives a native int
10159             type = TYP_I_IMPL;
10160         }
10161         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
10162         {
10163             // [native] int - byref => gives a native int
10164
10165             //
10166             // The reason is that it is possible, in managed C++,
10167             // to have a tree like this:
10168             //
10169             //              -
10170             //             / \
10171             //            /   \
10172             //           /     \
10173             //          /       \
10174             // const(h) int     addr byref
10175             //
10176             // <BUGNUM> VSW 318822 </BUGNUM>
10177             //
10178             // So here we decide to make the resulting type to be a native int.
10179             CLANG_FORMAT_COMMENT_ANCHOR;
10180
10181 #ifdef _TARGET_64BIT_
10182             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10183             {
10184                 // insert an explicit upcast
10185                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10186             }
10187 #endif // _TARGET_64BIT_
10188
10189             type = TYP_I_IMPL;
10190         }
10191         else
10192         {
10193             // byref - [native] int => gives a byref
10194             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
10195
10196 #ifdef _TARGET_64BIT_
10197             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
10198             {
10199                 // insert an explicit upcast
10200                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10201             }
10202 #endif // _TARGET_64BIT_
10203
10204             type = TYP_BYREF;
10205         }
10206     }
10207     else if ((oper == GT_ADD) &&
10208              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
10209     {
10210         // byref + [native] int => gives a byref
10211         // (or)
10212         // [native] int + byref => gives a byref
10213
10214         // only one can be a byref : byref op byref not allowed
10215         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
10216         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
10217
10218 #ifdef _TARGET_64BIT_
10219         if (genActualType(op2->TypeGet()) == TYP_BYREF)
10220         {
10221             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10222             {
10223                 // insert an explicit upcast
10224                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10225             }
10226         }
10227         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
10228         {
10229             // insert an explicit upcast
10230             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10231         }
10232 #endif // _TARGET_64BIT_
10233
10234         type = TYP_BYREF;
10235     }
10236 #ifdef _TARGET_64BIT_
10237     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
10238     {
10239         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
10240
10241         // int + long => gives long
10242         // long + int => gives long
10243         // we get this because in the IL the long isn't Int64, it's just IntPtr
10244
10245         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10246         {
10247             // insert an explicit upcast
10248             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10249         }
10250         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
10251         {
10252             // insert an explicit upcast
10253             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10254         }
10255
10256         type = TYP_I_IMPL;
10257     }
10258 #else  // 32-bit TARGET
10259     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
10260     {
10261         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
10262
10263         // int + long => gives long
10264         // long + int => gives long
10265
10266         type = TYP_LONG;
10267     }
10268 #endif // _TARGET_64BIT_
10269     else
10270     {
10271         // int + int => gives an int
10272         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
10273
10274         assert((genActualType(op1->TypeGet()) == genActualType(op2->TypeGet())) ||
10275                 (varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType)));
10276
10277         type = genActualType(op1->gtType);
10278
10279         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
10280         // Otherwise, turn floats into doubles
10281         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
10282         {
10283             assert(genActualType(op2->gtType) == TYP_DOUBLE);
10284             type = TYP_DOUBLE;
10285         }
10286     }
10287
10288     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
10289     return type;
10290 }
10291
10292 //------------------------------------------------------------------------
10293 // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
10294 //
10295 // Arguments:
10296 //   op1 - value to cast
10297 //   pResolvedToken - resolved token for type to cast to
10298 //   isCastClass - true if this is a castclass, false if isinst
10299 //
10300 // Return Value:
10301 //   tree representing optimized cast, or null if no optimization possible
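//
// Example (an illustrative sketch; the class names are hypothetical):
//
//     object o = new Cat();      // the jit may see the exact type Cat here
//     bool   b = o is Dog;       // isinst Dog
//
//   If compareTypesForCast(Cat, Dog) reports MustNot and the source type is exact, the isinst is
//   folded to a null constant below (so b is false); a failing castclass is not folded, leaving
//   the runtime helper to throw InvalidCastException.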
10302
10303 GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
10304 {
10305     assert(op1->TypeGet() == TYP_REF);
10306
10307     // Don't optimize for minopts or debug codegen.
10308     if (opts.compDbgCode || opts.MinOpts())
10309     {
10310         return nullptr;
10311     }
10312
10313     // See what we know about the type of the object being cast.
10314     bool                 isExact   = false;
10315     bool                 isNonNull = false;
10316     CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
10317     GenTree*             optResult = nullptr;
10318
10319     if (fromClass != nullptr)
10320     {
10321         CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
10322         JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
10323                 isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
10324                 info.compCompHnd->getClassName(toClass));
10325
10326         // Perhaps we know if the cast will succeed or fail.
10327         TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
10328
10329         if (castResult == TypeCompareState::Must)
10330         {
10331             // Cast will succeed, result is simply op1.
10332             JITDUMP("Cast will succeed, optimizing to simply return input\n");
10333             return op1;
10334         }
10335         else if (castResult == TypeCompareState::MustNot)
10336         {
10337             // See if we can sharpen exactness by looking for final classes
10338             if (!isExact)
10339             {
10340                 DWORD flags     = info.compCompHnd->getClassAttribs(fromClass);
10341                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL |
10342                                   CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
10343                 isExact = ((flags & flagsMask) == CORINFO_FLG_FINAL);
10344             }
10345
10346             // Cast to exact type will fail. Handle case where we have
10347             // an exact type (that is, fromClass is not a subtype)
10348             // and we're not going to throw on failure.
10349             if (isExact && !isCastClass)
10350             {
10351                 JITDUMP("Cast will fail, optimizing to return null\n");
10352                 GenTree* result = gtNewIconNode(0, TYP_REF);
10353
10354                 // If the cast was fed by a box, we can remove that too.
10355                 if (op1->IsBoxedValue())
10356                 {
10357                     JITDUMP("Also removing upstream box\n");
10358                     gtTryRemoveBoxUpstreamEffects(op1);
10359                 }
10360
10361                 return result;
10362             }
10363             else if (isExact)
10364             {
10365                 JITDUMP("Not optimizing failing castclass (yet)\n");
10366             }
10367             else
10368             {
10369                 JITDUMP("Can't optimize since fromClass is inexact\n");
10370             }
10371         }
10372         else
10373         {
10374             JITDUMP("Result of cast unknown, must generate runtime test\n");
10375         }
10376     }
10377     else
10378     {
10379         JITDUMP("\nCan't optimize since fromClass is unknown\n");
10380     }
10381
10382     return nullptr;
10383 }
10384
10385 //------------------------------------------------------------------------
10386 // impCastClassOrIsInstToTree: build and import castclass/isinst
10387 //
10388 // Arguments:
10389 //   op1 - value to cast
10390 //   op2 - type handle for type to cast to
10391 //   pResolvedToken - resolved token from the cast operation
10392 //   isCastClass - true if this is castclass, false means isinst
10393 //   isCastClass - true if this is a castclass, false if this is an isinst
10394 // Return Value:
10395 //   Tree representing the cast
10396 //
10397 // Notes:
10398 //   May expand into a series of runtime checks or a helper call.
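//
//   Roughly, the inline expansion built below looks like this (an informal sketch):
//
//     tmp = op1;
//     result = (tmp == null) ? tmp
//            : (*(void**)tmp != op2) ? (isCastClass ? CORINFO_HELP_CHKCASTCLASS_SPECIAL(op2, tmp)
//                                                   : null)
//                                    : tmp;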
10399
10400 GenTree* Compiler::impCastClassOrIsInstToTree(GenTree*                op1,
10401                                               GenTree*                op2,
10402                                               CORINFO_RESOLVED_TOKEN* pResolvedToken,
10403                                               bool                    isCastClass)
10404 {
10405     assert(op1->TypeGet() == TYP_REF);
10406
10407     // Optimistically assume the jit should expand this as an inline test
10408     bool shouldExpandInline = true;
10409
10410     // Profitability check.
10411     //
10412     // Don't bother with inline expansion when jit is trying to
10413     // generate code quickly, or the cast is in code that won't run very
10414     // often, or the method already is pretty big.
10415     if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
10416     {
10417         // not worth the code expansion if jitting fast or in a rarely run block
10418         shouldExpandInline = false;
10419     }
10420     else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
10421     {
10422         // not worth creating an untracked local variable
10423         shouldExpandInline = false;
10424     }
10425
10426     // Pessimistically assume the jit cannot expand this as an inline test
10427     bool                  canExpandInline = false;
10428     const CorInfoHelpFunc helper          = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
10429
10430     // Legality check.
10431     //
10432     // Not all castclass/isinst operations can be inline expanded.
10433     // Check legality only if an inline expansion is desirable.
10434     if (shouldExpandInline)
10435     {
10436         if (isCastClass)
10437         {
10438             // Jit can only inline expand the normal CHKCASTCLASS helper.
10439             canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
10440         }
10441         else
10442         {
10443             if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
10444             {
10445                 // Check the class attributes.
10446                 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
10447
10448                 // If the class is final and is not marshal byref or
10449                 // contextful, the jit can expand the IsInst check inline.
10450                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
10451                 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
10452             }
10453         }
10454     }
10455
10456     const bool expandInline = canExpandInline && shouldExpandInline;
10457
10458     if (!expandInline)
10459     {
10460         JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
10461                 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
10462
10463         // If we CSE this class handle we prevent assertionProp from making SubType assertions
10464         // so instead we force the CSE logic to not consider CSE-ing this class handle.
10465         //
10466         op2->gtFlags |= GTF_DONT_CSE;
10467
10468         return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
10469     }
10470
10471     JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
10472
10473     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
10474
10475     GenTree* temp;
10476     GenTree* condMT;
10477     //
10478     // expand the methodtable match:
10479     //
10480     //  condMT ==>   GT_NE
10481     //               /    \
10482     //           GT_IND   op2 (typically CNS_INT)
10483     //              |
10484     //           op1Copy
10485     //
10486
10487     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
10488     //
10489     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
10490     //
10491     // op1 is now known to be a non-complex tree
10492     // thus we can use gtClone(op1) from now on
10493     //
10494
10495     GenTree* op2Var = op2;
10496     if (isCastClass)
10497     {
10498         op2Var                                                  = fgInsertCommaFormTemp(&op2);
10499         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
10500     }
10501     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
10502     temp->gtFlags |= GTF_EXCEPT;
10503     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
10504
10505     GenTree* condNull;
10506     //
10507     // expand the null check:
10508     //
10509     //  condNull ==>   GT_EQ
10510     //                 /    \
10511     //             op1Copy CNS_INT
10512     //                      null
10513     //
10514     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
10515
10516     //
10517     // expand the true and false trees for the condMT
10518     //
10519     GenTree* condFalse = gtClone(op1);
10520     GenTree* condTrue;
10521     if (isCastClass)
10522     {
10523         //
10524         // use the special helper that skips the cases checked by our inlined cast
10525         //
10526         const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
10527
10528         condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
10529     }
10530     else
10531     {
10532         condTrue = gtNewIconNode(0, TYP_REF);
10533     }
10534
10535 #define USE_QMARK_TREES
10536
10537 #ifdef USE_QMARK_TREES
10538     GenTree* qmarkMT;
10539     //
10540     // Generate first QMARK - COLON tree
10541     //
10542     //  qmarkMT ==>   GT_QMARK
10543     //                 /     \
10544     //            condMT   GT_COLON
10545     //                      /     \
10546     //                condFalse  condTrue
10547     //
10548     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
10549     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
10550
10551     GenTree* qmarkNull;
10552     //
10553     // Generate second QMARK - COLON tree
10554     //
10555     //  qmarkNull ==>  GT_QMARK
10556     //                 /     \
10557     //           condNull  GT_COLON
10558     //                      /     \
10559     //                qmarkMT   op1Copy
10560     //
10561     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
10562     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
10563     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
10564
10565     // Make QMark node a top level node by spilling it.
10566     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
10567     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
10568
10569     // TODO-CQ: Is it possible op1 has a better type?
10570     //
10571     // See also gtGetHelperCallClassHandle where we make the same
10572     // determination for the helper call variants.
10573     LclVarDsc* lclDsc = lvaGetDesc(tmp);
10574     assert(lclDsc->lvSingleDef == 0);
10575     lclDsc->lvSingleDef = 1;
10576     JITDUMP("Marked V%02u as a single def temp\n", tmp);
10577     lvaSetClass(tmp, pResolvedToken->hClass);
10578     return gtNewLclvNode(tmp, TYP_REF);
10579 #endif
10580 }
10581
10582 #ifndef DEBUG
10583 #define assertImp(cond) ((void)0)
10584 #else
10585 #define assertImp(cond)                                                                                                \
10586     do                                                                                                                 \
10587     {                                                                                                                  \
10588         if (!(cond))                                                                                                   \
10589         {                                                                                                              \
10590             const int cchAssertImpBuf = 600;                                                                           \
10591             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
10592             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
10593                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
10594                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
10595                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
10596             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
10597         }                                                                                                              \
10598     } while (0)
10599 #endif // DEBUG
10600
10601 #ifdef _PREFAST_
10602 #pragma warning(push)
10603 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
10604 #endif
10605 /*****************************************************************************
10606  *  Import the instr for the given basic block
10607  */
10608 void Compiler::impImportBlockCode(BasicBlock* block)
10609 {
10610 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
10611
10612 #ifdef DEBUG
10613
10614     if (verbose)
10615     {
10616         printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
10617     }
10618 #endif
10619
10620     unsigned  nxtStmtIndex = impInitBlockLineInfo();
10621     IL_OFFSET nxtStmtOffs;
10622
10623     GenTree*                     arrayNodeFrom;
10624     GenTree*                     arrayNodeTo;
10625     GenTree*                     arrayNodeToIndex;
10626     CorInfoHelpFunc              helper;
10627     CorInfoIsAccessAllowedResult accessAllowedResult;
10628     CORINFO_HELPER_DESC          calloutHelper;
10629     const BYTE*                  lastLoadToken = nullptr;
10630
10631     // reject cyclic constraints
10632     if (tiVerificationNeeded)
10633     {
10634         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
10635         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
10636     }
10637
10638     /* Get the tree list started */
10639
10640     impBeginTreeList();
10641
10642     /* Walk the opcodes that comprise the basic block */
10643
10644     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
10645     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
10646
10647     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
10648     IL_OFFSET lastSpillOffs = opcodeOffs;
10649
10650     signed jmpDist;
10651
10652     /* remember the start of the delegate creation sequence (used for verification) */
10653     const BYTE* delegateCreateStart = nullptr;
10654
10655     int  prefixFlags = 0;
10656     bool explicitTailCall, constraintCall, readonlyCall;
10657
10658     typeInfo tiRetVal;
10659
10660     unsigned numArgs = info.compArgsCount;
10661
10662     /* Now process all the opcodes in the block */
10663
10664     var_types callTyp    = TYP_COUNT;
10665     OPCODE    prevOpcode = CEE_ILLEGAL;
10666
10667     if (block->bbCatchTyp)
10668     {
10669         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
10670         {
10671             impCurStmtOffsSet(block->bbCodeOffs);
10672         }
10673
10674         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
10675         // to a temp. This is a trade-off for code simplicity.
10676         impSpillSpecialSideEff();
10677     }
10678
10679     while (codeAddr < codeEndp)
10680     {
10681         bool                   usingReadyToRunHelper = false;
10682         CORINFO_RESOLVED_TOKEN resolvedToken;
10683         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
10684         CORINFO_CALL_INFO      callInfo;
10685         CORINFO_FIELD_INFO     fieldInfo;
10686
10687         tiRetVal = typeInfo(); // Default type info
10688
10689         //---------------------------------------------------------------------
10690
10691         /* We need to restrict the max tree depth as many of the Compiler
10692            functions are recursive. We do this by spilling the stack */
10693
10694         if (verCurrentState.esStackDepth)
10695         {
10696             /* Has it been a while since the stack was last empty (which
10697                would guarantee that the tree depth isn't accumulating)? */
10698
10699             if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
10700             {
10701                 impSpillStackEnsure();
10702                 lastSpillOffs = opcodeOffs;
10703             }
10704         }
10705         else
10706         {
10707             lastSpillOffs   = opcodeOffs;
10708             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
10709         }
10710
10711         /* Compute the current instr offset */
10712
10713         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10714
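        // Debug info bookkeeping: in DEBUG builds the block below always runs; in release builds it
        // is only entered when debug info is being generated (opts.compDbgInfo).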
10715 #ifndef DEBUG
10716         if (opts.compDbgInfo)
10717 #endif
10718         {
10719             if (!compIsForInlining())
10720             {
10721                 nxtStmtOffs =
10722                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
10723
10724                 /* Have we reached the next stmt boundary ? */
10725
10726                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
10727                 {
10728                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
10729
10730                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
10731                     {
10732                         /* We need to provide accurate IP-mapping at this point.
10733                            So spill anything on the stack so that it will form
10734                            gtStmts with the correct stmt offset noted */
10735
10736                         impSpillStackEnsure(true);
10737                     }
10738
10739                     // Has impCurStmtOffs been reported in any tree?
10740
10741                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
10742                     {
10743                         GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
10744                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10745
10746                         assert(impCurStmtOffs == BAD_IL_OFFSET);
10747                     }
10748
10749                     if (impCurStmtOffs == BAD_IL_OFFSET)
10750                     {
10751                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
10752                            If opcodeOffs has gone past nxtStmtIndex, catch up */
10753
10754                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
10755                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
10756                         {
10757                             nxtStmtIndex++;
10758                         }
10759
10760                         /* Go to the new stmt */
10761
10762                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
10763
10764                         /* Update the stmt boundary index */
10765
10766                         nxtStmtIndex++;
10767                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
10768
10769                         /* Are there any more line# entries after this one? */
10770
10771                         if (nxtStmtIndex < info.compStmtOffsetsCount)
10772                         {
10773                             /* Remember where the next line# starts */
10774
10775                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
10776                         }
10777                         else
10778                         {
10779                             /* No more line# entries */
10780
10781                             nxtStmtOffs = BAD_IL_OFFSET;
10782                         }
10783                     }
10784                 }
10785                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
10786                          (verCurrentState.esStackDepth == 0))
10787                 {
10788                     /* At stack-empty locations, we have already added the tree to
10789                        the stmt list with the last offset. We just need to update
10790                        impCurStmtOffs
10791                      */
10792
10793                     impCurStmtOffsSet(opcodeOffs);
10794                 }
10795                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
10796                          impOpcodeIsCallSiteBoundary(prevOpcode))
10797                 {
10798                     /* Make sure we have a type cached */
10799                     assert(callTyp != TYP_COUNT);
10800
10801                     if (callTyp == TYP_VOID)
10802                     {
10803                         impCurStmtOffsSet(opcodeOffs);
10804                     }
10805                     else if (opts.compDbgCode)
10806                     {
10807                         impSpillStackEnsure(true);
10808                         impCurStmtOffsSet(opcodeOffs);
10809                     }
10810                 }
10811                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
10812                 {
10813                     if (opts.compDbgCode)
10814                     {
10815                         impSpillStackEnsure(true);
10816                     }
10817
10818                     impCurStmtOffsSet(opcodeOffs);
10819                 }
10820
10821                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
10822                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
10823             }
10824         }
10825
10826         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
10827         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
10828         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
10829
10830         var_types       lclTyp, ovflType = TYP_UNKNOWN;
10831         GenTree*        op1           = DUMMY_INIT(NULL);
10832         GenTree*        op2           = DUMMY_INIT(NULL);
10833         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
10834         GenTree*        newObjThisPtr = DUMMY_INIT(NULL);
10835         bool            uns           = DUMMY_INIT(false);
10836         bool            isLocal       = false;
10837
10838         /* Get the next opcode and the size of its parameters */
10839
10840         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10841         codeAddr += sizeof(__int8);
10842
10843 #ifdef DEBUG
10844         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10845         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
10846 #endif
10847
10848     DECODE_OPCODE:
10849
10850         // Return if any previous code has caused inline to fail.
10851         if (compDonotInline())
10852         {
10853             return;
10854         }
10855
10856         /* Get the size of additional parameters */
10857
10858         signed int sz = opcodeSizes[opcode];
10859
10860 #ifdef DEBUG
10861         clsHnd  = NO_CLASS_HANDLE;
10862         lclTyp  = TYP_COUNT;
10863         callTyp = TYP_COUNT;
10864
10865         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10866         impCurOpcName = opcodeNames[opcode];
10867
10868         if (verbose && (opcode != CEE_PREFIX1))
10869         {
10870             printf("%s", impCurOpcName);
10871         }
10872
10873         /* Use assertImp() to display the opcode */
10874
10875         op1 = op2 = nullptr;
10876 #endif
10877
10878         /* See what kind of an opcode we have, then */
10879
10880         unsigned mflags   = 0;
10881         unsigned clsFlags = 0;
10882
10883         switch (opcode)
10884         {
10885             unsigned  lclNum;
10886             var_types type;
10887
10888             GenTree*   op3;
10889             genTreeOps oper;
10890             unsigned   size;
10891
10892             int val;
10893
10894             CORINFO_SIG_INFO     sig;
10895             IL_OFFSET            jmpAddr;
10896             bool                 ovfl, unordered, callNode;
10897             bool                 ldstruct;
10898             CORINFO_CLASS_HANDLE tokenType;
10899
10900             union {
10901                 int     intVal;
10902                 float   fltVal;
10903                 __int64 lngVal;
10904                 double  dblVal;
10905             } cval;
10906
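            // Two-byte opcodes are encoded as the 0xFE prefix byte followed by a second byte;
            // fold the second byte into the extended opcode range (+256) and decode again.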
10907             case CEE_PREFIX1:
10908                 opcode     = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10909                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10910                 codeAddr += sizeof(__int8);
10911                 goto DECODE_OPCODE;
10912
10913             SPILL_APPEND:
10914
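                // SPILL_APPEND appends 'op1' as a new statement after spilling anything on the
                // evaluation stack that it could interfere with (CHECK_SPILL_ALL); the APPEND label
                // below appends without that spilling (CHECK_SPILL_NONE).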
10915                 // We need to call impSpillLclRefs() for a struct type lclVar.
10916                 // This is done for non-block assignments in the handling of stloc.
10917                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10918                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10919                 {
10920                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10921                 }
10922
10923                 /* Append 'op1' to the list of statements */
10924                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10925                 goto DONE_APPEND;
10926
10927             APPEND:
10928
10929                 /* Append 'op1' to the list of statements */
10930
10931                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10932                 goto DONE_APPEND;
10933
10934             DONE_APPEND:
10935
10936 #ifdef DEBUG
10937                 // Remember at which BC offset the tree was finished
10938                 impNoteLastILoffs();
10939 #endif
10940                 break;
10941
10942             case CEE_LDNULL:
10943                 impPushNullObjRefOnStack();
10944                 break;
10945
10946             case CEE_LDC_I4_M1:
10947             case CEE_LDC_I4_0:
10948             case CEE_LDC_I4_1:
10949             case CEE_LDC_I4_2:
10950             case CEE_LDC_I4_3:
10951             case CEE_LDC_I4_4:
10952             case CEE_LDC_I4_5:
10953             case CEE_LDC_I4_6:
10954             case CEE_LDC_I4_7:
10955             case CEE_LDC_I4_8:
10956                 cval.intVal = (opcode - CEE_LDC_I4_0);
10957                 assert(-1 <= cval.intVal && cval.intVal <= 8);
10958                 goto PUSH_I4CON;
10959
10960             case CEE_LDC_I4_S:
10961                 cval.intVal = getI1LittleEndian(codeAddr);
10962                 goto PUSH_I4CON;
10963             case CEE_LDC_I4:
10964                 cval.intVal = getI4LittleEndian(codeAddr);
10965                 goto PUSH_I4CON;
10966             PUSH_I4CON:
10967                 JITDUMP(" %d", cval.intVal);
10968                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10969                 break;
10970
10971             case CEE_LDC_I8:
10972                 cval.lngVal = getI8LittleEndian(codeAddr);
10973                 JITDUMP(" 0x%016llx", cval.lngVal);
10974                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10975                 break;
10976
10977             case CEE_LDC_R8:
10978                 cval.dblVal = getR8LittleEndian(codeAddr);
10979                 JITDUMP(" %#.17g", cval.dblVal);
10980                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10981                 break;
10982
10983             case CEE_LDC_R4:
10984                 cval.dblVal = getR4LittleEndian(codeAddr);
10985                 JITDUMP(" %#.17g", cval.dblVal);
10986                 {
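                    // On the IL evaluation stack float32 and float64 share the single floating-point
                    // type 'F', so the verification type is TI_DOUBLE even though the node is TYP_FLOAT.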
10987                     GenTree* cnsOp = gtNewDconNode(cval.dblVal);
10988                     cnsOp->gtType  = TYP_FLOAT;
10989                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10990                 }
10991                 break;
10992
10993             case CEE_LDSTR:
10994
10995                 if (compIsForInlining())
10996                 {
10997                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10998                     {
10999                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
11000                         return;
11001                     }
11002                 }
11003
11004                 val = getU4LittleEndian(codeAddr);
11005                 JITDUMP(" %08X", val);
11006                 if (tiVerificationNeeded)
11007                 {
11008                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
11009                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
11010                 }
11011                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
11012
11013                 break;
11014
11015             case CEE_LDARG:
11016                 lclNum = getU2LittleEndian(codeAddr);
11017                 JITDUMP(" %u", lclNum);
11018                 impLoadArg(lclNum, opcodeOffs + sz + 1);
11019                 break;
11020
11021             case CEE_LDARG_S:
11022                 lclNum = getU1LittleEndian(codeAddr);
11023                 JITDUMP(" %u", lclNum);
11024                 impLoadArg(lclNum, opcodeOffs + sz + 1);
11025                 break;
11026
11027             case CEE_LDARG_0:
11028             case CEE_LDARG_1:
11029             case CEE_LDARG_2:
11030             case CEE_LDARG_3:
11031                 lclNum = (opcode - CEE_LDARG_0);
11032                 assert(lclNum >= 0 && lclNum < 4);
11033                 impLoadArg(lclNum, opcodeOffs + sz + 1);
11034                 break;
11035
11036             case CEE_LDLOC:
11037                 lclNum = getU2LittleEndian(codeAddr);
11038                 JITDUMP(" %u", lclNum);
11039                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11040                 break;
11041
11042             case CEE_LDLOC_S:
11043                 lclNum = getU1LittleEndian(codeAddr);
11044                 JITDUMP(" %u", lclNum);
11045                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11046                 break;
11047
11048             case CEE_LDLOC_0:
11049             case CEE_LDLOC_1:
11050             case CEE_LDLOC_2:
11051             case CEE_LDLOC_3:
11052                 lclNum = (opcode - CEE_LDLOC_0);
11053                 assert(lclNum >= 0 && lclNum < 4);
11054                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11055                 break;
11056
11057             case CEE_STARG:
11058                 lclNum = getU2LittleEndian(codeAddr);
11059                 goto STARG;
11060
11061             case CEE_STARG_S:
11062                 lclNum = getU1LittleEndian(codeAddr);
11063             STARG:
11064                 JITDUMP(" %u", lclNum);
11065
11066                 if (tiVerificationNeeded)
11067                 {
11068                     Verify(lclNum < info.compILargsCount, "bad arg num");
11069                 }
11070
11071                 if (compIsForInlining())
11072                 {
11073                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
11074                     noway_assert(op1->gtOper == GT_LCL_VAR);
11075                     lclNum = op1->AsLclVar()->gtLclNum;
11076
11077                     goto VAR_ST_VALID;
11078                 }
11079
11080                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
11081                 assertImp(lclNum < numArgs);
11082
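                // When the method writes to 'this' (or takes its address), the JIT works on a copy of
                // the incoming 'this' pointer; lvaArg0Var refers to that copy (and equals
                // info.compThisArg when no copy is needed).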
11083                 if (lclNum == info.compThisArg)
11084                 {
11085                     lclNum = lvaArg0Var;
11086                 }
11087
11088                 // We should have seen this arg write in the prescan
11089                 assert(lvaTable[lclNum].lvHasILStoreOp);
11090
11091                 if (tiVerificationNeeded)
11092                 {
11093                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
11094                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
11095                            "type mismatch");
11096
11097                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
11098                     {
11099                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
11100                     }
11101                 }
11102
11103                 goto VAR_ST;
11104
11105             case CEE_STLOC:
11106                 lclNum  = getU2LittleEndian(codeAddr);
11107                 isLocal = true;
11108                 JITDUMP(" %u", lclNum);
11109                 goto LOC_ST;
11110
11111             case CEE_STLOC_S:
11112                 lclNum  = getU1LittleEndian(codeAddr);
11113                 isLocal = true;
11114                 JITDUMP(" %u", lclNum);
11115                 goto LOC_ST;
11116
11117             case CEE_STLOC_0:
11118             case CEE_STLOC_1:
11119             case CEE_STLOC_2:
11120             case CEE_STLOC_3:
11121                 isLocal = true;
11122                 lclNum  = (opcode - CEE_STLOC_0);
11123                 assert(lclNum >= 0 && lclNum < 4);
11124
11125             LOC_ST:
11126                 if (tiVerificationNeeded)
11127                 {
11128                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
11129                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
11130                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
11131                            "type mismatch");
11132                 }
11133
11134                 if (compIsForInlining())
11135                 {
11136                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
11137
11138                     /* Have we allocated a temp for this local? */
11139
11140                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
11141
11142                     goto _PopValue;
11143                 }
11144
11145                 lclNum += numArgs;
11146
11147             VAR_ST:
11148
11149                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
11150                 {
11151                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11152                     BADCODE("Bad IL");
11153                 }
11154
11155             VAR_ST_VALID:
11156
11157                 /* if it is a struct assignment, make certain we don't overflow the buffer */
11158                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
11159
11160                 if (lvaTable[lclNum].lvNormalizeOnLoad())
11161                 {
11162                     lclTyp = lvaGetRealType(lclNum);
11163                 }
11164                 else
11165                 {
11166                     lclTyp = lvaGetActualType(lclNum);
11167                 }
11168
11169             _PopValue:
11170                 /* Pop the value being assigned */
11171
11172                 {
11173                     StackEntry se = impPopStack();
11174                     clsHnd        = se.seTypeInfo.GetClassHandle();
11175                     op1           = se.val;
11176                     tiRetVal      = se.seTypeInfo;
11177                 }
11178
11179 #ifdef FEATURE_SIMD
11180                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
11181                 {
11182                     assert(op1->TypeGet() == TYP_STRUCT);
11183                     op1->gtType = lclTyp;
11184                 }
11185 #endif // FEATURE_SIMD
11186
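                // Insert an implicit int32 <-> native int conversion if the IL relies on one for this store.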
11187                 op1 = impImplicitIorI4Cast(op1, lclTyp);
11188
11189 #ifdef _TARGET_64BIT_
11190                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
11191                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
11192                 {
11193                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11194                     op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
11195                 }
11196 #endif // _TARGET_64BIT_
11197
11198                 // We had better assign it a value of the correct type
11199                 assertImp(
11200                     genActualType(lclTyp) == genActualType(op1->gtType) ||
11201                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
11202                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
11203                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
11204                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
11205                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
11206
11207                 /* If op1 is "&var" then its type is the transient "*" and it can
11208                    be used either as TYP_BYREF or TYP_I_IMPL */
11209
11210                 if (op1->IsVarAddr())
11211                 {
11212                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
11213
11214                     /* When "&var" is created, we assume it is a byref. If it is
11215                        being assigned to a TYP_I_IMPL var, change the type to
11216                        prevent unnecessary GC info */
11217
11218                     if (genActualType(lclTyp) == TYP_I_IMPL)
11219                     {
11220                         op1->gtType = TYP_I_IMPL;
11221                     }
11222                 }
11223
11224                 // If this is a local and the local is a ref type, see
11225                 // if we can improve type information based on the
11226                 // value being assigned.
11227                 if (isLocal && (lclTyp == TYP_REF))
11228                 {
11229                     // We should have seen a stloc in our IL prescan.
11230                     assert(lvaTable[lclNum].lvHasILStoreOp);
11231
11232                     // Is there just one place this local is defined?
11233                     const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef;
11234
11235                     // Conservative check that there is just one
11236                     // definition that reaches this store.
11237                     const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
11238
11239                     if (isSingleDefLocal && hasSingleReachingDef)
11240                     {
11241                         lvaUpdateClass(lclNum, op1, clsHnd);
11242                     }
11243                 }
11244
11245                 /* Filter out simple assignments to itself */
11246
11247                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
11248                 {
11249                     if (opts.compDbgCode)
11250                     {
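                        // For debuggable code, append a nop so a statement is still generated for
                        // this IL offset.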
11251                         op1 = gtNewNothingNode();
11252                         goto SPILL_APPEND;
11253                     }
11254                     else
11255                     {
11256                         break;
11257                     }
11258                 }
11259
11260                 /* Create the assignment node */
11261
11262                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
11263
11264                 /* If the local is aliased or pinned, we need to spill calls and
11265                    indirections from the stack. */
11266
11267                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
11268                     (verCurrentState.esStackDepth > 0))
11269                 {
11270                     impSpillSideEffects(false,
11271                                         (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
11272                 }
11273
11274                 /* Spill any refs to the local from the stack */
11275
11276                 impSpillLclRefs(lclNum);
11277
11278                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
11279                 // We insert a cast to the dest 'op2' type
11280                 //
11281                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
11282                     varTypeIsFloating(op2->gtType))
11283                 {
11284                     op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
11285                 }
11286
11287                 if (varTypeIsStruct(lclTyp))
11288                 {
11289                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
11290                 }
11291                 else
11292                 {
11293                     // The code generator generates GC tracking information
11294                     // based on the RHS of the assignment.  Later the LHS (which is
11295                     // is a BYREF) gets used and the emitter checks that that variable
11296                     // is being tracked.  It is not (since the RHS was an int and did
11297                     // not need tracking).  To keep this assert happy, we change the RHS
11298                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
11299                     {
11300                         op1->gtType = TYP_BYREF;
11301                     }
11302                     op1 = gtNewAssignNode(op2, op1);
11303                 }
11304
11305                 goto SPILL_APPEND;
11306
11307             case CEE_LDLOCA:
11308                 lclNum = getU2LittleEndian(codeAddr);
11309                 goto LDLOCA;
11310
11311             case CEE_LDLOCA_S:
11312                 lclNum = getU1LittleEndian(codeAddr);
11313             LDLOCA:
11314                 JITDUMP(" %u", lclNum);
11315                 if (tiVerificationNeeded)
11316                 {
11317                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
11318                     Verify(info.compInitMem, "initLocals not set");
11319                 }
11320
11321                 if (compIsForInlining())
11322                 {
11323                     // Get the local type
11324                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
11325
11326                     /* Have we allocated a temp for this local? */
11327
11328                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
11329
11330                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
11331
11332                     goto _PUSH_ADRVAR;
11333                 }
11334
11335                 lclNum += numArgs;
11336                 assertImp(lclNum < info.compLocalsCount);
11337                 goto ADRVAR;
11338
11339             case CEE_LDARGA:
11340                 lclNum = getU2LittleEndian(codeAddr);
11341                 goto LDARGA;
11342
11343             case CEE_LDARGA_S:
11344                 lclNum = getU1LittleEndian(codeAddr);
11345             LDARGA:
11346                 JITDUMP(" %u", lclNum);
11347                 Verify(lclNum < info.compILargsCount, "bad arg num");
11348
11349                 if (compIsForInlining())
11350                 {
11351                     // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
11352                     // followed by a ldfld to load the field.
11353
11354                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
11355                     if (op1->gtOper != GT_LCL_VAR)
11356                     {
11357                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
11358                         return;
11359                     }
11360
11361                     assert(op1->gtOper == GT_LCL_VAR);
11362
11363                     goto _PUSH_ADRVAR;
11364                 }
11365
11366                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
11367                 assertImp(lclNum < numArgs);
11368
11369                 if (lclNum == info.compThisArg)
11370                 {
11371                     lclNum = lvaArg0Var;
11372                 }
11373
11374                 goto ADRVAR;
11375
11376             ADRVAR:
11377
11378                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
11379
11380             _PUSH_ADRVAR:
11381                 assert(op1->gtOper == GT_LCL_VAR);
11382
11383                 /* Note that this is supposed to create the transient type "*"
11384                    which may be used as a TYP_I_IMPL. However we catch places
11385                    where it is used as a TYP_I_IMPL and change the node if needed.
11386                    Thus we are pessimistic and may report byrefs in the GC info
11387                    where it was not absolutely needed, but it is safer this way.
11388                  */
11389                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
11390
11391                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
11392                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
11393
11394                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
11395                 if (tiVerificationNeeded)
11396                 {
11397                     // Don't allow taking address of uninit this ptr.
11398                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
11399                     {
11400                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
11401                     }
11402
11403                     if (!tiRetVal.IsByRef())
11404                     {
11405                         tiRetVal.MakeByRef();
11406                     }
11407                     else
11408                     {
11409                         Verify(false, "byref to byref");
11410                     }
11411                 }
11412
11413                 impPushOnStack(op1, tiRetVal);
11414                 break;
11415
11416             case CEE_ARGLIST:
11417
11418                 if (!info.compIsVarArgs)
11419                 {
11420                     BADCODE("arglist in non-vararg method");
11421                 }
11422
11423                 if (tiVerificationNeeded)
11424                 {
11425                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
11426                 }
11427                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
11428
11429                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
11430                    adjusted the arg count because this is like fetching the last param */
11431                 assertImp(0 < numArgs);
11432                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
11433                 lclNum = lvaVarargsHandleArg;
11434                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
11435                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
11436                 impPushOnStack(op1, tiRetVal);
11437                 break;
11438
11439             case CEE_ENDFINALLY:
11440
11441                 if (compIsForInlining())
11442                 {
11443                     assert(!"Shouldn't have exception handlers in the inliner!");
11444                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
11445                     return;
11446                 }
11447
11448                 if (verCurrentState.esStackDepth > 0)
11449                 {
11450                     impEvalSideEffects();
11451                 }
11452
11453                 if (info.compXcptnsCount == 0)
11454                 {
11455                     BADCODE("endfinally outside finally");
11456                 }
11457
11458                 assert(verCurrentState.esStackDepth == 0);
11459
11460                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
11461                 goto APPEND;
11462
11463             case CEE_ENDFILTER:
11464
11465                 if (compIsForInlining())
11466                 {
11467                     assert(!"Shouldn't have exception handlers in the inliner!");
11468                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
11469                     return;
11470                 }
11471
11472                 block->bbSetRunRarely(); // filters are rare
11473
11474                 if (info.compXcptnsCount == 0)
11475                 {
11476                     BADCODE("endfilter outside filter");
11477                 }
11478
11479                 if (tiVerificationNeeded)
11480                 {
11481                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
11482                 }
11483
11484                 op1 = impPopStack().val;
11485                 assertImp(op1->gtType == TYP_INT);
11486                 if (!bbInFilterILRange(block))
11487                 {
11488                     BADCODE("EndFilter outside a filter handler");
11489                 }
11490
11491                 /* Mark current bb as end of filter */
11492
11493                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
11494                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
11495
11496                 /* Mark catch handler as successor */
11497
11498                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
11499                 if (verCurrentState.esStackDepth != 0)
11500                 {
11501                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
11502                                                 DEBUGARG(__LINE__));
11503                 }
11504                 goto APPEND;
11505
11506             case CEE_RET:
11507                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
11508             RET:
11509                 if (!impReturnInstruction(block, prefixFlags, opcode))
11510                 {
11511                     return; // abort
11512                 }
11513                 else
11514                 {
11515                     break;
11516                 }
11517
11518             case CEE_JMP:
11519
11520                 assert(!compIsForInlining());
11521
11522                 if (tiVerificationNeeded)
11523                 {
11524                     Verify(false, "Invalid opcode: CEE_JMP");
11525                 }
11526
11527                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
11528                 {
11529                     /* CEE_JMP does not make sense in some "protected" regions. */
11530
11531                     BADCODE("Jmp not allowed in protected region");
11532                 }
11533
11534                 if (verCurrentState.esStackDepth != 0)
11535                 {
11536                     BADCODE("Stack must be empty after CEE_JMPs");
11537                 }
11538
11539                 _impResolveToken(CORINFO_TOKENKIND_Method);
11540
11541                 JITDUMP(" %08X", resolvedToken.token);
11542
11543                 /* The signature of the target has to be identical to ours.
11544                    At least check that argCnt and returnType match */
11545
11546                 eeGetMethodSig(resolvedToken.hMethod, &sig);
11547                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
11548                     sig.retType != info.compMethodInfo->args.retType ||
11549                     sig.callConv != info.compMethodInfo->args.callConv)
11550                 {
11551                     BADCODE("Incompatible target for CEE_JMPs");
11552                 }
11553
11554                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
11555
11556                 /* Mark the basic block as being a JUMP instead of RETURN */
11557
11558                 block->bbFlags |= BBF_HAS_JMP;
11559
11560                 /* Set this flag to make sure register arguments have a location assigned
11561                  * even if we don't use them inside the method */
11562
11563                 compJmpOpUsed = true;
11564
11565                 fgNoStructPromotion = true;
11566
11567                 goto APPEND;
11568
11569             case CEE_LDELEMA:
11570                 assertImp(sz == sizeof(unsigned));
11571
11572                 _impResolveToken(CORINFO_TOKENKIND_Class);
11573
11574                 JITDUMP(" %08X", resolvedToken.token);
11575
11576                 ldelemClsHnd = resolvedToken.hClass;
11577
11578                 if (tiVerificationNeeded)
11579                 {
11580                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11581                     typeInfo tiIndex = impStackTop().seTypeInfo;
11582
11583                     // As per ECMA, the 'index' specified can be either int32 or native int.
11584                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11585
11586                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
11587                     Verify(tiArray.IsNullObjRef() ||
11588                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
11589                            "bad array");
11590
11591                     tiRetVal = arrayElemType;
11592                     tiRetVal.MakeByRef();
11593                     if (prefixFlags & PREFIX_READONLY)
11594                     {
11595                         tiRetVal.SetIsReadonlyByRef();
11596                     }
11597
11598                     // an array interior pointer is always in the heap
11599                     tiRetVal.SetIsPermanentHomeByRef();
11600                 }
11601
11602                 // If it's a value class array we just do a simple address-of
11603                 if (eeIsValueClass(ldelemClsHnd))
11604                 {
11605                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
11606                     if (cit == CORINFO_TYPE_UNDEF)
11607                     {
11608                         lclTyp = TYP_STRUCT;
11609                     }
11610                     else
11611                     {
11612                         lclTyp = JITtype2varType(cit);
11613                     }
11614                     goto ARR_LD_POST_VERIFY;
11615                 }
11616
11617                 // Similarly, if it's a readonly access, we can do a simple address-of
11618                 // without doing a runtime type-check
11619                 if (prefixFlags & PREFIX_READONLY)
11620                 {
11621                     lclTyp = TYP_REF;
11622                     goto ARR_LD_POST_VERIFY;
11623                 }
11624
11625                 // Otherwise we need the full helper function with run-time type check
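                // (Array covariance means the array's runtime element type may differ from the static
                // one; CORINFO_HELP_LDELEMA_REF verifies the exact element type and throws on a mismatch.)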
11626                 op1 = impTokenToHandle(&resolvedToken);
11627                 if (op1 == nullptr)
11628                 { // compDonotInline()
11629                     return;
11630                 }
11631
11632                 args = gtNewArgList(op1);                      // Type
11633                 args = gtNewListNode(impPopStack().val, args); // index
11634                 args = gtNewListNode(impPopStack().val, args); // array
11635                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
11636
11637                 impPushOnStack(op1, tiRetVal);
11638                 break;
11639
11640             // ldelem for reference and value types
11641             case CEE_LDELEM:
11642                 assertImp(sz == sizeof(unsigned));
11643
11644                 _impResolveToken(CORINFO_TOKENKIND_Class);
11645
11646                 JITDUMP(" %08X", resolvedToken.token);
11647
11648                 ldelemClsHnd = resolvedToken.hClass;
11649
11650                 if (tiVerificationNeeded)
11651                 {
11652                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11653                     typeInfo tiIndex = impStackTop().seTypeInfo;
11654
11655                     // As per ECMA, the 'index' specified can be either int32 or native int.
11656                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11657                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
11658
11659                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
11660                            "type of array incompatible with type operand");
11661                     tiRetVal.NormaliseForStack();
11662                 }
11663
11664                 // If it's a reference type or generic variable type
11665                 // then just generate code as though it's a ldelem.ref instruction
11666                 if (!eeIsValueClass(ldelemClsHnd))
11667                 {
11668                     lclTyp = TYP_REF;
11669                     opcode = CEE_LDELEM_REF;
11670                 }
11671                 else
11672                 {
11673                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
11674                     lclTyp             = JITtype2varType(jitTyp);
11675                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
11676                     tiRetVal.NormaliseForStack();
11677                 }
11678                 goto ARR_LD_POST_VERIFY;
11679
11680             case CEE_LDELEM_I1:
11681                 lclTyp = TYP_BYTE;
11682                 goto ARR_LD;
11683             case CEE_LDELEM_I2:
11684                 lclTyp = TYP_SHORT;
11685                 goto ARR_LD;
11686             case CEE_LDELEM_I:
11687                 lclTyp = TYP_I_IMPL;
11688                 goto ARR_LD;
11689
11690             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
11691             // and treating it as TYP_INT avoids other asserts.
11692             case CEE_LDELEM_U4:
11693                 lclTyp = TYP_INT;
11694                 goto ARR_LD;
11695
11696             case CEE_LDELEM_I4:
11697                 lclTyp = TYP_INT;
11698                 goto ARR_LD;
11699             case CEE_LDELEM_I8:
11700                 lclTyp = TYP_LONG;
11701                 goto ARR_LD;
11702             case CEE_LDELEM_REF:
11703                 lclTyp = TYP_REF;
11704                 goto ARR_LD;
11705             case CEE_LDELEM_R4:
11706                 lclTyp = TYP_FLOAT;
11707                 goto ARR_LD;
11708             case CEE_LDELEM_R8:
11709                 lclTyp = TYP_DOUBLE;
11710                 goto ARR_LD;
11711             case CEE_LDELEM_U1:
11712                 lclTyp = TYP_UBYTE;
11713                 goto ARR_LD;
11714             case CEE_LDELEM_U2:
11715                 lclTyp = TYP_USHORT;
11716                 goto ARR_LD;
11717
11718             ARR_LD:
11719
11720                 if (tiVerificationNeeded)
11721                 {
11722                     typeInfo tiArray = impStackTop(1).seTypeInfo;
11723                     typeInfo tiIndex = impStackTop().seTypeInfo;
11724
11725                     // As per ECMA, the 'index' specified can be either int32 or native int.
11726                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11727                     if (tiArray.IsNullObjRef())
11728                     {
11729                         if (lclTyp == TYP_REF)
11730                         { // we will say a deref of a null array yields a null ref
11731                             tiRetVal = typeInfo(TI_NULL);
11732                         }
11733                         else
11734                         {
11735                             tiRetVal = typeInfo(lclTyp);
11736                         }
11737                     }
11738                     else
11739                     {
11740                         tiRetVal             = verGetArrayElemType(tiArray);
11741                         typeInfo arrayElemTi = typeInfo(lclTyp);
11742 #ifdef _TARGET_64BIT_
11743                         if (opcode == CEE_LDELEM_I)
11744                         {
11745                             arrayElemTi = typeInfo::nativeInt();
11746                         }
11747
11748                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
11749                         {
11750                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
11751                         }
11752                         else
11753 #endif // _TARGET_64BIT_
11754                         {
11755                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
11756                         }
11757                     }
11758                     tiRetVal.NormaliseForStack();
11759                 }
11760             ARR_LD_POST_VERIFY:
11761
11762                 /* Pull the index value and array address */
11763                 op2 = impPopStack().val;
11764                 op1 = impPopStack().val;
11765                 assertImp(op1->gtType == TYP_REF);
11766
11767                 /* Check for null pointer - in the inliner case we simply abort */
11768
11769                 if (compIsForInlining())
11770                 {
11771                     if (op1->gtOper == GT_CNS_INT)
11772                     {
11773                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
11774                         return;
11775                     }
11776                 }
11777
11778                 op1 = impCheckForNullPointer(op1);
11779
11780                 /* Mark the block as containing an index expression */
11781
11782                 if (op1->gtOper == GT_LCL_VAR)
11783                 {
11784                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
11785                     {
11786                         block->bbFlags |= BBF_HAS_IDX_LEN;
11787                         optMethodFlags |= OMF_HAS_ARRAYREF;
11788                     }
11789                 }
11790
11791                 /* Create the index node and push it on the stack */
11792
11793                 op1 = gtNewIndexRef(lclTyp, op1, op2);
11794
11795                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
11796
11797                 if ((opcode == CEE_LDELEMA) || ldstruct ||
11798                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
11799                 {
11800                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
11801
11802                     // remember the element size
11803                     if (lclTyp == TYP_REF)
11804                     {
11805                         op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
11806                     }
11807                     else
11808                     {
11809                         // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
11810                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
11811                         {
11812                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
11813                         }
11814                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
11815                         if (lclTyp == TYP_STRUCT)
11816                         {
11817                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
11818                             op1->gtIndex.gtIndElemSize = size;
11819                             op1->gtType                = lclTyp;
11820                         }
11821                     }
11822
11823                     if ((opcode == CEE_LDELEMA) || ldstruct)
11824                     {
11825                         // wrap it in a &
11826                         lclTyp = TYP_BYREF;
11827
11828                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
11829                     }
11830                     else
11831                     {
11832                         assert(lclTyp != TYP_STRUCT);
11833                     }
11834                 }
11835
11836                 if (ldstruct)
11837                 {
11838                     // Create an OBJ for the result
11839                     op1 = gtNewObjNode(ldelemClsHnd, op1);
11840                     op1->gtFlags |= GTF_EXCEPT;
11841                 }
11842                 impPushOnStack(op1, tiRetVal);
11843                 break;
11844
11845             // stelem for reference and value types
11846             case CEE_STELEM:
11847
11848                 assertImp(sz == sizeof(unsigned));
11849
11850                 _impResolveToken(CORINFO_TOKENKIND_Class);
11851
11852                 JITDUMP(" %08X", resolvedToken.token);
11853
11854                 stelemClsHnd = resolvedToken.hClass;
11855
11856                 if (tiVerificationNeeded)
11857                 {
11858                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11859                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11860                     typeInfo tiValue = impStackTop().seTypeInfo;
11861
11862                     // As per ECMA, the 'index' specified can be either int32 or native int.
11863                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11864                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11865
11866                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11867                            "type operand incompatible with array element type");
11868                     arrayElem.NormaliseForStack();
11869                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11870                 }
11871
11872                 // If it's a reference type just behave as though it's a stelem.ref instruction
11873                 if (!eeIsValueClass(stelemClsHnd))
11874                 {
11875                     goto STELEM_REF_POST_VERIFY;
11876                 }
11877
11878                 // Otherwise extract the type
11879                 {
11880                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11881                     lclTyp             = JITtype2varType(jitTyp);
11882                     goto ARR_ST_POST_VERIFY;
11883                 }
11884
11885             case CEE_STELEM_REF:
11886
11887                 if (tiVerificationNeeded)
11888                 {
11889                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11890                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11891                     typeInfo tiValue = impStackTop().seTypeInfo;
11892
11893                     // As per ECMA, the 'index' specified can be either int32 or native int.
11894                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11895                     Verify(tiValue.IsObjRef(), "bad value");
11896
11897                     // we only check that it is an object reference; the helper does additional checks
11898                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11899                 }
11900
11901             STELEM_REF_POST_VERIFY:
11902
11903                 arrayNodeTo      = impStackTop(2).val;
11904                 arrayNodeToIndex = impStackTop(1).val;
11905                 arrayNodeFrom    = impStackTop().val;
11906
11907                 //
11908                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11909                 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
11910                 //
11911
11912                 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
11913                 // This does not need CORINFO_HELP_ARRADDR_ST
11914                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11915                     arrayNodeTo->gtOper == GT_LCL_VAR &&
11916                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11917                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11918                 {
11919                     JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
11920                     lclTyp = TYP_REF;
11921                     goto ARR_ST_POST_VERIFY;
11922                 }
11923
11924                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11925                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11926                 {
11927                     JITDUMP("\nstelem of null: skipping covariant store check\n");
11928                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11929                     lclTyp = TYP_REF;
11930                     goto ARR_ST_POST_VERIFY;
11931                 }
11932
11933                 /* Call a helper function to do the assignment */
11934                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11935
11936                 goto SPILL_APPEND;
11937
11938             case CEE_STELEM_I1:
11939                 lclTyp = TYP_BYTE;
11940                 goto ARR_ST;
11941             case CEE_STELEM_I2:
11942                 lclTyp = TYP_SHORT;
11943                 goto ARR_ST;
11944             case CEE_STELEM_I:
11945                 lclTyp = TYP_I_IMPL;
11946                 goto ARR_ST;
11947             case CEE_STELEM_I4:
11948                 lclTyp = TYP_INT;
11949                 goto ARR_ST;
11950             case CEE_STELEM_I8:
11951                 lclTyp = TYP_LONG;
11952                 goto ARR_ST;
11953             case CEE_STELEM_R4:
11954                 lclTyp = TYP_FLOAT;
11955                 goto ARR_ST;
11956             case CEE_STELEM_R8:
11957                 lclTyp = TYP_DOUBLE;
11958                 goto ARR_ST;
11959
11960             ARR_ST:
11961
11962                 if (tiVerificationNeeded)
11963                 {
11964                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11965                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11966                     typeInfo tiValue = impStackTop().seTypeInfo;
11967
11968                     // As per ECMA, the 'index' operand can be either int32 or native int.
11969                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11970                     typeInfo arrayElem = typeInfo(lclTyp);
11971 #ifdef _TARGET_64BIT_
11972                     if (opcode == CEE_STELEM_I)
11973                     {
11974                         arrayElem = typeInfo::nativeInt();
11975                     }
11976 #endif // _TARGET_64BIT_
11977                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11978                            "bad array");
11979
11980                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11981                            "bad value");
11982                 }
11983
11984             ARR_ST_POST_VERIFY:
11985                 /* The strict order of evaluation is LHS-operands, RHS-operands,
11986                    range-check, and then assignment. However, codegen currently
11987                    does the range-check before evaluating the RHS-operands. So to
11988                    maintain strict ordering, we spill the stack. */
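                      // Illustrative example: for "a[i] = F()", the ordering above requires F()
                      // to run (and possibly throw) before any IndexOutOfRangeException from the
                      // range check, so a side-effecting value on the stack is spilled first.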
11989
11990                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11991                 {
11992                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11993                                                    "Strict ordering of exceptions for Array store"));
11994                 }
11995
11996                 /* Pull the new value from the stack */
11997                 op2 = impPopStack().val;
11998
11999                 /* Pull the index value */
12000                 op1 = impPopStack().val;
12001
12002                 /* Pull the array address */
12003                 op3 = impPopStack().val;
12004
12005                 assertImp(op3->gtType == TYP_REF);
12006                 if (op2->IsVarAddr())
12007                 {
12008                     op2->gtType = TYP_I_IMPL;
12009                 }
12010
12011                 op3 = impCheckForNullPointer(op3);
12012
12013                 // Mark the block as containing an index expression
12014
12015                 if (op3->gtOper == GT_LCL_VAR)
12016                 {
12017                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
12018                     {
12019                         block->bbFlags |= BBF_HAS_IDX_LEN;
12020                         optMethodFlags |= OMF_HAS_ARRAYREF;
12021                     }
12022                 }
12023
12024                 /* Create the index node */
12025
12026                 op1 = gtNewIndexRef(lclTyp, op3, op1);
12027
12028                 /* Create the assignment node and append it */
12029
12030                 if (lclTyp == TYP_STRUCT)
12031                 {
12032                     assert(stelemClsHnd != DUMMY_INIT(NULL));
12033
12034                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
12035                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
12036                 }
12037                 if (varTypeIsStruct(op1))
12038                 {
12039                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
12040                 }
12041                 else
12042                 {
12043                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
12044                     op1 = gtNewAssignNode(op1, op2);
12045                 }
12046
12047                 /* Mark the expression as containing an assignment */
12048
12049                 op1->gtFlags |= GTF_ASG;
12050
12051                 goto SPILL_APPEND;
12052
12053             case CEE_ADD:
12054                 oper = GT_ADD;
12055                 goto MATH_OP2;
12056
12057             case CEE_ADD_OVF:
12058                 uns = false;
12059                 goto ADD_OVF;
12060             case CEE_ADD_OVF_UN:
12061                 uns = true;
12062                 goto ADD_OVF;
12063
12064             ADD_OVF:
12065                 ovfl     = true;
12066                 callNode = false;
12067                 oper     = GT_ADD;
12068                 goto MATH_OP2_FLAGS;
12069
12070             case CEE_SUB:
12071                 oper = GT_SUB;
12072                 goto MATH_OP2;
12073
12074             case CEE_SUB_OVF:
12075                 uns = false;
12076                 goto SUB_OVF;
12077             case CEE_SUB_OVF_UN:
12078                 uns = true;
12079                 goto SUB_OVF;
12080
12081             SUB_OVF:
12082                 ovfl     = true;
12083                 callNode = false;
12084                 oper     = GT_SUB;
12085                 goto MATH_OP2_FLAGS;
12086
12087             case CEE_MUL:
12088                 oper = GT_MUL;
12089                 goto MATH_MAYBE_CALL_NO_OVF;
12090
12091             case CEE_MUL_OVF:
12092                 uns = false;
12093                 goto MUL_OVF;
12094             case CEE_MUL_OVF_UN:
12095                 uns = true;
12096                 goto MUL_OVF;
12097
12098             MUL_OVF:
12099                 ovfl = true;
12100                 oper = GT_MUL;
12101                 goto MATH_MAYBE_CALL_OVF;
12102
12103             // Other binary math operations
12104
12105             case CEE_DIV:
12106                 oper = GT_DIV;
12107                 goto MATH_MAYBE_CALL_NO_OVF;
12108
12109             case CEE_DIV_UN:
12110                 oper = GT_UDIV;
12111                 goto MATH_MAYBE_CALL_NO_OVF;
12112
12113             case CEE_REM:
12114                 oper = GT_MOD;
12115                 goto MATH_MAYBE_CALL_NO_OVF;
12116
12117             case CEE_REM_UN:
12118                 oper = GT_UMOD;
12119                 goto MATH_MAYBE_CALL_NO_OVF;
12120
12121             MATH_MAYBE_CALL_NO_OVF:
12122                 ovfl = false;
12123             MATH_MAYBE_CALL_OVF:
12124                 // The morpher has some complex logic about when to turn differently
12125                 // typed nodes on different platforms into helper calls. We
12126                 // need to either duplicate that logic here, or just
12127                 // pessimistically make all the nodes large enough to become
12128                 // call nodes.  Since call nodes aren't that much larger and
12129                 // these opcodes are infrequent enough, I chose the latter.
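                      // Illustrative example: on 32-bit targets a 64-bit "div" or "rem" is later
                      // morphed into a runtime helper call, so the node allocated here must
                      // already be large enough to be rewritten in place as a GT_CALL.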
12130                 callNode = true;
12131                 goto MATH_OP2_FLAGS;
12132
12133             case CEE_AND:
12134                 oper = GT_AND;
12135                 goto MATH_OP2;
12136             case CEE_OR:
12137                 oper = GT_OR;
12138                 goto MATH_OP2;
12139             case CEE_XOR:
12140                 oper = GT_XOR;
12141                 goto MATH_OP2;
12142
12143             MATH_OP2: // For default values of 'ovfl' and 'callNode'
12144
12145                 ovfl     = false;
12146                 callNode = false;
12147
12148             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
12149
12150                 /* Pull two values and push back the result */
12151
12152                 if (tiVerificationNeeded)
12153                 {
12154                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
12155                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
12156
12157                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
12158                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
12159                     {
12160                         Verify(tiOp1.IsNumberType(), "not number");
12161                     }
12162                     else
12163                     {
12164                         Verify(tiOp1.IsIntegerType(), "not integer");
12165                     }
12166
12167                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
12168
12169                     tiRetVal = tiOp1;
12170
12171 #ifdef _TARGET_64BIT_
12172                     if (tiOp2.IsNativeIntType())
12173                     {
12174                         tiRetVal = tiOp2;
12175                     }
12176 #endif // _TARGET_64BIT_
12177                 }
12178
12179                 op2 = impPopStack().val;
12180                 op1 = impPopStack().val;
12181
12182 #if !CPU_HAS_FP_SUPPORT
12183                 if (varTypeIsFloating(op1->gtType))
12184                 {
12185                     callNode = true;
12186                 }
12187 #endif
12188                 /* Can't do arithmetic with references */
12189                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
12190
12191                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
12192                 // if it is on the stack)
12193                 impBashVarAddrsToI(op1, op2);
12194
12195                 type = impGetByRefResultType(oper, uns, &op1, &op2);
12196
12197                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
12198
12199                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
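                      // (Illustrative note: this folding applies only to integer constants;
                      // for floating point, x + 0.0 is not an identity when x is -0.0, since
                      // the result would be +0.0.)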
12200
12201                 if (op2->gtOper == GT_CNS_INT)
12202                 {
12203                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
12204                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
12205
12206                     {
12207                         impPushOnStack(op1, tiRetVal);
12208                         break;
12209                     }
12210                 }
12211
12212                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
12213                 //
12214                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
12215                 {
12216                     if (op1->TypeGet() != type)
12217                     {
12218                         // We insert a cast of op1 to 'type'
12219                         op1 = gtNewCastNode(type, op1, false, type);
12220                     }
12221                     if (op2->TypeGet() != type)
12222                     {
12223                         // We insert a cast of op2 to 'type'
12224                         op2 = gtNewCastNode(type, op2, false, type);
12225                     }
12226                 }
12227
12228 #if SMALL_TREE_NODES
12229                 if (callNode)
12230                 {
12231                     /* These operators can later be transformed into 'GT_CALL' */
12232
12233                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
12234 #ifndef _TARGET_ARM_
12235                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
12236                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
12237                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
12238                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
12239 #endif
12240                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
12241                     // that we'll need to transform into a general large node, but rather specifically
12242                     // to a call: by doing it this way, things keep working if there are multiple sizes,
12243                     // and a CALL is no longer the largest.
12244                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
12245                     // than an "if".
12246                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
12247                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
12248                 }
12249                 else
12250 #endif // SMALL_TREE_NODES
12251                 {
12252                     op1 = gtNewOperNode(oper, type, op1, op2);
12253                 }
12254
12255                 /* Special case: integer/long division may throw an exception */
12256
12257                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
12258                 {
12259                     op1->gtFlags |= GTF_EXCEPT;
12260                 }
12261
12262                 if (ovfl)
12263                 {
12264                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
12265                     if (ovflType != TYP_UNKNOWN)
12266                     {
12267                         op1->gtType = ovflType;
12268                     }
12269                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
12270                     if (uns)
12271                     {
12272                         op1->gtFlags |= GTF_UNSIGNED;
12273                     }
12274                 }
12275
12276                 impPushOnStack(op1, tiRetVal);
12277                 break;
12278
12279             case CEE_SHL:
12280                 oper = GT_LSH;
12281                 goto CEE_SH_OP2;
12282
12283             case CEE_SHR:
12284                 oper = GT_RSH;
12285                 goto CEE_SH_OP2;
12286             case CEE_SHR_UN:
12287                 oper = GT_RSZ;
12288                 goto CEE_SH_OP2;
12289
12290             CEE_SH_OP2:
12291                 if (tiVerificationNeeded)
12292                 {
12293                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
12294                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
12295                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
12296                     tiRetVal = tiVal;
12297                 }
12298                 op2 = impPopStack().val;
12299                 op1 = impPopStack().val; // operand to be shifted
12300                 impBashVarAddrsToI(op1, op2);
12301
12302                 type = genActualType(op1->TypeGet());
12303                 op1  = gtNewOperNode(oper, type, op1, op2);
12304
12305                 impPushOnStack(op1, tiRetVal);
12306                 break;
12307
12308             case CEE_NOT:
12309                 if (tiVerificationNeeded)
12310                 {
12311                     tiRetVal = impStackTop().seTypeInfo;
12312                     Verify(tiRetVal.IsIntegerType(), "bad int value");
12313                 }
12314
12315                 op1 = impPopStack().val;
12316                 impBashVarAddrsToI(op1, nullptr);
12317                 type = genActualType(op1->TypeGet());
12318                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
12319                 break;
12320
12321             case CEE_CKFINITE:
12322                 if (tiVerificationNeeded)
12323                 {
12324                     tiRetVal = impStackTop().seTypeInfo;
12325                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
12326                 }
12327                 op1  = impPopStack().val;
12328                 type = op1->TypeGet();
12329                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
12330                 op1->gtFlags |= GTF_EXCEPT;
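                      // ckfinite throws an ArithmeticException at run time if the value is NaN
                      // or +/- infinity, hence the GTF_EXCEPT flag.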
12331
12332                 impPushOnStack(op1, tiRetVal);
12333                 break;
12334
12335             case CEE_LEAVE:
12336
12337                 val     = getI4LittleEndian(codeAddr); // jump distance
12338                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
12339                 goto LEAVE;
12340
12341             case CEE_LEAVE_S:
12342                 val     = getI1LittleEndian(codeAddr); // jump distance
12343                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
12344
12345             LEAVE:
12346
12347                 if (compIsForInlining())
12348                 {
12349                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
12350                     return;
12351                 }
12352
12353                 JITDUMP(" %04X", jmpAddr);
12354                 if (block->bbJumpKind != BBJ_LEAVE)
12355                 {
12356                     impResetLeaveBlock(block, jmpAddr);
12357                 }
12358
12359                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
12360                 impImportLeave(block);
12361                 impNoteBranchOffs();
12362
12363                 break;
12364
12365             case CEE_BR:
12366             case CEE_BR_S:
12367                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
12368
12369                 if (compIsForInlining() && jmpDist == 0)
12370                 {
12371                     break; /* NOP */
12372                 }
12373
12374                 impNoteBranchOffs();
12375                 break;
12376
12377             case CEE_BRTRUE:
12378             case CEE_BRTRUE_S:
12379             case CEE_BRFALSE:
12380             case CEE_BRFALSE_S:
12381
12382                 /* Pop the comparand (now there's a neat term) from the stack */
12383                 if (tiVerificationNeeded)
12384                 {
12385                     typeInfo& tiVal = impStackTop().seTypeInfo;
12386                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
12387                            "bad value");
12388                 }
12389
12390                 op1  = impPopStack().val;
12391                 type = op1->TypeGet();
12392
12393                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
12394                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12395                 {
12396                     block->bbJumpKind = BBJ_NONE;
12397
12398                     if (op1->gtFlags & GTF_GLOB_EFFECT)
12399                     {
12400                         op1 = gtUnusedValNode(op1);
12401                         goto SPILL_APPEND;
12402                     }
12403                     else
12404                     {
12405                         break;
12406                     }
12407                 }
12408
12409                 if (op1->OperIsCompare())
12410                 {
12411                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
12412                     {
12413                         // Flip the sense of the compare
12414
12415                         op1 = gtReverseCond(op1);
12416                     }
12417                 }
12418                 else
12419                 {
12420                     /* We'll compare against an equally-sized integer 0 */
12421                     /* For small types, we always compare against int   */
12422                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
12423
12424                     /* Create the comparison operator and try to fold it */
12425
12426                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
12427                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
12428                 }
12429
12430             // fall through
12431
12432             COND_JUMP:
12433
12434                 /* Fold comparison if we can */
12435
12436                 op1 = gtFoldExpr(op1);
12437
12438                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq' */
12439                 /* Don't make any blocks unreachable in import only mode */
12440
12441                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
12442                 {
12443                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
12444                        unreachable under compDbgCode */
12445                     assert(!opts.compDbgCode);
12446
12447                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
12448                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
12449                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
12450                                                                          // block for the second time
12451
12452                     block->bbJumpKind = foldedJumpKind;
12453 #ifdef DEBUG
12454                     if (verbose)
12455                     {
12456                         if (op1->gtIntCon.gtIconVal)
12457                         {
12458                             printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n",
12459                                    block->bbJumpDest->bbNum);
12460                         }
12461                         else
12462                         {
12463                             printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum);
12464                         }
12465                     }
12466 #endif
12467                     break;
12468                 }
12469
12470                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
12471
12472                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
12473                    in impImportBlock(block). For correct line numbers, spill stack. */
12474
12475                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
12476                 {
12477                     impSpillStackEnsure(true);
12478                 }
12479
12480                 goto SPILL_APPEND;
12481
12482             case CEE_CEQ:
12483                 oper = GT_EQ;
12484                 uns  = false;
12485                 goto CMP_2_OPs;
12486             case CEE_CGT_UN:
12487                 oper = GT_GT;
12488                 uns  = true;
12489                 goto CMP_2_OPs;
12490             case CEE_CGT:
12491                 oper = GT_GT;
12492                 uns  = false;
12493                 goto CMP_2_OPs;
12494             case CEE_CLT_UN:
12495                 oper = GT_LT;
12496                 uns  = true;
12497                 goto CMP_2_OPs;
12498             case CEE_CLT:
12499                 oper = GT_LT;
12500                 uns  = false;
12501                 goto CMP_2_OPs;
12502
12503             CMP_2_OPs:
12504                 if (tiVerificationNeeded)
12505                 {
12506                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12507                     tiRetVal = typeInfo(TI_INT);
12508                 }
12509
12510                 op2 = impPopStack().val;
12511                 op1 = impPopStack().val;
12512
12513 #ifdef _TARGET_64BIT_
12514                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
12515                 {
12516                     op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12517                 }
12518                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
12519                 {
12520                     op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12521                 }
12522 #endif // _TARGET_64BIT_
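                      // Illustrative example: comparing an int32 local against a native int
                      // (e.g. an IntPtr) on a 64-bit target widens the int32 operand to
                      // TYP_I_IMPL so both sides of the relop have the same width.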
12523
12524                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12525                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12526                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12527
12528                 /* Create the comparison node */
12529
12530                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12531
12532                 /* TODO: setting both flags when only one is appropriate */
12533                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
12534                 {
12535                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
12536                 }
12537
12538                 // Fold result, if possible.
12539                 op1 = gtFoldExpr(op1);
12540
12541                 impPushOnStack(op1, tiRetVal);
12542                 break;
12543
12544             case CEE_BEQ_S:
12545             case CEE_BEQ:
12546                 oper = GT_EQ;
12547                 goto CMP_2_OPs_AND_BR;
12548
12549             case CEE_BGE_S:
12550             case CEE_BGE:
12551                 oper = GT_GE;
12552                 goto CMP_2_OPs_AND_BR;
12553
12554             case CEE_BGE_UN_S:
12555             case CEE_BGE_UN:
12556                 oper = GT_GE;
12557                 goto CMP_2_OPs_AND_BR_UN;
12558
12559             case CEE_BGT_S:
12560             case CEE_BGT:
12561                 oper = GT_GT;
12562                 goto CMP_2_OPs_AND_BR;
12563
12564             case CEE_BGT_UN_S:
12565             case CEE_BGT_UN:
12566                 oper = GT_GT;
12567                 goto CMP_2_OPs_AND_BR_UN;
12568
12569             case CEE_BLE_S:
12570             case CEE_BLE:
12571                 oper = GT_LE;
12572                 goto CMP_2_OPs_AND_BR;
12573
12574             case CEE_BLE_UN_S:
12575             case CEE_BLE_UN:
12576                 oper = GT_LE;
12577                 goto CMP_2_OPs_AND_BR_UN;
12578
12579             case CEE_BLT_S:
12580             case CEE_BLT:
12581                 oper = GT_LT;
12582                 goto CMP_2_OPs_AND_BR;
12583
12584             case CEE_BLT_UN_S:
12585             case CEE_BLT_UN:
12586                 oper = GT_LT;
12587                 goto CMP_2_OPs_AND_BR_UN;
12588
12589             case CEE_BNE_UN_S:
12590             case CEE_BNE_UN:
12591                 oper = GT_NE;
12592                 goto CMP_2_OPs_AND_BR_UN;
12593
12594             CMP_2_OPs_AND_BR_UN:
12595                 uns       = true;
12596                 unordered = true;
12597                 goto CMP_2_OPs_AND_BR_ALL;
12598             CMP_2_OPs_AND_BR:
12599                 uns       = false;
12600                 unordered = false;
12601                 goto CMP_2_OPs_AND_BR_ALL;
12602             CMP_2_OPs_AND_BR_ALL:
12603
12604                 if (tiVerificationNeeded)
12605                 {
12606                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12607                 }
12608
12609                 /* Pull two values */
12610                 op2 = impPopStack().val;
12611                 op1 = impPopStack().val;
12612
12613 #ifdef _TARGET_64BIT_
12614                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
12615                 {
12616                     op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12617                 }
12618                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
12619                 {
12620                     op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12621                 }
12622 #endif // _TARGET_64BIT_
12623
12624                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12625                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12626                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12627
12628                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12629                 {
12630                     block->bbJumpKind = BBJ_NONE;
12631
12632                     if (op1->gtFlags & GTF_GLOB_EFFECT)
12633                     {
12634                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12635                                                        "Branch to next Optimization, op1 side effect"));
12636                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12637                     }
12638                     if (op2->gtFlags & GTF_GLOB_EFFECT)
12639                     {
12640                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12641                                                        "Branch to next Optimization, op2 side effect"));
12642                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12643                     }
12644
12645 #ifdef DEBUG
12646                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
12647                     {
12648                         impNoteLastILoffs();
12649                     }
12650 #endif
12651                     break;
12652                 }
12653
12654                 // We can generate a compare of differently sized floating point op1 and op2,
12655                 // so we insert a cast
12656                 //
12657                 if (varTypeIsFloating(op1->TypeGet()))
12658                 {
12659                     if (op1->TypeGet() != op2->TypeGet())
12660                     {
12661                         assert(varTypeIsFloating(op2->TypeGet()));
12662
12663                         // say op1=double, op2=float. To avoid loss of precision
12664                         // while comparing, op2 is converted to double and double
12665                         // comparison is done.
12666                         if (op1->TypeGet() == TYP_DOUBLE)
12667                         {
12668                             // We insert a cast of op2 to TYP_DOUBLE
12669                             op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
12670                         }
12671                         else if (op2->TypeGet() == TYP_DOUBLE)
12672                         {
12673                             // We insert a cast of op1 to TYP_DOUBLE
12674                             op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
12675                         }
12676                     }
12677                 }
12678
12679                 /* Create and append the operator */
12680
12681                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12682
12683                 if (uns)
12684                 {
12685                     op1->gtFlags |= GTF_UNSIGNED;
12686                 }
12687
12688                 if (unordered)
12689                 {
12690                     op1->gtFlags |= GTF_RELOP_NAN_UN;
12691                 }
12692
12693                 goto COND_JUMP;
12694
12695             case CEE_SWITCH:
12696                 assert(!compIsForInlining());
12697
12698                 if (tiVerificationNeeded)
12699                 {
12700                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
12701                 }
12702                 /* Pop the switch value off the stack */
12703                 op1 = impPopStack().val;
12704                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
12705
12706                 /* We can create a switch node */
12707
12708                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
12709
12710                 val = (int)getU4LittleEndian(codeAddr);
12711                 codeAddr += 4 + val * 4; // skip over the switch-table
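                      // The switch operand is a 4-byte target count followed by that many
                      // 4-byte relative jump offsets, hence the "4 + val * 4" skip.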
12712
12713                 goto SPILL_APPEND;
12714
12715             /************************** Casting OPCODES ***************************/
12716
12717             case CEE_CONV_OVF_I1:
12718                 lclTyp = TYP_BYTE;
12719                 goto CONV_OVF;
12720             case CEE_CONV_OVF_I2:
12721                 lclTyp = TYP_SHORT;
12722                 goto CONV_OVF;
12723             case CEE_CONV_OVF_I:
12724                 lclTyp = TYP_I_IMPL;
12725                 goto CONV_OVF;
12726             case CEE_CONV_OVF_I4:
12727                 lclTyp = TYP_INT;
12728                 goto CONV_OVF;
12729             case CEE_CONV_OVF_I8:
12730                 lclTyp = TYP_LONG;
12731                 goto CONV_OVF;
12732
12733             case CEE_CONV_OVF_U1:
12734                 lclTyp = TYP_UBYTE;
12735                 goto CONV_OVF;
12736             case CEE_CONV_OVF_U2:
12737                 lclTyp = TYP_USHORT;
12738                 goto CONV_OVF;
12739             case CEE_CONV_OVF_U:
12740                 lclTyp = TYP_U_IMPL;
12741                 goto CONV_OVF;
12742             case CEE_CONV_OVF_U4:
12743                 lclTyp = TYP_UINT;
12744                 goto CONV_OVF;
12745             case CEE_CONV_OVF_U8:
12746                 lclTyp = TYP_ULONG;
12747                 goto CONV_OVF;
12748
12749             case CEE_CONV_OVF_I1_UN:
12750                 lclTyp = TYP_BYTE;
12751                 goto CONV_OVF_UN;
12752             case CEE_CONV_OVF_I2_UN:
12753                 lclTyp = TYP_SHORT;
12754                 goto CONV_OVF_UN;
12755             case CEE_CONV_OVF_I_UN:
12756                 lclTyp = TYP_I_IMPL;
12757                 goto CONV_OVF_UN;
12758             case CEE_CONV_OVF_I4_UN:
12759                 lclTyp = TYP_INT;
12760                 goto CONV_OVF_UN;
12761             case CEE_CONV_OVF_I8_UN:
12762                 lclTyp = TYP_LONG;
12763                 goto CONV_OVF_UN;
12764
12765             case CEE_CONV_OVF_U1_UN:
12766                 lclTyp = TYP_UBYTE;
12767                 goto CONV_OVF_UN;
12768             case CEE_CONV_OVF_U2_UN:
12769                 lclTyp = TYP_USHORT;
12770                 goto CONV_OVF_UN;
12771             case CEE_CONV_OVF_U_UN:
12772                 lclTyp = TYP_U_IMPL;
12773                 goto CONV_OVF_UN;
12774             case CEE_CONV_OVF_U4_UN:
12775                 lclTyp = TYP_UINT;
12776                 goto CONV_OVF_UN;
12777             case CEE_CONV_OVF_U8_UN:
12778                 lclTyp = TYP_ULONG;
12779                 goto CONV_OVF_UN;
12780
12781             CONV_OVF_UN:
12782                 uns = true;
12783                 goto CONV_OVF_COMMON;
12784             CONV_OVF:
12785                 uns = false;
12786                 goto CONV_OVF_COMMON;
12787
12788             CONV_OVF_COMMON:
12789                 ovfl = true;
12790                 goto _CONV;
12791
12792             case CEE_CONV_I1:
12793                 lclTyp = TYP_BYTE;
12794                 goto CONV;
12795             case CEE_CONV_I2:
12796                 lclTyp = TYP_SHORT;
12797                 goto CONV;
12798             case CEE_CONV_I:
12799                 lclTyp = TYP_I_IMPL;
12800                 goto CONV;
12801             case CEE_CONV_I4:
12802                 lclTyp = TYP_INT;
12803                 goto CONV;
12804             case CEE_CONV_I8:
12805                 lclTyp = TYP_LONG;
12806                 goto CONV;
12807
12808             case CEE_CONV_U1:
12809                 lclTyp = TYP_UBYTE;
12810                 goto CONV;
12811             case CEE_CONV_U2:
12812                 lclTyp = TYP_USHORT;
12813                 goto CONV;
12814 #if (REGSIZE_BYTES == 8)
12815             case CEE_CONV_U:
12816                 lclTyp = TYP_U_IMPL;
12817                 goto CONV_UN;
12818 #else
12819             case CEE_CONV_U:
12820                 lclTyp = TYP_U_IMPL;
12821                 goto CONV;
12822 #endif
12823             case CEE_CONV_U4:
12824                 lclTyp = TYP_UINT;
12825                 goto CONV;
12826             case CEE_CONV_U8:
12827                 lclTyp = TYP_ULONG;
12828                 goto CONV_UN;
12829
12830             case CEE_CONV_R4:
12831                 lclTyp = TYP_FLOAT;
12832                 goto CONV;
12833             case CEE_CONV_R8:
12834                 lclTyp = TYP_DOUBLE;
12835                 goto CONV;
12836
12837             case CEE_CONV_R_UN:
12838                 lclTyp = TYP_DOUBLE;
12839                 goto CONV_UN;
12840
12841             CONV_UN:
12842                 uns  = true;
12843                 ovfl = false;
12844                 goto _CONV;
12845
12846             CONV:
12847                 uns  = false;
12848                 ovfl = false;
12849                 goto _CONV;
12850
12851             _CONV:
12852                 // just check that we have a number on the stack
12853                 if (tiVerificationNeeded)
12854                 {
12855                     const typeInfo& tiVal = impStackTop().seTypeInfo;
12856                     Verify(tiVal.IsNumberType(), "bad arg");
12857
12858 #ifdef _TARGET_64BIT_
12859                     bool isNative = false;
12860
12861                     switch (opcode)
12862                     {
12863                         case CEE_CONV_OVF_I:
12864                         case CEE_CONV_OVF_I_UN:
12865                         case CEE_CONV_I:
12866                         case CEE_CONV_OVF_U:
12867                         case CEE_CONV_OVF_U_UN:
12868                         case CEE_CONV_U:
12869                             isNative = true;
12870                         default:
12871                             // leave 'isNative' = false;
12872                             break;
12873                     }
12874                     if (isNative)
12875                     {
12876                         tiRetVal = typeInfo::nativeInt();
12877                     }
12878                     else
12879 #endif // _TARGET_64BIT_
12880                     {
12881                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12882                     }
12883                 }
12884
12885                 // Only conversions from FLOAT or DOUBLE to an integer type,
12886                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls.
12887
12888                 if (varTypeIsFloating(lclTyp))
12889                 {
12890                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12891 #ifdef _TARGET_64BIT_
12892                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12893                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
12894                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12895                                // and generate SSE2 code instead of going through helper calls.
12896                                || (impStackTop().val->TypeGet() == TYP_BYREF)
12897 #endif
12898                         ;
12899                 }
12900                 else
12901                 {
12902                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12903                 }
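                      // Illustrative example: "conv.r8" applied to a ulong value takes the
                      // callNode path, since the unsigned-long-to-double conversion may later
                      // be morphed into a runtime helper call.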
12904
12905                 // At this point uns, ovfl, and callNode are all set
12906
12907                 op1 = impPopStack().val;
12908                 impBashVarAddrsToI(op1);
12909
12910                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12911                 {
12912                     op2 = op1->gtOp.gtOp2;
12913
12914                     if (op2->gtOper == GT_CNS_INT)
12915                     {
12916                         ssize_t ival = op2->gtIntCon.gtIconVal;
12917                         ssize_t mask, umask;
12918
12919                         switch (lclTyp)
12920                         {
12921                             case TYP_BYTE:
12922                             case TYP_UBYTE:
12923                                 mask  = 0x00FF;
12924                                 umask = 0x007F;
12925                                 break;
12926                             case TYP_USHORT:
12927                             case TYP_SHORT:
12928                                 mask  = 0xFFFF;
12929                                 umask = 0x7FFF;
12930                                 break;
12931
12932                             default:
12933                                 assert(!"unexpected type");
12934                                 return;
12935                         }
12936
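                              // Illustrative example: for "conv.i1" of "(x & 0x7F)" every possible
                              // result already fits in a signed byte, so the cast is dropped; for
                              // "(x & 0xFF)" the mask is dropped instead, since the narrowing cast
                              // only looks at the low 8 bits anyway.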
12937                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12938                         {
12939                             /* Toss the cast, it's a waste of time */
12940
12941                             impPushOnStack(op1, tiRetVal);
12942                             break;
12943                         }
12944                         else if (ival == mask)
12945                         {
12946                             /* Toss the masking, it's a waste of time, since
12947                                we sign-extend from the small value anyway */
12948
12949                             op1 = op1->gtOp.gtOp1;
12950                         }
12951                     }
12952                 }
12953
12954                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
12955                     since the result of a cast to one of the 'small' integer
12956                     types is an integer.
12957                  */
12958
12959                 type = genActualType(lclTyp);
12960
12961                 // If this is a no-op cast, just use op1.
12962                 if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp)))
12963                 {
12964                     // Nothing needs to change
12965                 }
12966                 // Work is evidently required, add cast node
12967                 else
12968                 {
12969 #if SMALL_TREE_NODES
12970                     if (callNode)
12971                     {
12972                         op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
12973                     }
12974                     else
12975 #endif // SMALL_TREE_NODES
12976                     {
12977                         op1 = gtNewCastNode(type, op1, uns, lclTyp);
12978                     }
12979
12980                     if (ovfl)
12981                     {
12982                         op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12983                     }
12984                 }
12985
12986                 impPushOnStack(op1, tiRetVal);
12987                 break;
12988
12989             case CEE_NEG:
12990                 if (tiVerificationNeeded)
12991                 {
12992                     tiRetVal = impStackTop().seTypeInfo;
12993                     Verify(tiRetVal.IsNumberType(), "Bad arg");
12994                 }
12995
12996                 op1 = impPopStack().val;
12997                 impBashVarAddrsToI(op1, nullptr);
12998                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12999                 break;
13000
13001             case CEE_POP:
13002             {
13003                 /* Pull the top value from the stack */
13004
13005                 StackEntry se = impPopStack();
13006                 clsHnd        = se.seTypeInfo.GetClassHandle();
13007                 op1           = se.val;
13008
13009                 /* Get hold of the type of the value being popped */
13010
13011                 lclTyp = genActualType(op1->gtType);
13012
13013                 /* Does the value have any side effects? */
13014
13015                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
13016                 {
13017                     // Since we are throwing away the value, just normalize
13018                     // it to its address.  This is more efficient.
13019
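                          // Illustrative example: popping the value of "obj.SomeStructField" only
                          // needs to preserve the null check on "obj", so the struct load is
                          // changed to a GT_NULLCHECK (or reduced to its address) below rather
                          // than copying the whole struct.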
13020                     if (varTypeIsStruct(op1))
13021                     {
13022                         JITDUMP("\n ... CEE_POP struct ...\n");
13023                         DISPTREE(op1);
13024 #ifdef UNIX_AMD64_ABI
13025                         // Non-calls, such as obj or ret_expr, have to go through this.
13026                         // Calls with large struct return value have to go through this.
13027                         // Helper calls with small struct return value also have to go
13028                         // through this since they do not follow Unix calling convention.
13029                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
13030                             op1->AsCall()->gtCallType == CT_HELPER)
13031 #endif // UNIX_AMD64_ABI
13032                         {
13033                             // If the value being produced comes from loading
13034                             // via an underlying address, just null check the address.
13035                             if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ))
13036                             {
13037                                 op1->ChangeOper(GT_NULLCHECK);
13038                                 op1->gtType = TYP_BYTE;
13039                             }
13040                             else
13041                             {
13042                                 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
13043                             }
13044
13045                             JITDUMP("\n ... optimized to ...\n");
13046                             DISPTREE(op1);
13047                         }
13048                     }
13049
13050                     // If op1 is a non-overflow cast, throw it away since it is useless.
13051                     // Another reason for throwing away the useless cast is in the context of
13052                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
13053                     // The cast gets added as part of importing GT_CALL, which gets in the way
13054                     // of fgMorphCall() on the forms of tail call nodes that we assert.
13055                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
13056                     {
13057                         op1 = op1->gtOp.gtOp1;
13058                     }
13059
13060                     // If 'op1' is an expression, create an assignment node.
13061                     // This helps analyses (like CSE) do a better job.
13062
13063                     if (op1->gtOper != GT_CALL)
13064                     {
13065                         op1 = gtUnusedValNode(op1);
13066                     }
13067
13068                     /* Append the value to the tree list */
13069                     goto SPILL_APPEND;
13070                 }
13071
13072                 /* No side effects - just throw the <BEEP> thing away */
13073             }
13074             break;
13075
13076             case CEE_DUP:
13077             {
13078                 if (tiVerificationNeeded)
13079                 {
13080                     // Dup could mark the beginning of a delegate creation sequence; remember that
13081                     delegateCreateStart = codeAddr - 1;
13082                     impStackTop(0);
13083                 }
13084
13085                 // If the expression to dup is simple, just clone it.
13086                 // Otherwise spill it to a temp, and reload the temp
13087                 // twice.
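                      // Illustrative example: "dup" of a constant zero or of a local variable is
                      // cloned directly, while "dup" of a call result is first stored to a temp
                      // so the call executes only once.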
13088                 StackEntry se   = impPopStack();
13089                 GenTree*   tree = se.val;
13090                 tiRetVal        = se.seTypeInfo;
13091                 op1             = tree;
13092
13093                 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
13094                 {
13095                     const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
13096                     impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
13097                     var_types type = genActualType(lvaTable[tmpNum].TypeGet());
13098                     op1            = gtNewLclvNode(tmpNum, type);
13099
13100                     // Propagate type info to the temp from the stack and the original tree
13101                     if (type == TYP_REF)
13102                     {
13103                         assert(lvaTable[tmpNum].lvSingleDef == 0);
13104                         lvaTable[tmpNum].lvSingleDef = 1;
13105                         JITDUMP("Marked V%02u as a single def local\n", tmpNum);
13106                         lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
13107                     }
13108                 }
13109
13110                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
13111                                    nullptr DEBUGARG("DUP instruction"));
13112
13113                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
13114                 impPushOnStack(op1, tiRetVal);
13115                 impPushOnStack(op2, tiRetVal);
13116             }
13117             break;
13118
13119             case CEE_STIND_I1:
13120                 lclTyp = TYP_BYTE;
13121                 goto STIND;
13122             case CEE_STIND_I2:
13123                 lclTyp = TYP_SHORT;
13124                 goto STIND;
13125             case CEE_STIND_I4:
13126                 lclTyp = TYP_INT;
13127                 goto STIND;
13128             case CEE_STIND_I8:
13129                 lclTyp = TYP_LONG;
13130                 goto STIND;
13131             case CEE_STIND_I:
13132                 lclTyp = TYP_I_IMPL;
13133                 goto STIND;
13134             case CEE_STIND_REF:
13135                 lclTyp = TYP_REF;
13136                 goto STIND;
13137             case CEE_STIND_R4:
13138                 lclTyp = TYP_FLOAT;
13139                 goto STIND;
13140             case CEE_STIND_R8:
13141                 lclTyp = TYP_DOUBLE;
13142                 goto STIND;
13143             STIND:
13144
13145                 if (tiVerificationNeeded)
13146                 {
13147                     typeInfo instrType(lclTyp);
13148 #ifdef _TARGET_64BIT_
13149                     if (opcode == CEE_STIND_I)
13150                     {
13151                         instrType = typeInfo::nativeInt();
13152                     }
13153 #endif // _TARGET_64BIT_
13154                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
13155                 }
13156                 else
13157                 {
13158                     compUnsafeCastUsed = true; // Have to go conservative
13159                 }
13160
13161             STIND_POST_VERIFY:
13162
13163                 op2 = impPopStack().val; // value to store
13164                 op1 = impPopStack().val; // address to store to
13165
13166                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
13167                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
13168
13169                 impBashVarAddrsToI(op1, op2);
13170
13171                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
13172
13173 #ifdef _TARGET_64BIT_
13174                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13175                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13176                 {
13177                     op2->gtType = TYP_I_IMPL;
13178                 }
13179                 else
13180                 {
13181                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13182                     //
13183                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13184                     {
13185                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13186                         op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
13187                     }
13188                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13189                     //
13190                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13191                     {
13192                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13193                         op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
13194                     }
13195                 }
13196 #endif // _TARGET_64BIT_
13197
13198                 if (opcode == CEE_STIND_REF)
13199                 {
13200                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
13201                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
13202                     lclTyp = genActualType(op2->TypeGet());
13203                 }
13204
13205 // Check target type.
13206 #ifdef DEBUG
13207                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
13208                 {
13209                     if (op2->gtType == TYP_BYREF)
13210                     {
13211                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
13212                     }
13213                     else if (lclTyp == TYP_BYREF)
13214                     {
13215                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
13216                     }
13217                 }
13218                 else
13219                 {
13220                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
13221                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
13222                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
13223                 }
13224 #endif
13225
13226                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
13227
13228                 // stind could point anywhere, for example a boxed class static int
13229                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13230
13231                 if (prefixFlags & PREFIX_VOLATILE)
13232                 {
13233                     assert(op1->OperGet() == GT_IND);
13234                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13235                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13236                     op1->gtFlags |= GTF_IND_VOLATILE;
13237                 }
13238
13239                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13240                 {
13241                     assert(op1->OperGet() == GT_IND);
13242                     op1->gtFlags |= GTF_IND_UNALIGNED;
13243                 }
13244
13245                 op1 = gtNewAssignNode(op1, op2);
13246                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
13247
13248                 // Spill side-effects AND global-data-accesses
13249                 if (verCurrentState.esStackDepth > 0)
13250                 {
13251                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
13252                 }
13253
13254                 goto APPEND;
13255
13256             case CEE_LDIND_I1:
13257                 lclTyp = TYP_BYTE;
13258                 goto LDIND;
13259             case CEE_LDIND_I2:
13260                 lclTyp = TYP_SHORT;
13261                 goto LDIND;
13262             case CEE_LDIND_U4:
13263             case CEE_LDIND_I4:
13264                 lclTyp = TYP_INT;
13265                 goto LDIND;
13266             case CEE_LDIND_I8:
13267                 lclTyp = TYP_LONG;
13268                 goto LDIND;
13269             case CEE_LDIND_REF:
13270                 lclTyp = TYP_REF;
13271                 goto LDIND;
13272             case CEE_LDIND_I:
13273                 lclTyp = TYP_I_IMPL;
13274                 goto LDIND;
13275             case CEE_LDIND_R4:
13276                 lclTyp = TYP_FLOAT;
13277                 goto LDIND;
13278             case CEE_LDIND_R8:
13279                 lclTyp = TYP_DOUBLE;
13280                 goto LDIND;
13281             case CEE_LDIND_U1:
13282                 lclTyp = TYP_UBYTE;
13283                 goto LDIND;
13284             case CEE_LDIND_U2:
13285                 lclTyp = TYP_USHORT;
13286                 goto LDIND;
13287             LDIND:
13288
13289                 if (tiVerificationNeeded)
13290                 {
13291                     typeInfo lclTiType(lclTyp);
13292 #ifdef _TARGET_64BIT_
13293                     if (opcode == CEE_LDIND_I)
13294                     {
13295                         lclTiType = typeInfo::nativeInt();
13296                     }
13297 #endif // _TARGET_64BIT_
13298                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
13299                     tiRetVal.NormaliseForStack();
13300                 }
13301                 else
13302                 {
13303                     compUnsafeCastUsed = true; // Have to go conservative
13304                 }
13305
13306             LDIND_POST_VERIFY:
13307
13308                 op1 = impPopStack().val; // address to load from
13309                 impBashVarAddrsToI(op1);
13310
13311 #ifdef _TARGET_64BIT_
13312                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13313                 //
13314                 if (genActualType(op1->gtType) == TYP_INT)
13315                 {
13316                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13317                     op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
13318                 }
13319 #endif
13320
13321                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
13322
13323                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
13324
13325                 // ldind could point anywhere, for example a boxed class static int
13326                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
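                // Sketch of the result for, e.g., CEE_LDIND_I4: the popped address 'addr' becomes
                // IND<int>(addr), which is pushed back onto the IL stack below (after any
                // volatile./unaligned. prefixes are folded into its flags).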
13327
13328                 if (prefixFlags & PREFIX_VOLATILE)
13329                 {
13330                     assert(op1->OperGet() == GT_IND);
13331                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13332                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13333                     op1->gtFlags |= GTF_IND_VOLATILE;
13334                 }
13335
13336                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13337                 {
13338                     assert(op1->OperGet() == GT_IND);
13339                     op1->gtFlags |= GTF_IND_UNALIGNED;
13340                 }
13341
13342                 impPushOnStack(op1, tiRetVal);
13343
13344                 break;
13345
13346             case CEE_UNALIGNED:
13347
13348                 assert(sz == 1);
13349                 val = getU1LittleEndian(codeAddr);
13350                 ++codeAddr;
13351                 JITDUMP(" %u", val);
13352                 if ((val != 1) && (val != 2) && (val != 4))
13353                 {
13354                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
13355                 }
13356
13357                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
13358                 prefixFlags |= PREFIX_UNALIGNED;
13359
13360                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
13361
13362             PREFIX:
13363                 opcode     = (OPCODE)getU1LittleEndian(codeAddr);
13364                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
13365                 codeAddr += sizeof(__int8);
13366                 goto DECODE_OPCODE;
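            // Note that every prefix opcode (unaligned., volatile., constrained., readonly., tail.)
            // funnels through the PREFIX label above, so prefix flags accumulate until a
            // non-prefix opcode is decoded and consumes them.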
13367
13368             case CEE_VOLATILE:
13369
13370                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
13371                 prefixFlags |= PREFIX_VOLATILE;
13372
13373                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
13374
13375                 assert(sz == 0);
13376                 goto PREFIX;
13377
13378             case CEE_LDFTN:
13379             {
13380                 // Need to do a lookup here so that we perform an access check
13381                 // and do a NOWAY if protections are violated
13382                 _impResolveToken(CORINFO_TOKENKIND_Method);
13383
13384                 JITDUMP(" %08X", resolvedToken.token);
13385
13386                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13387                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
13388                               &callInfo);
13389
13390                 // This check really only applies to intrinsic Array.Address methods
13391                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
13392                 {
13393                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
13394                 }
13395
13396                 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
13397                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13398
13399                 if (tiVerificationNeeded)
13400                 {
13401                     // LDFTN could start the beginning of a delegate creation sequence; remember that
13402                     delegateCreateStart = codeAddr - 2;
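                    // (ldftn is a two-byte opcode, 0xFE 0x06, and codeAddr currently points just past
                    // it, so codeAddr - 2 is the start of the ldftn instruction; a delegate creation
                    // sequence is an ldftn/ldvirtftn immediately followed by a newobj of the
                    // delegate's constructor.)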
13403
13404                     // check any constraints on the callee's class and type parameters
13405                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
13406                                    "method has unsatisfied class constraints");
13407                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
13408                                                                                 resolvedToken.hMethod),
13409                                    "method has unsatisfied method constraints");
13410
13411                     mflags = callInfo.verMethodFlags;
13412                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
13413                 }
13414
13415             DO_LDFTN:
13416                 op1 = impMethodPointer(&resolvedToken, &callInfo);
13417
13418                 if (compDonotInline())
13419                 {
13420                     return;
13421                 }
13422
13423                 // Call info may have more precise information about the function than
13424                 // the resolved token.
13425                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13426                 assert(callInfo.hMethod != nullptr);
13427                 heapToken->hMethod = callInfo.hMethod;
13428                 impPushOnStack(op1, typeInfo(heapToken));
13429
13430                 break;
13431             }
13432
13433             case CEE_LDVIRTFTN:
13434             {
13435                 /* Get the method token */
13436
13437                 _impResolveToken(CORINFO_TOKENKIND_Method);
13438
13439                 JITDUMP(" %08X", resolvedToken.token);
13440
13441                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
13442                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
13443                                                     CORINFO_CALLINFO_CALLVIRT)),
13444                               &callInfo);
13445
13446                 // This check really only applies to intrinsic Array.Address methods
13447                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
13448                 {
13449                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
13450                 }
13451
13452                 mflags = callInfo.methodFlags;
13453
13454                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13455
13456                 if (compIsForInlining())
13457                 {
13458                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13459                     {
13460                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
13461                         return;
13462                     }
13463                 }
13464
13465                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
13466
13467                 if (tiVerificationNeeded)
13468                 {
13469
13470                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
13471                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
13472
13473                     // JIT32 verifier rejects verifiable ldvirtftn pattern
13474                     typeInfo declType =
13475                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
13476
13477                     typeInfo arg = impStackTop().seTypeInfo;
13478                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
13479                            "bad ldvirtftn");
13480
13481                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
13482                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
13483                     {
13484                         instanceClassHnd = arg.GetClassHandleForObjRef();
13485                     }
13486
13487                     // check any constraints on the method's class and type parameters
13488                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
13489                                    "method has unsatisfied class constraints");
13490                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
13491                                                                                 resolvedToken.hMethod),
13492                                    "method has unsatisfied method constraints");
13493
13494                     if (mflags & CORINFO_FLG_PROTECTED)
13495                     {
13496                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
13497                                "Accessing protected method through wrong type.");
13498                     }
13499                 }
13500
13501                 /* Get the object-ref */
13502                 op1 = impPopStack().val;
13503                 assertImp(op1->gtType == TYP_REF);
13504
13505                 if (opts.IsReadyToRun())
13506                 {
13507                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
13508                     {
13509                         if (op1->gtFlags & GTF_SIDE_EFFECT)
13510                         {
13511                             op1 = gtUnusedValNode(op1);
13512                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13513                         }
13514                         goto DO_LDFTN;
13515                     }
13516                 }
13517                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13518                 {
13519                     if (op1->gtFlags & GTF_SIDE_EFFECT)
13520                     {
13521                         op1 = gtUnusedValNode(op1);
13522                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13523                     }
13524                     goto DO_LDFTN;
13525                 }
13526
13527                 GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
13528                 if (compDonotInline())
13529                 {
13530                     return;
13531                 }
13532
13533                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13534
13535                 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
13536                 assert(callInfo.hMethod != nullptr);
13537
13538                 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
13539                 heapToken->hMethod   = callInfo.hMethod;
13540                 impPushOnStack(fptr, typeInfo(heapToken));
13541
13542                 break;
13543             }
13544
13545             case CEE_CONSTRAINED:
13546
13547                 assertImp(sz == sizeof(unsigned));
13548                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
13549                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
13550                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
13551
13552                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
13553                 prefixFlags |= PREFIX_CONSTRAINED;
13554
13555                 {
13556                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13557                     if (actualOpcode != CEE_CALLVIRT)
13558                     {
13559                         BADCODE("constrained. has to be followed by callvirt");
13560                     }
13561                 }
13562
13563                 goto PREFIX;
13564
13565             case CEE_READONLY:
13566                 JITDUMP(" readonly.");
13567
13568                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
13569                 prefixFlags |= PREFIX_READONLY;
13570
13571                 {
13572                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13573                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
13574                     {
13575                         BADCODE("readonly. has to be followed by ldelema or call");
13576                     }
13577                 }
13578
13579                 assert(sz == 0);
13580                 goto PREFIX;
13581
13582             case CEE_TAILCALL:
13583                 JITDUMP(" tail.");
13584
13585                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
13586                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13587
13588                 {
13589                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13590                     if (!impOpcodeIsCallOpcode(actualOpcode))
13591                     {
13592                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
13593                     }
13594                 }
13595                 assert(sz == 0);
13596                 goto PREFIX;
13597
13598             case CEE_NEWOBJ:
13599
13600                 /* Since we will implicitly insert newObjThisPtr at the start of the
13601                    argument list, spill any GTF_ORDER_SIDEEFF */
13602                 impSpillSpecialSideEff();
13603
13604                 /* NEWOBJ does not respond to TAIL */
13605                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
13606
13607                 /* NEWOBJ does not respond to CONSTRAINED */
13608                 prefixFlags &= ~PREFIX_CONSTRAINED;
13609
13610                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
13611
13612                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13613                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
13614                               &callInfo);
13615
13616                 if (compIsForInlining())
13617                 {
13618                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13619                     {
13620                         // Check to see if this call violates the boundary.
13621                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
13622                         return;
13623                     }
13624                 }
13625
13626                 mflags = callInfo.methodFlags;
13627
13628                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
13629                 {
13630                     BADCODE("newobj on static or abstract method");
13631                 }
13632
13633                 // Insert the security callout before any actual code is generated
13634                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13635
13636                 // There are three different cases for new:
13637                 //      1) Object is an array (arrays are treated specially by the EE)
13638                 //      2) Object is some other variable-sized object (e.g. String)
13639                 //      3) Class size can be determined beforehand (normal case)
13640                 // In the first two cases the object size is variable (it depends on the arguments).
13641                 // In the first case we need to call a NEWOBJ helper (multinewarray),
13642                 // in the second case we call the constructor with a '0' this pointer,
13643                 // and in the third case we alloc the memory, then call the constructor.
13644
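                // Illustrative (hypothetical) C# shapes for the three cases:
                //      1) new int[2, 3]        -- multi-dimensional array, imported via the NEWOBJ helper
                //      2) new string('x', 10)  -- variable-sized object, ctor called with a null 'this'
                //      3) new Foo(1, 2)        -- fixed-size object, allocate first, then call the ctor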
13645                 clsFlags = callInfo.classFlags;
13646                 if (clsFlags & CORINFO_FLG_ARRAY)
13647                 {
13648                     if (tiVerificationNeeded)
13649                     {
13650                         CORINFO_CLASS_HANDLE elemTypeHnd;
13651                         INDEBUG(CorInfoType corType =)
13652                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13653                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
13654                         Verify(elemTypeHnd == nullptr ||
13655                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13656                                "newarr of byref-like objects");
13657                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
13658                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
13659                                       &callInfo DEBUGARG(info.compFullName));
13660                     }
13661                     // Arrays need to call the NEWOBJ helper.
13662                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
13663
13664                     impImportNewObjArray(&resolvedToken, &callInfo);
13665                     if (compDonotInline())
13666                     {
13667                         return;
13668                     }
13669
13670                     callTyp = TYP_REF;
13671                     break;
13672                 }
13673                 // At present this can only be String
13674                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
13675                 {
13676                     if (IsTargetAbi(CORINFO_CORERT_ABI))
13677                     {
13678                         // The dummy argument does not exist in CoreRT
13679                         newObjThisPtr = nullptr;
13680                     }
13681                     else
13682                     {
13683                         // This is the case for variable-sized objects that are not
13684                         // arrays.  In this case, call the constructor with a null 'this'
13685                         // pointer
13686                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
13687                     }
13688
13689                     /* Remember that this basic block contains 'new' of an object */
13690                     block->bbFlags |= BBF_HAS_NEWOBJ;
13691                     optMethodFlags |= OMF_HAS_NEWOBJ;
13692                 }
13693                 else
13694                 {
13695                     // This is the normal case where the size of the object is
13696                     // fixed.  Allocate the memory and call the constructor.
13697
13698                     // Note: We cannot add a peep to avoid use of temp here
13699                     // because we don't have enough interference info to detect when
13700                     // sources and destination interfere, for example: s = new S(ref);
13701
13702                     // TODO: We should find the correct place to introduce a general
13703                     // reverse copy prop for struct return values from newobj or
13704                     // any function returning structs.
13705
13706                     /* get a temporary for the new object */
13707                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
13708                     if (compDonotInline())
13709                     {
13710                         // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
13711                         assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
13712                         return;
13713                     }
13714
13715                     // In the value class case we only need clsHnd for size calcs.
13716                     //
13717                     // The lookup of the code pointer will be handled by CALL in this case
13718                     if (clsFlags & CORINFO_FLG_VALUECLASS)
13719                     {
13720                         if (compIsForInlining())
13721                         {
13722                             // If value class has GC fields, inform the inliner. It may choose to
13723                             // bail out on the inline.
13724                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13725                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
13726                             {
13727                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
13728                                 if (compInlineResult->IsFailure())
13729                                 {
13730                                     return;
13731                                 }
13732
13733                                 // Do further notification in the case where the call site is rare;
13734                                 // some policies do not track the relative hotness of call sites for
13735                                 // "always" inline cases.
13736                                 if (impInlineInfo->iciBlock->isRunRarely())
13737                                 {
13738                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
13739                                     if (compInlineResult->IsFailure())
13740                                     {
13741                                         return;
13742                                     }
13743                                 }
13744                             }
13745                         }
13746
13747                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
13748                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
13749
13750                         if (impIsPrimitive(jitTyp))
13751                         {
13752                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
13753                         }
13754                         else
13755                         {
13756                             // The local variable itself is the allocated space.
13757                             // Here we need the unsafe value cls check, since the address of the struct is taken for further use
13758                             // and is potentially exploitable.
13759                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
13760                         }
13761                         if (compIsForInlining() || fgStructTempNeedsExplicitZeroInit(lvaTable + lclNum, block))
13762                         {
13763                             // Append a tree to zero-out the temp
13764                             newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
13765
13766                             newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
13767                                                            gtNewIconNode(0), // Value
13768                                                            size,             // Size
13769                                                            false,            // isVolatile
13770                                                            false);           // not copyBlock
13771                             impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
13772                         }
13773
13774                         // Obtain the address of the temp
13775                         newObjThisPtr =
13776                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
13777                     }
13778                     else
13779                     {
13780 #ifdef FEATURE_READYTORUN_COMPILER
13781                         if (opts.IsReadyToRun())
13782                         {
13783                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
13784                             usingReadyToRunHelper = (op1 != nullptr);
13785                         }
13786
13787                         if (!usingReadyToRunHelper)
13788 #endif
13789                         {
13790                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
13791                             if (op1 == nullptr)
13792                             { // compDonotInline()
13793                                 return;
13794                             }
13795
13796                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13797                             // and the newfast call with a single call to a dynamic R2R cell that will:
13798                             //      1) Load the context
13799                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
13800                             //      stub
13801                             //      3) Allocate and return the new object
13802                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13803
13804                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
13805                                                     resolvedToken.hClass, TYP_REF, op1);
13806                         }
13807
13808                         // Remember that this basic block contains 'new' of an object
13809                         block->bbFlags |= BBF_HAS_NEWOBJ;
13810                         optMethodFlags |= OMF_HAS_NEWOBJ;
13811
13812                         // Append the assignment to the temp/local. Don't need to spill
13813                         // at all as we are just calling an EE-Jit helper which can only
13814                         // cause an (async) OutOfMemoryException.
13815
13816                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
13817                         // to a temp. Note that the pattern "temp = allocObj" is required
13818                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
13819                         // without exhaustive walk over all expressions.
13820
13821                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
13822
13823                         assert(lvaTable[lclNum].lvSingleDef == 0);
13824                         lvaTable[lclNum].lvSingleDef = 1;
13825                         JITDUMP("Marked V%02u as a single def local\n", lclNum);
13826                         lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
13827
13828                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
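                        // Sketch of the IR appended for this (non-value-class) path, assuming the
                        // non-R2R helper lookup above:
                        //
                        //     tmpN = ALLOCOBJ(newHelper, clsHnd)
                        //
                        // newObjThisPtr (the TYP_REF local tmpN) then becomes the 'this' argument of
                        // the constructor call imported at the CALL label below.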
13829                     }
13830                 }
13831                 goto CALL;
13832
13833             case CEE_CALLI:
13834
13835                 /* CALLI does not respond to CONSTRAINED */
13836                 prefixFlags &= ~PREFIX_CONSTRAINED;
13837
13838                 if (compIsForInlining())
13839                 {
13840                     // CALLI doesn't have a method handle, so assume the worst.
13841                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13842                     {
13843                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
13844                         return;
13845                     }
13846                 }
13847
13848             // fall through
13849
13850             case CEE_CALLVIRT:
13851             case CEE_CALL:
13852
13853                 // We can't call getCallInfo on the token from a CALLI, but we need it in
13854                 // many other places.  We unfortunately embed that knowledge here.
13855                 if (opcode != CEE_CALLI)
13856                 {
13857                     _impResolveToken(CORINFO_TOKENKIND_Method);
13858
13859                     eeGetCallInfo(&resolvedToken,
13860                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
13861                                   // this is how impImportCall invokes getCallInfo
13862                                   addVerifyFlag(
13863                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
13864                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
13865                                                                        : CORINFO_CALLINFO_NONE)),
13866                                   &callInfo);
13867                 }
13868                 else
13869                 {
13870                     // Suppress uninitialized use warning.
13871                     memset(&resolvedToken, 0, sizeof(resolvedToken));
13872                     memset(&callInfo, 0, sizeof(callInfo));
13873
13874                     resolvedToken.token        = getU4LittleEndian(codeAddr);
13875                     resolvedToken.tokenContext = impTokenLookupContextHandle;
13876                     resolvedToken.tokenScope   = info.compScopeHnd;
13877                 }
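                    // (For CALLI the inline operand is a stand-alone signature token rather than a
                    // method token, which is why getCallInfo cannot be used here.)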
13878
13879             CALL: // memberRef should be set.
13880                 // newObjThisPtr should be set for CEE_NEWOBJ
13881
13882                 JITDUMP(" %08X", resolvedToken.token);
13883                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13884
13885                 bool newBBcreatedForTailcallStress;
13886
13887                 newBBcreatedForTailcallStress = false;
13888
13889                 if (compIsForInlining())
13890                 {
13891                     if (compDonotInline())
13892                     {
13893                         return;
13894                     }
13895                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13896                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13897                 }
13898                 else
13899                 {
13900                     if (compTailCallStress())
13901                     {
13902                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13903                         // Tail call stress only recognizes call+ret patterns and forces them to be
13904                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
13905                         // doesn't import the 'ret' opcode following the call into the basic block containing
13906                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
13907                         // is already checking that there is an opcode following the call and hence it is
13908                         // safe here to read the next opcode without a bounds check.
13909                         newBBcreatedForTailcallStress =
13910                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL (not a CEE_NEWOBJ), so don't
13911                                                              // make it jump to RET.
13912                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
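                        // In other words, under tail call stress an IL pattern such as "call X; ret"
                        // is treated as if the user had written "tail. call X; ret", provided the
                        // legality checks below succeed.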
13913
13914                         bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT);
13915                         if (newBBcreatedForTailcallStress && !hasTailPrefix && // User hasn't set "tail." prefix yet.
13916                             verCheckTailCallConstraint(opcode, &resolvedToken,
13917                                                        constraintCall ? &constrainedResolvedToken : nullptr,
13918                                                        true) // Is it legal to do tailcall?
13919                             )
13920                         {
13921                             CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod;
13922                             bool                  isVirtual         = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) ||
13923                                              (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE);
13924                             CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd;
13925                             if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd,
13926                                                               hasTailPrefix)) // Is it legal to do tailcall?
13927                             {
13928                                 // Stress the tailcall.
13929                                 JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13930                                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13931                             }
13932                         }
13933                     }
13934                 }
13935
13936                 // This is split up to avoid goto flow warnings.
13937                 bool isRecursive;
13938                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13939
13940                 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
13941                 // hence will not be considered for implicit tail calling.
13942                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13943                 {
13944                     if (compIsForInlining())
13945                     {
13946 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13947                         // Are we inlining at an implicit tail call site? If so then we can flag
13948                         // implicit tail call sites in the inline body. These call sites
13949                         // often end up in non BBJ_RETURN blocks, so only flag them when
13950                         // we're able to handle shared returns.
13951                         if (impInlineInfo->iciCall->IsImplicitTailCall())
13952                         {
13953                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13954                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13955                         }
13956 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13957                     }
13958                     else
13959                     {
13960                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13961                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13962                     }
13963                 }
13964
13965                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13966                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13967                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
13968
13969                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13970                 {
13971                     // All calls and delegates need a security callout.
13972                     // For delegates, this is the call to the delegate constructor, not the access check on the
13973                     // LD(virt)FTN.
13974                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13975                 }
13976
13977                 if (tiVerificationNeeded)
13978                 {
13979                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13980                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13981                                   &callInfo DEBUGARG(info.compFullName));
13982                 }
13983
13984                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13985                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13986                 if (compDonotInline())
13987                 {
13988                     // We do not check for failures after lvaGrabTemp; this is covered by the CoreCLR_13272 issue.
13989                     assert((callTyp == TYP_UNDEF) ||
13990                            (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
13991                     return;
13992                 }
13993
13994                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
13995                                                                        // have created a new BB after the "call"
13996                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
13997                 {
13998                     assert(!compIsForInlining());
13999                     goto RET;
14000                 }
14001
14002                 break;
14003
14004             case CEE_LDFLD:
14005             case CEE_LDSFLD:
14006             case CEE_LDFLDA:
14007             case CEE_LDSFLDA:
14008             {
14009
14010                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
14011                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
14012
14013                 /* Get the CP_Fieldref index */
14014                 assertImp(sz == sizeof(unsigned));
14015
14016                 _impResolveToken(CORINFO_TOKENKIND_Field);
14017
14018                 JITDUMP(" %08X", resolvedToken.token);
14019
14020                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
14021
14022                 GenTree*             obj     = nullptr;
14023                 typeInfo*            tiObj   = nullptr;
14024                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
14025
14026                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
14027                 {
14028                     tiObj         = &impStackTop().seTypeInfo;
14029                     StackEntry se = impPopStack();
14030                     objType       = se.seTypeInfo.GetClassHandle();
14031                     obj           = se.val;
14032
14033                     if (impIsThis(obj))
14034                     {
14035                         aflags |= CORINFO_ACCESS_THIS;
14036
14037                         // An optimization for Contextful classes:
14038                         // we unwrap the proxy when we have a 'this reference'
14039
14040                         if (info.compUnwrapContextful)
14041                         {
14042                             aflags |= CORINFO_ACCESS_UNWRAP;
14043                         }
14044                     }
14045                 }
14046
14047                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
14048
14049                 // Figure out the type of the member.  We always call canAccessField, so you always need this
14050                 // handle
14051                 CorInfoType ciType = fieldInfo.fieldType;
14052                 clsHnd             = fieldInfo.structType;
14053
14054                 lclTyp = JITtype2varType(ciType);
14055
14056 #ifdef _TARGET_AMD64_
14057                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
14058 #endif // _TARGET_AMD64_
14059
14060                 if (compIsForInlining())
14061                 {
14062                     switch (fieldInfo.fieldAccessor)
14063                     {
14064                         case CORINFO_FIELD_INSTANCE_HELPER:
14065                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14066                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
14067                         case CORINFO_FIELD_STATIC_TLS:
14068
14069                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
14070                             return;
14071
14072                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14073                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14074                             /* We may be able to inline the field accessors in specific instantiations of generic
14075                              * methods */
14076                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
14077                             return;
14078
14079                         default:
14080                             break;
14081                     }
14082
14083                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
14084                         clsHnd)
14085                     {
14086                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
14087                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
14088                         {
14089                             // Loading a static valuetype field usually will cause a JitHelper to be called
14090                             // for the static base. This will bloat the code.
14091                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
14092
14093                             if (compInlineResult->IsFailure())
14094                             {
14095                                 return;
14096                             }
14097                         }
14098                     }
14099                 }
14100
14101                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
14102                 if (isLoadAddress)
14103                 {
14104                     tiRetVal.MakeByRef();
14105                 }
14106                 else
14107                 {
14108                     tiRetVal.NormaliseForStack();
14109                 }
14110
14111                 // Perform this check always to ensure that we get field access exceptions even with
14112                 // SkipVerification.
14113                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
14114
14115                 if (tiVerificationNeeded)
14116                 {
14117                     // You can also pass the unboxed struct to LDFLD
14118                     BOOL bAllowPlainValueTypeAsThis = FALSE;
14119                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
14120                     {
14121                         bAllowPlainValueTypeAsThis = TRUE;
14122                     }
14123
14124                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
14125
14126                     // If we're doing this on a heap object or from a 'safe' byref
14127                     // then the result is a safe byref too
14128                     if (isLoadAddress) // load address
14129                     {
14130                         if (fieldInfo.fieldFlags &
14131                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
14132                         {
14133                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
14134                             {
14135                                 tiRetVal.SetIsPermanentHomeByRef();
14136                             }
14137                         }
14138                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
14139                         {
14140                             // ldflda of a byref is safe if done on a gc object or on a
14141                             // safe byref
14142                             tiRetVal.SetIsPermanentHomeByRef();
14143                         }
14144                     }
14145                 }
14146                 else
14147                 {
14148                     // tiVerificationNeeded is false.
14149                     // Raise InvalidProgramException if static load accesses non-static field
14150                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14151                     {
14152                         BADCODE("static access on an instance field");
14153                     }
14154                 }
14155
14156                 // We are using ldfld/a on a static field. We allow it, but we still need to evaluate obj for its side effects.
14157                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14158                 {
14159                     if (obj->gtFlags & GTF_SIDE_EFFECT)
14160                     {
14161                         obj = gtUnusedValNode(obj);
14162                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14163                     }
14164                     obj = nullptr;
14165                 }
14166
14167                 /* Preserve 'small' int types */
14168                 if (!varTypeIsSmall(lclTyp))
14169                 {
14170                     lclTyp = genActualType(lclTyp);
14171                 }
14172
14173                 bool usesHelper = false;
14174
14175                 switch (fieldInfo.fieldAccessor)
14176                 {
14177                     case CORINFO_FIELD_INSTANCE:
14178 #ifdef FEATURE_READYTORUN_COMPILER
14179                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
14180 #endif
14181                     {
14182                         obj = impCheckForNullPointer(obj);
14183
14184                         // If the object is a struct, what we really want is
14185                         // for the field to operate on the address of the struct.
14186                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
14187                         {
14188                             assert(opcode == CEE_LDFLD && objType != nullptr);
14189
14190                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
14191                         }
14192
14193                         /* Create the data member node */
14194                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
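                        // e.g. "ldfld int32 Foo::bar" on an object reference yields a FIELD<int> node
                        // referencing 'obj' at fieldInfo.offset ('Foo' and 'bar' are illustrative names only).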
14195
14196 #ifdef FEATURE_READYTORUN_COMPILER
14197                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14198                         {
14199                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14200                         }
14201 #endif
14202
14203                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14204
14205                         if (fgAddrCouldBeNull(obj))
14206                         {
14207                             op1->gtFlags |= GTF_EXCEPT;
14208                         }
14209
14210                         // If gtFldObj is a BYREF then our target is a value class and
14211                         // it could point anywhere, for example a boxed class static int
14212                         if (obj->gtType == TYP_BYREF)
14213                         {
14214                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
14215                         }
14216
14217                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14218                         if (StructHasOverlappingFields(typeFlags))
14219                         {
14220                             op1->gtField.gtFldMayOverlap = true;
14221                         }
14222
14223                         // wrap it in an address-of operator if necessary
14224                         if (isLoadAddress)
14225                         {
14226                             op1 = gtNewOperNode(GT_ADDR,
14227                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
14228                         }
14229                         else
14230                         {
14231                             if (compIsForInlining() &&
14232                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
14233                                                                                    impInlineInfo->inlArgInfo))
14234                             {
14235                                 impInlineInfo->thisDereferencedFirst = true;
14236                             }
14237                         }
14238                     }
14239                     break;
14240
14241                     case CORINFO_FIELD_STATIC_TLS:
14242 #ifdef _TARGET_X86_
14243                         // Legacy TLS access is implemented as an intrinsic on x86 only
14244
14245                         /* Create the data member node */
14246                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14247                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14248
14249                         if (isLoadAddress)
14250                         {
14251                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
14252                         }
14253                         break;
14254 #else
14255                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14256
14257                         __fallthrough;
14258 #endif
14259
14260                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
14261                     case CORINFO_FIELD_INSTANCE_HELPER:
14262                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14263                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14264                                                clsHnd, nullptr);
14265                         usesHelper = true;
14266                         break;
14267
14268                     case CORINFO_FIELD_STATIC_ADDRESS:
14269                         // Replace static read-only fields with constant if possible
14270                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
14271                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
14272                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
14273                         {
14274                             CorInfoInitClassResult initClassResult =
14275                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
14276                                                             impTokenLookupContextHandle);
14277
14278                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
14279                             {
14280                                 void** pFldAddr = nullptr;
14281                                 void*  fldAddr =
14282                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
14283
14284                                 // We should always be able to access this static's address directly
14285                                 assert(pFldAddr == nullptr);
14286
14287                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
14288                                 goto FIELD_DONE;
14289                             }
14290                         }
14291
14292                         __fallthrough;
14293
14294                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14295                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14296                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14297                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14298                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14299                                                          lclTyp);
14300                         break;
14301
14302                     case CORINFO_FIELD_INTRINSIC_ZERO:
14303                     {
14304                         assert(aflags & CORINFO_ACCESS_GET);
14305                         op1 = gtNewIconNode(0, lclTyp);
14306                         goto FIELD_DONE;
14307                     }
14308                     break;
14309
14310                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
14311                     {
14312                         assert(aflags & CORINFO_ACCESS_GET);
14313
14314                         LPVOID         pValue;
14315                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
14316                         op1                = gtNewStringLiteralNode(iat, pValue);
14317                         goto FIELD_DONE;
14318                     }
14319                     break;
14320
14321                     case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
14322                     {
14323                         assert(aflags & CORINFO_ACCESS_GET);
14324 #if BIGENDIAN
14325                         op1 = gtNewIconNode(0, lclTyp);
14326 #else
14327                         op1                     = gtNewIconNode(1, lclTyp);
14328 #endif
14329                         goto FIELD_DONE;
14330                     }
14331                     break;
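                    // The three INTRINSIC field kinds above are pseudo-fields that are folded to
                    // constants at import time: zero, the empty string literal, and the target
                    // endianness (1 on little-endian targets).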
14332
14333                     default:
14334                         assert(!"Unexpected fieldAccessor");
14335                 }
14336
14337                 if (!isLoadAddress)
14338                 {
14339
14340                     if (prefixFlags & PREFIX_VOLATILE)
14341                     {
14342                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
14343                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14344
14345                         if (!usesHelper)
14346                         {
14347                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
14348                                    (op1->OperGet() == GT_OBJ));
14349                             op1->gtFlags |= GTF_IND_VOLATILE;
14350                         }
14351                     }
14352
14353                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14354                     {
14355                         if (!usesHelper)
14356                         {
14357                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
14358                                    (op1->OperGet() == GT_OBJ));
14359                             op1->gtFlags |= GTF_IND_UNALIGNED;
14360                         }
14361                     }
14362                 }
14363
14364                 /* Check if the class needs explicit initialization */
14365
14366                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14367                 {
14368                     GenTree* helperNode = impInitClass(&resolvedToken);
14369                     if (compDonotInline())
14370                     {
14371                         return;
14372                     }
14373                     if (helperNode != nullptr)
14374                     {
14375                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14376                     }
14377                 }
14378
14379             FIELD_DONE:
14380                 impPushOnStack(op1, tiRetVal);
14381             }
14382             break;
14383
14384             case CEE_STFLD:
14385             case CEE_STSFLD:
14386             {
14387
14388                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
14389
14390                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
14391
14392                 /* Get the CP_Fieldref index */
14393
14394                 assertImp(sz == sizeof(unsigned));
14395
14396                 _impResolveToken(CORINFO_TOKENKIND_Field);
14397
14398                 JITDUMP(" %08X", resolvedToken.token);
14399
14400                 int       aflags = CORINFO_ACCESS_SET;
14401                 GenTree*  obj    = nullptr;
14402                 typeInfo* tiObj  = nullptr;
14403                 typeInfo  tiVal;
14404
14405                 /* Pull the value from the stack */
14406                 StackEntry se = impPopStack();
14407                 op2           = se.val;
14408                 tiVal         = se.seTypeInfo;
14409                 clsHnd        = tiVal.GetClassHandle();
14410
14411                 if (opcode == CEE_STFLD)
14412                 {
14413                     tiObj = &impStackTop().seTypeInfo;
14414                     obj   = impPopStack().val;
14415
14416                     if (impIsThis(obj))
14417                     {
14418                         aflags |= CORINFO_ACCESS_THIS;
14419
14420                         // An optimization for Contextful classes:
14421                         // we unwrap the proxy when we have a 'this reference'
14422
14423                         if (info.compUnwrapContextful)
14424                         {
14425                             aflags |= CORINFO_ACCESS_UNWRAP;
14426                         }
14427                     }
14428                 }
14429
14430                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
14431
14432                 // Figure out the type of the member.  We always call canAccessField, so you always need this
14433                 // handle
14434                 CorInfoType ciType = fieldInfo.fieldType;
14435                 fieldClsHnd        = fieldInfo.structType;
14436
14437                 lclTyp = JITtype2varType(ciType);
14438
14439                 if (compIsForInlining())
14440                 {
14441                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the
14442                      * GC heap, or a per-inst static? */

14443
14444                     switch (fieldInfo.fieldAccessor)
14445                     {
14446                         case CORINFO_FIELD_INSTANCE_HELPER:
14447                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14448                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
14449                         case CORINFO_FIELD_STATIC_TLS:
14450
14451                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
14452                             return;
14453
14454                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14455                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14456                             /* We may be able to inline the field accessors in specific instantiations of generic
14457                              * methods */
14458                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
14459                             return;
14460
14461                         default:
14462                             break;
14463                     }
14464                 }
14465
14466                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
14467
14468                 if (tiVerificationNeeded)
14469                 {
14470                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
14471                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
14472                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
14473                 }
14474                 else
14475                 {
14476                     // tiVerificationNeeded is false.
14477                     // Raise InvalidProgramException if static store accesses non-static field
14478                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14479                     {
14480                         BADCODE("static access on an instance field");
14481                     }
14482                 }
14483
14484                 // We are using stfld on a static field.
14485                 // We allow it, but need to eval any side-effects for obj
14486                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14487                 {
14488                     if (obj->gtFlags & GTF_SIDE_EFFECT)
14489                     {
14490                         obj = gtUnusedValNode(obj);
14491                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14492                     }
14493                     obj = nullptr;
14494                 }
14495
14496                 /* Preserve 'small' int types */
14497                 if (!varTypeIsSmall(lclTyp))
14498                 {
14499                     lclTyp = genActualType(lclTyp);
14500                 }
14501
14502                 switch (fieldInfo.fieldAccessor)
14503                 {
14504                     case CORINFO_FIELD_INSTANCE:
14505 #ifdef FEATURE_READYTORUN_COMPILER
14506                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
14507 #endif
14508                     {
14509                         obj = impCheckForNullPointer(obj);
14510
14511                         /* Create the data member node */
14512                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14513                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14514                         if (StructHasOverlappingFields(typeFlags))
14515                         {
14516                             op1->gtField.gtFldMayOverlap = true;
14517                         }
14518
14519 #ifdef FEATURE_READYTORUN_COMPILER
14520                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14521                         {
14522                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14523                         }
14524 #endif
14525
14526                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14527
14528                         if (fgAddrCouldBeNull(obj))
14529                         {
14530                             op1->gtFlags |= GTF_EXCEPT;
14531                         }
14532
14533                         // If gtFldObj is a BYREF then our target is a value class and
14534                         // it could point anywhere, for example a boxed class static int
14535                         if (obj->gtType == TYP_BYREF)
14536                         {
14537                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
14538                         }
14539
14540                         if (compIsForInlining() &&
14541                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
14542                         {
14543                             impInlineInfo->thisDereferencedFirst = true;
14544                         }
14545                     }
14546                     break;
14547
14548                     case CORINFO_FIELD_STATIC_TLS:
14549 #ifdef _TARGET_X86_
14550                         // Legacy TLS access is implemented as intrinsic on x86 only
14551
14552                         /* Create the data member node */
14553                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14554                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14555
14556                         break;
14557 #else
14558                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14559
14560                         __fallthrough;
14561 #endif
14562
14563                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
14564                     case CORINFO_FIELD_INSTANCE_HELPER:
14565                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14566                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14567                                                clsHnd, op2);
14568                         goto SPILL_APPEND;
14569
14570                     case CORINFO_FIELD_STATIC_ADDRESS:
14571                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14572                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14573                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14574                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14575                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14576                                                          lclTyp);
14577                         break;
14578
14579                     default:
14580                         assert(!"Unexpected fieldAccessor");
14581                 }
14582
14583                 // Create the member assignment, unless we have a struct.
14584                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
14585                 bool deferStructAssign = varTypeIsStruct(lclTyp);
14586
14587                 if (!deferStructAssign)
14588                 {
14589                     if (prefixFlags & PREFIX_VOLATILE)
14590                     {
14591                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14592                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
14593                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14594                         op1->gtFlags |= GTF_IND_VOLATILE;
14595                     }
14596                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14597                     {
14598                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14599                         op1->gtFlags |= GTF_IND_UNALIGNED;
14600                     }
14601
14602                     /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is bypassed
14603                        (full-trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree
14604                        union during importation and reads from the union as if it were a long during code generation.
14605                        Though this can potentially read garbage, it may happen to work correctly.
14606
14607                        This code pattern is generated by the Dev10 MC++ compiler when storing to fields compiled with
14608                        the /O2 switch (the default when compiling retail configs in Dev10), and a customer app has
14609                        taken a dependency on it. To be backward compatible, we explicitly add an upward cast here so
14610                        that it always works correctly.
14611
14612                        Note that this is limited to x86 alone, as there is no back-compat to be addressed for the ARM
14613                        JIT for V4.0.
14614                     */
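                          // Hypothetical IL sketch of the pattern described above (the field name is
                          // invented for illustration only):
                          //     ldc.i4  2
                          //     stsfld  int64 SomeClass::someField   // i4 constant stored to an i8 field
                          // The explicit cast added below widens such constants so the store is well-typed.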
14615                     CLANG_FORMAT_COMMENT_ANCHOR;
14616
14617 #ifndef _TARGET_64BIT_
14618                     // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
14619                     // generated for ARM as well as x86, so the following IR will be accepted:
14620                     //     *  STMT      void
14621                     //         |  /--*  CNS_INT   int    2
14622                     //         \--*  ASG       long
14623                     //            \--*  CLS_VAR   long
14624
14625                     if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
14626                         varTypeIsLong(op1->TypeGet()))
14627                     {
14628                         op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14629                     }
14630 #endif
14631
14632 #ifdef _TARGET_64BIT_
14633                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
14634                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
14635                     {
14636                         op2->gtType = TYP_I_IMPL;
14637                     }
14638                     else
14639                     {
14640                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
14641                         //
14642                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
14643                         {
14644                             op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
14645                         }
14646                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
14647                         //
14648                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
14649                         {
14650                             op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
14651                         }
14652                     }
14653 #endif
14654
14655                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
14656                     // We insert a cast to the dest 'op1' type
14657                     //
14658                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
14659                         varTypeIsFloating(op2->gtType))
14660                     {
14661                         op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14662                     }
14663
14664                     op1 = gtNewAssignNode(op1, op2);
14665
14666                     /* Mark the expression as containing an assignment */
14667
14668                     op1->gtFlags |= GTF_ASG;
14669                 }
14670
14671                 /* Check if the class needs explicit initialization */
14672
14673                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14674                 {
14675                     GenTree* helperNode = impInitClass(&resolvedToken);
14676                     if (compDonotInline())
14677                     {
14678                         return;
14679                     }
14680                     if (helperNode != nullptr)
14681                     {
14682                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14683                     }
14684                 }
14685
14686                 /* stfld can interfere with value classes (consider the sequence
14687                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
14688                    spill all value class references from the stack. */
14689
14690                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
14691                 {
14692                     assert(tiObj);
14693
14694                     if (impIsValueType(tiObj))
14695                     {
14696                         impSpillEvalStack();
14697                     }
14698                     else
14699                     {
14700                         impSpillValueClasses();
14701                     }
14702                 }
14703
14704                 /* Spill any refs to the same member from the stack */
14705
14706                 impSpillLclRefs((ssize_t)resolvedToken.hField);
14707
14708                 /* stsfld also interferes with indirect accesses (for aliased
14709                    statics) and calls. But don't need to spill other statics
14710                    as we have explicitly spilled this particular static field. */
14711
14712                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
14713
14714                 if (deferStructAssign)
14715                 {
14716                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
14717                 }
14718             }
14719                 goto APPEND;
14720
14721             case CEE_NEWARR:
14722             {
14723
14724                 /* Get the class type index operand */
14725
14726                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
14727
14728                 JITDUMP(" %08X", resolvedToken.token);
14729
14730                 if (!opts.IsReadyToRun())
14731                 {
14732                     // Need to restore array classes before creating array objects on the heap
14733                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14734                     if (op1 == nullptr)
14735                     { // compDonotInline()
14736                         return;
14737                     }
14738                 }
14739
14740                 if (tiVerificationNeeded)
14741                 {
14742                     // As per ECMA, the 'numElems' specified can be either an int32 or a native int.
14743                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
14744
14745                     CORINFO_CLASS_HANDLE elemTypeHnd;
14746                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
14747                     Verify(elemTypeHnd == nullptr ||
14748                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
14749                            "array of byref-like type");
14750                 }
14751
14752                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14753
14754                 accessAllowedResult =
14755                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14756                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14757
14758                 /* Form the arglist: array class handle, size */
14759                 op2 = impPopStack().val;
14760                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14761
14762 #ifdef _TARGET_64BIT_
14763                 // The array helper takes a native int for array length.
14764                 // So if we have an int, explicitly extend it to be a native int.
14765                 if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
14766                 {
14767                     if (op2->IsIntegralConst())
14768                     {
14769                         op2->gtType = TYP_I_IMPL;
14770                     }
14771                     else
14772                     {
14773                         bool isUnsigned = false;
14774                         op2             = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL);
14775                     }
14776                 }
14777 #endif // _TARGET_64BIT_
14778
14779 #ifdef FEATURE_READYTORUN_COMPILER
14780                 if (opts.IsReadyToRun())
14781                 {
14782                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
14783                                                     gtNewArgList(op2));
14784                     usingReadyToRunHelper = (op1 != nullptr);
14785
14786                     if (!usingReadyToRunHelper)
14787                     {
14788                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14789                         // and the newarr call with a single call to a dynamic R2R cell that will:
14790                         //      1) Load the context
14791                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14792                         //      3) Allocate the new array
14793                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14794
14795                         // Need to restore array classes before creating array objects on the heap
14796                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14797                         if (op1 == nullptr)
14798                         { // compDonotInline()
14799                             return;
14800                         }
14801                     }
14802                 }
14803
14804                 if (!usingReadyToRunHelper)
14805 #endif
14806                 {
14807                     args = gtNewArgList(op1, op2);
14808
14809                     /* Create a call to 'new' */
14810
14811                     // Note that this only works for shared generic code because the same helper is used for all
14812                     // reference array types
14813                     op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
14814                 }
14815
14816                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
14817
14818                 /* Remember that this basic block contains a 'new' of a single-dimension (SD) array */
14819
14820                 block->bbFlags |= BBF_HAS_NEWARRAY;
14821                 optMethodFlags |= OMF_HAS_NEWARRAY;
14822
14823                 /* Push the result of the call on the stack */
14824
14825                 impPushOnStack(op1, tiRetVal);
14826
14827                 callTyp = TYP_REF;
14828             }
14829             break;
14830
14831             case CEE_LOCALLOC:
14832                 if (tiVerificationNeeded)
14833                 {
14834                     Verify(false, "bad opcode");
14835                 }
14836
14837                 // We don't allow locallocs inside handlers
14838                 if (block->hasHndIndex())
14839                 {
14840                     BADCODE("Localloc can't be inside handler");
14841                 }
14842
14843                 // Get the size to allocate
14844
14845                 op2 = impPopStack().val;
14846                 assertImp(genActualTypeIsIntOrI(op2->gtType));
14847
14848                 if (verCurrentState.esStackDepth != 0)
14849                 {
14850                     BADCODE("Localloc can only be used when the stack is empty");
14851                 }
14852
14853                 // If the localloc is not in a loop and its size is a small constant,
14854                 // create a new local var of TYP_BLK and return its address.
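                      // Illustrative C# (hypothetical) that hits this path: "byte* p = stackalloc byte[8];"
                      // outside any loop -- a small constant size lets the localloc become a fixed TYP_BLK local.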
14855                 {
14856                     bool convertedToLocal = false;
14857
14858                     // Need to aggressively fold here, as even fixed-size locallocs
14859                     // will have casts in the way.
14860                     op2 = gtFoldExpr(op2);
14861
14862                     if (op2->IsIntegralConst())
14863                     {
14864                         const ssize_t allocSize = op2->AsIntCon()->IconValue();
14865
14866                         if (allocSize == 0)
14867                         {
14868                             // Result is nullptr
14869                             JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
14870                             op1              = gtNewIconNode(0, TYP_I_IMPL);
14871                             convertedToLocal = true;
14872                         }
14873                         else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
14874                         {
14875                             // Get the size threshold for local conversion
14876                             ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
14877
14878 #ifdef DEBUG
14879                             // Optionally allow this to be modified
14880                             maxSize = JitConfig.JitStackAllocToLocalSize();
14881 #endif // DEBUG
14882
14883                             if (allocSize <= maxSize)
14884                             {
14885                                 const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
14886                                 JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
14887                                         stackallocAsLocal);
14888                                 lvaTable[stackallocAsLocal].lvType           = TYP_BLK;
14889                                 lvaTable[stackallocAsLocal].lvExactSize      = (unsigned)allocSize;
14890                                 lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
14891                                 op1              = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
14892                                 op1              = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
14893                                 convertedToLocal = true;
14894
14895                                 if (!this->opts.compDbgEnC)
14896                                 {
14897                                     // Ensure we have stack security for this method.
14898                                     // Reorder layout since the converted localloc is treated as an unsafe buffer.
14899                                     setNeedsGSSecurityCookie();
14900                                     compGSReorderStackLayout = true;
14901                                 }
14902                             }
14903                         }
14904                     }
14905
14906                     if (!convertedToLocal)
14907                     {
14908                         // Bail out if inlining and the localloc was not converted.
14909                         //
14910                         // Note we might consider allowing the inline, if the call
14911                         // site is not in a loop.
14912                         if (compIsForInlining())
14913                         {
14914                             InlineObservation obs = op2->IsIntegralConst()
14915                                                         ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
14916                                                         : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
14917                             compInlineResult->NoteFatal(obs);
14918                             return;
14919                         }
14920
14921                         op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14922                         // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14923                         op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14924
14925                         // Ensure we have stack security for this method.
14926                         setNeedsGSSecurityCookie();
14927
14928                         /* The FP register may not be back to the original value at the end
14929                            of the method, even if the frame size is 0, as localloc may
14930                            have modified it. So we will HAVE to reset it */
14931                         compLocallocUsed = true;
14932                     }
14933                     else
14934                     {
14935                         compLocallocOptimized = true;
14936                     }
14937                 }
14938
14939                 impPushOnStack(op1, tiRetVal);
14940                 break;
14941
14942             case CEE_ISINST:
14943             {
14944                 /* Get the type token */
14945                 assertImp(sz == sizeof(unsigned));
14946
14947                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14948
14949                 JITDUMP(" %08X", resolvedToken.token);
14950
14951                 if (!opts.IsReadyToRun())
14952                 {
14953                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14954                     if (op2 == nullptr)
14955                     { // compDonotInline()
14956                         return;
14957                     }
14958                 }
14959
14960                 if (tiVerificationNeeded)
14961                 {
14962                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14963                     // Even if this is a value class, we know it is boxed.
14964                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14965                 }
14966                 accessAllowedResult =
14967                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14968                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14969
14970                 op1 = impPopStack().val;
14971
14972                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
14973
14974                 if (optTree != nullptr)
14975                 {
14976                     impPushOnStack(optTree, tiRetVal);
14977                 }
14978                 else
14979                 {
14980
14981 #ifdef FEATURE_READYTORUN_COMPILER
14982                     if (opts.IsReadyToRun())
14983                     {
14984                         GenTreeCall* opLookup =
14985                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14986                                                       gtNewArgList(op1));
14987                         usingReadyToRunHelper = (opLookup != nullptr);
14988                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
14989
14990                         if (!usingReadyToRunHelper)
14991                         {
14992                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14993                             // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14994                             //      1) Load the context
14995                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
14996                             //      stub
14997                             //      3) Perform the 'is instance' check on the input object
14998                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14999
15000                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15001                             if (op2 == nullptr)
15002                             { // compDonotInline()
15003                                 return;
15004                             }
15005                         }
15006                     }
15007
15008                     if (!usingReadyToRunHelper)
15009 #endif
15010                     {
15011                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
15012                     }
15013                     if (compDonotInline())
15014                     {
15015                         return;
15016                     }
15017
15018                     impPushOnStack(op1, tiRetVal);
15019                 }
15020                 break;
15021             }
15022
15023             case CEE_REFANYVAL:
15024
15025                 // get the class handle and make an ICON node out of it
15026
15027                 _impResolveToken(CORINFO_TOKENKIND_Class);
15028
15029                 JITDUMP(" %08X", resolvedToken.token);
15030
15031                 op2 = impTokenToHandle(&resolvedToken);
15032                 if (op2 == nullptr)
15033                 { // compDonotInline()
15034                     return;
15035                 }
15036
15037                 if (tiVerificationNeeded)
15038                 {
15039                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
15040                            "need refany");
15041                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
15042                 }
15043
15044                 op1 = impPopStack().val;
15045                 // make certain it is normalized;
15046                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
15047
15048                 // Call helper GETREFANY(classHandle, op1);
15049                 args = gtNewArgList(op2, op1);
15050                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
15051
15052                 impPushOnStack(op1, tiRetVal);
15053                 break;
15054
15055             case CEE_REFANYTYPE:
15056
15057                 if (tiVerificationNeeded)
15058                 {
15059                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
15060                            "need refany");
15061                 }
15062
15063                 op1 = impPopStack().val;
15064
15065                 // make certain it is normalized;
15066                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
15067
15068                 if (op1->gtOper == GT_OBJ)
15069                 {
15070                     // Get the address of the refany
15071                     op1 = op1->gtOp.gtOp1;
15072
15073                     // Fetch the type from the correct slot
15074                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15075                                         gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL));
15076                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
15077                 }
15078                 else
15079                 {
15080                     assertImp(op1->gtOper == GT_MKREFANY);
15081
15082                     // The pointer may have side-effects
15083                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
15084                     {
15085                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
15086 #ifdef DEBUG
15087                         impNoteLastILoffs();
15088 #endif
15089                     }
15090
15091                     // We already have the class handle
15092                     op1 = op1->gtOp.gtOp2;
15093                 }
15094
15095                 // convert native TypeHandle to RuntimeTypeHandle
15096                 {
15097                     GenTreeArgList* helperArgs = gtNewArgList(op1);
15098
15099                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT,
15100                                               helperArgs);
15101
15102                     // The handle struct is returned in a register
15103                     op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
15104
15105                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
15106                 }
15107
15108                 impPushOnStack(op1, tiRetVal);
15109                 break;
15110
15111             case CEE_LDTOKEN:
15112             {
15113                 /* Get the Class index */
15114                 assertImp(sz == sizeof(unsigned));
15115                 lastLoadToken = codeAddr;
15116                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
15117
15118                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
15119
15120                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15121                 if (op1 == nullptr)
15122                 { // compDonotInline()
15123                     return;
15124                 }
15125
15126                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
15127                 assert(resolvedToken.hClass != nullptr);
15128
15129                 if (resolvedToken.hMethod != nullptr)
15130                 {
15131                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
15132                 }
15133                 else if (resolvedToken.hField != nullptr)
15134                 {
15135                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
15136                 }
15137
15138                 GenTreeArgList* helperArgs = gtNewArgList(op1);
15139
15140                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
15141
15142                 // The handle struct is returned in a register
15143                 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
15144
15145                 tiRetVal = verMakeTypeInfo(tokenType);
15146                 impPushOnStack(op1, tiRetVal);
15147             }
15148             break;
15149
15150             case CEE_UNBOX:
15151             case CEE_UNBOX_ANY:
15152             {
15153                 /* Get the Class index */
15154                 assertImp(sz == sizeof(unsigned));
15155
15156                 _impResolveToken(CORINFO_TOKENKIND_Class);
15157
15158                 JITDUMP(" %08X", resolvedToken.token);
15159
15160                 BOOL runtimeLookup;
15161                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
15162                 if (op2 == nullptr)
15163                 {
15164                     assert(compDonotInline());
15165                     return;
15166                 }
15167
15168                 // Run this always so we can get access exceptions even with SkipVerification.
15169                 accessAllowedResult =
15170                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15171                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15172
15173                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
15174                 {
15175                     if (tiVerificationNeeded)
15176                     {
15177                         typeInfo tiUnbox = impStackTop().seTypeInfo;
15178                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
15179                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15180                         tiRetVal.NormaliseForStack();
15181                     }
15182                     JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
15183                     op1 = impPopStack().val;
15184                     goto CASTCLASS;
15185                 }
15186
15187                 /* Pop the object and create the unbox helper call */
15188                 /* You might think that for UNBOX_ANY we need to push a different */
15189                 /* (non-byref) type, but here we're making the tiRetVal that is used */
15190                 /* for the intermediate pointer which we then transfer onto the OBJ */
15191                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
15192                 if (tiVerificationNeeded)
15193                 {
15194                     typeInfo tiUnbox = impStackTop().seTypeInfo;
15195                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
15196
15197                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15198                     Verify(tiRetVal.IsValueClass(), "not value class");
15199                     tiRetVal.MakeByRef();
15200
15201                     // We always come from an objref, so this is a safe byref
15202                     tiRetVal.SetIsPermanentHomeByRef();
15203                     tiRetVal.SetIsReadonlyByRef();
15204                 }
15205
15206                 op1 = impPopStack().val;
15207                 assertImp(op1->gtType == TYP_REF);
15208
15209                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
15210                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
15211
15212                 // Check legality and profitability of inline expansion for unboxing.
15213                 const bool canExpandInline    = (helper == CORINFO_HELP_UNBOX);
15214                 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
15215
15216                 if (canExpandInline && shouldExpandInline)
15217                 {
15218                     JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
15219                     // we are doing normal unboxing
15220                     // inline the common case of the unbox helper
15221                     // UNBOX(exp) morphs into
15222                     // clone = pop(exp);
15223                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
15224                     // push(clone + TARGET_POINTER_SIZE)
15225                     //
15226                     GenTree* cloneOperand;
15227                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
15228                                        nullptr DEBUGARG("inline UNBOX clone1"));
15229                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
15230
15231                     GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
15232
15233                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
15234                                        nullptr DEBUGARG("inline UNBOX clone2"));
15235                     op2 = impTokenToHandle(&resolvedToken);
15236                     if (op2 == nullptr)
15237                     { // compDonotInline()
15238                         return;
15239                     }
15240                     args = gtNewArgList(op2, op1);
15241                     op1  = gtNewHelperCallNode(helper, TYP_VOID, args);
15242
15243                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
15244                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
15245
15246                     // QMARK nodes cannot reside on the evaluation stack. Because there
15247                     // may be other trees on the evaluation stack that side-effect the
15248                     // sources of the UNBOX operation we must spill the stack.
15249
15250                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
15251
15252                     // Create the address-expression to reference past the object header
15253                     // to the beginning of the value-type. Today this means adjusting
15254                     // past the base of the object's vtable field, which is pointer sized.
15255
15256                     op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
15257                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
15258                 }
15259                 else
15260                 {
15261                     JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
15262                             canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
15263
15264                     // Don't optimize, just call the helper and be done with it
15265                     args = gtNewArgList(op2, op1);
15266                     op1 =
15267                         gtNewHelperCallNode(helper,
15268                                             (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
15269                 }
15270
15271                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
15272                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
15273                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
15274                        );
15275
15276                 /*
15277                   ----------------------------------------------------------------------
15278                   | \ helper  |                         |                              |
15279                   |   \       |                         |                              |
15280                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
15281                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
15282                   | opcode  \ |                         |                              |
15283                   |---------------------------------------------------------------------
15284                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
15285                   |           |                         | push the BYREF to this local |
15286                   |---------------------------------------------------------------------
15287                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
15288                   |           | the BYREF               | For Linux when the           |
15289                   |           |                         |  struct is returned in two   |
15290                   |           |                         |  registers create a temp     |
15291                   |           |                         |  which address is passed to  |
15292                   |           |                         |  the unbox_nullable helper.  |
15293                   |---------------------------------------------------------------------
15294                 */
15295
15296                 if (opcode == CEE_UNBOX)
15297                 {
15298                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
15299                     {
15300                         // Unbox nullable helper returns a struct type.
15301                         // We need to spill it to a temp so that we can take the address of it.
15302                         // Here we need the unsafe value cls check, since the address of the struct is taken to be
15303                         // used further along and could potentially be exploitable.
15304
15305                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
15306                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
15307
15308                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15309                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15310                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
15311
15312                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15313                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
15314                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
15315                     }
15316
15317                     assert(op1->gtType == TYP_BYREF);
15318                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
15319                 }
15320                 else
15321                 {
15322                     assert(opcode == CEE_UNBOX_ANY);
15323
15324                     if (helper == CORINFO_HELP_UNBOX)
15325                     {
15326                         // Normal unbox helper returns a TYP_BYREF.
15327                         impPushOnStack(op1, tiRetVal);
15328                         oper = GT_OBJ;
15329                         goto OBJ;
15330                     }
15331
15332                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
15333
15334 #if FEATURE_MULTIREG_RET
15335
15336                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
15337                     {
15338                         // Unbox nullable helper returns a TYP_STRUCT.
15339                         // For the multi-reg case we need to spill it to a temp so that
15340                         // we can pass the address to the unbox_nullable jit helper.
15341
15342                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
15343                         lvaTable[tmp].lvIsMultiRegArg = true;
15344                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
15345
15346                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15347                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15348                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
15349
15350                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15351                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
15352                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
15353
15354                         // In this case the return value of the unbox helper is TYP_BYREF.
15355                         // Make sure the right type is placed on the operand type stack.
15356                         impPushOnStack(op1, tiRetVal);
15357
15358                         // Load the struct.
15359                         oper = GT_OBJ;
15360
15361                         assert(op1->gtType == TYP_BYREF);
15362                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
15363
15364                         goto OBJ;
15365                     }
15366                     else
15367
15368 #endif // FEATURE_MULTIREG_RET
15369
15370                     {
15371                         // If the struct is not register passable, we have it materialized in the RetBuf.
15372                         assert(op1->gtType == TYP_STRUCT);
15373                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15374                         assert(tiRetVal.IsValueClass());
15375                     }
15376                 }
15377
15378                 impPushOnStack(op1, tiRetVal);
15379             }
15380             break;
15381
15382             case CEE_BOX:
15383             {
15384                 /* Get the Class index */
15385                 assertImp(sz == sizeof(unsigned));
15386
15387                 _impResolveToken(CORINFO_TOKENKIND_Box);
15388
15389                 JITDUMP(" %08X", resolvedToken.token);
15390
15391                 if (tiVerificationNeeded)
15392                 {
15393                     typeInfo tiActual = impStackTop().seTypeInfo;
15394                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
15395
15396                     Verify(verIsBoxable(tiBox), "boxable type expected");
15397
15398                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
15399                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
15400                            "boxed type has unsatisfied class constraints");
15401
15402                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
15403
15404                     // Observation: the following code introduces a boxed value class on the stack, but,
15405                     // according to the ECMA spec, one would simply expect: tiRetVal =
15406                     // typeInfo(TI_REF,impGetObjectClass());
15407
15408                     // Push the result back on the stack,
15409                     // even if clsHnd is a value class we want the TI_REF
15410                     // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
15411                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
15412                 }
15413
15414                 accessAllowedResult =
15415                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15416                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15417
15418                 // Note BOX can be used on things that are not value classes, in which
15419                 // case we get a NOP.  However the verifier's view of the type on the
15420                 // stack changes (in generic code a 'T' becomes a 'boxed T')
15421                 if (!eeIsValueClass(resolvedToken.hClass))
15422                 {
15423                     JITDUMP("\n Importing BOX(refClass) as NOP\n");
15424                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
15425                     break;
15426                 }
15427
15428                 // Look ahead for unbox.any
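                      // Illustrative IL (hypothetical) for the peephole below: "box !!T; unbox.any !!T" with
                      // provably identical tokens round-trips the value, so both instructions reduce to a nop.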
15429                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
15430                 {
15431                     CORINFO_RESOLVED_TOKEN unboxResolvedToken;
15432
15433                     impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
15434
15435                     // See if the resolved tokens describe types that are equal.
15436                     const TypeCompareState compare =
15437                         info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
15438
15439                     // If so, box/unbox.any is a nop.
15440                     if (compare == TypeCompareState::Must)
15441                     {
15442                         JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
15443                         // Skip the next unbox.any instruction
15444                         sz += sizeof(mdToken) + 1;
15445                         break;
15446                     }
15447                 }
15448
15449                 impImportAndPushBox(&resolvedToken);
15450                 if (compDonotInline())
15451                 {
15452                     return;
15453                 }
15454             }
15455             break;
15456
15457             case CEE_SIZEOF:
15458
15459                 /* Get the Class index */
15460                 assertImp(sz == sizeof(unsigned));
15461
15462                 _impResolveToken(CORINFO_TOKENKIND_Class);
15463
15464                 JITDUMP(" %08X", resolvedToken.token);
15465
15466                 if (tiVerificationNeeded)
15467                 {
15468                     tiRetVal = typeInfo(TI_INT);
15469                 }
15470
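                      // The managed 'sizeof' becomes a jit-time constant; for example, sizeof of
                      // System.Int32 pushes the constant 4.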
15471                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
15472                 impPushOnStack(op1, tiRetVal);
15473                 break;
15474
15475             case CEE_CASTCLASS:
15476
15477                 /* Get the Class index */
15478
15479                 assertImp(sz == sizeof(unsigned));
15480
15481                 _impResolveToken(CORINFO_TOKENKIND_Casting);
15482
15483                 JITDUMP(" %08X", resolvedToken.token);
15484
15485                 if (!opts.IsReadyToRun())
15486                 {
15487                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15488                     if (op2 == nullptr)
15489                     { // compDonotInline()
15490                         return;
15491                     }
15492                 }
15493
15494                 if (tiVerificationNeeded)
15495                 {
15496                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
15497                     // box it
15498                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
15499                 }
15500
15501                 accessAllowedResult =
15502                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15503                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15504
15505                 op1 = impPopStack().val;
15506
15507             /* Pop the address and create the 'checked cast' helper call */
15508
15509             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
15510             // and op2 to contain code that creates the type handle corresponding to typeRef
15511             CASTCLASS:
15512             {
15513                 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
15514
15515                 if (optTree != nullptr)
15516                 {
15517                     impPushOnStack(optTree, tiRetVal);
15518                 }
15519                 else
15520                 {
15521
15522 #ifdef FEATURE_READYTORUN_COMPILER
15523                     if (opts.IsReadyToRun())
15524                     {
15525                         GenTreeCall* opLookup =
15526                             impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
15527                                                       gtNewArgList(op1));
15528                         usingReadyToRunHelper = (opLookup != nullptr);
15529                         op1                   = (usingReadyToRunHelper ? opLookup : op1);
15530
15531                         if (!usingReadyToRunHelper)
15532                         {
15533                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
15534                             // and the chkcastany call with a single call to a dynamic R2R cell that will:
15535                             //      1) Load the context
15536                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
15537                             //      stub
15538                             //      3) Check the object on the stack for the type-cast
15539                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15540
15541                             op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15542                             if (op2 == nullptr)
15543                             { // compDonotInline()
15544                                 return;
15545                             }
15546                         }
15547                     }
15548
15549                     if (!usingReadyToRunHelper)
15550 #endif
15551                     {
15552                         op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
15553                     }
15554                     if (compDonotInline())
15555                     {
15556                         return;
15557                     }
15558
15559                     /* Push the result back on the stack */
15560                     impPushOnStack(op1, tiRetVal);
15561                 }
15562             }
15563             break;
15564
15565             case CEE_THROW:
15566
15567                 if (compIsForInlining())
15568                 {
15569                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15570                     // TODO: Will this be too strict, given that we will inline many basic blocks?
15571                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15572
15573                     /* Do we have just the exception on the stack ?*/
15574
15575                     if (verCurrentState.esStackDepth != 1)
15576                     {
15577                         /* if not, just don't inline the method */
15578
15579                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
15580                         return;
15581                     }
15582                 }
15583
15584                 if (tiVerificationNeeded)
15585                 {
15586                     tiRetVal = impStackTop().seTypeInfo;
15587                     Verify(tiRetVal.IsObjRef(), "object ref expected");
15588                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15589                     {
15590                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
15591                     }
15592                 }
15593
15594                 block->bbSetRunRarely(); // any block with a throw is rare
15595                 /* Pop the exception object and create the 'throw' helper call */
15596
15597                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
15598
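                  // EVAL_APPEND: evaluate anything left on the IL stack purely for its side effects,
                  // then append the throw/rethrow helper call as a statement (the IL stack must be
                  // empty by the time we append).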
15599             EVAL_APPEND:
15600                 if (verCurrentState.esStackDepth > 0)
15601                 {
15602                     impEvalSideEffects();
15603                 }
15604
15605                 assert(verCurrentState.esStackDepth == 0);
15606
15607                 goto APPEND;
15608
15609             case CEE_RETHROW:
15610
15611                 assert(!compIsForInlining());
15612
15613                 if (info.compXcptnsCount == 0)
15614                 {
15615                     BADCODE("rethrow outside catch");
15616                 }
15617
15618                 if (tiVerificationNeeded)
15619                 {
15620                     Verify(block->hasHndIndex(), "rethrow outside catch");
15621                     if (block->hasHndIndex())
15622                     {
15623                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
15624                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
15625                         if (HBtab->HasFilter())
15626                         {
15627                             // we better be in the handler clause part, not the filter part
15628                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
15629                                    "rethrow in filter");
15630                         }
15631                     }
15632                 }
15633
15634                 /* Create the 'rethrow' helper call */
15635
15636                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
15637
15638                 goto EVAL_APPEND;
15639
15640             case CEE_INITOBJ:
15641
15642                 assertImp(sz == sizeof(unsigned));
15643
15644                 _impResolveToken(CORINFO_TOKENKIND_Class);
15645
15646                 JITDUMP(" %08X", resolvedToken.token);
15647
15648                 if (tiVerificationNeeded)
15649                 {
15650                     typeInfo tiTo    = impStackTop().seTypeInfo;
15651                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15652
15653                     Verify(tiTo.IsByRef(), "byref expected");
15654                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15655
15656                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15657                            "type operand incompatible with type of address");
15658                 }
15659
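                      // initobj is modeled as a zero-initializing block store: the destination is the
                      // popped address, the value is the constant 0, and the size is the class size.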
15660                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
15661                 op2  = gtNewIconNode(0);                                     // Value
15662                 op1  = impPopStack().val;                                    // Dest
15663                 op1  = gtNewBlockVal(op1, size);
15664                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15665                 goto SPILL_APPEND;
15666
15667             case CEE_INITBLK:
15668
15669                 if (tiVerificationNeeded)
15670                 {
15671                     Verify(false, "bad opcode");
15672                 }
15673
15674                 op3 = impPopStack().val; // Size
15675                 op2 = impPopStack().val; // Value
15676                 op1 = impPopStack().val; // Dest
15677
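                      // A constant size lets us build a fixed-size GT_BLK node; otherwise we fall back
                      // to GT_DYN_BLK, which carries the size tree.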
15678                 if (op3->IsCnsIntOrI())
15679                 {
15680                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15681                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15682                 }
15683                 else
15684                 {
15685                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15686                     size = 0;
15687                 }
15688                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15689
15690                 goto SPILL_APPEND;
15691
15692             case CEE_CPBLK:
15693
15694                 if (tiVerificationNeeded)
15695                 {
15696                     Verify(false, "bad opcode");
15697                 }
15698                 op3 = impPopStack().val; // Size
15699                 op2 = impPopStack().val; // Src
15700                 op1 = impPopStack().val; // Dest
15701
15702                 if (op3->IsCnsIntOrI())
15703                 {
15704                     size = (unsigned)op3->AsIntConCommon()->IconValue();
15705                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15706                 }
15707                 else
15708                 {
15709                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15710                     size = 0;
15711                 }
15712                 if (op2->OperGet() == GT_ADDR)
15713                 {
15714                     op2 = op2->gtOp.gtOp1;
15715                 }
15716                 else
15717                 {
15718                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
15719                 }
15720
15721                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
15722                 goto SPILL_APPEND;
15723
15724             case CEE_CPOBJ:
15725
15726                 assertImp(sz == sizeof(unsigned));
15727
15728                 _impResolveToken(CORINFO_TOKENKIND_Class);
15729
15730                 JITDUMP(" %08X", resolvedToken.token);
15731
15732                 if (tiVerificationNeeded)
15733                 {
15734                     typeInfo tiFrom  = impStackTop().seTypeInfo;
15735                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
15736                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15737
15738                     Verify(tiFrom.IsByRef(), "expected byref source");
15739                     Verify(tiTo.IsByRef(), "expected byref destination");
15740
15741                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
15742                            "type of source address incompatible with type operand");
15743                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15744                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15745                            "type operand incompatible with type of destination address");
15746                 }
15747
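                      // cpobj on a reference type is just an object-reference copy: load the ref through
                      // the source address and fall into the stind.ref path below.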
15748                 if (!eeIsValueClass(resolvedToken.hClass))
15749                 {
15750                     op1 = impPopStack().val; // address to load from
15751
15752                     impBashVarAddrsToI(op1);
15753
15754                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
15755
15756                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
15757                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
15758
15759                     impPushOnStack(op1, typeInfo());
15760                     opcode = CEE_STIND_REF;
15761                     lclTyp = TYP_REF;
15762                     goto STIND_POST_VERIFY;
15763                 }
15764
15765                 op2 = impPopStack().val; // Src
15766                 op1 = impPopStack().val; // Dest
15767                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
15768                 goto SPILL_APPEND;
15769
15770             case CEE_STOBJ:
15771             {
15772                 assertImp(sz == sizeof(unsigned));
15773
15774                 _impResolveToken(CORINFO_TOKENKIND_Class);
15775
15776                 JITDUMP(" %08X", resolvedToken.token);
15777
15778                 if (eeIsValueClass(resolvedToken.hClass))
15779                 {
15780                     lclTyp = TYP_STRUCT;
15781                 }
15782                 else
15783                 {
15784                     lclTyp = TYP_REF;
15785                 }
15786
15787                 if (tiVerificationNeeded)
15788                 {
15789
15790                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
15791
15792                     // Make sure we have a good looking byref
15793                     Verify(tiPtr.IsByRef(), "pointer not byref");
15794                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
15795                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
15796                     {
15797                         compUnsafeCastUsed = true;
15798                     }
15799
15800                     typeInfo ptrVal = DereferenceByRef(tiPtr);
15801                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
15802
15803                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
15804                     {
15805                         Verify(false, "type of value incompatible with type operand");
15806                         compUnsafeCastUsed = true;
15807                     }
15808
15809                     if (!tiCompatibleWith(argVal, ptrVal, false))
15810                     {
15811                         Verify(false, "type operand incompatible with type of address");
15812                         compUnsafeCastUsed = true;
15813                     }
15814                 }
15815                 else
15816                 {
15817                     compUnsafeCastUsed = true;
15818                 }
15819
15820                 if (lclTyp == TYP_REF)
15821                 {
15822                     opcode = CEE_STIND_REF;
15823                     goto STIND_POST_VERIFY;
15824                 }
15825
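                      // stobj of a reference type is simply stind.ref; stobj of a primitive-sized value
                      // class becomes an ordinary stind of that primitive type; anything else is a true
                      // struct store handled via impAssignStructPtr below.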
15826                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15827                 if (impIsPrimitive(jitTyp))
15828                 {
15829                     lclTyp = JITtype2varType(jitTyp);
15830                     goto STIND_POST_VERIFY;
15831                 }
15832
15833                 op2 = impPopStack().val; // Value
15834                 op1 = impPopStack().val; // Ptr
15835
15836                 assertImp(varTypeIsStruct(op2));
15837
15838                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15839
15840                 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
15841                 {
15842                     op1->gtFlags |= GTF_BLK_UNALIGNED;
15843                 }
15844                 goto SPILL_APPEND;
15845             }
15846
15847             case CEE_MKREFANY:
15848
15849                 assert(!compIsForInlining());
15850
15851                 // Being lazy here. Refanys are tricky in terms of gc tracking.
15852                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
15853
15854                 JITDUMP("disabling struct promotion because of mkrefany\n");
15855                 fgNoStructPromotion = true;
15856
15857                 oper = GT_MKREFANY;
15858                 assertImp(sz == sizeof(unsigned));
15859
15860                 _impResolveToken(CORINFO_TOKENKIND_Class);
15861
15862                 JITDUMP(" %08X", resolvedToken.token);
15863
15864                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15865                 if (op2 == nullptr)
15866                 { // compDonotInline()
15867                     return;
15868                 }
15869
15870                 if (tiVerificationNeeded)
15871                 {
15872                     typeInfo tiPtr   = impStackTop().seTypeInfo;
15873                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15874
15875                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
15876                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
15877                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
15878                 }
15879
15880                 accessAllowedResult =
15881                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15882                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15883
15884                 op1 = impPopStack().val;
15885
15886                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
15887                 // But JIT32 allowed it, so we continue to allow it.
15888                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
15889
15890                 // MKREFANY returns a struct.  op2 is the class token.
15891                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
15892
15893                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
15894                 break;
15895
15896             case CEE_LDOBJ:
15897             {
15898                 oper = GT_OBJ;
15899                 assertImp(sz == sizeof(unsigned));
15900
15901                 _impResolveToken(CORINFO_TOKENKIND_Class);
15902
15903                 JITDUMP(" %08X", resolvedToken.token);
15904
15905             OBJ:
15906
15907                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15908
15909                 if (tiVerificationNeeded)
15910                 {
15911                     typeInfo tiPtr = impStackTop().seTypeInfo;
15912
15913                     // Make sure we have a byref
15914                     if (!tiPtr.IsByRef())
15915                     {
15916                         Verify(false, "pointer not byref");
15917                         compUnsafeCastUsed = true;
15918                     }
15919                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
15920
15921                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
15922                     {
15923                         Verify(false, "type of address incompatible with type operand");
15924                         compUnsafeCastUsed = true;
15925                     }
15926                     tiRetVal.NormaliseForStack();
15927                 }
15928                 else
15929                 {
15930                     compUnsafeCastUsed = true;
15931                 }
15932
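                      // ldobj of a reference type is simply ldind.ref; a primitive-sized value class
                      // loads as an ordinary indirection of that primitive type; other value classes
                      // become GT_OBJ nodes carrying the class handle.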
15933                 if (eeIsValueClass(resolvedToken.hClass))
15934                 {
15935                     lclTyp = TYP_STRUCT;
15936                 }
15937                 else
15938                 {
15939                     lclTyp = TYP_REF;
15940                     opcode = CEE_LDIND_REF;
15941                     goto LDIND_POST_VERIFY;
15942                 }
15943
15944                 op1 = impPopStack().val;
15945
15946                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
15947
15948                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15949                 if (impIsPrimitive(jitTyp))
15950                 {
15951                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
15952
15953                     // Could point anywhere, for example a boxed class static int
15954                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15955                     assertImp(varTypeIsArithmetic(op1->gtType));
15956                 }
15957                 else
15958                 {
15959                     // OBJ returns a struct
15960                     // and an inline argument which is the class token of the loaded obj
15961                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
15962                 }
15963                 op1->gtFlags |= GTF_EXCEPT;
15964
15965                 if (prefixFlags & PREFIX_UNALIGNED)
15966                 {
15967                     op1->gtFlags |= GTF_IND_UNALIGNED;
15968                 }
15969
15970                 impPushOnStack(op1, tiRetVal);
15971                 break;
15972             }
15973
15974             case CEE_LDLEN:
15975                 if (tiVerificationNeeded)
15976                 {
15977                     typeInfo tiArray = impStackTop().seTypeInfo;
15978                     Verify(verIsSDArray(tiArray), "bad array");
15979                     tiRetVal = typeInfo(TI_INT);
15980                 }
15981
15982                 op1 = impPopStack().val;
15983                 if (!opts.MinOpts() && !opts.compDbgCode)
15984                 {
15985                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
15986                     GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length);
15987
15988                     /* Mark the block as containing a length expression */
15989
15990                     if (op1->gtOper == GT_LCL_VAR)
15991                     {
15992                         block->bbFlags |= BBF_HAS_IDX_LEN;
15993                     }
15994
15995                     op1 = arrLen;
15996                 }
15997                 else
15998                 {
15999                     /* Create the expression "*(array_addr + ArrLenOffs)" */
16000                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
16001                                         gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL));
16002                     op1 = gtNewIndir(TYP_INT, op1);
16003                 }
16004
16005                 /* Push the result back on the stack */
16006                 impPushOnStack(op1, tiRetVal);
16007                 break;
16008
16009             case CEE_BREAK:
16010                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
16011                 goto SPILL_APPEND;
16012
16013             case CEE_NOP:
16014                 if (opts.compDbgCode)
16015                 {
16016                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
16017                     goto SPILL_APPEND;
16018                 }
16019                 break;
16020
16021             /******************************** NYI *******************************/
16022
16023             case 0xCC:
16024                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
16025
16026             case CEE_ILLEGAL:
16027             case CEE_MACRO_END:
16028
16029             default:
16030                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
16031         }
16032
16033         codeAddr += sz;
16034         prevOpcode = opcode;
16035
16036         prefixFlags = 0;
16037     }
16038
16039     return;
16040 #undef _impResolveToken
16041 }
16042 #ifdef _PREFAST_
16043 #pragma warning(pop)
16044 #endif
16045
16046 // Push a local/argument tree on the operand stack
16047 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
16048 {
16049     tiRetVal.NormaliseForStack();
16050
16051     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
16052     {
16053         tiRetVal.SetUninitialisedObjRef();
16054     }
16055
16056     impPushOnStack(op, tiRetVal);
16057 }
16058
16059 // Load a local/argument on the operand stack
16060 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
16061 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
16062 {
16063     var_types lclTyp;
16064
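          // Normalize-on-load locals keep their exact (small) type here so the widening happens
          // as part of the load; other locals are pushed with their stack-normalized ("actual") type.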
16065     if (lvaTable[lclNum].lvNormalizeOnLoad())
16066     {
16067         lclTyp = lvaGetRealType(lclNum);
16068     }
16069     else
16070     {
16071         lclTyp = lvaGetActualType(lclNum);
16072     }
16073
16074     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
16075 }
16076
16077 // Load an argument on the operand stack
16078 // Shared by the various CEE_LDARG opcodes
16079 // ilArgNum is the argument index as specified in IL.
16080 // It will be mapped to the correct lvaTable index
16081 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
16082 {
16083     Verify(ilArgNum < info.compILargsCount, "bad arg num");
16084
16085     if (compIsForInlining())
16086     {
16087         if (ilArgNum >= info.compArgsCount)
16088         {
16089             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
16090             return;
16091         }
16092
16093         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
16094                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
16095     }
16096     else
16097     {
16098         if (ilArgNum >= info.compArgsCount)
16099         {
16100             BADCODE("Bad IL");
16101         }
16102
16103         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
16104
16105         if (lclNum == info.compThisArg)
16106         {
16107             lclNum = lvaArg0Var;
16108         }
16109
16110         impLoadVar(lclNum, offset);
16111     }
16112 }
16113
16114 // Load a local on the operand stack
16115 // Shared by the various CEE_LDLOC opcodes
16116 // ilLclNum is the local index as specified in IL.
16117 // It will be mapped to the correct lvaTable index
16118 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
16119 {
16120     if (tiVerificationNeeded)
16121     {
16122         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
16123         Verify(info.compInitMem, "initLocals not set");
16124     }
16125
16126     if (compIsForInlining())
16127     {
16128         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
16129         {
16130             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
16131             return;
16132         }
16133
16134         // Get the local type
16135         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
16136
16137         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
16138
16139         /* Have we allocated a temp for this local? */
16140
16141         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
16142
16143         // All vars of inlined methods should be !lvNormalizeOnLoad()
16144
16145         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
16146         lclTyp = genActualType(lclTyp);
16147
16148         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
16149     }
16150     else
16151     {
16152         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
16153         {
16154             BADCODE("Bad IL");
16155         }
16156
16157         unsigned lclNum = info.compArgsCount + ilLclNum;
16158
16159         impLoadVar(lclNum, offset);
16160     }
16161 }
16162
16163 #ifdef _TARGET_ARM_
16164 /**************************************************************************************
16165  *
16166  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
16167  *  dst struct, because struct promotion will turn it into a float/double variable while
16168  *  the rhs will be an int/long variable. We don't generate code for assigning an int into
16169  *  a float, but there is nothing that would prevent such a tree from being created. The tree,
16170  *  however, would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
16171  *
16172  *  tmpNum - the lcl dst variable num that is a struct.
16173  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
16174  *  hClass - the type handle for the struct variable.
16175  *
16176  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
16177  *        however, we could do a codegen of transferring from int to float registers
16178  *        (transfer, not a cast.)
16179  *
16180  */
16181 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
16182 {
16183     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
16184     {
16185         int       hfaSlots = GetHfaCount(hClass);
16186         var_types hfaType  = GetHfaType(hClass);
16187
16188         // If the call is varargs, the importer morphs the method's return type to "int" irrespective of its
16189         // original type (struct/float), because the ABI specifies that varargs calls return in integer registers.
16190         // We don't want struct promotion to rewrite an expression like
16191         //   lclFld_int = callvar_int()   into   lclFld_float = callvar_int();
16192         // which would assign an int to a float without a cast. Prevent the promotion.
16193         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
16194             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
16195         {
16196             // Make sure this struct type stays as struct so we can receive the call in a struct.
16197             lvaTable[tmpNum].lvIsMultiRegRet = true;
16198         }
16199     }
16200 }
16201 #endif // _TARGET_ARM_
16202
16203 //------------------------------------------------------------------------
16204 // impAssignSmallStructTypeToVar: ensure calls that return small structs whose
16205 //    sizes do not match a supported integral type size return their values via temps.
16206 //
16207 // Arguments:
16208 //     op -- call returning a small struct in a register
16209 //     hClass -- class handle for struct
16210 //
16211 // Returns:
16212 //     Tree with reference to struct local to use as call return value.
16213 //
16214 // Remarks:
16215 //     The call will be spilled into a preceding statement.
16216 //     Currently handles struct returns for 3, 5, 6, and 7 byte structs.
16217
16218 GenTree* Compiler::impAssignSmallStructTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
16219 {
16220     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for small struct return."));
16221     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
16222     GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
16223
16224     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of small struct returns.
16225     ret->gtFlags |= GTF_DONT_CSE;
16226
16227     return ret;
16228 }
16229
16230 #if FEATURE_MULTIREG_RET
16231 //------------------------------------------------------------------------
16232 // impAssignMultiRegTypeToVar: ensure calls that return structs in multiple
16233 //    registers return values to suitable temps.
16234 //
16235 // Arguments:
16236 //     op -- call returning a struct in registers
16237 //     hClass -- class handle for struct
16238 //
16239 // Returns:
16240 //     Tree with reference to struct local to use as call return value.
16241
16242 GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
16243 {
16244     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
16245     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
16246     GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
16247
16248     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
16249     ret->gtFlags |= GTF_DONT_CSE;
16250
16251     assert(IsMultiRegReturnedType(hClass));
16252
16253     // Mark the var so that fields are not promoted and stay together.
16254     lvaTable[tmpNum].lvIsMultiRegRet = true;
16255
16256     return ret;
16257 }
16258 #endif // FEATURE_MULTIREG_RET
16259
16260 // do import for a return
16261 // returns false if inlining was aborted
16262 // opcode can be ret or call in the case of a tail.call
16263 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
16264 {
16265     if (tiVerificationNeeded)
16266     {
16267         verVerifyThisPtrInitialised();
16268
16269         unsigned expectedStack = 0;
16270         if (info.compRetType != TYP_VOID)
16271         {
16272             typeInfo tiVal = impStackTop().seTypeInfo;
16273             typeInfo tiDeclared =
16274                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
16275
16276             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
16277
16278             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
16279             expectedStack = 1;
16280         }
16281         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
16282     }
16283
16284 #ifdef DEBUG
16285     // If we are importing an inlinee and have GC ref locals we always
16286     // need to have a spill temp for the return value.  This temp
16287     // should have been set up in advance, over in fgFindBasicBlocks.
16288     if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
16289     {
16290         assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
16291     }
16292 #endif // DEBUG
16293
16294     GenTree*             op2       = nullptr;
16295     GenTree*             op1       = nullptr;
16296     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
16297
16298     if (info.compRetType != TYP_VOID)
16299     {
16300         StackEntry se = impPopStack();
16301         retClsHnd     = se.seTypeInfo.GetClassHandle();
16302         op2           = se.val;
16303
16304         if (!compIsForInlining())
16305         {
16306             impBashVarAddrsToI(op2);
16307             op2 = impImplicitIorI4Cast(op2, info.compRetType);
16308             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
16309             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
16310                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
16311                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
16312                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
16313                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
16314
16315 #ifdef DEBUG
16316             if (opts.compGcChecks && info.compRetType == TYP_REF)
16317             {
16318                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
16319                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
16320                 // one-return BB.
16321
16322                 assert(op2->gtType == TYP_REF);
16323
16324                 // confirm that the argument is a GC pointer (for debugging (GC stress))
16325                 GenTreeArgList* args = gtNewArgList(op2);
16326                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
16327
16328                 if (verbose)
16329                 {
16330                     printf("\ncompGcChecks tree:\n");
16331                     gtDispTree(op2);
16332                 }
16333             }
16334 #endif
16335         }
16336         else
16337         {
16338             // inlinee's stack should be empty now.
16339             assert(verCurrentState.esStackDepth == 0);
16340
16341 #ifdef DEBUG
16342             if (verbose)
16343             {
16344                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
16345                 gtDispTree(op2);
16346             }
16347 #endif
16348
16349             // Make sure the type matches the original call.
16350
16351             var_types returnType       = genActualType(op2->gtType);
16352             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
16353             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
16354             {
16355                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
16356             }
16357
16358             if (returnType != originalCallType)
16359             {
16360                 // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa
16361                 if (((returnType == TYP_BYREF) && (originalCallType == TYP_I_IMPL)) ||
16362                     ((returnType == TYP_I_IMPL) && (originalCallType == TYP_BYREF)))
16363                 {
16364                     JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType),
16365                             varTypeName(originalCallType));
16366                 }
16367                 else
16368                 {
16369                     JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType),
16370                             varTypeName(originalCallType));
16371                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
16372                     return false;
16373                 }
16374             }
16375
16376             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
16377             // expression. At this point, retExpr could already be set if there are multiple
16378             // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
16379             // the other blocks already set it. If there is only a single return block,
16380             // retExpr shouldn't be set. However, this is not true if we reimport a block
16381             // with a return. In that case, retExpr will be set, then the block will be
16382             // reimported, but retExpr won't get cleared as part of setting the block to
16383             // be reimported. The reimported retExpr value should be the same, so even if
16384             // we don't unconditionally overwrite it, it shouldn't matter.
16385             if (info.compRetNativeType != TYP_STRUCT)
16386             {
16387                 // compRetNativeType is not TYP_STRUCT.
16388                 // This implies it could be either a scalar type or SIMD vector type or
16389                 // a struct type that can be normalized to a scalar type.
16390
16391                 if (varTypeIsStruct(info.compRetType))
16392                 {
16393                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
16394                     // adjust the type away from struct to integral
16395                     // and no normalizing
16396                     op2 = impFixupStructReturnType(op2, retClsHnd);
16397                 }
16398                 else
16399                 {
16400                     // Do we have to normalize?
16401                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
16402                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
16403                         fgCastNeeded(op2, fncRealRetType))
16404                     {
16405                         // Small-typed return values are normalized by the callee
16406                         op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
16407                     }
16408                 }
16409
16410                 if (fgNeedReturnSpillTemp())
16411                 {
16412                     assert(info.compRetNativeType != TYP_VOID &&
16413                            (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
16414
16415                     // If this method returns a ref type, track the actual types seen
16416                     // in the returns.
16417                     if (info.compRetType == TYP_REF)
16418                     {
16419                         bool                 isExact      = false;
16420                         bool                 isNonNull    = false;
16421                         CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
16422
16423                         if (impInlineInfo->retExpr == nullptr)
16424                         {
16425                             // This is the first return, so best known type is the type
16426                             // of this return value.
16427                             impInlineInfo->retExprClassHnd        = returnClsHnd;
16428                             impInlineInfo->retExprClassHndIsExact = isExact;
16429                         }
16430                         else if (impInlineInfo->retExprClassHnd != returnClsHnd)
16431                         {
16432                             // This return site type differs from earlier seen sites,
16433                             // so reset the info and we'll fall back to using the method's
16434                             // declared return type for the return spill temp.
16435                             impInlineInfo->retExprClassHnd        = nullptr;
16436                             impInlineInfo->retExprClassHndIsExact = false;
16437                         }
16438                     }
16439
16440                     // This is a bit of a workaround...
16441                     // If we are inlining a call that returns a struct, where the actual "native" return type is
16442                     // not a struct (for example, the struct is composed of exactly one int, and the native
16443                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
16444                     // fgNeedReturnSpillTemp() == true, and lvaInlineeReturnSpillTemp is the index of a local var that is set
16445                     // to the *native* return type), and at least one of the return blocks is the result of
16446                     // a call, then we have a problem. The situation is like this (from a failed test case):
16447                     //
16448                     // inliner:
16449                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
16450                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
16451                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
16452                     //
16453                     // inlinee:
16454                     //      ...
16455                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
16456                     //      ret
16457                     //      ...
16458                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
16459                     //      object&, class System.Func`1<!!0>)
16460                     //      ret
16461                     //
16462                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
16463                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
16464                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
16465                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
16466                     //
16467                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
16468                     // native return type, which is what it will be set to eventually. We generate the
16469                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
16470                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
16471
16472                     bool restoreType = false;
16473                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
16474                     {
16475                         noway_assert(op2->TypeGet() == TYP_STRUCT);
16476                         op2->gtType = info.compRetNativeType;
16477                         restoreType = true;
16478                     }
16479
16480                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
16481                                      (unsigned)CHECK_SPILL_ALL);
16482
16483                     GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
16484
16485                     if (restoreType)
16486                     {
16487                         op2->gtType = TYP_STRUCT; // restore it to what it was
16488                     }
16489
16490                     op2 = tmpOp2;
16491
16492 #ifdef DEBUG
16493                     if (impInlineInfo->retExpr)
16494                     {
16495                         // Some other block(s) have seen the CEE_RET first.
16496                         // Better they spilled to the same temp.
16497                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
16498                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
16499                     }
16500 #endif
16501                 }
16502
16503 #ifdef DEBUG
16504                 if (verbose)
16505                 {
16506                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
16507                     gtDispTree(op2);
16508                 }
16509 #endif
16510
16511                 // Report the return expression
16512                 impInlineInfo->retExpr = op2;
16513             }
16514             else
16515             {
16516                 // compRetNativeType is TYP_STRUCT.
16517                 // This implies that struct return via RetBuf arg or multi-reg struct return
16518
16519                 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
16520
16521                 // Assign the inlinee return into a spill temp.
16522                 // spill temp only exists if there are multiple return points
16523                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
16524                 {
16525                     // in this case we have to insert multiple struct copies to the temp
16526                     // and the retexpr is just the temp.
16527                     assert(info.compRetNativeType != TYP_VOID);
16528                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
16529
16530                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
16531                                      (unsigned)CHECK_SPILL_ALL);
16532                 }
16533
16534 #if defined(_TARGET_ARM_) || defined(UNIX_AMD64_ABI)
16535 #if defined(_TARGET_ARM_)
16536                 // TODO-ARM64-NYI: HFA
16537                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
16538                 // next ifdefs could be refactored into a single method with the ifdef inside.
16539                 if (IsHfa(retClsHnd))
16540                 {
16541 // Same as !IsHfa but just don't bother with impAssignStructPtr.
16542 #else  // defined(UNIX_AMD64_ABI)
16543                 ReturnTypeDesc retTypeDesc;
16544                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16545                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16546
16547                 if (retRegCount != 0)
16548                 {
16549                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
16550                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
16551                     // max allowed.)
16552                     assert(retRegCount == MAX_RET_REG_COUNT);
16553                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
16554                     CLANG_FORMAT_COMMENT_ANCHOR;
16555 #endif // defined(UNIX_AMD64_ABI)
16556
16557                     if (fgNeedReturnSpillTemp())
16558                     {
16559                         if (!impInlineInfo->retExpr)
16560                         {
16561 #if defined(_TARGET_ARM_)
16562                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
16563 #else  // defined(UNIX_AMD64_ABI)
16564                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16565                             impInlineInfo->retExpr =
16566                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16567 #endif // defined(UNIX_AMD64_ABI)
16568                         }
16569                     }
16570                     else
16571                     {
16572                         impInlineInfo->retExpr = op2;
16573                     }
16574                 }
16575                 else
16576 #elif defined(_TARGET_ARM64_)
16577                 ReturnTypeDesc retTypeDesc;
16578                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16579                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16580
16581                 if (retRegCount != 0)
16582                 {
16583                     assert(!iciCall->HasRetBufArg());
16584                     assert(retRegCount >= 2);
16585                     if (fgNeedReturnSpillTemp())
16586                     {
16587                         if (!impInlineInfo->retExpr)
16588                         {
16589                             // The inlinee compiler has figured out the type of the temp already. Use it here.
16590                             impInlineInfo->retExpr =
16591                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16592                         }
16593                     }
16594                     else
16595                     {
16596                         impInlineInfo->retExpr = op2;
16597                     }
16598                 }
16599                 else
16600 #endif // defined(_TARGET_ARM64_)
16601                 {
16602                     assert(iciCall->HasRetBufArg());
16603                     GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
16604                     // spill temp only exists if there are multiple return points
16605                     if (fgNeedReturnSpillTemp())
16606                     {
16607                         // if this is the first return we have seen set the retExpr
16608                         if (!impInlineInfo->retExpr)
16609                         {
16610                             impInlineInfo->retExpr =
16611                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
16612                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
16613                         }
16614                     }
16615                     else
16616                     {
16617                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16618                     }
16619                 }
16620             }
16621         }
16622     }
16623
16624     if (compIsForInlining())
16625     {
16626         return true;
16627     }
16628
16629     if (info.compRetType == TYP_VOID)
16630     {
16631         // return void
16632         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16633     }
16634     else if (info.compRetBuffArg != BAD_VAR_NUM)
16635     {
16636         // Assign value to return buff (first param)
16637         GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
16638
16639         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16640         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16641
16642         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
16643         CLANG_FORMAT_COMMENT_ANCHOR;
16644
16645 #if defined(_TARGET_AMD64_)
16646
16647         // The x64 (System V and Win64) calling conventions require the implicit
16648         // return buffer to be returned explicitly (in RAX).
16649         // Change the return type to be BYREF.
16650         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16651 #else  // !defined(_TARGET_AMD64_)
16652         // On non-AMD64 targets the profiler hook requires the implicit RetBuf to be returned explicitly
16653         // (in the return register). In that case the return value of the function is changed to BYREF.
16654         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
16655         if (compIsProfilerHookNeeded())
16656         {
16657             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16658         }
16659         else
16660         {
16661             // return void
16662             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16663         }
16664 #endif // !defined(_TARGET_AMD64_)
16665     }
16666     else if (varTypeIsStruct(info.compRetType))
16667     {
16668 #if !FEATURE_MULTIREG_RET
16669         // For both ARM architectures the HFA native types are maintained as structs.
16670         // Also on System V AMD64 the multireg structs returns are also left as structs.
16671         noway_assert(info.compRetNativeType != TYP_STRUCT);
16672 #endif
16673         op2 = impFixupStructReturnType(op2, retClsHnd);
16674         // return op2
16675         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
16676     }
16677     else
16678     {
16679         // return op2
16680         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
16681     }
16682
16683     // We must have imported a tailcall and jumped to RET
16684     if (prefixFlags & PREFIX_TAILCALL)
16685     {
16686 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
16687         // Jit64 compat:
16688         // This cannot be asserted on Amd64 since we permit the following IL pattern:
16689         //      tail.call
16690         //      pop
16691         //      ret
16692         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
16693 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
16694
16695         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
16696
16697         // impImportCall() would have already appended TYP_VOID calls
16698         if (info.compRetType == TYP_VOID)
16699         {
16700             return true;
16701         }
16702     }
16703
16704     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16705 #ifdef DEBUG
16706     // Remember at which BC offset the tree was finished
16707     impNoteLastILoffs();
16708 #endif
16709     return true;
16710 }
16711
16712 /*****************************************************************************
16713  *  Mark the block as unimported.
16714  *  Note that the caller is responsible for calling impImportBlockPending(),
16715  *  with the appropriate stack-state
16716  */
16717
16718 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
16719 {
16720 #ifdef DEBUG
16721     if (verbose && (block->bbFlags & BBF_IMPORTED))
16722     {
16723         printf("\n" FMT_BB " will be reimported\n", block->bbNum);
16724     }
16725 #endif
16726
16727     block->bbFlags &= ~BBF_IMPORTED;
16728 }
16729
16730 /*****************************************************************************
16731  *  Mark the successors of the given block as unimported.
16732  *  Note that the caller is responsible for calling impImportBlockPending()
16733  *  for all the successors, with the appropriate stack-state.
16734  */
16735
16736 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
16737 {
16738     const unsigned numSuccs = block->NumSucc();
16739     for (unsigned i = 0; i < numSuccs; i++)
16740     {
16741         impReimportMarkBlock(block->GetSucc(i));
16742     }
16743 }
16744
16745 /*****************************************************************************
16746  *
16747  *  Exception filter that handles only the verification exception
16748  *  (SEH_VERIFICATION_EXCEPTION); all other exceptions continue the search.
16749  */
16750
16751 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
16752 {
16753     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
16754     {
16755         return EXCEPTION_EXECUTE_HANDLER;
16756     }
16757
16758     return EXCEPTION_CONTINUE_SEARCH;
16759 }
16760
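// impVerifyEHBlock: propagate verification state from "block" into the handlers of the try
// regions that enclose it. When "isTryStart" is true this is the first block of the innermost
// try, so each enclosing handler (and filter) entry block is queued for importing with a stack
// that is either empty or holds just the incoming exception object; otherwise, when tracking
// 'this' initialization, enclosing fault handlers are re-queued so their entry state also
// reflects this block's post-state.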
16761 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
16762 {
16763     assert(block->hasTryIndex());
16764     assert(!compIsForInlining());
16765
16766     unsigned  tryIndex = block->getTryIndex();
16767     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
16768
16769     if (isTryStart)
16770     {
16771         assert(block->bbFlags & BBF_TRY_BEG);
16772
16773         // The Stack must be empty
16774         //
16775         if (block->bbStkDepth != 0)
16776         {
16777             BADCODE("Evaluation stack must be empty on entry into a try block");
16778         }
16779     }
16780
16781     // Save the stack contents, we'll need to restore it later
16782     //
16783     SavedStack blockState;
16784     impSaveStackState(&blockState, false);
16785
16786     while (HBtab != nullptr)
16787     {
16788         if (isTryStart)
16789         {
16790             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
16791             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
16792             //
16793             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
16794             {
16795                 // We trigger an invalid program exception here unless we have a try/fault region.
16796                 //
16797                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
16798                 {
16799                     BADCODE(
16800                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
16801                 }
16802                 else
16803                 {
16804                     // Allow a try/fault region to proceed.
16805                     assert(HBtab->HasFaultHandler());
16806                 }
16807             }
16808
16809             /* Recursively process the handler block */
16810             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
16811
16812             //  Construct the proper verification stack state
16813             //   either empty or one that contains just
16814             //   the Exception Object that we are dealing with
16815             //
16816             verCurrentState.esStackDepth = 0;
16817
16818             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
16819             {
16820                 CORINFO_CLASS_HANDLE clsHnd;
16821
16822                 if (HBtab->HasFilter())
16823                 {
16824                     clsHnd = impGetObjectClass();
16825                 }
16826                 else
16827                 {
16828                     CORINFO_RESOLVED_TOKEN resolvedToken;
16829
16830                     resolvedToken.tokenContext = impTokenLookupContextHandle;
16831                     resolvedToken.tokenScope   = info.compScopeHnd;
16832                     resolvedToken.token        = HBtab->ebdTyp;
16833                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
16834                     info.compCompHnd->resolveToken(&resolvedToken);
16835
16836                     clsHnd = resolvedToken.hClass;
16837                 }
16838
16839                 // push the catch arg on the stack, spill to a temp if necessary
16840                 // Note: can update HBtab->ebdHndBeg!
16841                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
16842             }
16843
16844             // Queue up the handler for importing
16845             //
16846             impImportBlockPending(hndBegBB);
16847
16848             if (HBtab->HasFilter())
16849             {
16850                 /* @VERIFICATION : Ideally the end of filter state should get
16851                    propagated to the catch handler; this is an incompleteness,
16852                    but is not a security/compliance issue, since the only
16853                    interesting state is the 'thisInit' state.
16854                    */
16855
16856                 verCurrentState.esStackDepth = 0;
16857
16858                 BasicBlock* filterBB = HBtab->ebdFilter;
16859
16860                 // push the catch arg on the stack, spill to a temp if necessary
16861                 // Note: can update HBtab->ebdFilter!
16862                 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
16863                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
16864
16865                 impImportBlockPending(filterBB);
16866             }
16867         }
16868         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
16869         {
16870             /* Recursively process the handler block */
16871
16872             verCurrentState.esStackDepth = 0;
16873
16874             // Queue up the fault handler for importing
16875             //
16876             impImportBlockPending(HBtab->ebdHndBeg);
16877         }
16878
16879         // Now process our enclosing try index (if any)
16880         //
16881         tryIndex = HBtab->ebdEnclosingTryIndex;
16882         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
16883         {
16884             HBtab = nullptr;
16885         }
16886         else
16887         {
16888             HBtab = ehGetDsc(tryIndex);
16889         }
16890     }
16891
16892     // Restore the stack contents
16893     impRestoreStackState(&blockState);
16894 }
16895
16896 //***************************************************************
16897 // Import the instructions for the given basic block.  Perform
16898 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
16899 // time, or whose verification pre-state is changed.
16900
16901 #ifdef _PREFAST_
16902 #pragma warning(push)
16903 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
16904 #endif
16905 void Compiler::impImportBlock(BasicBlock* block)
16906 {
16907     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
16908     // handle them specially. In particular, there is no IL to import for them, but we do need
16909     // to mark them as imported and put their successors on the pending import list.
16910     if (block->bbFlags & BBF_INTERNAL)
16911     {
16912         JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
16913         block->bbFlags |= BBF_IMPORTED;
16914
16915         const unsigned numSuccs = block->NumSucc();
16916         for (unsigned i = 0; i < numSuccs; i++)
16917         {
16918             impImportBlockPending(block->GetSucc(i));
16919         }
16920
16921         return;
16922     }
16923
16924     bool markImport;
16925
16926     assert(block);
16927
16928     /* Make the block globally available */
16929
16930     compCurBB = block;
16931
16932 #ifdef DEBUG
16933     /* Initialize the debug variables */
16934     impCurOpcName = "unknown";
16935     impCurOpcOffs = block->bbCodeOffs;
16936 #endif
16937
16938     /* Set the current stack state to the merged result */
16939     verResetCurrentState(block, &verCurrentState);
16940
16941     /* Now walk the code and import the IL into GenTrees */
16942
16943     struct FilterVerificationExceptionsParam
16944     {
16945         Compiler*   pThis;
16946         BasicBlock* block;
16947     };
16948     FilterVerificationExceptionsParam param;
16949
16950     param.pThis = this;
16951     param.block = block;
16952
16953     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
16954     {
16955         /* @VERIFICATION : For now, the only state propagation from try
16956            to its handler is "thisInit" state (stack is empty at the start of the try).
16957            In general, for state that we track in verification, we need to
16958            model the possibility that an exception might happen at any IL
16959            instruction, so we really need to merge all states that obtain
16960            between IL instructions in a try block into the start states of
16961            all handlers.
16962
16963            However, we do not allow the 'this' pointer to be uninitialized when
16964            entering most kinds of try regions (only try/fault is allowed to have
16965            an uninitialized 'this' pointer on entry to the try).
16966
16967            Fortunately, the stack is thrown away when an exception
16968            leads to a handler, so we don't have to worry about that.
16969            We DO, however, have to worry about the "thisInit" state.
16970            But only for the try/fault case.
16971
16972            The only allowed transition is from TIS_Uninit to TIS_Init.
16973
16974            So for a try/fault region for the fault handler block
16975            we will merge the start state of the try begin
16976            and the post-state of each block that is part of this try region
16977         */
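        // Illustrative sketch (assumed IL shape, not taken from a specific test) of the
        // try/fault case described above, where the base constructor call sits inside a
        // fault-protected region:
        //
        //   .method instance void .ctor()
        //   {
        //     .try
        //     {
        //       ldarg.0
        //       call instance void [mscorlib]System.Object::.ctor() // 'this' becomes TIS_Init here
        //       leave.s DONE
        //     }
        //     fault
        //     {
        //       // may be entered while 'this' is still TIS_Uninit, so the handler's start
        //       // state must merge the post-state of every block in the try region
        //       endfault
        //     }
        //   DONE:
        //     ret
        //   }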
16978
16979         // merge the start state of the try begin
16980         //
16981         if (pParam->block->bbFlags & BBF_TRY_BEG)
16982         {
16983             pParam->pThis->impVerifyEHBlock(pParam->block, true);
16984         }
16985
16986         pParam->pThis->impImportBlockCode(pParam->block);
16987
16988         // As discussed above:
16989         // merge the post-state of each block that is part of this try region
16990         //
16991         if (pParam->block->hasTryIndex())
16992         {
16993             pParam->pThis->impVerifyEHBlock(pParam->block, false);
16994         }
16995     }
16996     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
16997     {
16998         verHandleVerificationFailure(block DEBUGARG(false));
16999     }
17000     PAL_ENDTRY
17001
17002     if (compDonotInline())
17003     {
17004         return;
17005     }
17006
17007     assert(!compDonotInline());
17008
17009     markImport = false;
17010
17011 SPILLSTACK:
17012
17013     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
17014     bool        reimportSpillClique = false;
17015     BasicBlock* tgtBlock            = nullptr;
17016
17017     /* If the stack is non-empty, we might have to spill its contents */
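    // Illustrative sketch (assumed IL shape, not from a specific test): a conditional that
    // leaves its result on the evaluation stack across the join forces the spilling below, e.g.
    //
    //       ldarg.0
    //       brtrue.s L1
    //       ldc.i4.1
    //       br.s     L2
    //   L1: ldc.i4.2
    //   L2: stloc.0
    //
    // Both predecessors of L2 end with one live stack entry, so that entry is spilled into a
    // temp shared by the whole spill clique, and L2 is imported with a load of that temp.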
17018
17019     if (verCurrentState.esStackDepth != 0)
17020     {
17021         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
17022                                   // on the stack, its lifetime is hard to determine, simply
17023                                   // don't reuse such temps.
17024
17025         GenTree* addStmt = nullptr;
17026
17027         /* Do the successors of 'block' have any other predecessors ?
17028            We do not want to do some of the optimizations related to multiRef
17029            if we can reimport blocks */
17030
17031         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
17032
17033         switch (block->bbJumpKind)
17034         {
17035             case BBJ_COND:
17036
17037                 /* Temporarily remove the 'jtrue' from the end of the tree list */
17038
17039                 assert(impTreeLast);
17040                 assert(impTreeLast->gtOper == GT_STMT);
17041                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
17042
17043                 addStmt     = impTreeLast;
17044                 impTreeLast = impTreeLast->gtPrev;
17045
17046                 /* Note if the next block has more than one ancestor */
17047
17048                 multRef |= block->bbNext->bbRefs;
17049
17050                 /* Does the next block have temps assigned? */
17051
17052                 baseTmp  = block->bbNext->bbStkTempsIn;
17053                 tgtBlock = block->bbNext;
17054
17055                 if (baseTmp != NO_BASE_TMP)
17056                 {
17057                     break;
17058                 }
17059
17060                 /* Try the target of the jump then */
17061
17062                 multRef |= block->bbJumpDest->bbRefs;
17063                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
17064                 tgtBlock = block->bbJumpDest;
17065                 break;
17066
17067             case BBJ_ALWAYS:
17068                 multRef |= block->bbJumpDest->bbRefs;
17069                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
17070                 tgtBlock = block->bbJumpDest;
17071                 break;
17072
17073             case BBJ_NONE:
17074                 multRef |= block->bbNext->bbRefs;
17075                 baseTmp  = block->bbNext->bbStkTempsIn;
17076                 tgtBlock = block->bbNext;
17077                 break;
17078
17079             case BBJ_SWITCH:
17080
17081                 BasicBlock** jmpTab;
17082                 unsigned     jmpCnt;
17083
17084                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
17085
17086                 assert(impTreeLast);
17087                 assert(impTreeLast->gtOper == GT_STMT);
17088                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
17089
17090                 addStmt     = impTreeLast;
17091                 impTreeLast = impTreeLast->gtPrev;
17092
17093                 jmpCnt = block->bbJumpSwt->bbsCount;
17094                 jmpTab = block->bbJumpSwt->bbsDstTab;
17095
17096                 do
17097                 {
17098                     tgtBlock = (*jmpTab);
17099
17100                     multRef |= tgtBlock->bbRefs;
17101
17102                     // Thanks to spill cliques, we should have assigned all or none
17103                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
17104                     baseTmp = tgtBlock->bbStkTempsIn;
17105                     if (multRef > 1)
17106                     {
17107                         break;
17108                     }
17109                 } while (++jmpTab, --jmpCnt);
17110
17111                 break;
17112
17113             case BBJ_CALLFINALLY:
17114             case BBJ_EHCATCHRET:
17115             case BBJ_RETURN:
17116             case BBJ_EHFINALLYRET:
17117             case BBJ_EHFILTERRET:
17118             case BBJ_THROW:
17119                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
17120                 break;
17121
17122             default:
17123                 noway_assert(!"Unexpected bbJumpKind");
17124                 break;
17125         }
17126
17127         assert(multRef >= 1);
17128
17129         /* Do we have a base temp number? */
17130
17131         bool newTemps = (baseTmp == NO_BASE_TMP);
17132
17133         if (newTemps)
17134         {
17135             /* Grab enough temps for the whole stack */
17136             baseTmp = impGetSpillTmpBase(block);
17137         }
17138
17139         /* Spill all stack entries into temps */
17140         unsigned level, tempNum;
17141
17142         JITDUMP("\nSpilling stack entries into temps\n");
17143         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
17144         {
17145             GenTree* tree = verCurrentState.esStack[level].val;
17146
17147             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
17148                the other. This should merge to a byref in unverifiable code.
17149                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
17150                successor would be imported assuming there was a TYP_I_IMPL on
17151                the stack. Thus the value would not get GC-tracked. Hence,
17152                change the temp to TYP_BYREF and reimport the successors.
17153                Note: We should only allow this in unverifiable code.
17154             */
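            // Illustrative sketch (assumed IL, not from a specific test) of the pattern above:
            //
            //       brfalse.s L1
            //       ldloca.s  0   // one branch pushes a byref ...
            //       br.s      L2
            //   L1: ldc.i4.0      // ... the other pushes an int 0
            //   L2: ...           // merge point: the shared spill temp must end up TYP_BYREF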
17155             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
17156             {
17157                 lvaTable[tempNum].lvType = TYP_BYREF;
17158                 impReimportMarkSuccessors(block);
17159                 markImport = true;
17160             }
17161
17162 #ifdef _TARGET_64BIT_
17163             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
17164             {
17165                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
17166                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
17167                 {
17168                     // Merge the current state into the entry state of block;
17169                     // the call to verMergeEntryStates must have changed
17170                     // the entry state of the block by merging the int local var
17171                     // and the native-int stack entry.
17172                     bool changed = false;
17173                     if (verMergeEntryStates(tgtBlock, &changed))
17174                     {
17175                         impRetypeEntryStateTemps(tgtBlock);
17176                         impReimportBlockPending(tgtBlock);
17177                         assert(changed);
17178                     }
17179                     else
17180                     {
17181                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
17182                         break;
17183                     }
17184                 }
17185
17186                 // Some other block in the spill clique set this to "int", but now we have "native int".
17187                 // Change the type and go back to re-import any blocks that used the wrong type.
17188                 lvaTable[tempNum].lvType = TYP_I_IMPL;
17189                 reimportSpillClique      = true;
17190             }
17191             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
17192             {
17193                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
17194                 // Insert a sign-extension to "native int" so we match the clique.
17195                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
17196             }
17197
17198             // Consider the case where one branch left a 'byref' on the stack and the other leaves
17199             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
17200             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
17201             // behavior instead of asserting and then generating bad code (where we save/restore the
17202             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
17203             // imported already, we need to change the type of the local and reimport the spill clique.
17204             // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
17205             // If the 'byref' side has been imported, we insert a cast from int to 'native int' to match
17206             if (!tiVerificationNeeded)
17207             {
17208                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
17209                 {
17210                     // Some other block in the spill clique set this to "int", but now we have "byref".
17211                     // Change the type and go back to re-import any blocks that used the wrong type.
17212                     lvaTable[tempNum].lvType = TYP_BYREF;
17213                     reimportSpillClique      = true;
17214                 }
17215                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
17216                 {
17217                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
17218                     // Insert a sign-extension to "native int" so we match the clique size.
17219                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
17220                 }
17221             }
17222 #endif // _TARGET_64BIT_
17223
17224             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
17225             {
17226                 // Some other block in the spill clique set this to "float", but now we have "double".
17227                 // Change the type and go back to re-import any blocks that used the wrong type.
17228                 lvaTable[tempNum].lvType = TYP_DOUBLE;
17229                 reimportSpillClique      = true;
17230             }
17231             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
17232             {
17233                 // Spill clique has decided this should be "double", but this block only pushes a "float".
17234                 // Insert a cast to "double" so we match the clique.
17235                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
17236             }
17237
17238             /* If addStmt has a reference to tempNum (can only happen if we
17239                are spilling to the temps already used by a previous block),
17240                we need to spill addStmt */
17241
17242             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
17243             {
17244                 GenTree* addTree = addStmt->gtStmt.gtStmtExpr;
17245
17246                 if (addTree->gtOper == GT_JTRUE)
17247                 {
17248                     GenTree* relOp = addTree->gtOp.gtOp1;
17249                     assert(relOp->OperIsCompare());
17250
17251                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
17252
17253                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
17254                     {
17255                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
17256                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
17257                         type              = genActualType(lvaTable[temp].TypeGet());
17258                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
17259                     }
17260
17261                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
17262                     {
17263                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
17264                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
17265                         type              = genActualType(lvaTable[temp].TypeGet());
17266                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
17267                     }
17268                 }
17269                 else
17270                 {
17271                     assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
17272
17273                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
17274                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
17275                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
17276                 }
17277             }
17278
17279             /* Spill the stack entry, and replace with the temp */
17280
17281             if (!impSpillStackEntry(level, tempNum
17282 #ifdef DEBUG
17283                                     ,
17284                                     true, "Spill Stack Entry"
17285 #endif
17286                                     ))
17287             {
17288                 if (markImport)
17289                 {
17290                     BADCODE("bad stack state");
17291                 }
17292
17293                 // Oops. Something went wrong when spilling. Bad code.
17294                 verHandleVerificationFailure(block DEBUGARG(true));
17295
17296                 goto SPILLSTACK;
17297             }
17298         }
17299
17300         /* Put back the 'jtrue'/'switch' if we removed it earlier */
17301
17302         if (addStmt)
17303         {
17304             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
17305         }
17306     }
17307
17308     // Some of the append/spill logic works on compCurBB
17309
17310     assert(compCurBB == block);
17311
17312     /* Save the tree list in the block */
17313     impEndTreeList(block);
17314
17315     // impEndTreeList sets BBF_IMPORTED on the block
17316     // We do *NOT* want to set it later than this because
17317     // impReimportSpillClique might clear it if this block is both a
17318     // predecessor and successor in the current spill clique
17319     assert(block->bbFlags & BBF_IMPORTED);
17320
17321     // If we had an int/native int, or float/double collision, we need to re-import
17322     if (reimportSpillClique)
17323     {
17324         // This will re-import all the successors of block (as well as each of their predecessors)
17325         impReimportSpillClique(block);
17326
17327         // For blocks that haven't been imported yet, we still need to mark them as pending import.
17328         const unsigned numSuccs = block->NumSucc();
17329         for (unsigned i = 0; i < numSuccs; i++)
17330         {
17331             BasicBlock* succ = block->GetSucc(i);
17332             if ((succ->bbFlags & BBF_IMPORTED) == 0)
17333             {
17334                 impImportBlockPending(succ);
17335             }
17336         }
17337     }
17338     else // the normal case
17339     {
17340         // otherwise just import the successors of block
17341
17342         /* Does this block jump to any other blocks? */
17343         const unsigned numSuccs = block->NumSucc();
17344         for (unsigned i = 0; i < numSuccs; i++)
17345         {
17346             impImportBlockPending(block->GetSucc(i));
17347         }
17348     }
17349 }
17350 #ifdef _PREFAST_
17351 #pragma warning(pop)
17352 #endif
17353
17354 /*****************************************************************************/
17355 //
17356 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
17357 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
17358 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
17359 // (its "pre-state").
17360
17361 void Compiler::impImportBlockPending(BasicBlock* block)
17362 {
17363 #ifdef DEBUG
17364     if (verbose)
17365     {
17366         printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum);
17367     }
17368 #endif
17369
17370     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
17371     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
17372     // (When we're doing verification, we always attempt the merge to detect verification errors.)
17373
17374     // If the block has not been imported, add to pending set.
17375     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
17376
17377     // Initialize bbEntryState just the first time we try to add this block to the pending list
17378     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set
17379     // We use NULL to indicate the 'common' state to avoid memory allocation
17380     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
17381         (impGetPendingBlockMember(block) == 0))
17382     {
17383         verInitBBEntryState(block, &verCurrentState);
17384         assert(block->bbStkDepth == 0);
17385         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
17386         assert(addToPending);
17387         assert(impGetPendingBlockMember(block) == 0);
17388     }
17389     else
17390     {
17391         // The stack should have the same height on entry to the block from all its predecessors.
17392         if (block->bbStkDepth != verCurrentState.esStackDepth)
17393         {
17394 #ifdef DEBUG
17395             char buffer[400];
17396             sprintf_s(buffer, sizeof(buffer),
17397                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
17398                       "Previous depth was %d, current depth is %d",
17399                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
17400                       verCurrentState.esStackDepth);
17401             buffer[400 - 1] = 0;
17402             NO_WAY(buffer);
17403 #else
17404             NO_WAY("Block entered with different stack depths");
17405 #endif
17406         }
17407
17408         // Additionally, if we need to verify, merge the verification state.
17409         if (tiVerificationNeeded)
17410         {
17411             // Merge the current state into the entry state of block; if this does not change the entry state
17412             // by merging, do not add the block to the pending-list.
17413             bool changed = false;
17414             if (!verMergeEntryStates(block, &changed))
17415             {
17416                 block->bbFlags |= BBF_FAILED_VERIFICATION;
17417                 addToPending = true; // We will pop it off, and check the flag set above.
17418             }
17419             else if (changed)
17420             {
17421                 addToPending = true;
17422
17423                 JITDUMP("Adding " FMT_BB " to pending set due to new merge result\n", block->bbNum);
17424             }
17425         }
17426
17427         if (!addToPending)
17428         {
17429             return;
17430         }
17431
17432         if (block->bbStkDepth > 0)
17433         {
17434             // We need to fix the types of any spill temps that might have changed:
17435             //   int->native int, float->double, int->byref, etc.
17436             impRetypeEntryStateTemps(block);
17437         }
17438
17439         // OK, we must add to the pending list, if it's not already in it.
17440         if (impGetPendingBlockMember(block) != 0)
17441         {
17442             return;
17443         }
17444     }
17445
17446     // Get an entry to add to the pending list
17447
17448     PendingDsc* dsc;
17449
17450     if (impPendingFree)
17451     {
17452         // We can reuse one of the freed up dscs.
17453         dsc            = impPendingFree;
17454         impPendingFree = dsc->pdNext;
17455     }
17456     else
17457     {
17458         // We have to create a new dsc
17459         dsc = new (this, CMK_Unknown) PendingDsc;
17460     }
17461
17462     dsc->pdBB                 = block;
17463     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
17464     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
17465
17466     // Save the stack trees for later
17467
17468     if (verCurrentState.esStackDepth)
17469     {
17470         impSaveStackState(&dsc->pdSavedStack, false);
17471     }
17472
17473     // Add the entry to the pending list
17474
17475     dsc->pdNext    = impPendingList;
17476     impPendingList = dsc;
17477     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17478
17479     // Various assertions require us to now consider the block as not imported (at least for
17480     // the final time...)
17481     block->bbFlags &= ~BBF_IMPORTED;
17482
17483 #ifdef DEBUG
17484     if (verbose && 0)
17485     {
17486         printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
17487     }
17488 #endif
17489 }
17490
17491 /*****************************************************************************/
17492 //
17493 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
17494 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
17495 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
17496
17497 void Compiler::impReimportBlockPending(BasicBlock* block)
17498 {
17499     JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum);
17500
17501     assert(block->bbFlags & BBF_IMPORTED);
17502
17503     // OK, we must add to the pending list, if it's not already in it.
17504     if (impGetPendingBlockMember(block) != 0)
17505     {
17506         return;
17507     }
17508
17509     // Get an entry to add to the pending list
17510
17511     PendingDsc* dsc;
17512
17513     if (impPendingFree)
17514     {
17515         // We can reuse one of the freed up dscs.
17516         dsc            = impPendingFree;
17517         impPendingFree = dsc->pdNext;
17518     }
17519     else
17520     {
17521         // We have to create a new dsc
17522         dsc = new (this, CMK_ImpStack) PendingDsc;
17523     }
17524
17525     dsc->pdBB = block;
17526
17527     if (block->bbEntryState)
17528     {
17529         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
17530         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
17531         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
17532     }
17533     else
17534     {
17535         dsc->pdThisPtrInit        = TIS_Bottom;
17536         dsc->pdSavedStack.ssDepth = 0;
17537         dsc->pdSavedStack.ssTrees = nullptr;
17538     }
17539
17540     // Add the entry to the pending list
17541
17542     dsc->pdNext    = impPendingList;
17543     impPendingList = dsc;
17544     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17545
17546     // Various assertions require us to now consider the block as not imported (at least for
17547     // the final time...)
17548     block->bbFlags &= ~BBF_IMPORTED;
17549
17550 #ifdef DEBUG
17551     if (verbose && 0)
17552     {
17553         printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
17554     }
17555 #endif
17556 }
17557
17558 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
17559 {
17560     if (comp->impBlockListNodeFreeList == nullptr)
17561     {
17562         return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1);
17563     }
17564     else
17565     {
17566         BlockListNode* res             = comp->impBlockListNodeFreeList;
17567         comp->impBlockListNodeFreeList = res->m_next;
17568         return res;
17569     }
17570 }
17571
17572 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
17573 {
17574     node->m_next             = impBlockListNodeFreeList;
17575     impBlockListNodeFreeList = node;
17576 }
17577
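// impWalkSpillCliqueFromPred: walk the spill clique that contains "block" as a predecessor,
// invoking "callback" once for each member, either in its role as a predecessor
// (SpillCliquePred) or as a successor (SpillCliqueSucc). The walk alternates between taking
// the successors of every pending predecessor and the (cheap) predecessors of every pending
// successor until the closure stops growing, so that every block sharing stack values that
// live across a block boundary ends up agreeing on a single set of spill temps.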
17578 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
17579 {
17580     bool toDo = true;
17581
17582     noway_assert(!fgComputePredsDone);
17583     if (!fgCheapPredsValid)
17584     {
17585         fgComputeCheapPreds();
17586     }
17587
17588     BlockListNode* succCliqueToDo = nullptr;
17589     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
17590     while (toDo)
17591     {
17592         toDo = false;
17593         // Look at the successors of every member of the predecessor to-do list.
17594         while (predCliqueToDo != nullptr)
17595         {
17596             BlockListNode* node = predCliqueToDo;
17597             predCliqueToDo      = node->m_next;
17598             BasicBlock* blk     = node->m_blk;
17599             FreeBlockListNode(node);
17600
17601             const unsigned numSuccs = blk->NumSucc();
17602             for (unsigned succNum = 0; succNum < numSuccs; succNum++)
17603             {
17604                 BasicBlock* succ = blk->GetSucc(succNum);
17605                 // If it's not already in the clique, add it, and also add it
17606                 // as a member of the successor "toDo" set.
17607                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
17608                 {
17609                     callback->Visit(SpillCliqueSucc, succ);
17610                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
17611                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
17612                     toDo           = true;
17613                 }
17614             }
17615         }
17616         // Look at the predecessors of every member of the successor to-do list.
17617         while (succCliqueToDo != nullptr)
17618         {
17619             BlockListNode* node = succCliqueToDo;
17620             succCliqueToDo      = node->m_next;
17621             BasicBlock* blk     = node->m_blk;
17622             FreeBlockListNode(node);
17623
17624             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
17625             {
17626                 BasicBlock* predBlock = pred->block;
17627                 // If it's not already in the clique, add it, and also add it
17628                 // as a member of the predecessor "toDo" set.
17629                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
17630                 {
17631                     callback->Visit(SpillCliquePred, predBlock);
17632                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
17633                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
17634                     toDo           = true;
17635                 }
17636             }
17637         }
17638     }
17639
17640     // If this fails, it means we didn't walk the spill clique properly and somehow managed
17641     // to miss walking back to include the predecessor we started from.
17642     // The most likely cause is missing or out-of-date bbPreds.
17643     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
17644 }
17645
17646 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17647 {
17648     if (predOrSucc == SpillCliqueSucc)
17649     {
17650         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
17651         blk->bbStkTempsIn = m_baseTmp;
17652     }
17653     else
17654     {
17655         assert(predOrSucc == SpillCliquePred);
17656         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
17657         blk->bbStkTempsOut = m_baseTmp;
17658     }
17659 }
17660
17661 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17662 {
17663     // For Preds we could be a little smarter and just find the existing store
17664     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
17665     // just re-import the whole block (just like we do for successors)
17666
17667     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
17668     {
17669         // If we haven't imported this block and we're not going to (because it isn't on
17670         // the pending list) then just ignore it for now.
17671
17672         // This block has either never been imported (EntryState == NULL) or it failed
17673         // verification. Neither state requires us to force it to be imported now.
17674         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
17675         return;
17676     }
17677
17678     // For successors we have a valid verCurrentState, so just mark them for reimport
17679     // the 'normal' way
17680     // Unlike predecessors, we *DO* need to reimport the current block because the
17681     // initial import had the wrong entry state types.
17682     // Similarly, blocks that are currently on the pending list, still need to call
17683     // impImportBlockPending to fixup their entry state.
17684     if (predOrSucc == SpillCliqueSucc)
17685     {
17686         m_pComp->impReimportMarkBlock(blk);
17687
17688         // Set the current stack state to that of the blk->bbEntryState
17689         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
17690         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
17691
17692         m_pComp->impImportBlockPending(blk);
17693     }
17694     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
17695     {
17696         // As described above, we are only visiting predecessors so they can
17697         // add the appropriate casts; since we have already done that for the current
17698         // block, it does not need to be reimported.
17699         // Nor do we need to reimport blocks that are still pending, but not yet
17700         // imported.
17701         //
17702         // For predecessors, we have no state to seed the EntryState, so we just have
17703         // to assume the existing one is correct.
17704         // If the block is also a successor, it will get the EntryState properly
17705         // updated when it is visited as a successor in the above "if" block.
17706         assert(predOrSucc == SpillCliquePred);
17707         m_pComp->impReimportBlockPending(blk);
17708     }
17709 }
17710
17711 // Re-type the incoming lclVar nodes to match the varDsc.
17712 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
17713 {
17714     if (blk->bbEntryState != nullptr)
17715     {
17716         EntryState* es = blk->bbEntryState;
17717         for (unsigned level = 0; level < es->esStackDepth; level++)
17718         {
17719             GenTree* tree = es->esStack[level].val;
17720             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
17721             {
17722                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
17723                 noway_assert(lclNum < lvaCount);
17724                 LclVarDsc* varDsc              = lvaTable + lclNum;
17725                 es->esStack[level].val->gtType = varDsc->TypeGet();
17726             }
17727         }
17728     }
17729 }
17730
17731 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
17732 {
17733     if (block->bbStkTempsOut != NO_BASE_TMP)
17734     {
17735         return block->bbStkTempsOut;
17736     }
17737
17738 #ifdef DEBUG
17739     if (verbose)
17740     {
17741         printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum);
17742     }
17743 #endif // DEBUG
17744
17745     // Otherwise, choose one, and propagate to all members of the spill clique.
17746     // Grab enough temps for the whole stack.
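    // For example (illustrative): with an entry stack depth of 2 this grabs temps
    // {baseTmp, baseTmp + 1}; every predecessor in the clique spills its two stack entries
    // into those same temps, and every successor reloads its entry stack from them.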
17747     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
17748     SetSpillTempsBase callback(baseTmp);
17749
17750     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
17751     // to one spill clique, and similarly can only be the successor to one spill clique
17752     impWalkSpillCliqueFromPred(block, &callback);
17753
17754     return baseTmp;
17755 }
17756
17757 void Compiler::impReimportSpillClique(BasicBlock* block)
17758 {
17759 #ifdef DEBUG
17760     if (verbose)
17761     {
17762         printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum);
17763     }
17764 #endif // DEBUG
17765
17766     // If we get here, it is because this block is already part of a spill clique
17767     // and one predecessor had an outgoing live stack slot of type int, and this
17768     // block has an outgoing live stack slot of type native int.
17769     // We need to reset these before traversal because they have already been set
17770     // by the previous walk to determine all the members of the spill clique.
17771     impInlineRoot()->impSpillCliquePredMembers.Reset();
17772     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
17773
17774     ReimportSpillClique callback(this);
17775
17776     impWalkSpillCliqueFromPred(block, &callback);
17777 }
17778
17779 // Set the pre-state of "block" (which should not have a pre-state allocated) to
17780 // a copy of "srcState", cloning tree pointers as required.
17781 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
17782 {
17783     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
17784     {
17785         block->bbEntryState = nullptr;
17786         return;
17787     }
17788
17789     block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1);
17790
17791     // block->bbEntryState.esRefcount = 1;
17792
17793     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
17794     block->bbEntryState->thisInitialized = TIS_Bottom;
17795
17796     if (srcState->esStackDepth > 0)
17797     {
17798         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
17799         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
17800
17801         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
17802         for (unsigned level = 0; level < srcState->esStackDepth; level++)
17803         {
17804             GenTree* tree                           = srcState->esStack[level].val;
17805             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
17806         }
17807     }
17808
17809     if (verTrackObjCtorInitState)
17810     {
17811         verSetThisInit(block, srcState->thisInitialized);
17812     }
17813
17814     return;
17815 }
17816
17817 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
17818 {
17819     assert(tis != TIS_Bottom); // Precondition.
17820     if (block->bbEntryState == nullptr)
17821     {
17822         block->bbEntryState = new (this, CMK_Unknown) EntryState();
17823     }
17824
17825     block->bbEntryState->thisInitialized = tis;
17826 }
17827
17828 /*
17829  * Resets the current state to the state at the start of the basic block
17830  */
17831 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
17832 {
17833
17834     if (block->bbEntryState == nullptr)
17835     {
17836         destState->esStackDepth    = 0;
17837         destState->thisInitialized = TIS_Bottom;
17838         return;
17839     }
17840
17841     destState->esStackDepth = block->bbEntryState->esStackDepth;
17842
17843     if (destState->esStackDepth > 0)
17844     {
17845         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
17846
17847         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
17848     }
17849
17850     destState->thisInitialized = block->bbThisOnEntry();
17851
17852     return;
17853 }
17854
17855 ThisInitState BasicBlock::bbThisOnEntry()
17856 {
17857     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
17858 }
17859
17860 unsigned BasicBlock::bbStackDepthOnEntry()
17861 {
17862     return (bbEntryState ? bbEntryState->esStackDepth : 0);
17863 }
17864
17865 void BasicBlock::bbSetStack(void* stackBuffer)
17866 {
17867     assert(bbEntryState);
17868     assert(stackBuffer);
17869     bbEntryState->esStack = (StackEntry*)stackBuffer;
17870 }
17871
17872 StackEntry* BasicBlock::bbStackOnEntry()
17873 {
17874     assert(bbEntryState);
17875     return bbEntryState->esStack;
17876 }
17877
17878 void Compiler::verInitCurrentState()
17879 {
17880     verTrackObjCtorInitState        = FALSE;
17881     verCurrentState.thisInitialized = TIS_Bottom;
17882
17883     if (tiVerificationNeeded)
17884     {
17885         // Track this ptr initialization
17886         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
17887         {
17888             verTrackObjCtorInitState        = TRUE;
17889             verCurrentState.thisInitialized = TIS_Uninit;
17890         }
17891     }
17892
17893     // initialize stack info
17894
17895     verCurrentState.esStackDepth = 0;
17896     assert(verCurrentState.esStack != nullptr);
17897
17898     // copy current state to entry state of first BB
17899     verInitBBEntryState(fgFirstBB, &verCurrentState);
17900 }
17901
17902 Compiler* Compiler::impInlineRoot()
17903 {
17904     if (impInlineInfo == nullptr)
17905     {
17906         return this;
17907     }
17908     else
17909     {
17910         return impInlineInfo->InlineRoot;
17911     }
17912 }
17913
17914 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
17915 {
17916     if (predOrSucc == SpillCliquePred)
17917     {
17918         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
17919     }
17920     else
17921     {
17922         assert(predOrSucc == SpillCliqueSucc);
17923         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
17924     }
17925 }
17926
17927 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
17928 {
17929     if (predOrSucc == SpillCliquePred)
17930     {
17931         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
17932     }
17933     else
17934     {
17935         assert(predOrSucc == SpillCliqueSucc);
17936         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
17937     }
17938 }
17939
17940 /*****************************************************************************
17941  *
17942  *  Convert the instrs ("import") into our internal format (trees). The
17943  *  basic flowgraph has already been constructed and is passed in.
17944  */
17945
17946 void Compiler::impImport(BasicBlock* method)
17947 {
17948 #ifdef DEBUG
17949     if (verbose)
17950     {
17951         printf("*************** In impImport() for %s\n", info.compFullName);
17952     }
17953 #endif
17954
17955     Compiler* inlineRoot = impInlineRoot();
17956
17957     if (info.compMaxStack <= SMALL_STACK_SIZE)
17958     {
17959         impStkSize = SMALL_STACK_SIZE;
17960     }
17961     else
17962     {
17963         impStkSize = info.compMaxStack;
17964     }
17965
17966     if (this == inlineRoot)
17967     {
17968         // Allocate the stack contents
17969         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17970     }
17971     else
17972     {
17973         // This is the inlinee compiler; steal the stack from the inliner compiler
17974         // (after ensuring that it is large enough).
17975         if (inlineRoot->impStkSize < impStkSize)
17976         {
17977             inlineRoot->impStkSize              = impStkSize;
17978             inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17979         }
17980
17981         verCurrentState.esStack = inlineRoot->verCurrentState.esStack;
17982     }
17983
17984     // initialize the entry state at start of method
17985     verInitCurrentState();
17986
17987     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
17988     if (this == inlineRoot) // These are only used on the root of the inlining tree.
17989     {
17990         // We have initialized these previously, but to size 0.  Make them larger.
17991         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
17992         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
17993         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
17994     }
17995     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
17996     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
17997     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
17998     impBlockListNodeFreeList = nullptr;
17999
18000 #ifdef DEBUG
18001     impLastILoffsStmt   = nullptr;
18002     impNestedStackSpill = false;
18003 #endif
18004     impBoxTemp = BAD_VAR_NUM;
18005
18006     impPendingList = impPendingFree = nullptr;
18007
18008     /* Add the entry-point to the worker-list */
18009
18010     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
18011     // from EH normalization.
18012     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
18013     // out.
18014     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
18015     {
18016         // Treat these as imported.
18017         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
18018         JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", method->bbNum);
18019         method->bbFlags |= BBF_IMPORTED;
18020     }
18021
18022     impImportBlockPending(method);
18023
18024     /* Import blocks in the worker-list until there are no more */
18025
18026     while (impPendingList)
18027     {
18028         /* Remove the entry at the front of the list */
18029
18030         PendingDsc* dsc = impPendingList;
18031         impPendingList  = impPendingList->pdNext;
18032         impSetPendingBlockMember(dsc->pdBB, 0);
18033
18034         /* Restore the stack state */
18035
18036         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
18037         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
18038         if (verCurrentState.esStackDepth)
18039         {
18040             impRestoreStackState(&dsc->pdSavedStack);
18041         }
18042
18043         /* Add the entry to the free list for reuse */
18044
18045         dsc->pdNext    = impPendingFree;
18046         impPendingFree = dsc;
18047
18048         /* Now import the block */
18049
18050         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
18051         {
18052
18053 #ifdef _TARGET_64BIT_
18054             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
18055             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
18056             // method for further explanation on why we raise this exception instead of making the jitted
18057             // code throw the verification exception during execution.
18058             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
18059             {
18060                 BADCODE("Basic block marked as not verifiable");
18061             }
18062             else
18063 #endif // _TARGET_64BIT_
18064             {
18065                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
18066                 impEndTreeList(dsc->pdBB);
18067             }
18068         }
18069         else
18070         {
18071             impImportBlock(dsc->pdBB);
18072
18073             if (compDonotInline())
18074             {
18075                 return;
18076             }
18077             if (compIsForImportOnly() && !tiVerificationNeeded)
18078             {
18079                 return;
18080             }
18081         }
18082     }
18083
18084 #ifdef DEBUG
18085     if (verbose && info.compXcptnsCount)
18086     {
18087         printf("\nAfter impImport() added block for try,catch,finally");
18088         fgDispBasicBlocks();
18089         printf("\n");
18090     }
18091
18092     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
18093     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
18094     {
18095         block->bbFlags &= ~BBF_VISITED;
18096     }
18097 #endif
18098
18099     assert(!compIsForInlining() || !tiVerificationNeeded);
18100 }
18101
18102 // Checks if a typeinfo (usually stored in the type stack) is a struct.
18103 // The invariant here is that if it's not a ref or a method and has a class handle
18104 // it's a valuetype
18105 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
18106 {
18107     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
18108     {
18109         return true;
18110     }
18111     else
18112     {
18113         return false;
18114     }
18115 }
18116
18117 /*****************************************************************************
18118  *  Check to see if the tree is the address of a local or
18119  *  the address of a field in a local.
18120  *
18121  *  *lclVarTreeOut will contain the GT_LCL_VAR tree when this returns TRUE.
18122  */
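// Illustrative note (not part of the original comment): the shapes accepted are ADDR(LCL_VAR)
// and ADDR(FIELD(ADDR(... (LCL_VAR)))), i.e. the address of a local or of a (possibly nested)
// instance field of a local. A static field -- where the FIELD node has no object -- or any
// other shape returns FALSE.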
18124
18125 BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
18126 {
18127     if (tree->gtOper != GT_ADDR)
18128     {
18129         return FALSE;
18130     }
18131
18132     GenTree* op = tree->gtOp.gtOp1;
18133     while (op->gtOper == GT_FIELD)
18134     {
18135         op = op->gtField.gtFldObj;
18136         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
18137         {
18138             op = op->gtOp.gtOp1;
18139         }
18140         else
18141         {
18142             return FALSE;
18143         }
18144     }
18145
18146     if (op->gtOper == GT_LCL_VAR)
18147     {
18148         *lclVarTreeOut = op;
18149         return TRUE;
18150     }
18151     else
18152     {
18153         return FALSE;
18154     }
18155 }
18156
18157 //------------------------------------------------------------------------
18158 // impMakeDiscretionaryInlineObservations: make observations that help
18159 // determine the profitability of a discretionary inline
18160 //
18161 // Arguments:
18162 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
18163 //    inlineResult -- InlineResult accumulating information about this inline
18164 //
18165 // Notes:
18166 //    If inlining or prejitting the root, this method also makes
18167 //    various observations about the method that factor into inline
18168 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
18169
18170 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
18171 {
18172     assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining.
18173            (pInlineInfo == nullptr && !compIsForInlining())   // Calculate the static inlining hint for ngen.
18174            );
18175
18176     // If we're really inlining, we should just have one result in play.
18177     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
18178
18179     // If this is a "forceinline" method, the JIT probably shouldn't have gone
18180     // to the trouble of estimating the native code size. Even if it did, it
18181     // shouldn't be relying on the result of this method.
18182     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
18183
18184     // Note if the caller contains NEWOBJ or NEWARR.
18185     Compiler* rootCompiler = impInlineRoot();
18186
18187     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
18188     {
18189         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
18190     }
18191
18192     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
18193     {
18194         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
18195     }
18196
18197     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
18198     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
18199
18200     if (isSpecialMethod)
18201     {
18202         if (calleeIsStatic)
18203         {
18204             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
18205         }
18206         else
18207         {
18208             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
18209         }
18210     }
18211     else if (!calleeIsStatic)
18212     {
18213         // Callee is an instance method.
18214         //
18215         // Check if the callee has the same 'this' as the root.
18216         if (pInlineInfo != nullptr)
18217         {
18218             GenTree* thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
18219             assert(thisArg);
18220             bool isSameThis = impIsThis(thisArg);
18221             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
18222         }
18223     }
18224
18225     // Note if the callee's class is a promotable struct
18226     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
18227     {
18228         assert(structPromotionHelper != nullptr);
18229         if (structPromotionHelper->CanPromoteStructType(info.compClassHnd))
18230         {
18231             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
18232         }
18233     }
18234
18235 #ifdef FEATURE_SIMD
18236
18237     // Note if this method has SIMD args or a SIMD return value
18238     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
18239     {
18240         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
18241     }
18242
18243 #endif // FEATURE_SIMD
18244
18245     // Roughly classify callsite frequency.
18246     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
18247
18248     // If this is a prejit root, or a maximally hot block...
18249     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
18250     {
18251         frequency = InlineCallsiteFrequency::HOT;
18252     }
18253     // No training data.  Look for loop-like things.
18254     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
18255     // However, give it to things nearby.
18256     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
18257              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
18258     {
18259         frequency = InlineCallsiteFrequency::LOOP;
18260     }
18261     else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
18262     {
18263         frequency = InlineCallsiteFrequency::WARM;
18264     }
18265     // Check for rarely run call sites (or class constructors).
18266     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
18267     {
18268         frequency = InlineCallsiteFrequency::RARE;
18269     }
18270     else
18271     {
18272         frequency = InlineCallsiteFrequency::BORING;
18273     }
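    // Summary of the classification above (informal): prejit root or maximally hot block -> HOT;
    // backward-jump block with a non-recursive callee -> LOOP; profiled block with nonzero
    // weight -> WARM; rarely run block or cctor -> RARE; everything else -> BORING.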
18274
18275     // Also capture the block weight of the call site.  In the prejit
18276     // root case, assume there's some hot call site for this method.
18277     unsigned weight = 0;
18278
18279     if (pInlineInfo != nullptr)
18280     {
18281         weight = pInlineInfo->iciBlock->bbWeight;
18282     }
18283     else
18284     {
18285         weight = BB_MAX_WEIGHT;
18286     }
18287
18288     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
18289     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
18290 }
18291
18292 /*****************************************************************************
18293  This method makes the STATIC inlining decision based solely on the IL code.
18294  It should not make any inlining decisions based on the call-site context.
18295  If forceInline is true, then the inlining decision should not depend on
18296  performance heuristics (code size, etc.).
18297  */
18298
18299 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
18300                               CORINFO_METHOD_INFO*  methInfo,
18301                               bool                  forceInline,
18302                               InlineResult*         inlineResult)
18303 {
18304     unsigned codeSize = methInfo->ILCodeSize;
18305
18306     // We shouldn't have made up our minds yet...
18307     assert(!inlineResult->IsDecided());
18308
18309     if (methInfo->EHcount)
18310     {
18311         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
18312         return;
18313     }
18314
18315     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
18316     {
18317         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
18318         return;
18319     }
18320
18321     // For now we don't inline varargs (import code can't handle it)
18322
18323     if (methInfo->args.isVarArg())
18324     {
18325         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
18326         return;
18327     }
18328
18329     // Reject if it has too many locals.
18330     // This is currently an implementation limit due to fixed-size arrays in the
18331     // inline info, rather than a performance heuristic.
18332
18333     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
18334
18335     if (methInfo->locals.numArgs > MAX_INL_LCLS)
18336     {
18337         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
18338         return;
18339     }
18340
18341     // Make sure there aren't too many arguments.
18342     // This is currently an implementation limit due to fixed-size arrays in the
18343     // inline info, rather than a performance heuristic.
18344
18345     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
18346
18347     if (methInfo->args.numArgs > MAX_INL_ARGS)
18348     {
18349         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
18350         return;
18351     }
18352
18353     // Note force inline state
18354
18355     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
18356
18357     // Note IL code size
18358
18359     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
18360
18361     if (inlineResult->IsFailure())
18362     {
18363         return;
18364     }
18365
18366     // Make sure maxstack is not too big
18367
18368     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
18369
18370     if (inlineResult->IsFailure())
18371     {
18372         return;
18373     }
18374 }
18375
18376 /*****************************************************************************
18377  */
18378
18379 void Compiler::impCheckCanInline(GenTree*               call,
18380                                  CORINFO_METHOD_HANDLE  fncHandle,
18381                                  unsigned               methAttr,
18382                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
18383                                  InlineCandidateInfo**  ppInlineCandidateInfo,
18384                                  InlineResult*          inlineResult)
18385 {
18386     // Either EE or JIT might throw exceptions below.
18387     // If that happens, just don't inline the method.
18388
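    // Note (descriptive): eeRunWithErrorTrap takes a non-capturing callback plus an opaque
    // parameter block, so everything the callback needs is passed through the Param struct
    // below rather than captured by the lambda.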
18389     struct Param
18390     {
18391         Compiler*              pThis;
18392         GenTree*               call;
18393         CORINFO_METHOD_HANDLE  fncHandle;
18394         unsigned               methAttr;
18395         CORINFO_CONTEXT_HANDLE exactContextHnd;
18396         InlineResult*          result;
18397         InlineCandidateInfo**  ppInlineCandidateInfo;
18398     } param;
18399     memset(&param, 0, sizeof(param));
18400
18401     param.pThis                 = this;
18402     param.call                  = call;
18403     param.fncHandle             = fncHandle;
18404     param.methAttr              = methAttr;
18405     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
18406     param.result                = inlineResult;
18407     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
18408
18409     bool success = eeRunWithErrorTrap<Param>(
18410         [](Param* pParam) {
18411             DWORD                  dwRestrictions = 0;
18412             CorInfoInitClassResult initClassResult;
18413
18414 #ifdef DEBUG
18415             const char* methodName;
18416             const char* className;
18417             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
18418
18419             if (JitConfig.JitNoInline())
18420             {
18421                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
18422                 goto _exit;
18423             }
18424 #endif
18425
18426             /* Try to get the code address/size for the method */
18427
18428             CORINFO_METHOD_INFO methInfo;
18429             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
18430             {
18431                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
18432                 goto _exit;
18433             }
18434
18435             bool forceInline;
18436             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
18437
18438             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
18439
18440             if (pParam->result->IsFailure())
18441             {
18442                 assert(pParam->result->IsNever());
18443                 goto _exit;
18444             }
18445
18446             // Speculatively check if initClass() can be done.
18447             // If it can be done, we will try to inline the method. If inlining
18448             // succeeds, then we will do the non-speculative initClass() and commit it.
18449             // If this speculative call to initClass() fails, there is no point
18450             // trying to inline this method.
18451             initClassResult =
18452                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
18453                                                            pParam->exactContextHnd /* context */,
18454                                                            TRUE /* speculative */);
18455
18456             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
18457             {
18458                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
18459                 goto _exit;
18460             }
18461
18462             // Give the EE the final say in whether to inline or not.
18463             // This should be done last since, for verifiable code, this check can be expensive.
18464
18465             /* VM Inline check also ensures that the method is verifiable if needed */
18466             CorInfoInline vmResult;
18467             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
18468                                                                   &dwRestrictions);
18469
18470             if (vmResult == INLINE_FAIL)
18471             {
18472                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
18473             }
18474             else if (vmResult == INLINE_NEVER)
18475             {
18476                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
18477             }
18478
18479             if (pParam->result->IsFailure())
18480             {
18481                 // Make sure not to report this one.  It was already reported by the VM.
18482                 pParam->result->SetReported();
18483                 goto _exit;
18484             }
18485
18486             // check for unsupported inlining restrictions
18487             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
18488
18489             if (dwRestrictions & INLINE_SAME_THIS)
18490             {
18491                 GenTree* thisArg = pParam->call->gtCall.gtCallObjp;
18492                 assert(thisArg);
18493
18494                 if (!pParam->pThis->impIsThis(thisArg))
18495                 {
18496                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
18497                     goto _exit;
18498                 }
18499             }
18500
18501             /* Get the method properties */
18502
18503             CORINFO_CLASS_HANDLE clsHandle;
18504             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
18505             unsigned clsAttr;
18506             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
18507
18508             /* Get the return type */
18509
18510             var_types fncRetType;
18511             fncRetType = pParam->call->TypeGet();
18512
18513 #ifdef DEBUG
18514             var_types fncRealRetType;
18515             fncRealRetType = JITtype2varType(methInfo.args.retType);
18516
18517             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
18518                    // <BUGNUM> VSW 288602 </BUGNUM>
18519                    // In the IJW case, we allow assigning a native pointer to a BYREF.
18520                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
18521                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
18522 #endif
18523
18524             //
18525             // Allocate an InlineCandidateInfo structure
18526             //
18527             InlineCandidateInfo* pInfo;
18528             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
18529
18530             pInfo->dwRestrictions       = dwRestrictions;
18531             pInfo->methInfo             = methInfo;
18532             pInfo->methAttr             = pParam->methAttr;
18533             pInfo->clsHandle            = clsHandle;
18534             pInfo->clsAttr              = clsAttr;
18535             pInfo->fncRetType           = fncRetType;
18536             pInfo->exactContextHnd      = pParam->exactContextHnd;
18537             pInfo->ilCallerHandle       = pParam->pThis->info.compMethodHnd;
18538             pInfo->initClassResult      = initClassResult;
18539             pInfo->preexistingSpillTemp = BAD_VAR_NUM;
18540
18541             *(pParam->ppInlineCandidateInfo) = pInfo;
18542
18543         _exit:;
18544         },
18545         &param);
18546     if (!success)
18547     {
18548         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
18549     }
18550 }
18551
18552 //------------------------------------------------------------------------
18553 // impInlineRecordArgInfo: record information about an inline candidate argument
18554 //
18555 // Arguments:
18556 //   pInlineInfo - inline info for the inline candidate
18557 //   curArgVal - tree for the caller actual argument value
18558 //   argNum - logical index of this argument
18559 //   inlineResult - result of ongoing inline evaluation
18560 //
18561 // Notes:
18562 //
18563 //   Checks for various inline blocking conditions and makes notes in
18564 //   the inline info arg table about the properties of the actual argument. These
18565 //   properties are used later by impInlineFetchArg to determine how best to
18566 //   pass the argument into the inlinee.
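//
//   For example (illustrative only): a constant actual such as 'Foo(5)' is marked argIsInvariant,
//   a plain caller local 'Foo(x)' is marked argIsLclVar, and the address of a caller struct local
//   is marked argIsByRefToStructLocal; a GT_MKREFANY actual aborts the inline outright.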
18567
18568 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
18569                                       GenTree*      curArgVal,
18570                                       unsigned      argNum,
18571                                       InlineResult* inlineResult)
18572 {
18573     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
18574
18575     if (curArgVal->gtOper == GT_MKREFANY)
18576     {
18577         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
18578         return;
18579     }
18580
18581     inlCurArgInfo->argNode = curArgVal;
18582
18583     GenTree* lclVarTree;
18584     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
18585     {
18586         inlCurArgInfo->argIsByRefToStructLocal = true;
18587 #ifdef FEATURE_SIMD
18588         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
18589         {
18590             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
18591         }
18592 #endif // FEATURE_SIMD
18593     }
18594
18595     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
18596     {
18597         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
18598         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
18599     }
18600
18601     if (curArgVal->gtOper == GT_LCL_VAR)
18602     {
18603         inlCurArgInfo->argIsLclVar = true;
18604
18605         /* Remember the "original" argument number */
18606         curArgVal->gtLclVar.gtLclILoffs = argNum;
18607     }
18608
18609     if ((curArgVal->OperKind() & GTK_CONST) ||
18610         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
18611     {
18612         inlCurArgInfo->argIsInvariant = true;
18613         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
18614         {
18615             // Abort inlining at this call site
18616             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
18617             return;
18618         }
18619     }
18620
18621     // If the arg is a local that is address-taken, we can't safely
18622     // directly substitute it into the inlinee.
18623     //
18624     // Previously we'd accomplish this by setting "argHasLdargaOp" but
18625     // that has a stronger meaning: that the arg value can change in
18626 // the method body. Using that flag would also block type propagation,
18627 // which is actually safe in this case.
18628     //
18629     // Instead mark the arg as having a caller local ref.
18630     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
18631     {
18632         inlCurArgInfo->argHasCallerLocalRef = true;
18633     }
18634
18635 #ifdef DEBUG
18636     if (verbose)
18637     {
18638         if (inlCurArgInfo->argIsThis)
18639         {
18640             printf("thisArg:");
18641         }
18642         else
18643         {
18644             printf("\nArgument #%u:", argNum);
18645         }
18646         if (inlCurArgInfo->argIsLclVar)
18647         {
18648             printf(" is a local var");
18649         }
18650         if (inlCurArgInfo->argIsInvariant)
18651         {
18652             printf(" is a constant");
18653         }
18654         if (inlCurArgInfo->argHasGlobRef)
18655         {
18656             printf(" has global refs");
18657         }
18658         if (inlCurArgInfo->argHasCallerLocalRef)
18659         {
18660             printf(" has caller local ref");
18661         }
18662         if (inlCurArgInfo->argHasSideEff)
18663         {
18664             printf(" has side effects");
18665         }
18666         if (inlCurArgInfo->argHasLdargaOp)
18667         {
18668             printf(" has ldarga effect");
18669         }
18670         if (inlCurArgInfo->argHasStargOp)
18671         {
18672             printf(" has starg effect");
18673         }
18674         if (inlCurArgInfo->argIsByRefToStructLocal)
18675         {
18676             printf(" is byref to a struct local");
18677         }
18678
18679         printf("\n");
18680         gtDispTree(curArgVal);
18681         printf("\n");
18682     }
18683 #endif
18684 }
18685
18686 //------------------------------------------------------------------------
18687 // impInlineInitVars: setup inline information for inlinee args and locals
18688 //
18689 // Arguments:
18690 //    pInlineInfo - inline info for the inline candidate
18691 //
18692 // Notes:
18693 //    This method primarily adds caller-supplied info to the inlArgInfo
18694 //    and sets up the lclVarInfo table.
18695 //
18696 //    For args, the inlArgInfo records properties of the actual argument
18697 //    including the tree node that produces the arg value. This node is
18698 //    usually the tree node present at the call, but may also differ in
18699 //    various ways:
18700 //    - when the call arg is a GT_RET_EXPR, we search back through the ret
18701 //      expr chain for the actual node. Note this will either be the original
18702 //      call (which will be a failed inline by this point), or the return
18703 //      expression from some set of inlines.
18704 //    - when argument type casting is needed the necessary casts are added
18705 //      around the argument node.
18706 //    - if an argument can be simplified by folding then the node here is the
18707 //      folded value.
18708 //
18709 //   The method may make observations that lead to marking this candidate as
18710 //   a failed inline. If this happens the initialization is abandoned immediately
18711 //   to try and reduce the jit time cost for a failed inline.
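//
//   Illustrative example (hypothetical names): for a call 'Outer(Inner(x))' where Inner was
//   itself an inline candidate, the argument seen here is a GT_RET_EXPR; walking the ret expr
//   chain yields either Inner's return expression (if that inline succeeded) or the original
//   call node (if it failed), and that is the node recorded in inlArgInfo.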
18712
18713 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
18714 {
18715     assert(!compIsForInlining());
18716
18717     GenTree*             call         = pInlineInfo->iciCall;
18718     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
18719     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
18720     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
18721     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
18722     InlineResult*        inlineResult = pInlineInfo->inlineResult;
18723
18724     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
18725
18726     /* Init the argument struct */
18727
18728     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
18729
18730     /* Get hold of the 'this' pointer and the argument list proper */
18731
18732     GenTree* thisArg = call->gtCall.gtCallObjp;
18733     GenTree* argList = call->gtCall.gtCallArgs;
18734     unsigned argCnt  = 0; // Count of the arguments
18735
18736     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
18737
18738     if (thisArg)
18739     {
18740         inlArgInfo[0].argIsThis = true;
18741         GenTree* actualThisArg  = thisArg->gtRetExprVal();
18742         impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, inlineResult);
18743
18744         if (inlineResult->IsFailure())
18745         {
18746             return;
18747         }
18748
18749         /* Increment the argument count */
18750         argCnt++;
18751     }
18752
18753     /* Record some information about each of the arguments */
18754     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
18755
18756 #if USER_ARGS_COME_LAST
18757     unsigned typeCtxtArg = thisArg ? 1 : 0;
18758 #else  // USER_ARGS_COME_LAST
18759     unsigned typeCtxtArg = methInfo->args.totalILArgs();
18760 #endif // USER_ARGS_COME_LAST
18761
18762     for (GenTree* argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
18763     {
18764         if (argTmp == argList && hasRetBuffArg)
18765         {
18766             continue;
18767         }
18768
18769         // Ignore the type context argument
18770         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
18771         {
18772             pInlineInfo->typeContextArg = typeCtxtArg;
18773             typeCtxtArg                 = 0xFFFFFFFF;
18774             continue;
18775         }
18776
18777         assert(argTmp->gtOper == GT_LIST);
18778         GenTree* arg       = argTmp->gtOp.gtOp1;
18779         GenTree* actualArg = arg->gtRetExprVal();
18780         impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
18781
18782         if (inlineResult->IsFailure())
18783         {
18784             return;
18785         }
18786
18787         /* Increment the argument count */
18788         argCnt++;
18789     }
18790
18791     /* Make sure we got the arg number right */
18792     assert(argCnt == methInfo->args.totalILArgs());
18793
18794 #ifdef FEATURE_SIMD
18795     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
18796 #endif // FEATURE_SIMD
18797
18798     /* We have typeless opcodes; get type information from the signature */
18799
18800     if (thisArg)
18801     {
18802         var_types sigType;
18803
18804         if (clsAttr & CORINFO_FLG_VALUECLASS)
18805         {
18806             sigType = TYP_BYREF;
18807         }
18808         else
18809         {
18810             sigType = TYP_REF;
18811         }
18812
18813         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
18814         lclVarInfo[0].lclHasLdlocaOp = false;
18815
18816 #ifdef FEATURE_SIMD
18817         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
18818         // the inlining multiplier) for anything in that assembly.
18819         // But we only need to normalize it if it is a TYP_STRUCT
18820         // (which we need to do even if we have already set foundSIMDType).
18821         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
18822         {
18823             if (sigType == TYP_STRUCT)
18824             {
18825                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
18826             }
18827             foundSIMDType = true;
18828         }
18829 #endif // FEATURE_SIMD
18830         lclVarInfo[0].lclTypeInfo = sigType;
18831
18832         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
18833                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
18834                 (clsAttr & CORINFO_FLG_VALUECLASS)));
18835
18836         if (genActualType(thisArg->gtType) != genActualType(sigType))
18837         {
18838             if (sigType == TYP_REF)
18839             {
18840                 /* The argument cannot be bashed into a ref (see bug 750871) */
18841                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
18842                 return;
18843             }
18844
18845             /* This can only happen with byrefs <-> ints/shorts */
18846
18847             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
18848             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
18849
18850             if (sigType == TYP_BYREF)
18851             {
18852                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18853             }
18854             else if (thisArg->gtType == TYP_BYREF)
18855             {
18856                 assert(sigType == TYP_I_IMPL);
18857
18858                 /* If possible change the BYREF to an int */
18859                 if (thisArg->IsVarAddr())
18860                 {
18861                     thisArg->gtType              = TYP_I_IMPL;
18862                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18863                 }
18864                 else
18865                 {
18866                     /* Arguments 'int <- byref' cannot be bashed */
18867                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18868                     return;
18869                 }
18870             }
18871         }
18872     }
18873
18874     /* Init the types of the arguments and make sure the types
18875      * from the trees match the types in the signature */
18876
18877     CORINFO_ARG_LIST_HANDLE argLst;
18878     argLst = methInfo->args.args;
18879
18880     unsigned i;
18881     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
18882     {
18883         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
18884
18885         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
18886
18887 #ifdef FEATURE_SIMD
18888         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
18889         {
18890             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
18891             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
18892             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
18893             foundSIMDType = true;
18894             if (sigType == TYP_STRUCT)
18895             {
18896                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
18897                 sigType              = structType;
18898             }
18899         }
18900 #endif // FEATURE_SIMD
18901
18902         lclVarInfo[i].lclTypeInfo    = sigType;
18903         lclVarInfo[i].lclHasLdlocaOp = false;
18904
18905         /* Does the tree type match the signature type? */
18906
18907         GenTree* inlArgNode = inlArgInfo[i].argNode;
18908
18909         if (sigType != inlArgNode->gtType)
18910         {
18911             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
18912                but in bad IL cases with caller-callee signature mismatches we can see other types.
18913                Intentionally reject the mismatched cases here so that the jit stays robust
18914                (it simply declines the inline) when encountering bad IL. */
18915
18916             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
18917                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
18918                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
18919
18920             if (!isPlausibleTypeMatch)
18921             {
18922                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
18923                 return;
18924             }
18925
18926             /* Is it a narrowing or widening cast?
18927              * Widening casts are ok since the value computed is already
18928              * normalized to an int (on the IL stack) */
18929
18930             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
18931             {
18932                 if (sigType == TYP_BYREF)
18933                 {
18934                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18935                 }
18936                 else if (inlArgNode->gtType == TYP_BYREF)
18937                 {
18938                     assert(varTypeIsIntOrI(sigType));
18939
18940                     /* If possible bash the BYREF to an int */
18941                     if (inlArgNode->IsVarAddr())
18942                     {
18943                         inlArgNode->gtType           = TYP_I_IMPL;
18944                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18945                     }
18946                     else
18947                     {
18948                         /* Arguments 'int <- byref' cannot be changed */
18949                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18950                         return;
18951                     }
18952                 }
18953                 else if (genTypeSize(sigType) < EA_PTRSIZE)
18954                 {
18955                     /* Narrowing cast */
18956
18957                     if (inlArgNode->gtOper == GT_LCL_VAR &&
18958                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
18959                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
18960                     {
18961                         /* We don't need to insert a cast here as the variable
18962                            was assigned a normalized value of the right type */
18963
18964                         continue;
18965                     }
18966
18967                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
18968
18969                     inlArgInfo[i].argIsLclVar = false;
18970
18971                     /* Try to fold the node in case we have constant arguments */
18972
18973                     if (inlArgInfo[i].argIsInvariant)
18974                     {
18975                         inlArgNode            = gtFoldExprConst(inlArgNode);
18976                         inlArgInfo[i].argNode = inlArgNode;
18977                         assert(inlArgNode->OperIsConst());
18978                     }
18979                 }
18980 #ifdef _TARGET_64BIT_
18981                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
18982                 {
18983                     // This should only happen for int -> native int widening
18984                     inlArgNode = inlArgInfo[i].argNode =
18985                         gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
18986
18987                     inlArgInfo[i].argIsLclVar = false;
18988
18989                     /* Try to fold the node in case we have constant arguments */
18990
18991                     if (inlArgInfo[i].argIsInvariant)
18992                     {
18993                         inlArgNode            = gtFoldExprConst(inlArgNode);
18994                         inlArgInfo[i].argNode = inlArgNode;
18995                         assert(inlArgNode->OperIsConst());
18996                     }
18997                 }
18998 #endif // _TARGET_64BIT_
18999             }
19000         }
19001     }
19002
19003     /* Init the types of the local variables */
19004
19005     CORINFO_ARG_LIST_HANDLE localsSig;
19006     localsSig = methInfo->locals.args;
19007
19008     for (i = 0; i < methInfo->locals.numArgs; i++)
19009     {
19010         bool      isPinned;
19011         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
19012
19013         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
19014         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
19015         lclVarInfo[i + argCnt].lclTypeInfo    = type;
19016
19017         if (varTypeIsGC(type))
19018         {
19019             pInlineInfo->numberOfGcRefLocals++;
19020         }
19021
19022         if (isPinned)
19023         {
19024             // Pinned locals may cause inlines to fail.
19025             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
19026             if (inlineResult->IsFailure())
19027             {
19028                 return;
19029             }
19030         }
19031
19032         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
19033
19034         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
19035         // out on the inline.
19036         if (type == TYP_STRUCT)
19037         {
19038             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
19039             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
19040             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
19041             {
19042                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
19043                 if (inlineResult->IsFailure())
19044                 {
19045                     return;
19046                 }
19047
19048                 // Do further notification in the case where the call site is rare; some policies do
19049                 // not track the relative hotness of call sites for "always" inline cases.
19050                 if (pInlineInfo->iciBlock->isRunRarely())
19051                 {
19052                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
19053                     if (inlineResult->IsFailure())
19054                     {
19055
19056                         return;
19057                     }
19058                 }
19059             }
19060         }
19061
19062         localsSig = info.compCompHnd->getArgNext(localsSig);
19063
19064 #ifdef FEATURE_SIMD
19065         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
19066         {
19067             foundSIMDType = true;
19068             if (featureSIMD && type == TYP_STRUCT)
19069             {
19070                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
19071                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
19072             }
19073         }
19074 #endif // FEATURE_SIMD
19075     }
19076
19077 #ifdef FEATURE_SIMD
19078     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
19079     {
19080         foundSIMDType = true;
19081     }
19082     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
19083 #endif // FEATURE_SIMD
19084 }
19085
19086 //------------------------------------------------------------------------
19087 // impInlineFetchLocal: get a local var that represents an inlinee local
19088 //
19089 // Arguments:
19090 //    lclNum -- number of the inlinee local
19091 //    reason -- debug string describing purpose of the local var
19092 //
19093 // Returns:
19094 //    Number of the local to use
19095 //
19096 // Notes:
19097 //    This method is invoked only for locals actually used in the
19098 //    inlinee body.
19099 //
19100 //    Allocates a new temp if necessary, and copies key properties
19101 //    over from the inlinee local var info.
19102
19103 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
19104 {
19105     assert(compIsForInlining());
19106
19107     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
19108
19109     if (tmpNum == BAD_VAR_NUM)
19110     {
19111         const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
19112         const var_types      lclTyp       = inlineeLocal.lclTypeInfo;
19113
19114         // The lifetime of this local might span multiple BBs.
19115         // So it is a long lifetime local.
19116         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
19117
19118         // Copy over key info
19119         lvaTable[tmpNum].lvType                 = lclTyp;
19120         lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
19121         lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
19122         lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
19123         lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
19124
19125         // Copy over class handle for ref types. Note this may be a
19126         // shared type -- someday perhaps we can get the exact
19127         // signature and pass in a more precise type.
19128         if (lclTyp == TYP_REF)
19129         {
19130             assert(lvaTable[tmpNum].lvSingleDef == 0);
19131
19132             lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp;
19133             if (lvaTable[tmpNum].lvSingleDef)
19134             {
19135                 JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
19136             }
19137
19138             lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
19139         }
19140
19141         if (inlineeLocal.lclVerTypeInfo.IsStruct())
19142         {
19143             if (varTypeIsStruct(lclTyp))
19144             {
19145                 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
19146             }
19147             else
19148             {
19149                 // This is a wrapped primitive.  Make sure the verstate knows that
19150                 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
19151             }
19152         }
19153
19154 #ifdef DEBUG
19155         // Sanity check that we're properly prepared for gc ref locals.
19156         if (varTypeIsGC(lclTyp))
19157         {
19158             // Since there are gc locals we should have seen them earlier
19159             // and if there was a return value, set up the spill temp.
19160             assert(impInlineInfo->HasGcRefLocals());
19161             assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
19162         }
19163         else
19164         {
19165             // Make sure all pinned locals count as gc refs.
19166             assert(!inlineeLocal.lclIsPinned);
19167         }
19168 #endif // DEBUG
19169     }
19170
19171     return tmpNum;
19172 }
19173
19174 //------------------------------------------------------------------------
19175 // impInlineFetchArg: return tree node for argument value in an inlinee
19176 //
19177 // Arguments:
19178 //    lclNum -- argument number in inlinee IL
19179 //    inlArgInfo -- argument info for inlinee
19180 //    lclVarInfo -- var info for inlinee
19181 //
19182 // Returns:
19183 //    Tree for the argument's value. Often an inlinee-scoped temp
19184 //    GT_LCL_VAR but can be other tree kinds, if the argument
19185 //    expression from the caller can be directly substituted into the
19186 //    inlinee body.
19187 //
19188 // Notes:
19189 //    Must be used only for arguments -- use impInlineFetchLocal for
19190 //    inlinee locals.
19191 //
19192 //    Direct substitution is performed when the formal argument cannot
19193 //    change value in the inlinee body (no starg or ldarga), and the
19194 //    actual argument expression's value cannot be changed if it is
19195 //    substituted it into the inlinee body.
19196 //
19197 //    Even if an inlinee-scoped temp is returned here, it may later be
19198 //    "bashed" to a caller-supplied tree when arguments are actually
19199 //    passed (see fgInlinePrependStatements). Bashing can happen if
19200 //    the argument ends up being single use and other conditions are
19201 //    met. So the contents of the tree returned here may not end up
19202 //    being the ones ultimately used for the argument.
19203 //
19204 //    This method will side effect inlArgInfo. It should only be called
19205 //    for actual uses of the argument in the inlinee.
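//
//    Illustrative example (hypothetical): when inlining 'int Add(int a, int b) => a + b;' at a
//    call 'Add(x, 5)' where x is an unaliased caller local and the inlinee never writes its
//    args, the GT_LCL_VAR for x and the constant 5 can be substituted directly into the inlinee
//    body. For an actual like 'Add(GetValue(), 5)' the side-effecting call is instead spilled to
//    a temp that the inlinee body then reads.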
19206
19207 GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
19208 {
19209     // Cache the relevant arg and lcl info for this argument.
19210     // We will modify argInfo but not lclVarInfo.
19211     InlArgInfo&          argInfo          = inlArgInfo[lclNum];
19212     const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
19213     const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
19214     const var_types      lclTyp           = lclInfo.lclTypeInfo;
19215     GenTree*             op1              = nullptr;
19216
19217     if (argInfo.argIsInvariant && !argCanBeModified)
19218     {
19219         // Directly substitute constants or addresses of locals
19220         //
19221         // Clone the constant. Note that we cannot directly use
19222         // argNode in the trees even if !argInfo.argIsUsed as this
19223         // would introduce aliasing between inlArgInfo[].argNode and
19224         // impInlineExpr. Then gtFoldExpr() could change it, causing
19225         // further references to the argument working off of the
19226         // bashed copy.
19227         op1 = gtCloneExpr(argInfo.argNode);
19228         PREFIX_ASSUME(op1 != nullptr);
19229         argInfo.argTmpNum = BAD_VAR_NUM;
19230
19231         // We may need to retype to ensure we match the callee's view of the type.
19232         // Otherwise, callee pass-throughs of arguments can create return type
19233         // mismatches that block inlining.
19234         //
19235         // Note argument type mismatches that prevent inlining should
19236         // have been caught in impInlineInitVars.
19237         if (op1->TypeGet() != lclTyp)
19238         {
19239             op1->gtType = genActualType(lclTyp);
19240         }
19241     }
19242     else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
19243     {
19244         // Directly substitute unaliased caller locals for args that cannot be modified
19245         //
19246         // Use the caller-supplied node if this is the first use.
19247         op1               = argInfo.argNode;
19248         argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
19249
19250         // Use an equivalent copy if this is the second or subsequent
19251         // use, or if we need to retype.
19252         //
19253         // Note argument type mismatches that prevent inlining should
19254         // have been caught in impInlineInitVars.
19255         if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
19256         {
19257             assert(op1->gtOper == GT_LCL_VAR);
19258             assert(lclNum == op1->gtLclVar.gtLclILoffs);
19259
19260             var_types newTyp = lclTyp;
19261
19262             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
19263             {
19264                 newTyp = genActualType(lclTyp);
19265             }
19266
19267             // Create a new lcl var node - remember the argument lclNum
19268             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
19269         }
19270     }
19271     else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
19272     {
19273         /* Argument is a by-ref address to a struct, a normed struct, or its field.
19274            In these cases, don't spill the byref to a local, simply clone the tree and use it.
19275            This way we will increase the chance for this byref to be optimized away by
19276            a subsequent "dereference" operation.
19277
19278            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
19279            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
19280            For example, if the caller is:
19281                 ldloca.s   V_1  // V_1 is a local struct
19282                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
19283            and the callee being inlined has:
19284                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
19285                     ldarga.s   ptrToInts
19286                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
19287            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
19288            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
19289         */
19290         assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
19291         op1 = gtCloneExpr(argInfo.argNode);
19292     }
19293     else
19294     {
19295         /* Argument is a complex expression - it must be evaluated into a temp */
19296
19297         if (argInfo.argHasTmp)
19298         {
19299             assert(argInfo.argIsUsed);
19300             assert(argInfo.argTmpNum < lvaCount);
19301
19302             /* Create a new lcl var node - remember the argument lclNum */
19303             op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
19304
19305             /* This is the second or later use of this argument,
19306             so we have to use the temp (instead of the actual arg) */
19307             argInfo.argBashTmpNode = nullptr;
19308         }
19309         else
19310         {
19311             /* First time use */
19312             assert(!argInfo.argIsUsed);
19313
19314             /* Reserve a temp for the expression.
19315             * Use a large size node as we may change it later */
19316
19317             const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
19318
19319             lvaTable[tmpNum].lvType = lclTyp;
19320
19321             // For ref types, determine the type of the temp.
19322             if (lclTyp == TYP_REF)
19323             {
19324                 if (!argCanBeModified)
19325                 {
19326                     // If the arg can't be modified in the method
19327                     // body, use the type of the value, if
19328                     // known. Otherwise, use the declared type.
19329                     assert(lvaTable[tmpNum].lvSingleDef == 0);
19330                     lvaTable[tmpNum].lvSingleDef = 1;
19331                     JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
19332                     lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
19333                 }
19334                 else
19335                 {
19336                     // Arg might be modified, use the declared type of
19337                     // the argument.
19338                     lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
19339                 }
19340             }
19341
19342             assert(lvaTable[tmpNum].lvAddrExposed == 0);
19343             if (argInfo.argHasLdargaOp)
19344             {
19345                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
19346             }
19347
19348             if (lclInfo.lclVerTypeInfo.IsStruct())
19349             {
19350                 if (varTypeIsStruct(lclTyp))
19351                 {
19352                     lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
19353                     if (info.compIsVarArgs)
19354                     {
19355                         lvaSetStructUsedAsVarArg(tmpNum);
19356                     }
19357                 }
19358                 else
19359                 {
19360                     // This is a wrapped primitive.  Make sure the verstate knows that
19361                     lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
19362                 }
19363             }
19364
19365             argInfo.argHasTmp = true;
19366             argInfo.argTmpNum = tmpNum;
19367
19368             // If we require strict exception order, then arguments must
19369             // be evaluated in sequence before the body of the inlined method.
19370             // So we need to evaluate them to a temp.
19371             // Also, if arguments have global or local references, we need to
19372             // evaluate them to a temp before the inlined body as the
19373             // inlined body may be modifying the global ref.
19374             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
19375             // if it is a struct, because it requires some additional handling.
19376
19377             if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
19378                 !argInfo.argHasCallerLocalRef)
19379             {
19380                 /* Get a *LARGE* LCL_VAR node */
19381                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
19382
19383                 /* Record op1 as the very first use of this argument.
19384                 If there are no further uses of the arg, we may be
19385                 able to use the actual arg node instead of the temp.
19386                 If we do see any further uses, we will clear this. */
19387                 argInfo.argBashTmpNode = op1;
19388             }
19389             else
19390             {
19391                 /* Get a small LCL_VAR node */
19392                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
19393                 /* No bashing of this argument */
19394                 argInfo.argBashTmpNode = nullptr;
19395             }
19396         }
19397     }
19398
19399     // Mark this argument as used.
19400     argInfo.argIsUsed = true;
19401
19402     return op1;
19403 }
19404
19405 /******************************************************************************
19406  Is this the original "this" argument to the call being inlined?
19407
19408  Note that we do not inline methods with "starg 0", and so we do not need to
19409  worry about it.
19410 */
19411
19412 BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
19413 {
19414     assert(compIsForInlining());
19415     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
19416 }
19417
19418 //-----------------------------------------------------------------------------
19419 // This function checks if a dereference in the inlinee can guarantee that
19420 // the "this" is non-NULL.
19421 // If we haven't hit a branch or a side effect, and we are dereferencing
19422 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
19423 // then we can avoid a separate null pointer check.
19424 //
19425 // "additionalTreesToBeEvaluatedBefore"
19426 // is the set of pending trees that have not yet been added to the statement list,
19427 // and which have been removed from verCurrentState.esStack[]
19428
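//
// Illustrative example (hypothetical): when inlining 'int GetField() => this.m_field;', the first
// thing the inlinee does is dereference 'this' to load m_field. If that load happens in the first
// block, before any branch or globally visible side effect, the deref itself will fault on a null
// 'this', so the separate explicit null check on the inlined 'this' argument can be omitted.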
19429 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree*    additionalTreesToBeEvaluatedBefore,
19430                                                                   GenTree*    variableBeingDereferenced,
19431                                                                   InlArgInfo* inlArgInfo)
19432 {
19433     assert(compIsForInlining());
19434     assert(opts.OptEnabled(CLFLG_INLINING));
19435
19436     BasicBlock* block = compCurBB;
19437
19438     GenTree* stmt;
19439     GenTree* expr;
19440
19441     if (block != fgFirstBB)
19442     {
19443         return FALSE;
19444     }
19445
19446     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
19447     {
19448         return FALSE;
19449     }
19450
19451     if (additionalTreesToBeEvaluatedBefore &&
19452         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
19453     {
19454         return FALSE;
19455     }
19456
19457     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
19458     {
19459         expr = stmt->gtStmt.gtStmtExpr;
19460
19461         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
19462         {
19463             return FALSE;
19464         }
19465     }
19466
19467     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
19468     {
19469         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
19470         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
19471         {
19472             return FALSE;
19473         }
19474     }
19475
19476     return TRUE;
19477 }
19478
19479 //------------------------------------------------------------------------
19480 // impMarkInlineCandidate: determine if this call can be subsequently inlined
19481 //
19482 // Arguments:
19483 //    callNode -- call under scrutiny
19484 //    exactContextHnd -- context handle for inlining
19485 //    exactContextNeedsRuntimeLookup -- true if context required runtime lookup
19486 //    callInfo -- call info from VM
19487 //
19488 // Notes:
19489 //    If callNode is an inline candidate, this method sets the flag
19490 //    GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
19491 //    filled in the associated InlineCandidateInfo.
19492 //
19493 //    If callNode is not an inline candidate, and the reason is
19494 //    something that is inherent to the method being called, the
19495 //    method may be marked as "noinline" to short-circuit any
19496 //    future assessments of calls to this method.
19497
19498 void Compiler::impMarkInlineCandidate(GenTree*               callNode,
19499                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
19500                                       bool                   exactContextNeedsRuntimeLookup,
19501                                       CORINFO_CALL_INFO*     callInfo)
19502 {
19503     // Let the strategy know there's another call
19504     impInlineRoot()->m_inlineStrategy->NoteCall();
19505
19506     if (!opts.OptEnabled(CLFLG_INLINING))
19507     {
19508         /* XXX Mon 8/18/2008
19509          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
19510          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
19511          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
19512          * figure out why we did not set MAXOPT for this compile.
19513          */
19514         assert(!compIsForInlining());
19515         return;
19516     }
19517
19518     if (compIsForImportOnly())
19519     {
19520         // Don't bother creating the inline candidate during verification.
19521         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
19522         // that leads to the creation of multiple instances of Compiler.
19523         return;
19524     }
19525
19526     GenTreeCall* call = callNode->AsCall();
19527     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
19528
19529     // Don't inline if not optimizing root method
19530     if (opts.compDbgCode)
19531     {
19532         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
19533         return;
19534     }
19535
19536     // Don't inline if inlining into root method is disabled.
19537     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
19538     {
19539         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
19540         return;
19541     }
19542
19543     // Inlining candidate determination needs to honor only the IL tail prefix.
19544     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
19545     if (call->IsTailPrefixedCall())
19546     {
19547         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
19548         return;
19549     }
19550
19551     // Tail recursion elimination takes precedence over inlining.
19552     // TODO: We may want to do some of the additional checks from fgMorphCall
19553     // here to reduce the chance that we skip inlining a call which then ends up
19554     // neither optimized as a fast tail call nor turned into a loop.
19555     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
19556     {
19557         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
19558         return;
19559     }
19560
19561     if (call->IsVirtual())
19562     {
19563         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
19564         return;
19565     }
19566
19567     /* Ignore helper calls */
19568
19569     if (call->gtCallType == CT_HELPER)
19570     {
19571         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
19572         return;
19573     }
19574
19575     /* Ignore indirect calls */
19576     if (call->gtCallType == CT_INDIRECT)
19577     {
19578         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
19579         return;
19580     }
19581
19582     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
19583      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
19584      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
19585
19586     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
19587     unsigned              methAttr;
19588
19589     // Reuse method flags from the original callInfo if possible
19590     if (fncHandle == callInfo->hMethod)
19591     {
19592         methAttr = callInfo->methodFlags;
19593     }
19594     else
19595     {
19596         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
19597     }
19598
19599 #ifdef DEBUG
19600     if (compStressCompile(STRESS_FORCE_INLINE, 0))
19601     {
19602         methAttr |= CORINFO_FLG_FORCEINLINE;
19603     }
19604 #endif
19605
19606     // Check for COMPlus_AggressiveInlining
19607     if (compDoAggressiveInlining)
19608     {
19609         methAttr |= CORINFO_FLG_FORCEINLINE;
19610     }
19611
19612     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
19613     {
19614         /* Don't bother inlining blocks that are in the catch handler or filter regions */
19615         if (bbInCatchHandlerILRange(compCurBB))
19616         {
19617 #ifdef DEBUG
19618             if (verbose)
19619             {
19620                 printf("\nWill not inline blocks that are in the catch handler region\n");
19621             }
19622
19623 #endif
19624
19625             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
19626             return;
19627         }
19628
19629         if (bbInFilterILRange(compCurBB))
19630         {
19631 #ifdef DEBUG
19632             if (verbose)
19633             {
19634                 printf("\nWill not inline blocks that are in the filter region\n");
19635             }
19636 #endif
19637
19638             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
19639             return;
19640         }
19641     }
19642
19643     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
19644
19645     if (opts.compNeedSecurityCheck)
19646     {
19647         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
19648         return;
19649     }
19650
19651     /* Check if we tried to inline this method before */
19652
19653     if (methAttr & CORINFO_FLG_DONT_INLINE)
19654     {
19655         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
19656         return;
19657     }
19658
19659     /* Cannot inline synchronized methods */
19660
19661     if (methAttr & CORINFO_FLG_SYNCH)
19662     {
19663         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
19664         return;
19665     }
19666
19667     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
19668
19669     if (methAttr & CORINFO_FLG_SECURITYCHECK)
19670     {
19671         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
19672         return;
19673     }
19674
19675     /* Check legality of PInvoke callsite (for inlining of marshalling code) */
19676
19677     if (methAttr & CORINFO_FLG_PINVOKE)
19678     {
19679         // See comment in impCheckForPInvokeCall
19680         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
19681         if (!impCanPInvokeInlineCallSite(block))
19682         {
19683             inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH);
19684             return;
19685         }
19686     }
19687
19688     InlineCandidateInfo* inlineCandidateInfo = nullptr;
19689     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
19690
19691     if (inlineResult.IsFailure())
19692     {
19693         return;
19694     }
19695
19696     // The old value should be NULL
19697     assert(call->gtInlineCandidateInfo == nullptr);
19698
19699     // The new value should not be NULL.
19700     assert(inlineCandidateInfo != nullptr);
19701     inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
19702
19703     call->gtInlineCandidateInfo = inlineCandidateInfo;
19704
19705     // Mark the call node as inline candidate.
19706     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
19707
19708     // Let the strategy know there's another candidate.
19709     impInlineRoot()->m_inlineStrategy->NoteCandidate();
19710
19711     // Since we're not actually inlining yet, and this call site is
19712     // still just an inline candidate, there's nothing to report.
19713     inlineResult.SetReported();
19714 }
19715
19716 /******************************************************************************/
19717 // Returns true if the given intrinsic will be implemented by target-specific
19718 // instructions
19719
19720 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
19721 {
19722 #if defined(_TARGET_XARCH_)
19723     switch (intrinsicId)
19724     {
19725         // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
19726         // instructions to directly compute round/ceiling/floor.
19727         //
19728         // TODO: Because the x86 backend only targets SSE for floating-point code,
19729         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
19730         //       implemented those intrinsics as x87 instructions). If this poses
19731         //       a CQ problem, it may be necessary to change the implementation of
19732         //       the helper calls to decrease call overhead or switch back to the
19733         //       x87 instructions. This is tracked by #7097.
19734         case CORINFO_INTRINSIC_Sqrt:
19735         case CORINFO_INTRINSIC_Abs:
19736             return true;
19737
19738         case CORINFO_INTRINSIC_Round:
19739         case CORINFO_INTRINSIC_Ceiling:
19740         case CORINFO_INTRINSIC_Floor:
19741             return compSupports(InstructionSet_SSE41);
19742
19743         default:
19744             return false;
19745     }
19746 #elif defined(_TARGET_ARM64_)
19747     switch (intrinsicId)
19748     {
19749         case CORINFO_INTRINSIC_Sqrt:
19750         case CORINFO_INTRINSIC_Abs:
19751         case CORINFO_INTRINSIC_Round:
19752         case CORINFO_INTRINSIC_Floor:
19753         case CORINFO_INTRINSIC_Ceiling:
19754             return true;
19755
19756         default:
19757             return false;
19758     }
19759 #elif defined(_TARGET_ARM_)
19760     switch (intrinsicId)
19761     {
19762         case CORINFO_INTRINSIC_Sqrt:
19763         case CORINFO_INTRINSIC_Abs:
19764         case CORINFO_INTRINSIC_Round:
19765             return true;
19766
19767         default:
19768             return false;
19769     }
19770 #else
19771     // TODO: This portion of logic is not implemented for other architectures.
19772     // The reason for returning true is that, on all other architectures, the
19773     // only intrinsics enabled are target intrinsics.
19774     return true;
19775 #endif
19776 }
19777
19778 /******************************************************************************/
19779 // Returns true if the given intrinsic will be implemented by calling System.Math
19780 // methods.
19781
19782 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
19783 {
19784     // Currently, if a math intrinsic is not implemented by target-specific
19785     // instructions, it will be implemented by a System.Math call. In the
19786     // future, if we turn to implementing some of them with helper calls,
19787     // this predicate needs to be revisited.
19788     return !IsTargetIntrinsic(intrinsicId);
19789 }
19790
19791 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
19792 {
19793     switch (intrinsicId)
19794     {
19795         case CORINFO_INTRINSIC_Sin:
19796         case CORINFO_INTRINSIC_Cbrt:
19797         case CORINFO_INTRINSIC_Sqrt:
19798         case CORINFO_INTRINSIC_Abs:
19799         case CORINFO_INTRINSIC_Cos:
19800         case CORINFO_INTRINSIC_Round:
19801         case CORINFO_INTRINSIC_Cosh:
19802         case CORINFO_INTRINSIC_Sinh:
19803         case CORINFO_INTRINSIC_Tan:
19804         case CORINFO_INTRINSIC_Tanh:
19805         case CORINFO_INTRINSIC_Asin:
19806         case CORINFO_INTRINSIC_Asinh:
19807         case CORINFO_INTRINSIC_Acos:
19808         case CORINFO_INTRINSIC_Acosh:
19809         case CORINFO_INTRINSIC_Atan:
19810         case CORINFO_INTRINSIC_Atan2:
19811         case CORINFO_INTRINSIC_Atanh:
19812         case CORINFO_INTRINSIC_Log10:
19813         case CORINFO_INTRINSIC_Pow:
19814         case CORINFO_INTRINSIC_Exp:
19815         case CORINFO_INTRINSIC_Ceiling:
19816         case CORINFO_INTRINSIC_Floor:
19817             return true;
19818         default:
19819             return false;
19820     }
19821 }
19822
19823 bool Compiler::IsMathIntrinsic(GenTree* tree)
19824 {
19825     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
19826 }
19827
19828 //------------------------------------------------------------------------
19829 // impDevirtualizeCall: Attempt to change a virtual vtable call into a
19830 //   normal call
19831 //
19832 // Arguments:
19833 //     call -- the call node to examine/modify
19834 //     method   -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
19835 //     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
19836 //     contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
19837 //     exactContextHnd -- [OUT] updated context handle iff call devirtualized
19838 //
19839 // Notes:
19840 //     Virtual calls in IL will always "invoke" the base class method.
19841 //
19842 //     This transformation looks for evidence that the type of 'this'
19843 //     in the call is exactly known, is a final class or would invoke
19844 //     a final method, and if that and other safety checks pan out,
19845 //     modifies the call and the call info to create a direct call.
19846 //
19847 //     This transformation is initially done in the importer and not
19848 //     in some subsequent optimization pass because we want it to be
19849 //     upstream of inline candidate identification.
19850 //
19851 //     However, later phases may supply improved type information that
19852 //     can enable further devirtualization. We currently reinvoke this
19853 //     code after inlining, if the return value of the inlined call is
19854 //     the 'this obj' of a subsequent virtual call.
19855 //
19856 //     If devirtualization succeeds and the call's this object is the
19857 //     result of a box, the jit will ask the EE for the unboxed entry
19858 //     point. If this exists, the jit will see if it can rework the box
19859 //     to instead make a local copy. If that is doable, the call is
19860 //     updated to invoke the unboxed entry on the local copy.
19861 //
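// Illustrative example (not part of the original comments; 'Widget' is a
// hypothetical sealed class):
//
//     sealed class Widget { public override string ToString() => "widget"; }
//     object o = new Widget();
//     o.ToString();   // IL: callvirt instance string System.Object::ToString()
//
// Here the type of 'this' is known exactly and the class is final, so the
// callvirt can be rewritten as a direct call to Widget::ToString and then
// considered as an inline candidate.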
19862 void Compiler::impDevirtualizeCall(GenTreeCall*            call,
19863                                    CORINFO_METHOD_HANDLE*  method,
19864                                    unsigned*               methodFlags,
19865                                    CORINFO_CONTEXT_HANDLE* contextHandle,
19866                                    CORINFO_CONTEXT_HANDLE* exactContextHandle)
19867 {
19868     assert(call != nullptr);
19869     assert(method != nullptr);
19870     assert(methodFlags != nullptr);
19871     assert(contextHandle != nullptr);
19872
19873     // This should be a virtual vtable or virtual stub call.
19874     assert(call->IsVirtual());
19875
19876     // Bail if not optimizing
19877     if (opts.MinOpts())
19878     {
19879         return;
19880     }
19881
19882     // Bail if debuggable codegen
19883     if (opts.compDbgCode)
19884     {
19885         return;
19886     }
19887
19888 #if defined(DEBUG)
19889     // Bail if devirt is disabled.
19890     if (JitConfig.JitEnableDevirtualization() == 0)
19891     {
19892         return;
19893     }
19894
19895     const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
19896 #endif // DEBUG
19897
19898     // Fetch information about the virtual method we're calling.
19899     CORINFO_METHOD_HANDLE baseMethod        = *method;
19900     unsigned              baseMethodAttribs = *methodFlags;
19901
19902     if (baseMethodAttribs == 0)
19903     {
19904         // For late devirt we may not have method attributes, so fetch them.
19905         baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19906     }
19907     else
19908     {
19909 #if defined(DEBUG)
19910         // Validate that callInfo has up to date method flags
19911         const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19912
19913         // All the base method attributes should agree, save that
19914         // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
19915         // because of concurrent jitting activity.
19916         //
19917         // Note we don't look at this particular flag bit below, and
19918         // later on (if we do try and inline) we will rediscover why
19919         // the method can't be inlined, so there's no danger here in
19920         // seeing this particular flag bit in different states between
19921         // the cached and fresh values.
19922         if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
19923         {
19924             assert(!"mismatched method attributes");
19925         }
19926 #endif // DEBUG
19927     }
19928
19929     // In R2R mode, we might see virtual stub calls to
19930     // non-virtuals. For instance, cases where the non-virtual method
19931     // is in a different assembly but is called via CALLVIRT. For
19932     // version resilience we must allow for the fact that the method
19933     // might become virtual in some update.
19934     //
19935     // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
19936     // regular call+nullcheck upstream, so we won't reach this
19937     // point.
19938     if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
19939     {
19940         assert(call->IsVirtualStub());
19941         assert(opts.IsReadyToRun());
19942         JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
19943         return;
19944     }
19945
19946     // See what we know about the type of 'this' in the call.
19947     GenTree*             thisObj       = call->gtCallObjp->gtEffectiveVal(false);
19948     GenTree*             actualThisObj = nullptr;
19949     bool                 isExact       = false;
19950     bool                 objIsNonNull  = false;
19951     CORINFO_CLASS_HANDLE objClass      = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
19952
19953     // See if we have special knowledge that can get us a type or a better type.
19954     if ((objClass == nullptr) || !isExact)
19955     {
19956         // Walk back through any return expression placeholders
19957         actualThisObj = thisObj->gtRetExprVal();
19958
19959         // See if we landed on a call to a special intrinsic method
19960         if (actualThisObj->IsCall())
19961         {
19962             GenTreeCall* thisObjCall = actualThisObj->AsCall();
19963             if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
19964             {
19965                 assert(thisObjCall->gtCallType == CT_USER_FUNC);
19966                 CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
19967                 CORINFO_CLASS_HANDLE  specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
19968                 if (specialObjClass != nullptr)
19969                 {
19970                     objClass     = specialObjClass;
19971                     isExact      = true;
19972                     objIsNonNull = true;
19973                 }
19974             }
19975         }
19976     }
19977
19978     // Bail if we know nothing.
19979     if (objClass == nullptr)
19980     {
19981         JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
19982         return;
19983     }
19984
19985     // Fetch information about the class that introduced the virtual method.
19986     CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
19987     const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
19988
19989 #if !defined(FEATURE_CORECLR)
19990     // If the base class is not beforefieldinit then devirtualizing may
19991     // cause us to miss a base class init trigger. The spec says we don't
19992     // need a trigger for ref class callvirts, but desktop seems to
19993     // have one anyway. So defer.
19994     if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
19995     {
19996         JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
19997         return;
19998     }
19999 #endif // FEATURE_CORECLR
20000
20001     // Is the call an interface call?
20002     const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
20003
20004     // If the objClass is sealed (final), then we may be able to devirtualize.
20005     const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
20006     const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
20007
20008 #if defined(DEBUG)
20009     const char* callKind       = isInterface ? "interface" : "virtual";
20010     const char* objClassNote   = "[?]";
20011     const char* objClassName   = "?objClass";
20012     const char* baseClassName  = "?baseClass";
20013     const char* baseMethodName = "?baseMethod";
20014
20015     if (verbose || doPrint)
20016     {
20017         objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
20018         objClassName   = info.compCompHnd->getClassName(objClass);
20019         baseClassName  = info.compCompHnd->getClassName(baseClass);
20020         baseMethodName = eeGetMethodName(baseMethod, nullptr);
20021
20022         if (verbose)
20023         {
20024             printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
20025                    "    class for 'this' is %s%s (attrib %08x)\n"
20026                    "    base method is %s::%s\n",
20027                    callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
20028         }
20029     }
20030 #endif // defined(DEBUG)
20031
20032     // Bail if obj class is an interface.
20033     // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
20034     //   IL_021d:  ldloc.0
20035     //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
20036     if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
20037     {
20038         JITDUMP("--- obj class is interface, sorry\n");
20039         return;
20040     }
20041
20042     if (isInterface)
20043     {
20044         assert(call->IsVirtualStub());
20045         JITDUMP("--- base class is interface\n");
20046     }
20047
20048     // Fetch the method that would be called based on the declared type of 'this'
20049     CORINFO_CONTEXT_HANDLE ownerType     = *contextHandle;
20050     CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
20051
20052     // If we failed to get a handle, we can't devirtualize.  This can
20053     // happen when prejitting, if the devirtualization crosses
20054     // servicing bubble boundaries.
20055     if (derivedMethod == nullptr)
20056     {
20057         JITDUMP("--- no derived method, sorry\n");
20058         return;
20059     }
20060
20061     // Fetch method attributes to see if method is marked final.
20062     DWORD      derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
20063     const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
20064
20065 #if defined(DEBUG)
20066     const char* derivedClassName  = "?derivedClass";
20067     const char* derivedMethodName = "?derivedMethod";
20068
20069     const char* note = "speculative";
20070     if (isExact)
20071     {
20072         note = "exact";
20073     }
20074     else if (objClassIsFinal)
20075     {
20076         note = "final class";
20077     }
20078     else if (derivedMethodIsFinal)
20079     {
20080         note = "final method";
20081     }
20082
20083     if (verbose || doPrint)
20084     {
20085         derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
20086         if (verbose)
20087         {
20088             printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
20089             gtDispTree(call);
20090         }
20091     }
20092 #endif // defined(DEBUG)
20093
20094     if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
20095     {
20096         // Type is not exact, and neither the class nor the method is final.
20097         //
20098         // We could speculatively devirtualize, but there's no
20099         // reason to believe the derived method is the one that
20100         // is likely to be invoked.
20101         //
20102         // If there's currently no further overriding (that is, at
20103         // the time of jitting, objClass has no subclasses that
20104         // override this method), then perhaps we'd be willing to
20105         // make a bet...?
20106         JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
20107         return;
20108     }
20109
20110     // For interface calls we must have an exact type or final class.
20111     if (isInterface && !isExact && !objClassIsFinal)
20112     {
20113         JITDUMP("    Class not final or exact for interface, no devirtualization\n");
20114         return;
20115     }
20116
20117     JITDUMP("    %s; can devirtualize\n", note);
20118
20119     // Make the updates.
20120     call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
20121     call->gtFlags &= ~GTF_CALL_VIRT_STUB;
20122     call->gtCallMethHnd = derivedMethod;
20123     call->gtCallType    = CT_USER_FUNC;
20124     call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED;
20125
20126     // Virtual calls include an implicit null check, which we may
20127     // now need to make explicit.
20128     if (!objIsNonNull)
20129     {
20130         call->gtFlags |= GTF_CALL_NULLCHECK;
20131     }
20132
20133     // Clear the inline candidate info (may be non-null since
20134     // it's a union field used for other things by virtual
20135     // stubs)
20136     call->gtInlineCandidateInfo = nullptr;
20137
20138 #if defined(DEBUG)
20139     if (verbose)
20140     {
20141         printf("... after devirt...\n");
20142         gtDispTree(call);
20143     }
20144
20145     if (doPrint)
20146     {
20147         printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
20148                baseMethodName, derivedClassName, derivedMethodName, note);
20149     }
20150 #endif // defined(DEBUG)
20151
20152     // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
20153     if (thisObj->IsBoxedValue())
20154     {
20155         JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
20156
20157         // Note for some shared methods the unboxed entry point requires an extra parameter.
20158         bool                  requiresInstMethodTableArg = false;
20159         CORINFO_METHOD_HANDLE unboxedEntryMethod =
20160             info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
20161
20162         if (unboxedEntryMethod != nullptr)
20163         {
20164             // Since the call is the only consumer of the box and receives just an
20165             // interior pointer to it, we know the box can't escape.
20166             //
20167             // So, revise the box to simply create a local copy, use the address of that copy
20168             // as the this pointer, and update the entry point to the unboxed entry.
20169             //
20170             // Ideally, we then inline the boxed method, and if it turns out not to modify
20171             // the copy, we can undo the copy too.
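            // Illustrative example (not part of the original comments):
            //
            //     int i = 42;
            //     object o = i;      // box
            //     o.GetHashCode();   // devirtualized to System.Int32::GetHashCode
            //
            // The box can be replaced with a local copy of 'i', and the call is
            // updated to invoke the unboxed entry point with the address of that
            // copy as 'this'.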
20172             if (requiresInstMethodTableArg)
20173             {
20174                 // Perform a trial box removal and ask for the type handle tree.
20175                 JITDUMP("Unboxed entry needs method table arg...\n");
20176                 GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);
20177
20178                 if (methodTableArg != nullptr)
20179                 {
20180                     // If that worked, turn the box into a copy to a local var
20181                     JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
20182                     GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
20183
20184                     if (localCopyThis != nullptr)
20185                     {
20186                         // Pass the local var as this and the type handle as a new arg
20187                         JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table arg\n");
20188                         call->gtCallObjp = localCopyThis;
20189                         call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
20190
20191                         // Prepend for R2L arg passing or empty L2R passing
20192                         if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
20193                         {
20194                             call->gtCallArgs = gtNewListNode(methodTableArg, call->gtCallArgs);
20195                         }
20196                         // Append for non-empty L2R
20197                         else
20198                         {
20199                             GenTreeArgList* beforeArg = call->gtCallArgs;
20200                             while (beforeArg->Rest() != nullptr)
20201                             {
20202                                 beforeArg = beforeArg->Rest();
20203                             }
20204
20205                             beforeArg->Rest() = gtNewListNode(methodTableArg, nullptr);
20206                         }
20207
20208                         call->gtCallMethHnd = unboxedEntryMethod;
20209                         derivedMethod       = unboxedEntryMethod;
20210
20211                         // Method attributes will differ because unboxed entry point is shared
20212                         const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
20213                         JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
20214                                 unboxedMethodAttribs);
20215                         derivedMethodAttribs = unboxedMethodAttribs;
20216                     }
20217                     else
20218                     {
20219                         JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
20220                     }
20221                 }
20222                 else
20223                 {
20224                     JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
20225                 }
20226             }
20227             else
20228             {
20229                 JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
20230                 GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
20231
20232                 if (localCopyThis != nullptr)
20233                 {
20234                     JITDUMP("Success! invoking unboxed entry point on local copy\n");
20235                     call->gtCallObjp    = localCopyThis;
20236                     call->gtCallMethHnd = unboxedEntryMethod;
20237                     call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
20238                     derivedMethod = unboxedEntryMethod;
20239                 }
20240                 else
20241                 {
20242                     JITDUMP("Sorry, failed to undo the box\n");
20243                 }
20244             }
20245         }
20246         else
20247         {
20248             // Many of the low-level methods on value classes won't have unboxed entries,
20249             // as they need access to the type of the object.
20250             //
20251             // Note this may be a cue for us to stack allocate the boxed object, since
20252             // we probably know that these objects don't escape.
20253             JITDUMP("Sorry, failed to find unboxed entry point\n");
20254         }
20255     }
20256
20257     // Fetch the class that introduced the derived method.
20258     //
20259     // Note this may not equal objClass, if there is a
20260     // final method that objClass inherits.
20261     CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
20262
20263     // Need to update call info too. This is fragile
20264     // but hopefully the derived method conforms to
20265     // the base in most other ways.
20266     *method        = derivedMethod;
20267     *methodFlags   = derivedMethodAttribs;
20268     *contextHandle = MAKE_METHODCONTEXT(derivedMethod);
20269
20270     // Update context handle.
20271     if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
20272     {
20273         *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
20274     }
20275
20276 #ifdef FEATURE_READYTORUN_COMPILER
20277     if (opts.IsReadyToRun())
20278     {
20279         // For R2R, getCallInfo triggers bookkeeping on the zap
20280         // side so we need to call it here.
20281         //
20282         // First, cons up a suitable resolved token.
20283         CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
20284
20285         derivedResolvedToken.tokenScope   = info.compScopeHnd;
20286         derivedResolvedToken.tokenContext = *contextHandle;
20287         derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
20288         derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
20289         derivedResolvedToken.hClass       = derivedClass;
20290         derivedResolvedToken.hMethod      = derivedMethod;
20291
20292         // Look up the new call info.
20293         CORINFO_CALL_INFO derivedCallInfo;
20294         eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
20295
20296         // Update the call.
20297         call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
20298         call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
20299         call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
20300     }
20301 #endif // FEATURE_READYTORUN_COMPILER
20302 }
20303
20304 //------------------------------------------------------------------------
20305 // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
20306 //   to an intrinsic returns an exact type
20307 //
20308 // Arguments:
20309 //     methodHnd -- handle for the special intrinsic method
20310 //
20311 // Returns:
20312 //     Exact class handle returned by the intrinsic call, if known.
20313 //     Nullptr if not known, or not likely to lead to beneficial optimization.
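//
// Illustrative example (not part of the original comments): for a call to
// EqualityComparer<string>.Default, System.String is sealed (final), so the VM
// can report the exact comparer class it will hand back; knowing that exact
// return type lets later devirtualization and inlining simplify the comparer's
// virtual Equals/GetHashCode calls.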
20314
20315 CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
20316 {
20317     JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
20318
20319     CORINFO_CLASS_HANDLE result = nullptr;
20320
20321     // See what intrinsic we have...
20322     const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
20323     switch (ni)
20324     {
20325         case NI_System_Collections_Generic_EqualityComparer_get_Default:
20326         {
20327             // Expect one class generic parameter; figure out which it is.
20328             CORINFO_SIG_INFO sig;
20329             info.compCompHnd->getMethodSig(methodHnd, &sig);
20330             assert(sig.sigInst.classInstCount == 1);
20331             CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
20332             assert(typeHnd != nullptr);
20333
20334             // Lookup can be incorrect when we have __Canon, as it won't appear
20335             // to implement any interface types.
20336             //
20337             // And if we do not have a final type, devirt & inlining is
20338             // unlikely to result in much simplification.
20339             //
20340             // We can use CORINFO_FLG_FINAL to screen out both of these cases.
20341             const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
20342             const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
20343
20344             if (isFinalType)
20345             {
20346                 result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
20347                 JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
20348                         result != nullptr ? eeGetClassName(result) : "unknown");
20349             }
20350             else
20351             {
20352                 JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
20353             }
20354
20355             break;
20356         }
20357
20358         default:
20359         {
20360             JITDUMP("This special intrinsic not handled, sorry...\n");
20361             break;
20362         }
20363     }
20364
20365     return result;
20366 }
20367
20368 //------------------------------------------------------------------------
20369 // impAllocateToken: create a CORINFO_RESOLVED_TOKEN in jit-allocated memory and initialize it.
20370 //
20371 // Arguments:
20372 //    token - init value for the allocated token.
20373 //
20374 // Return Value:
20375 //    pointer to the token in jit-allocated memory.
20376 CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
20377 {
20378     CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1);
20379     *memory                        = token;
20380     return memory;
20381 }
20382
20383 //------------------------------------------------------------------------
20384 // SpillRetExprHelper: iterate through the argument trees and spill ret_expr nodes to local variables.
20385 //
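// Typical usage (as in addFatPointerCandidate below): construct the helper with
// the current Compiler instance, then call StoreRetExprResultsInArgs on the call
// whose arguments (and 'this') may contain GT_RET_EXPR nodes; each such node is
// assigned to a fresh temp and replaced with a use of that temp.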
20386 class SpillRetExprHelper
20387 {
20388 public:
20389     SpillRetExprHelper(Compiler* comp) : comp(comp)
20390     {
20391     }
20392
20393     void StoreRetExprResultsInArgs(GenTreeCall* call)
20394     {
20395         GenTreeArgList** pArgs = &call->gtCallArgs;
20396         if (*pArgs != nullptr)
20397         {
20398             comp->fgWalkTreePre((GenTree**)pArgs, SpillRetExprVisitor, this);
20399         }
20400
20401         GenTree** pThisArg = &call->gtCallObjp;
20402         if (*pThisArg != nullptr)
20403         {
20404             comp->fgWalkTreePre(pThisArg, SpillRetExprVisitor, this);
20405         }
20406     }
20407
20408 private:
20409     static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
20410     {
20411         assert((pTree != nullptr) && (*pTree != nullptr));
20412         GenTree* tree = *pTree;
20413         if ((tree->gtFlags & GTF_CALL) == 0)
20414         {
20415             // Trees with ret_expr are marked as GTF_CALL.
20416             return Compiler::WALK_SKIP_SUBTREES;
20417         }
20418         if (tree->OperGet() == GT_RET_EXPR)
20419         {
20420             SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
20421             walker->StoreRetExprAsLocalVar(pTree);
20422         }
20423         return Compiler::WALK_CONTINUE;
20424     }
20425
20426     void StoreRetExprAsLocalVar(GenTree** pRetExpr)
20427     {
20428         GenTree* retExpr = *pRetExpr;
20429         assert(retExpr->OperGet() == GT_RET_EXPR);
20430         JITDUMP("Store return expression %u  as a local var.\n", retExpr->gtTreeID);
20431         unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
20432         comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
20433         *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
20434     }
20435
20436 private:
20437     Compiler* comp;
20438 };
20439
20440 //------------------------------------------------------------------------
20441 // addFatPointerCandidate: mark the call as a fat pointer candidate and note on the method that it has one.
20442 //                         Spill any ret_expr nodes in the call, because they can't be cloned.
20443 //
20444 // Arguments:
20445 //    call - fat calli candidate
20446 //
20447 void Compiler::addFatPointerCandidate(GenTreeCall* call)
20448 {
20449     setMethodHasFatPointer();
20450     call->SetFatPointerCandidate();
20451     SpillRetExprHelper helper(this);
20452     helper.StoreRetExprResultsInArgs(call);
20453 }