do not use = {nullptr} initialization. (#11153)
src/jit/importer.cpp (platform/upstream/coreclr.git)
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
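// Helpers for IL verification checks. Verify() raises a verification exception (when
// verification is actually required) if 'cond' is false and then continues;
// VerifyOrReturn() additionally returns from the caller; VerifyOrReturnSpeculative()
// silently returns false when running speculatively, and otherwise raises the exception
// and returns false.
// Example use: Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");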
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // The ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
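// Pushes a null object reference (an integer constant 0 of type TYP_REF, tagged with
// verification type TI_NULL) onto the importer's stack.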
148 inline void Compiler::impPushNullObjRefOnStack()
149 {
150     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
151 }
152
153 // This method gets called when we run into unverifiable code
154 // (and we are verifying the method)
155
156 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
157                                                           DEBUGARG(unsigned line))
158 {
159     // Remember that the code is not verifiable
160     // Note that the method may yet pass canSkipMethodVerification(),
161     // and so the presence of unverifiable code may not be an issue.
162     tiIsVerifiableCode = FALSE;
163
164 #ifdef DEBUG
165     const char* tail = strrchr(file, '\\');
166     if (tail)
167     {
168         file = tail + 1;
169     }
170
171     if (JitConfig.JitBreakOnUnsafeCode())
172     {
173         assert(!"Unsafe code detected");
174     }
175 #endif
176
177     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
178             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
179
180     if (verNeedsVerification() || compIsForImportOnly())
181     {
182         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
183                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
184         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
185     }
186 }
187
188 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
189                                                                     DEBUGARG(unsigned line))
190 {
191     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
192             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
193
194 #ifdef DEBUG
195     //    BreakIfDebuggerPresent();
196     if (getBreakOnBadCode())
197     {
198         assert(!"Typechecking error");
199     }
200 #endif
201
202     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
203     UNREACHABLE();
204 }
205
206 // Helper function that tells us whether the IL instruction at the given address
207 // consumes an address at the top of the stack. We use it to avoid unnecessarily
208 // marking locals as address-taken (lvAddrTaken).
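// For example (illustrative only; the field name below is hypothetical), in a sequence like:
//
//          ldloca.0
//          ldfld int32 SomeStruct::someField
//
// the ldfld consumes the address pushed by ldloca, so the local does not need to be
// marked as address-taken -- unless the field is a small int type (see the check below).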
209 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
210 {
211     assert(!compIsForInlining());
212
213     OPCODE opcode;
214
215     opcode = (OPCODE)getU1LittleEndian(codeAddr);
216
217     switch (opcode)
218     {
219         // case CEE_LDFLDA: We're taking this one out because if you have a sequence
220         // like
221         //
222         //          ldloca.0
223         //          ldflda whatever
224         //
225         // of a primitive-like struct, you end up after morphing with the address of a local
226         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
227         // for structs that contain other structs, which isn't a case we handle very
228         // well now for other reasons.
229
230         case CEE_LDFLD:
231         {
232             // We won't collapse small fields. This is probably not the right place to have this
233             // check, but we're only using the function for this purpose, and it is easy to factor
234             // out if we need to do so.
235
236             CORINFO_RESOLVED_TOKEN resolvedToken;
237             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
238
239             CORINFO_CLASS_HANDLE clsHnd;
240             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
241
242             // Preserve 'small' int types
243             if (lclTyp > TYP_INT)
244             {
245                 lclTyp = genActualType(lclTyp);
246             }
247
248             if (varTypeIsSmall(lclTyp))
249             {
250                 return false;
251             }
252
253             return true;
254         }
255         default:
256             break;
257     }
258
259     return false;
260 }
261
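// Reads a 4-byte metadata token from the IL at 'addr' and resolves it; when IL
// verification is needed, a resolution failure is reported as a verification error.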
262 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
263 {
264     pResolvedToken->tokenContext = impTokenLookupContextHandle;
265     pResolvedToken->tokenScope   = info.compScopeHnd;
266     pResolvedToken->token        = getU4LittleEndian(addr);
267     pResolvedToken->tokenType    = kind;
268
269     if (!tiVerificationNeeded)
270     {
271         info.compCompHnd->resolveToken(pResolvedToken);
272     }
273     else
274     {
275         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
276     }
277 }
278
279 /*****************************************************************************
280  *
281  *  Pop one tree from the stack.
282  */
283
284 StackEntry Compiler::impPopStack()
285 {
286     if (verCurrentState.esStackDepth == 0)
287     {
288         BADCODE("stack underflow");
289     }
290
291 #ifdef DEBUG
292 #if VERBOSE_VERIFY
293     if (VERBOSE && tiVerificationNeeded)
294     {
295         JITDUMP("\n");
296         printf(TI_DUMP_PADDING);
297         printf("About to pop from the stack: ");
298         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
299         ti.Dump();
300     }
301 #endif // VERBOSE_VERIFY
302 #endif // DEBUG
303
304     return verCurrentState.esStack[--verCurrentState.esStackDepth];
305 }
306
307 /*****************************************************************************
308  *
309  *  Peek at the n'th (0-based) tree from the top of the stack.
310  */
311
312 StackEntry& Compiler::impStackTop(unsigned n)
313 {
314     if (verCurrentState.esStackDepth <= n)
315     {
316         BADCODE("stack underflow");
317     }
318
319     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
320 }
321
322 unsigned Compiler::impStackHeight()
323 {
324     return verCurrentState.esStackDepth;
325 }
326
327 /*****************************************************************************
328  *  Some of the trees are spilled in a special way. While unspilling them, or
329  *  making a copy, these need special handling. The function below
330  *  enumerates the operators that are possible after spilling.
331  */
332
333 #ifdef DEBUG // only used in asserts
334 static bool impValidSpilledStackEntry(GenTreePtr tree)
335 {
336     if (tree->gtOper == GT_LCL_VAR)
337     {
338         return true;
339     }
340
341     if (tree->OperIsConst())
342     {
343         return true;
344     }
345
346     return false;
347 }
348 #endif
349
350 /*****************************************************************************
351  *
352  *  The following logic is used to save/restore stack contents.
353  *  If 'copy' is true, then we make a copy of the trees on the stack. These
354  *  have to all be cloneable/spilled values.
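 *  (That is, constants or GT_LCL_VAR nodes -- see impValidSpilledStackEntry above.)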
355  */
356
357 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
358 {
359     savePtr->ssDepth = verCurrentState.esStackDepth;
360
361     if (verCurrentState.esStackDepth)
362     {
363         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
364         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
365
366         if (copy)
367         {
368             StackEntry* table = savePtr->ssTrees;
369
370             /* Make a fresh copy of all the stack entries */
371
372             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
373             {
374                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
375                 GenTreePtr tree   = verCurrentState.esStack[level].val;
376
377                 assert(impValidSpilledStackEntry(tree));
378
379                 switch (tree->gtOper)
380                 {
381                     case GT_CNS_INT:
382                     case GT_CNS_LNG:
383                     case GT_CNS_DBL:
384                     case GT_CNS_STR:
385                     case GT_LCL_VAR:
386                         table->val = gtCloneExpr(tree);
387                         break;
388
389                     default:
390                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
391                         break;
392                 }
393             }
394         }
395         else
396         {
397             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
398         }
399     }
400 }
401
402 void Compiler::impRestoreStackState(SavedStack* savePtr)
403 {
404     verCurrentState.esStackDepth = savePtr->ssDepth;
405
406     if (verCurrentState.esStackDepth)
407     {
408         memcpy(verCurrentState.esStack, savePtr->ssTrees,
409                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
410     }
411 }
412
413 /*****************************************************************************
414  *
415  *  Get the tree list started for a new basic block.
416  */
417 inline void Compiler::impBeginTreeList()
418 {
419     assert(impTreeList == nullptr && impTreeLast == nullptr);
420
421     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
422 }
423
424 /*****************************************************************************
425  *
426  *  Store the given start and end stmt in the given basic block. This is
427  *  mostly called by impEndTreeList(BasicBlock *block). It is called
428  *  directly only for handling CEE_LEAVEs out of finally-protected try regions.
429  */
430
431 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
432 {
433     assert(firstStmt->gtOper == GT_STMT);
434     assert(lastStmt->gtOper == GT_STMT);
435
436     /* Make the list circular, so that we can easily walk it backwards */
437
438     firstStmt->gtPrev = lastStmt;
439
440     /* Store the tree list in the basic block */
441
442     block->bbTreeList = firstStmt;
443
444     /* The block should not already be marked as imported */
445     assert((block->bbFlags & BBF_IMPORTED) == 0);
446
447     block->bbFlags |= BBF_IMPORTED;
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the current tree list in the given basic block.
453  */
454
455 inline void Compiler::impEndTreeList(BasicBlock* block)
456 {
457     assert(impTreeList->gtOper == GT_BEG_STMTS);
458
459     GenTreePtr firstTree = impTreeList->gtNext;
460
461     if (!firstTree)
462     {
463         /* The block should not already be marked as imported */
464         assert((block->bbFlags & BBF_IMPORTED) == 0);
465
466         // Empty block. Just mark it as imported
467         block->bbFlags |= BBF_IMPORTED;
468     }
469     else
470     {
471         // Ignore the GT_BEG_STMTS
472         assert(firstTree->gtPrev == impTreeList);
473
474         impEndTreeList(block, firstTree, impTreeLast);
475     }
476
477 #ifdef DEBUG
478     if (impLastILoffsStmt != nullptr)
479     {
480         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
481         impLastILoffsStmt                          = nullptr;
482     }
483
484     impTreeList = impTreeLast = nullptr;
485 #endif
486 }
487
488 /*****************************************************************************
489  *
490  *  Check that storing the given tree doesn't mess up the semantic order. Note
491  *  that this has only limited value as we can only check [0..chkLevel).
492  */
493
494 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
495 {
496 #ifndef DEBUG
497     return;
498 #else
499     assert(stmt->gtOper == GT_STMT);
500
501     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
502     {
503         chkLevel = verCurrentState.esStackDepth;
504     }
505
506     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
507     {
508         return;
509     }
510
511     GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
512
513     // Calls can only be appended if there are no trees with GTF_GLOB_EFFECT on the stack
514
515     if (tree->gtFlags & GTF_CALL)
516     {
517         for (unsigned level = 0; level < chkLevel; level++)
518         {
519             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
520         }
521     }
522
523     if (tree->gtOper == GT_ASG)
524     {
525         // For an assignment to a local variable, all references of that
526         // variable have to be spilled. If it is aliased, all calls and
527         // indirect accesses have to be spilled
528
529         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
530         {
531             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
532             for (unsigned level = 0; level < chkLevel; level++)
533             {
534                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
535                 assert(!lvaTable[lclNum].lvAddrExposed ||
536                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
537             }
538         }
539
540         // If the access may be to global memory, all side effects have to be spilled.
541
542         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
543         {
544             for (unsigned level = 0; level < chkLevel; level++)
545             {
546                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
547             }
548         }
549     }
550 #endif
551 }
552
553 /*****************************************************************************
554  *
555  *  Append the given GT_STMT node to the current block's tree list.
556  *  [0..chkLevel) is the portion of the stack which we will check for
557  *    interference with stmt and spill if needed.
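 *  chkLevel may also be CHECK_SPILL_ALL (check against the entire stack) or
 *  CHECK_SPILL_NONE (skip the interference check entirely).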
558  */
559
560 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
561 {
562     assert(stmt->gtOper == GT_STMT);
563     noway_assert(impTreeLast != nullptr);
564
565     /* If the statement being appended has any side-effects, check the stack
566        to see if anything needs to be spilled to preserve correct ordering. */
567
568     GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
569     unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
570
571     // Assignments to (unaliased) locals don't count as side-effects, as
572     // we handle them specially using impSpillLclRefs(). Temp locals should
573     // be fine too.
574
575     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
576         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
577     {
578         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
579         assert(flags == (op2Flags | GTF_ASG));
580         flags = op2Flags;
581     }
582
583     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
584     {
585         chkLevel = verCurrentState.esStackDepth;
586     }
587
588     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
589     {
590         assert(chkLevel <= verCurrentState.esStackDepth);
591
592         if (flags)
593         {
594             // If there is a call, we have to spill global refs
595             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
596
597             if (expr->gtOper == GT_ASG)
598             {
599                 GenTree* lhs = expr->gtGetOp1();
600                 // If we are assigning to a global ref, we have to spill global refs on stack.
601                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
602                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
603                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
604                 if (!expr->OperIsBlkOp())
605                 {
606                     // If we are assigning to a global ref, we have to spill global refs on stack
607                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
608                     {
609                         spillGlobEffects = true;
610                     }
611                 }
612                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
613                          ((lhs->OperGet() == GT_LCL_VAR) &&
614                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
615                 {
616                     spillGlobEffects = true;
617                 }
618             }
619
620             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
621         }
622         else
623         {
624             impSpillSpecialSideEff();
625         }
626     }
627
628     impAppendStmtCheck(stmt, chkLevel);
629
630     /* Point 'prev' at the previous node, so that we can walk backwards */
631
632     stmt->gtPrev = impTreeLast;
633
634     /* Append the expression statement to the list */
635
636     impTreeLast->gtNext = stmt;
637     impTreeLast         = stmt;
638
639 #ifdef FEATURE_SIMD
640     impMarkContiguousSIMDFieldAssignments(stmt);
641 #endif
642
643     /* Once we set impCurStmtOffs in an appended tree, we are ready to
644        report the following offsets. So reset impCurStmtOffs */
645
646     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
647     {
648         impCurStmtOffsSet(BAD_IL_OFFSET);
649     }
650
651 #ifdef DEBUG
652     if (impLastILoffsStmt == nullptr)
653     {
654         impLastILoffsStmt = stmt;
655     }
656
657     if (verbose)
658     {
659         printf("\n\n");
660         gtDispTree(stmt);
661     }
662 #endif
663 }
664
665 /*****************************************************************************
666  *
667  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
668  */
669
670 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
671 {
672     assert(stmt->gtOper == GT_STMT);
673     assert(stmtBefore->gtOper == GT_STMT);
674
675     GenTreePtr stmtPrev = stmtBefore->gtPrev;
676     stmt->gtPrev        = stmtPrev;
677     stmt->gtNext        = stmtBefore;
678     stmtPrev->gtNext    = stmt;
679     stmtBefore->gtPrev  = stmt;
680 }
681
682 /*****************************************************************************
683  *
684  *  Append the given expression tree to the current block's tree list.
685  *  Return the newly created statement.
686  */
687
688 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
689 {
690     assert(tree);
691
692     /* Allocate an 'expression statement' node */
693
694     GenTreePtr expr = gtNewStmt(tree, offset);
695
696     /* Append the statement to the current block's stmt list */
697
698     impAppendStmt(expr, chkLevel);
699
700     return expr;
701 }
702
703 /*****************************************************************************
704  *
705  *  Insert the given expression tree before GT_STMT "stmtBefore"
706  */
707
708 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
709 {
710     assert(stmtBefore->gtOper == GT_STMT);
711
712     /* Allocate an 'expression statement' node */
713
714     GenTreePtr expr = gtNewStmt(tree, offset);
715
716     /* Insert the statement before 'stmtBefore' in the current block's stmt list */
717
718     impInsertStmtBefore(expr, stmtBefore);
719 }
720
721 /*****************************************************************************
722  *
723  *  Append an assignment of the given value to a temp to the current tree list.
724  *  curLevel is the stack level for which the spill to the temp is being done.
725  */
726
727 void Compiler::impAssignTempGen(unsigned    tmp,
728                                 GenTreePtr  val,
729                                 unsigned    curLevel,
730                                 GenTreePtr* pAfterStmt, /* = NULL */
731                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
732                                 BasicBlock* block       /* = NULL */
733                                 )
734 {
735     GenTreePtr asg = gtNewTempAssign(tmp, val);
736
737     if (!asg->IsNothingNode())
738     {
739         if (pAfterStmt)
740         {
741             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
742             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
743         }
744         else
745         {
746             impAppendTree(asg, curLevel, impCurStmtOffs);
747         }
748     }
749 }
750
751 /*****************************************************************************
752  * same as above, but handle the valueclass case too
753  */
754
755 void Compiler::impAssignTempGen(unsigned             tmpNum,
756                                 GenTreePtr           val,
757                                 CORINFO_CLASS_HANDLE structType,
758                                 unsigned             curLevel,
759                                 GenTreePtr*          pAfterStmt, /* = NULL */
760                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
761                                 BasicBlock*          block       /* = NULL */
762                                 )
763 {
764     GenTreePtr asg;
765
766     if (varTypeIsStruct(val))
767     {
768         assert(tmpNum < lvaCount);
769         assert(structType != NO_CLASS_HANDLE);
770
771         // If the method is unverifiable, the assert below may not hold,
772         // so at least ignore it when verification is turned on,
773         // since any block that tries to use the temp would have failed verification.
774         var_types varType = lvaTable[tmpNum].lvType;
775         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
776         lvaSetStruct(tmpNum, structType, false);
777
778         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
779         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
780         // that has been passed in for the value being assigned to the temp, in which case we
781         // need to set 'val' to that same type.
782         // Note also that if we always normalized the types of any node that might be a struct
783         // type, this would not be necessary - but that requires additional JIT/EE interface
784         // calls that may not actually be required - e.g. if we only access a field of a struct.
785
786         val->gtType = lvaTable[tmpNum].lvType;
787
788         GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
789         asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
790     }
791     else
792     {
793         asg = gtNewTempAssign(tmpNum, val);
794     }
795
796     if (!asg->IsNothingNode())
797     {
798         if (pAfterStmt)
799         {
800             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
801             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
802         }
803         else
804         {
805             impAppendTree(asg, curLevel, impCurStmtOffs);
806         }
807     }
808 }
809
810 /*****************************************************************************
811  *
812  *  Pop the given number of values from the stack and return a list node with
813  *  their values.
814  *  The 'prefixTree' argument may optionally contain an argument
815  *  list that is prepended to the list returned from this function.
816  *
817  *  The notion of prepended is a bit misleading in that the list is backwards
818  *  from the way I would expect: The first element popped is at the end of
819  *  the returned list, and prefixTree is 'before' that, meaning closer to
820  *  the end of the list.  To get to prefixTree, you have to walk to the
821  *  end of the list.
822  *
823  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
824  *  such we reverse its meaning such that returnValue has a reversed
825  *  prefixTree at the head of the list.
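 *
 *  For example (a rough illustration of the code below): popping two arguments
 *  a1, a2 (with a2 on top of the stack) with prefixTree P1->P2 yields
 *      ARG_ORDER_L2R:  a1 -> a2 -> P1 -> P2
 *      ARG_ORDER_R2L:  P2 -> P1 -> a1 -> a2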
826  */
827
828 GenTreeArgList* Compiler::impPopList(unsigned          count,
829                                      unsigned*         flagsPtr,
830                                      CORINFO_SIG_INFO* sig,
831                                      GenTreeArgList*   prefixTree)
832 {
833     assert(sig == nullptr || count == sig->numArgs);
834
835     unsigned             flags = 0;
836     CORINFO_CLASS_HANDLE structType;
837     GenTreeArgList*      treeList;
838
839     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
840     {
841         treeList = nullptr;
842     }
843     else
844     { // ARG_ORDER_L2R
845         treeList = prefixTree;
846     }
847
848     while (count--)
849     {
850         StackEntry se   = impPopStack();
851         typeInfo   ti   = se.seTypeInfo;
852         GenTreePtr temp = se.val;
853
854         if (varTypeIsStruct(temp))
855         {
856             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
857             assert(ti.IsType(TI_STRUCT));
858             structType = ti.GetClassHandleForValueClass();
859             temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
860         }
861
862         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
863         flags |= temp->gtFlags;
864         treeList = gtNewListNode(temp, treeList);
865     }
866
867     *flagsPtr = flags;
868
869     if (sig != nullptr)
870     {
871         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
872             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
873         {
874             // Make sure that all valuetypes (including enums) that we push are loaded.
875             // This is to guarantee that if a GC is triggered from the prestub of this method,
876             // all valuetypes in the method signature are already loaded.
877             // We need to be able to find the size of the valuetypes, but we cannot
878             // do a class-load from within GC.
879             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
880         }
881
882         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
883         CORINFO_CLASS_HANDLE    argClass;
884         CORINFO_CLASS_HANDLE    argRealClass;
885         GenTreeArgList*         args;
886         unsigned                sigSize;
887
888         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
889         {
890             PREFIX_ASSUME(args != nullptr);
891
892             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
893
894             // insert implied casts (from float to double or double to float)
895
896             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
897             {
898                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
899             }
900             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
901             {
902                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
903             }
904
905             // insert any widening or narrowing casts for backwards compatibility
906
907             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
908
909             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
910                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
911             {
912                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
913                 // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
914                 // primitive types.
915                 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
916                 // details).
917                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
918                 {
919                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
920                 }
921
922                 // Make sure that all valuetypes (including enums) that we push are loaded.
923                 // This is to guarantee that if a GC is triggered from the prestub of this method,
924                 // all valuetypes in the method signature are already loaded.
925                 // We need to be able to find the size of the valuetypes, but we cannot
926                 // do a class-load from within GC.
927                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
928             }
929
930             argLst = info.compCompHnd->getArgNext(argLst);
931         }
932     }
933
934     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
935     {
936         // Prepend the prefixTree
937
938         // Simple in-place reversal to place treeList
939         // at the end of a reversed prefixTree
940         while (prefixTree != nullptr)
941         {
942             GenTreeArgList* next = prefixTree->Rest();
943             prefixTree->Rest()   = treeList;
944             treeList             = prefixTree;
945             prefixTree           = next;
946         }
947     }
948     return treeList;
949 }
950
951 /*****************************************************************************
952  *
953  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
954  *  The first "skipReverseCount" items are not reversed.
955  */
956
957 GenTreeArgList* Compiler::impPopRevList(unsigned          count,
958                                         unsigned*         flagsPtr,
959                                         CORINFO_SIG_INFO* sig,
960                                         unsigned          skipReverseCount)
961
962 {
963     assert(skipReverseCount <= count);
964
965     GenTreeArgList* list = impPopList(count, flagsPtr, sig);
966
967     // reverse the list
968     if (list == nullptr || skipReverseCount == count)
969     {
970         return list;
971     }
972
973     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
974     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
975
976     if (skipReverseCount == 0)
977     {
978         ptr = list;
979     }
980     else
981     {
982         lastSkipNode = list;
983         // Get to the first node that needs to be reversed
984         for (unsigned i = 0; i < skipReverseCount - 1; i++)
985         {
986             lastSkipNode = lastSkipNode->Rest();
987         }
988
989         PREFIX_ASSUME(lastSkipNode != nullptr);
990         ptr = lastSkipNode->Rest();
991     }
992
993     GenTreeArgList* reversedList = nullptr;
994
995     do
996     {
997         GenTreeArgList* tmp = ptr->Rest();
998         ptr->Rest()         = reversedList;
999         reversedList        = ptr;
1000         ptr                 = tmp;
1001     } while (ptr != nullptr);
1002
1003     if (skipReverseCount)
1004     {
1005         lastSkipNode->Rest() = reversedList;
1006         return list;
1007     }
1008     else
1009     {
1010         return reversedList;
1011     }
1012 }
1013
1014 /*****************************************************************************
1015    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1016    class of type 'structHnd'.  It returns the tree that should be appended to the
1017    statement list that represents the assignment.
1018    Temp assignments may be appended to impTreeList if spilling is necessary.
1019    curLevel is the stack level for which a spill may be being done.
1020  */
1021
1022 GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
1023                                      GenTreePtr           src,
1024                                      CORINFO_CLASS_HANDLE structHnd,
1025                                      unsigned             curLevel,
1026                                      GenTreePtr*          pAfterStmt, /* = NULL */
1027                                      BasicBlock*          block       /* = NULL */
1028                                      )
1029 {
1030     assert(varTypeIsStruct(dest));
1031
1032     while (dest->gtOper == GT_COMMA)
1033     {
1034         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1035
1036         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1037         if (pAfterStmt)
1038         {
1039             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1040         }
1041         else
1042         {
1043             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1044         }
1045
1046         // set dest to the second thing
1047         dest = dest->gtOp.gtOp2;
1048     }
1049
1050     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1051            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1052
1053     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1054         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1055     {
1056         // Make this a NOP
1057         return gtNewNothingNode();
1058     }
1059
1060     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1061     // or re-creating a Blk node if it is.
1062     GenTreePtr destAddr;
1063
1064     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1065     {
1066         destAddr = dest->gtOp.gtOp1;
1067     }
1068     else
1069     {
1070         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1071     }
1072
1073     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1074 }
1075
1076 /*****************************************************************************/
1077
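// impAssignStructPtr: Build the assignment of the struct value 'src' through the address
// 'destAddr'. Handles struct-returning calls (via hidden return buffer or in registers),
// GT_RET_EXPR, GT_MKREFANY and GT_COMMA sources, and returns the assignment tree that the
// caller should append (intermediate statements may be appended to the statement list directly).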
1078 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
1079                                         GenTreePtr           src,
1080                                         CORINFO_CLASS_HANDLE structHnd,
1081                                         unsigned             curLevel,
1082                                         GenTreePtr*          pAfterStmt, /* = NULL */
1083                                         BasicBlock*          block       /* = NULL */
1084                                         )
1085 {
1086     var_types  destType;
1087     GenTreePtr dest      = nullptr;
1088     unsigned   destFlags = 0;
1089
1090 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1091     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1092     // TODO-ARM-BUG: Does ARM need this?
1093     // TODO-ARM64-BUG: Does ARM64 need this?
1094     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1095            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1096            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1097            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1098 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1099     assert(varTypeIsStruct(src));
1100
1101     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1102            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1103            src->gtOper == GT_COMMA ||
1104            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1105 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1106     if (destAddr->OperGet() == GT_ADDR)
1107     {
1108         GenTree* destNode = destAddr->gtGetOp1();
1109         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1110         // will be morphed, don't insert an OBJ(ADDR).
1111         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1112 #ifndef LEGACY_BACKEND
1113             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1114 #endif // !LEGACY_BACKEND
1115                 )
1116         {
1117             dest = destNode;
1118         }
1119         destType = destNode->TypeGet();
1120     }
1121     else
1122     {
1123         destType = src->TypeGet();
1124     }
1125
1126     var_types asgType = src->TypeGet();
1127
1128     if (src->gtOper == GT_CALL)
1129     {
1130         if (src->AsCall()->TreatAsHasRetBufArg(this))
1131         {
1132             // Case of call returning a struct via hidden retbuf arg
1133
1134             // insert the return value buffer into the argument list as first byref parameter
1135             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1136
1137             // now returns void, not a struct
1138             src->gtType = TYP_VOID;
1139
1140             // return the morphed call node
1141             return src;
1142         }
1143         else
1144         {
1145             // Case of call returning a struct in one or more registers.
1146
1147             var_types returnType = (var_types)src->gtCall.gtReturnType;
1148
1149             // We won't use a return buffer, so change src->gtType to 'returnType'
1150             src->gtType = genActualType(returnType);
1151
1152             // First we try to change this to "LclVar/LclFld = call"
1153             //
1154             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1155             {
1156                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1157                 // That is, the IR will be of the form lclVar = call for multi-reg return
1158                 //
1159                 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1160                 if (src->AsCall()->HasMultiRegRetVal())
1161                 {
1162                     // Mark the struct LclVar as used in a MultiReg return context
1163                     //  which currently makes it non promotable.
1164                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1165                     // handle multireg returns.
1166                     lcl->gtFlags |= GTF_DONT_CSE;
1167                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1168                 }
1169                 else // The call result is not a multireg return
1170                 {
1171                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1172                     lcl->ChangeOper(GT_LCL_FLD);
1173                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1174                 }
1175
1176                 lcl->gtType = src->gtType;
1177                 asgType     = src->gtType;
1178                 dest        = lcl;
1179
1180 #if defined(_TARGET_ARM_)
1181                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1182                 // but that method has not been updated to include ARM.
1183                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1184                 lcl->gtFlags |= GTF_DONT_CSE;
1185 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1186                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1187                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1188
1189                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1190                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191                 // handle multireg returns.
1192                 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1193                 // non-multireg returns.
1194                 lcl->gtFlags |= GTF_DONT_CSE;
1195                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1196 #endif
1197             }
1198             else // we don't have a GT_ADDR of a GT_LCL_VAR
1199             {
1200                 // !!! The destination could be on stack. !!!
1201                 // This flag will let us choose the correct write barrier.
1202                 asgType   = returnType;
1203                 destFlags = GTF_IND_TGTANYWHERE;
1204             }
1205         }
1206     }
1207     else if (src->gtOper == GT_RET_EXPR)
1208     {
1209         GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1210         noway_assert(call->gtOper == GT_CALL);
1211
1212         if (call->HasRetBufArg())
1213         {
1214             // insert the return value buffer into the argument list as first byref parameter
1215             call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1216
1217             // now returns void, not a struct
1218             src->gtType  = TYP_VOID;
1219             call->gtType = TYP_VOID;
1220
1221             // We have already appended the write to 'dest' via the GT_CALL's args,
1222             // so now we just return an empty node (pruning the GT_RET_EXPR).
1223             return src;
1224         }
1225         else
1226         {
1227             // Case of inline method returning a struct in one or more registers.
1228             //
1229             var_types returnType = (var_types)call->gtReturnType;
1230
1231             // We won't need a return buffer
1232             asgType      = returnType;
1233             src->gtType  = genActualType(returnType);
1234             call->gtType = src->gtType;
1235
1236             // If we've changed the type, and it no longer matches a local destination,
1237             // we must use an indirection.
1238             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1239             {
1240                 dest = nullptr;
1241             }
1242
1243             // !!! The destination could be on stack. !!!
1244             // This flag will let us choose the correct write barrier.
1245             destFlags = GTF_IND_TGTANYWHERE;
1246         }
1247     }
1248     else if (src->OperIsBlk())
1249     {
1250         asgType = impNormStructType(structHnd);
1251         if (src->gtOper == GT_OBJ)
1252         {
1253             assert(src->gtObj.gtClass == structHnd);
1254         }
1255     }
1256     else if (src->gtOper == GT_INDEX)
1257     {
1258         asgType = impNormStructType(structHnd);
1259         assert(src->gtIndex.gtStructElemClass == structHnd);
1260     }
1261     else if (src->gtOper == GT_MKREFANY)
1262     {
1263         // Since we are assigning the result of a GT_MKREFANY,
1264         // "destAddr" must point to a refany.
1265
1266         GenTreePtr destAddrClone;
1267         destAddr =
1268             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1269
1270         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1271         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1272         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1273         GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1274         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1275         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1276         GenTreePtr typeSlot =
1277             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1278
1279         // append the assign of the pointer value
1280         GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1281         if (pAfterStmt)
1282         {
1283             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1284         }
1285         else
1286         {
1287             impAppendTree(asg, curLevel, impCurStmtOffs);
1288         }
1289
1290         // return the assign of the type value, to be appended
1291         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1292     }
1293     else if (src->gtOper == GT_COMMA)
1294     {
1295         // The second thing is the struct or its address.
1296         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1297         if (pAfterStmt)
1298         {
1299             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1300         }
1301         else
1302         {
1303             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1304         }
1305
1306         // Evaluate the second thing using recursion.
1307         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1308     }
1309     else if (src->IsLocal())
1310     {
1311         asgType = src->TypeGet();
1312     }
1313     else if (asgType == TYP_STRUCT)
1314     {
1315         asgType     = impNormStructType(structHnd);
1316         src->gtType = asgType;
1317 #ifdef LEGACY_BACKEND
1318         if (asgType == TYP_STRUCT)
1319         {
1320             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1321             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1322         }
1323 #endif
1324     }
1325     if (dest == nullptr)
1326     {
1327         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1328         // if this is a known struct type.
1329         if (asgType == TYP_STRUCT)
1330         {
1331             dest = gtNewObjNode(structHnd, destAddr);
1332             gtSetObjGcInfo(dest->AsObj());
1333             // Although an obj as a call argument was always assumed to be a globRef
1334             // (which is itself overly conservative), that is not true of the operands
1335             // of a block assignment.
1336             dest->gtFlags &= ~GTF_GLOB_REF;
1337             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1338         }
1339         else if (varTypeIsStruct(asgType))
1340         {
1341             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1342         }
1343         else
1344         {
1345             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1346         }
1347     }
1348     else
1349     {
1350         dest->gtType = asgType;
1351     }
1352
1353     dest->gtFlags |= destFlags;
1354     destFlags = dest->gtFlags;
1355
1356     // return an assignment node, to be appended
1357     GenTree* asgNode = gtNewAssignNode(dest, src);
1358     gtBlockOpInit(asgNode, dest, src, false);
1359
1360     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1361     // of assignments.
1362     if ((destFlags & GTF_DONT_CSE) == 0)
1363     {
1364         dest->gtFlags &= ~(GTF_DONT_CSE);
1365     }
1366     return asgNode;
1367 }
1368
1369 /*****************************************************************************
1370    Given a struct value and the class handle for that structure, return
1371    the expression for the address of that structure value.
1372
1373    willDeref - true if the caller guarantees it will dereference the returned pointer.
1374 */
1375
1376 GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
1377                                       CORINFO_CLASS_HANDLE structHnd,
1378                                       unsigned             curLevel,
1379                                       bool                 willDeref)
1380 {
1381     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1382
1383     var_types type = structVal->TypeGet();
1384
1385     genTreeOps oper = structVal->gtOper;
1386
1387     if (oper == GT_OBJ && willDeref)
1388     {
1389         assert(structVal->gtObj.gtClass == structHnd);
1390         return (structVal->gtObj.Addr());
1391     }
1392     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1393     {
1394         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1395
1396         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1397
1398         // The 'return value' is now the temp itself
1399
1400         type            = genActualType(lvaTable[tmpNum].TypeGet());
1401         GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1402         temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1403         return temp;
1404     }
1405     else if (oper == GT_COMMA)
1406     {
1407         assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1408
1409         GenTreePtr oldTreeLast = impTreeLast;
1410         structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1411         structVal->gtType      = TYP_BYREF;
1412
1413         if (oldTreeLast != impTreeLast)
1414         {
1415             // Some temp assignment statement was placed on the statement list
1416             // for Op2, but that would be out of order with op1, so we need to
1417             // spill op1 onto the statement list after whatever was last
1418             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1419             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1420             structVal->gtOp.gtOp1 = gtNewNothingNode();
1421         }
1422
1423         return (structVal);
1424     }
1425
1426     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1427 }
1428
1429 //------------------------------------------------------------------------
1430 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1431 //                    and optionally determine the GC layout of the struct.
1432 //
1433 // Arguments:
1434 //    structHnd       - The class handle for the struct type of interest.
1435 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1436 //                      into which the gcLayout will be written.
1437 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1438 //                      which will be set to the number of GC fields in the struct.
1439 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1440 //                      type, set to the SIMD base type
1441 //
1442 // Return Value:
1443 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1444 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1445 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1446 //
1447 // Assumptions:
1448 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1449 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1450 //
1451 // Notes:
1452 //    Normalizing the type involves examining the struct type to determine if it should
1453 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1454 //    for full enregistration, e.g. TYP_SIMD16.
1455
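// Illustrative usage sketch (hypothetical caller, not code from this file): a caller that
// wants both the normalized type and the GC layout could do roughly the following, sizing
// the layout buffer per ICorStaticInfo::getClassGClayout as noted in the Assumptions above.
//
//     BYTE      gcLayout[128];                // assumed large enough for this struct's slots
//     unsigned  numGCVars  = 0;
//     var_types structType = impNormStructType(structHnd, gcLayout, &numGCVars);
//     // structType is e.g. TYP_SIMD16 for a 16-byte SIMD struct, otherwise TYP_STRUCT;
//     // numGCVars holds the number of GC fields reported by getClassGClayout.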
1456 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1457                                       BYTE*                gcLayout,
1458                                       unsigned*            pNumGCVars,
1459                                       var_types*           pSimdBaseType)
1460 {
1461     assert(structHnd != NO_CLASS_HANDLE);
1462
1463     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1464     var_types   structType  = TYP_STRUCT;
1465
1466     // On coreclr the check for GC pointers includes a "may" to account for the special
1467     // ByRef-like span structs; CORINFO_FLG_CONTAINS_STACK_PTR is the bit that marks them.
1468     // When that flag is set the struct contains a ByRef that could be either a GC pointer
1469     // or a native pointer.
1470     const bool mayContainGCPtrs =
1471         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1472
1473 #ifdef FEATURE_SIMD
1474     // Check to see if this is a SIMD type.
1475     if (featureSIMD && !mayContainGCPtrs)
1476     {
1477         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1478
1479         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1480         {
1481             unsigned int sizeBytes;
1482             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1483             if (simdBaseType != TYP_UNKNOWN)
1484             {
1485                 assert(sizeBytes == originalSize);
1486                 structType = getSIMDTypeForSize(sizeBytes);
1487                 if (pSimdBaseType != nullptr)
1488                 {
1489                     *pSimdBaseType = simdBaseType;
1490                 }
1491                 // Also indicate that we use floating point registers.
1492                 compFloatingPointUsed = true;
1493             }
1494         }
1495     }
1496 #endif // FEATURE_SIMD
1497
1498     // Fetch GC layout info if requested
1499     if (gcLayout != nullptr)
1500     {
1501         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1502
1503         // Verify that the quick test up above via the class attributes gave a
1504         // safe view of the type's GCness.
1505         //
1506         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1507         // does not report any gc fields.
1508
1509         assert(mayContainGCPtrs || (numGCVars == 0));
1510
1511         if (pNumGCVars != nullptr)
1512         {
1513             *pNumGCVars = numGCVars;
1514         }
1515     }
1516     else
1517     {
1518         // Can't safely ask for number of GC pointers without also
1519         // asking for layout.
1520         assert(pNumGCVars == nullptr);
1521     }
1522
1523     return structType;
1524 }
1525
1526 //****************************************************************************
1527 //  Given a TYP_STRUCT value 'structVal', make sure it is 'canonical'; that is,
1528 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1529 //
1530 GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
1531                                       CORINFO_CLASS_HANDLE structHnd,
1532                                       unsigned             curLevel,
1533                                       bool                 forceNormalization /*=false*/)
1534 {
1535     assert(forceNormalization || varTypeIsStruct(structVal));
1536     assert(structHnd != NO_CLASS_HANDLE);
1537     var_types structType = structVal->TypeGet();
1538     bool      makeTemp   = false;
1539     if (structType == TYP_STRUCT)
1540     {
1541         structType = impNormStructType(structHnd);
1542     }
1543     bool                 alreadyNormalized = false;
1544     GenTreeLclVarCommon* structLcl         = nullptr;
1545
1546     genTreeOps oper = structVal->OperGet();
1547     switch (oper)
1548     {
1549         // GT_RETURN and GT_MKREFANY don't capture the handle.
1550         case GT_RETURN:
1551             break;
1552         case GT_MKREFANY:
1553             alreadyNormalized = true;
1554             break;
1555
1556         case GT_CALL:
1557             structVal->gtCall.gtRetClsHnd = structHnd;
1558             makeTemp                      = true;
1559             break;
1560
1561         case GT_RET_EXPR:
1562             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1563             makeTemp                         = true;
1564             break;
1565
1566         case GT_ARGPLACE:
1567             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1568             break;
1569
1570         case GT_INDEX:
1571             // This will be transformed to an OBJ later.
1572             alreadyNormalized                    = true;
1573             structVal->gtIndex.gtStructElemClass = structHnd;
1574             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1575             break;
1576
1577         case GT_FIELD:
1578             // Wrap it in a GT_OBJ.
1579             structVal->gtType = structType;
1580             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1581             break;
1582
1583         case GT_LCL_VAR:
1584         case GT_LCL_FLD:
1585             structLcl = structVal->AsLclVarCommon();
1586             // Wrap it in a GT_OBJ.
1587             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1588             __fallthrough;
1589
1590         case GT_OBJ:
1591         case GT_BLK:
1592         case GT_DYN_BLK:
1593         case GT_ASG:
1594             // These should already have the appropriate type.
1595             assert(structVal->gtType == structType);
1596             alreadyNormalized = true;
1597             break;
1598
1599         case GT_IND:
1600             assert(structVal->gtType == structType);
1601             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1602             alreadyNormalized = true;
1603             break;
1604
1605 #ifdef FEATURE_SIMD
1606         case GT_SIMD:
1607             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1608             break;
1609 #endif // FEATURE_SIMD
1610
1611         case GT_COMMA:
1612         {
1613             // The second thing could be a block node, a GT_SIMD node, or a GT_COMMA node.
1614             GenTree* blockNode = structVal->gtOp.gtOp2;
1615             assert(blockNode->gtType == structType);
1616
1617             // Is this GT_COMMA(op1, GT_COMMA())?
1618             GenTree* parent = structVal;
1619             if (blockNode->OperGet() == GT_COMMA)
1620             {
1621                 // Find the last node in the comma chain.
1622                 do
1623                 {
1624                     assert(blockNode->gtType == structType);
1625                     parent    = blockNode;
1626                     blockNode = blockNode->gtOp.gtOp2;
1627                 } while (blockNode->OperGet() == GT_COMMA);
1628             }
1629
1630 #ifdef FEATURE_SIMD
1631             if (blockNode->OperGet() == GT_SIMD)
1632             {
1633                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1634                 alreadyNormalized  = true;
1635             }
1636             else
1637 #endif
1638             {
1639                 assert(blockNode->OperIsBlk());
1640
1641                 // Sink the GT_COMMA below the blockNode addr.
1642                 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1643                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1644                 //
1645                 // In case of a chained GT_COMMA case, we sink the last
1646                 // GT_COMMA below the blockNode addr.
1647                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1648                 assert(blockNodeAddr->gtType == TYP_BYREF);
1649                 GenTree* commaNode    = parent;
1650                 commaNode->gtType     = TYP_BYREF;
1651                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1652                 blockNode->gtOp.gtOp1 = commaNode;
1653                 if (parent == structVal)
1654                 {
1655                     structVal = blockNode;
1656                 }
1657                 alreadyNormalized = true;
1658             }
1659         }
1660         break;
1661
1662         default:
1663             assert(!"Unexpected node in impNormStructVal()");
1664             break;
1665     }
1666     structVal->gtType  = structType;
1667     GenTree* structObj = structVal;
1668
1669     if (!alreadyNormalized || forceNormalization)
1670     {
1671         if (makeTemp)
1672         {
1673             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1674
1675             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1676
1677             // The structVal is now the temp itself
1678
1679             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1680             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1681             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1682         }
1683         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1684         {
1685             // Wrap it in a GT_OBJ
1686             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1687         }
1688     }
1689
1690     if (structLcl != nullptr)
1691     {
1692         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1693         // so we don't set GTF_EXCEPT here.
1694         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1695         {
1696             structObj->gtFlags &= ~GTF_GLOB_REF;
1697         }
1698     }
1699     else
1700     {
1701         // In general an OBJ is an indirection and could raise an exception.
1702         structObj->gtFlags |= GTF_EXCEPT;
1703     }
1704     return (structObj);
1705 }
1706
1707 /******************************************************************************/
1708 // Given a type token, generate code that will evaluate to the correct
1709 // handle representation of that token (type handle, field handle, or method handle)
1710 //
1711 // For most cases, the handle is determined at compile-time, and the code
1712 // generated is simply an embedded handle.
1713 //
1714 // Run-time lookup is required if the enclosing method is shared between instantiations
1715 // and the token refers to formal type parameters whose instantiation is not known
1716 // at compile-time.
1717 //
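// For example (illustrative): in a method that is not shared across instantiations, a
// 'ldtoken' on a concrete type produces an embedded, compile-time class handle constant;
// in code shared over a type parameter T, the same token referring to T needs a dictionary
// lookup through the generic context at run time (see impRuntimeLookupToTree below).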
1718 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1719                                       BOOL*                   pRuntimeLookup /* = NULL */,
1720                                       BOOL                    mustRestoreHandle /* = FALSE */,
1721                                       BOOL                    importParent /* = FALSE */)
1722 {
1723     assert(!fgGlobalMorph);
1724
1725     CORINFO_GENERICHANDLE_RESULT embedInfo;
1726     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1727
1728     if (pRuntimeLookup)
1729     {
1730         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1731     }
1732
1733     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1734     {
1735         switch (embedInfo.handleType)
1736         {
1737             case CORINFO_HANDLETYPE_CLASS:
1738                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1739                 break;
1740
1741             case CORINFO_HANDLETYPE_METHOD:
1742                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1743                 break;
1744
1745             case CORINFO_HANDLETYPE_FIELD:
1746                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1747                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1748                 break;
1749
1750             default:
1751                 break;
1752         }
1753     }
1754
1755     return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1756                            embedInfo.compileTimeHandle);
1757 }
1758
1759 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1760                                      CORINFO_LOOKUP*         pLookup,
1761                                      unsigned                handleFlags,
1762                                      void*                   compileTimeHandle)
1763 {
1764     if (!pLookup->lookupKind.needsRuntimeLookup)
1765     {
1766         // No runtime lookup is required.
1767         // Access is direct or memory-indirect (of a fixed address) reference
1768
1769         CORINFO_GENERIC_HANDLE handle       = nullptr;
1770         void*                  pIndirection = nullptr;
1771         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1772
1773         if (pLookup->constLookup.accessType == IAT_VALUE)
1774         {
1775             handle = pLookup->constLookup.handle;
1776         }
1777         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1778         {
1779             pIndirection = pLookup->constLookup.addr;
1780         }
1781         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1782     }
1783     else if (compIsForInlining())
1784     {
1785         // Don't import runtime lookups when inlining
1786         // Inlining has to be aborted in such a case
1787         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1788         return nullptr;
1789     }
1790     else
1791     {
1792         // Need to use dictionary-based access which depends on the typeContext
1793         // which is only available at runtime, not at compile-time.
1794
1795         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1796     }
1797 }
1798
1799 #ifdef FEATURE_READYTORUN_COMPILER
1800 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1801                                                unsigned              handleFlags,
1802                                                void*                 compileTimeHandle)
1803 {
1804     CORINFO_GENERIC_HANDLE handle       = nullptr;
1805     void*                  pIndirection = nullptr;
1806     assert(pLookup->accessType != IAT_PPVALUE);
1807
1808     if (pLookup->accessType == IAT_VALUE)
1809     {
1810         handle = pLookup->handle;
1811     }
1812     else if (pLookup->accessType == IAT_PVALUE)
1813     {
1814         pIndirection = pLookup->addr;
1815     }
1816     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1817 }
1818
1819 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1820     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1821     CorInfoHelpFunc         helper,
1822     var_types               type,
1823     GenTreeArgList*         args /* =NULL*/,
1824     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1825 {
1826     CORINFO_CONST_LOOKUP lookup;
1827     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1828     {
1829         return nullptr;
1830     }
1831
1832     GenTreeCall* op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1833
1834     op1->setEntryPoint(lookup);
1835
1836     return op1;
1837 }
1838 #endif
1839
1840 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1841 {
1842     GenTreePtr op1 = nullptr;
1843
1844     switch (pCallInfo->kind)
1845     {
1846         case CORINFO_CALL:
1847             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1848
1849 #ifdef FEATURE_READYTORUN_COMPILER
1850             if (opts.IsReadyToRun())
1851             {
1852                 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1853             }
1854             else
1855             {
1856                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1857             }
1858 #endif
1859             break;
1860
1861         case CORINFO_CALL_CODE_POINTER:
1862             if (compIsForInlining())
1863             {
1864                 // Don't import runtime lookups when inlining
1865                 // Inlining has to be aborted in such a case
1866                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1867                 return nullptr;
1868             }
1869
1870             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1871             break;
1872
1873         default:
1874             noway_assert(!"unknown call kind");
1875             break;
1876     }
1877
1878     return op1;
1879 }
1880
1881 //------------------------------------------------------------------------
1882 // getRuntimeContextTree: find pointer to context for runtime lookup.
1883 //
1884 // Arguments:
1885 //    kind - lookup kind.
1886 //
1887 // Return Value:
1888 //    Return GenTree pointer to generic shared context.
1889 //
1890 // Notes:
1891 //    Records a use of the generic context (bumps lvaGenericsContextUseCount) so it gets reported.
1892
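// Sketch of the two result shapes (illustrative only): for CORINFO_LOOKUP_THISOBJ the context
// is the method table of 'this', i.e. GT_IND(LCL_VAR(this)); for METHODPARAM/CLASSPARAM it is
// simply LCL_VAR(info.compTypeCtxtArg), the hidden instantiation argument.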
1893 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1894 {
1895     GenTreePtr ctxTree = nullptr;
1896
1897     // Collectible types require that, for shared generic code, if we use the generic context
1898     // parameter we report it. (This is a conservative approach; we could detect some cases, in
1899     // particular when the context parameter is 'this', where the eager reporting logic is not needed.)
1900     lvaGenericsContextUseCount++;
1901
1902     if (kind == CORINFO_LOOKUP_THISOBJ)
1903     {
1904         // this Object
1905         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1906
1907         // Vtable pointer of this object
1908         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1909         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1910         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1911     }
1912     else
1913     {
1914         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1915
1916         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1917     }
1918     return ctxTree;
1919 }
1920
1921 /*****************************************************************************/
1922 /* Import a dictionary lookup to access a handle in code shared between
1923    generic instantiations.
1924    The lookup depends on the typeContext which is only available at
1925    runtime, and not at compile-time.
1926    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1927    The cases are:
1928
1929    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1930       instantiation-specific handle, and the tokens to look up the handle.
1931    2. pLookup->indirections != CORINFO_USEHELPER :
1932       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1933           to get the handle.
1934       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1935           If it is non-NULL, it is the handle required. Else, call a helper
1936           to look up the handle.
1937  */
1938
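// Rough shape of the tree built for case 2b (illustrative sketch only, not emitted verbatim):
//
//     tmp = QMARK(GT_NE(handle, 0),
//                 COLON(nothing,                              // handle non-null: use it as is
//                       CALL helper(ctxTree, signature)))     // handle null: ask the runtime
//     return LCL_VAR(tmp)
//
// The construction of this QMARK/COLON appears at the end of the function below.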
1939 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1940                                             CORINFO_LOOKUP*         pLookup,
1941                                             void*                   compileTimeHandle)
1942 {
1943
1944     // This method can only be called from the importer instance of the Compiler.
1945     // In other words, it cannot be called by the instance of the Compiler for the inlinee.
1946     assert(!compIsForInlining());
1947
1948     GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1949
1950 #ifdef FEATURE_READYTORUN_COMPILER
1951     if (opts.IsReadyToRun())
1952     {
1953         return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1954                                          gtNewArgList(ctxTree), &pLookup->lookupKind);
1955     }
1956 #endif
1957
1958     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1959     // It's available only via the run-time helper function
1960     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1961     {
1962         GenTreeArgList* helperArgs =
1963             gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1964                                                       nullptr, compileTimeHandle));
1965
1966         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
1967     }
1968
1969     // Slot pointer
1970     GenTreePtr slotPtrTree = ctxTree;
1971
1972     if (pRuntimeLookup->testForNull)
1973     {
1974         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
1975                                    nullptr DEBUGARG("impRuntimeLookup slot"));
1976     }
1977
1978     // Apply repeated indirections
1979     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
1980     {
1981         if (i != 0)
1982         {
1983             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
1984             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
1985             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
1986         }
1987         if (pRuntimeLookup->offsets[i] != 0)
1988         {
1989             slotPtrTree =
1990                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
1991         }
1992     }
1993
1994     // No null test required
1995     if (!pRuntimeLookup->testForNull)
1996     {
1997         if (pRuntimeLookup->indirections == 0)
1998         {
1999             return slotPtrTree;
2000         }
2001
2002         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2003         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2004
2005         if (!pRuntimeLookup->testForFixup)
2006         {
2007             return slotPtrTree;
2008         }
2009
2010         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2011
2012         GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2013                                       nullptr DEBUGARG("impRuntimeLookup test"));
2014         op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2015
2016         // Use a GT_AND to check for the lowest bit and indirect if it is set
2017         GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2018         GenTreePtr relop    = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2019         relop->gtFlags |= GTF_RELOP_QMARK;
2020
2021         op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2022                            nullptr DEBUGARG("impRuntimeLookup indir"));
2023         op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2024         GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2025         GenTreePtr colon     = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2026
2027         GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2028
2029         unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2030         impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2031         return gtNewLclvNode(tmp, TYP_I_IMPL);
2032     }
2033
2034     assert(pRuntimeLookup->indirections != 0);
2035
2036     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2037
2038     // Extract the handle
2039     GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2040     handle->gtFlags |= GTF_IND_NONFAULTING;
2041
2042     GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2043                                          nullptr DEBUGARG("impRuntimeLookup typehandle"));
2044
2045     // Call to helper
2046     GenTreeArgList* helperArgs =
2047         gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2048                                                   compileTimeHandle));
2049     GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2050
2051     // Check for null and possibly call helper
2052     GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2053     relop->gtFlags |= GTF_RELOP_QMARK;
2054
2055     GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2056                                                          gtNewNothingNode(), // do nothing if nonnull
2057                                                          helperCall);
2058
2059     GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2060
2061     unsigned tmp;
2062     if (handleCopy->IsLocal())
2063     {
2064         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2065     }
2066     else
2067     {
2068         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2069     }
2070
2071     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2072     return gtNewLclvNode(tmp, TYP_I_IMPL);
2073 }
2074
2075 /******************************************************************************
2076  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2077  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2078  *     else, grab a new temp.
2079  *  For structs (which can be pushed on the stack using obj, etc),
2080  *  special handling is needed
2081  */
2082
2083 struct RecursiveGuard
2084 {
2085 public:
2086     RecursiveGuard()
2087     {
2088         m_pAddress = nullptr;
2089     }
2090
2091     ~RecursiveGuard()
2092     {
2093         if (m_pAddress)
2094         {
2095             *m_pAddress = false;
2096         }
2097     }
2098
2099     void Init(bool* pAddress, bool bInitialize)
2100     {
2101         assert(pAddress && *pAddress == false && "Recursive guard violation");
2102         m_pAddress = pAddress;
2103
2104         if (bInitialize)
2105         {
2106             *m_pAddress = true;
2107         }
2108     }
2109
2110 protected:
2111     bool* m_pAddress;
2112 };
2113
2114 bool Compiler::impSpillStackEntry(unsigned level,
2115                                   unsigned tnum
2116 #ifdef DEBUG
2117                                   ,
2118                                   bool        bAssertOnRecursion,
2119                                   const char* reason
2120 #endif
2121                                   )
2122 {
2123
2124 #ifdef DEBUG
2125     RecursiveGuard guard;
2126     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2127 #endif
2128
2129     GenTreePtr tree = verCurrentState.esStack[level].val;
2130
2131     /* Allocate a temp if we haven't been asked to use a particular one */
2132
2133     if (tiVerificationNeeded)
2134     {
2135         // Ignore bad temp requests (they will happen with bad code and will be
2136         // caught when importing the dest block)
2137         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2138         {
2139             return false;
2140         }
2141     }
2142     else
2143     {
2144         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2145         {
2146             return false;
2147         }
2148     }
2149
2150     bool isNewTemp = false;
2151
2152     if (tnum == BAD_VAR_NUM)
2153     {
2154         tnum      = lvaGrabTemp(true DEBUGARG(reason));
2155         isNewTemp = true;
2156     }
2157     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2158     {
2159         // If verification is needed and tnum's type is incompatible with
2160         // the type on the stack, we grab a new temp. This is safe since
2161         // we will throw a verification exception in the dest block.
2162
2163         var_types valTyp = tree->TypeGet();
2164         var_types dstTyp = lvaTable[tnum].TypeGet();
2165
2166         // If the two types are different, we return. This will only happen with bad code and will
2167         // be caught when importing the dest block. We still allow int/byref and float/double differences.
2168         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2169             !(
2170 #ifndef _TARGET_64BIT_
2171                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2172 #endif // !_TARGET_64BIT_
2173                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2174         {
2175             if (verNeedsVerification())
2176             {
2177                 return false;
2178             }
2179         }
2180     }
2181
2182     /* Assign the spilled entry to the temp */
2183     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2184
2185     // If temp is newly introduced and a ref type, grab what type info we can.
2186     if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2187     {
2188         CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2189         lvaSetClass(tnum, tree, stkHnd);
2190     }
2191
2192     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2193     var_types  type                    = genActualType(lvaTable[tnum].TypeGet());
2194     GenTreePtr temp                    = gtNewLclvNode(tnum, type);
2195     verCurrentState.esStack[level].val = temp;
2196
2197     return true;
2198 }
2199
2200 /*****************************************************************************
2201  *
2202  *  Ensure that the stack has only spilled values
2203  */
2204
2205 void Compiler::impSpillStackEnsure(bool spillLeaves)
2206 {
2207     assert(!spillLeaves || opts.compDbgCode);
2208
2209     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2210     {
2211         GenTreePtr tree = verCurrentState.esStack[level].val;
2212
2213         if (!spillLeaves && tree->OperIsLeaf())
2214         {
2215             continue;
2216         }
2217
2218         // Temps introduced by the importer itself don't need to be spilled
2219
2220         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2221
2222         if (isTempLcl)
2223         {
2224             continue;
2225         }
2226
2227         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2228     }
2229 }
2230
2231 void Compiler::impSpillEvalStack()
2232 {
2233     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2234     {
2235         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2236     }
2237 }
2238
2239 /*****************************************************************************
2240  *
2241  *  If the stack contains any trees with side effects in them, assign those
2242  *  trees to temps and append the assignments to the statement list.
2243  *  On return the stack is guaranteed to be empty.
2244  */
2245
2246 inline void Compiler::impEvalSideEffects()
2247 {
2248     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2249     verCurrentState.esStackDepth = 0;
2250 }
2251
2252 /*****************************************************************************
2253  *
2254  *  If the stack contains any trees with side effects in them, assign those
2255  *  trees to temps and replace them on the stack with refs to their temps.
2256  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2257  */
2258
2259 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2260 {
2261     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2262
2263     /* Before we make any appends to the tree list we must spill the
2264      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2265
2266     impSpillSpecialSideEff();
2267
2268     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2269     {
2270         chkLevel = verCurrentState.esStackDepth;
2271     }
2272
2273     assert(chkLevel <= verCurrentState.esStackDepth);
2274
2275     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2276
2277     for (unsigned i = 0; i < chkLevel; i++)
2278     {
2279         GenTreePtr tree = verCurrentState.esStack[i].val;
2280
2281         GenTreePtr lclVarTree;
2282
2283         if ((tree->gtFlags & spillFlags) != 0 ||
2284             (spillGlobEffects &&                        // Only consider the following when  spillGlobEffects == TRUE
2285              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2286              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2287                                            // lvAddrTaken flag.
2288         {
2289             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2290         }
2291     }
2292 }
2293
2294 /*****************************************************************************
2295  *
2296  *  If the stack contains any trees with special side effects in them, assign
2297  *  those trees to temps and replace them on the stack with refs to their temps.
2298  */
2299
2300 inline void Compiler::impSpillSpecialSideEff()
2301 {
2302     // Only exception objects need to be carefully handled
2303
2304     if (!compCurBB->bbCatchTyp)
2305     {
2306         return;
2307     }
2308
2309     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2310     {
2311         GenTreePtr tree = verCurrentState.esStack[level].val;
2312         // Make sure that if the exception object appears anywhere in the sub tree, we spill that stack entry.
2313         if (gtHasCatchArg(tree))
2314         {
2315             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2316         }
2317     }
2318 }
2319
2320 /*****************************************************************************
2321  *
2322  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2323  */
2324
2325 void Compiler::impSpillValueClasses()
2326 {
2327     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2328     {
2329         GenTreePtr tree = verCurrentState.esStack[level].val;
2330
2331         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2332         {
2333             // Tree walk was aborted, which means that we found a
2334             // value class on the stack.  Need to spill that
2335             // stack entry.
2336
2337             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2338         }
2339     }
2340 }
2341
2342 /*****************************************************************************
2343  *
2344  *  Callback that checks if a tree node is TYP_STRUCT
2345  */
2346
2347 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2348 {
2349     fgWalkResult walkResult = WALK_CONTINUE;
2350
2351     if ((*pTree)->gtType == TYP_STRUCT)
2352     {
2353         // Abort the walk and indicate that we found a value class
2354
2355         walkResult = WALK_ABORT;
2356     }
2357
2358     return walkResult;
2359 }
2360
2361 /*****************************************************************************
2362  *
2363  *  If the stack contains any trees with references to local #lclNum, assign
2364  *  those trees to temps and replace them on the stack with refs to
2365  *  their temps.
2366  */
2367
2368 void Compiler::impSpillLclRefs(ssize_t lclNum)
2369 {
2370     /* Before we make any appends to the tree list we must spill the
2371      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2372
2373     impSpillSpecialSideEff();
2374
2375     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2376     {
2377         GenTreePtr tree = verCurrentState.esStack[level].val;
2378
2379         /* If the tree may throw an exception, and the block has a handler,
2380            then we need to spill assignments to the local if the local is
2381            live on entry to the handler.
2382            Just spill 'em all without considering the liveness */
2383
2384         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2385
2386         /* Skip the tree if it doesn't have an affected reference,
2387            unless xcptnCaught */
2388
2389         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2390         {
2391             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2392         }
2393     }
2394 }
2395
2396 /*****************************************************************************
2397  *
2398  *  Push catch arg onto the stack.
2399  *  If there are jumps to the beginning of the handler, insert basic block
2400  *  and spill catch arg to a temp. Update the handler block if necessary.
2401  *
2402  *  Returns the basic block of the actual handler.
2403  */
2404
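// When a spill is needed, the shape is roughly (illustrative): a new BBJ_NONE block is inserted
// before the handler containing 'tempNum = GT_CATCH_ARG', and the handler itself then just
// pushes LCL_VAR(tempNum); otherwise the GT_CATCH_ARG node is pushed on the stack directly.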
2405 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2406 {
2407     // Do not inject the basic block twice on reimport. This should be
2408     // hit only under JIT stress. See if the block is the one we injected.
2409     // Note that EH canonicalization can inject internal blocks here. We might
2410     // be able to re-use such a block (but we don't, right now).
2411     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2412         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2413     {
2414         GenTreePtr tree = hndBlk->bbTreeList;
2415
2416         if (tree != nullptr && tree->gtOper == GT_STMT)
2417         {
2418             tree = tree->gtStmt.gtStmtExpr;
2419             assert(tree != nullptr);
2420
2421             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2422                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2423             {
2424                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2425
2426                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2427
2428                 return hndBlk->bbNext;
2429             }
2430         }
2431
2432         // If we get here, it must have been some other kind of internal block. It's possible that
2433         // someone prepended something to our injected block, but that's unlikely.
2434     }
2435
2436     /* Push the exception address value on the stack */
2437     GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2438
2439     /* Mark the node as having a side-effect - i.e. cannot be
2440      * moved around since it is tied to a fixed location (EAX) */
2441     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2442
2443     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2444     if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2445     {
2446         if (hndBlk->bbRefs == 1)
2447         {
2448             hndBlk->bbRefs++;
2449         }
2450
2451         /* Create extra basic block for the spill */
2452         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2453         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2454         newBlk->setBBWeight(hndBlk->bbWeight);
2455         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2456
2457         /* Account for the new link we are about to create */
2458         hndBlk->bbRefs++;
2459
2460         /* Spill into a temp */
2461         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2462         lvaTable[tempNum].lvType = TYP_REF;
2463         arg                      = gtNewTempAssign(tempNum, arg);
2464
2465         hndBlk->bbStkTempsIn = tempNum;
2466
2467         /* Report the debug info. impImportBlockCode won't treat
2468          * the actual handler as an exception block and thus won't do it for us. */
2469         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2470         {
2471             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2472             arg            = gtNewStmt(arg, impCurStmtOffs);
2473         }
2474
2475         fgInsertStmtAtEnd(newBlk, arg);
2476
2477         arg = gtNewLclvNode(tempNum, TYP_REF);
2478     }
2479
2480     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2481
2482     return hndBlk;
2483 }
2484
2485 /*****************************************************************************
2486  *
2487  *  Given a tree, clone it. *pClone is set to the cloned tree.
2488  *  Returns the original tree if the cloning was easy,
2489  *   else returns the temp to which the tree had to be spilled.
2490  *  If the tree has side-effects, it will be spilled to a temp.
2491  */
2492
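// Typical usage in this file (see impRuntimeLookupToTree above): pass in the tree to be
// reused and get a second usable copy back through *pClone; if the tree has global effects,
// both the returned tree and the clone end up as reads of the same spill temp.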
2493 GenTreePtr Compiler::impCloneExpr(GenTreePtr           tree,
2494                                   GenTreePtr*          pClone,
2495                                   CORINFO_CLASS_HANDLE structHnd,
2496                                   unsigned             curLevel,
2497                                   GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2498 {
2499     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2500     {
2501         GenTreePtr clone = gtClone(tree, true);
2502
2503         if (clone)
2504         {
2505             *pClone = clone;
2506             return tree;
2507         }
2508     }
2509
2510     /* Store the operand in a temp and return the temp */
2511
2512     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2513
2514     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2515     // return a struct type. It also may modify the struct type to a more
2516     // specialized type (e.g. a SIMD type).  So we will get the type from
2517     // the lclVar AFTER calling impAssignTempGen().
2518
2519     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2520     var_types type = genActualType(lvaTable[temp].TypeGet());
2521
2522     *pClone = gtNewLclvNode(temp, type);
2523     return gtNewLclvNode(temp, type);
2524 }
2525
2526 /*****************************************************************************
2527  * Remember the IL offset (including stack-empty info) for the trees we will
2528  * generate now.
2529  */
2530
2531 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2532 {
2533     if (compIsForInlining())
2534     {
2535         GenTreePtr callStmt = impInlineInfo->iciStmt;
2536         assert(callStmt->gtOper == GT_STMT);
2537         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2538     }
2539     else
2540     {
2541         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2542         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2543         impCurStmtOffs    = offs | stkBit;
2544     }
2545 }
2546
2547 /*****************************************************************************
2548  * Returns current IL offset with stack-empty and call-instruction info incorporated
2549  */
2550 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2551 {
2552     if (compIsForInlining())
2553     {
2554         return BAD_IL_OFFSET;
2555     }
2556     else
2557     {
2558         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2559         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2560         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2561         return offs | stkBit | callInstructionBit;
2562     }
2563 }
2564
2565 //------------------------------------------------------------------------
2566 // impCanSpillNow: check whether it is possible to spill all values from the eval stack to local variables.
2567 //
2568 // Arguments:
2569 //    prevOpcode - last importer opcode
2570 //
2571 // Return Value:
2572 //    true if it is legal to spill now; false if we are in a sequence that we do not want to break up.
2573 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2574 {
2575     // Don't spill after ldtoken, because it could be a part of the InitializeArray sequence.
2576     // Avoid breaking up the sequence so that impInitializeArrayIntrinsic can succeed.
2577     return prevOpcode != CEE_LDTOKEN;
2578 }
2579
2580 /*****************************************************************************
2581  *
2582  *  Remember the instr offset for the statements
2583  *
2584  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2585  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2586  *  as some of the trees corresponding to code up to impCurOpcOffs might
2587  *  still be sitting on the stack.
2588  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2589  *  This should be called when an opcode finally/explicitly causes
2590  *  impAppendTree(tree) to be called (as opposed to being called because of
2591  *  a spill caused by the opcode)
2592  */
2593
2594 #ifdef DEBUG
2595
2596 void Compiler::impNoteLastILoffs()
2597 {
2598     if (impLastILoffsStmt == nullptr)
2599     {
2600         // We should have added a statement for the current basic block
2601         // Is this assert correct?
2602
2603         assert(impTreeLast);
2604         assert(impTreeLast->gtOper == GT_STMT);
2605
2606         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2607     }
2608     else
2609     {
2610         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2611         impLastILoffsStmt                          = nullptr;
2612     }
2613 }
2614
2615 #endif // DEBUG
2616
2617 /*****************************************************************************
2618  * We don't create any GenTree (excluding spills) for a branch.
2619  * For debugging info, we need a placeholder so that we can note
2620  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2621  */
2622
2623 void Compiler::impNoteBranchOffs()
2624 {
2625     if (opts.compDbgCode)
2626     {
2627         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2628     }
2629 }
2630
2631 /*****************************************************************************
2632  * Locate the next stmt boundary for which we need to record info.
2633  * We will have to spill the stack at such boundaries if it is not
2634  * already empty.
2635  * Returns the next stmt boundary (after the start of the block)
2636  */
2637
2638 unsigned Compiler::impInitBlockLineInfo()
2639 {
2640     /* Assume the block does not correspond with any IL offset. This prevents
2641        us from reporting extra offsets. Extra mappings can cause confusing
2642        stepping, especially if the extra mapping is a jump-target, and the
2643        debugger does not ignore extra mappings, but instead rewinds to the
2644        nearest known offset */
2645
2646     impCurStmtOffsSet(BAD_IL_OFFSET);
2647
2648     if (compIsForInlining())
2649     {
2650         return ~0;
2651     }
2652
2653     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2654
2655     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2656     {
2657         impCurStmtOffsSet(blockOffs);
2658     }
2659
2660     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2661     {
2662         impCurStmtOffsSet(blockOffs);
2663     }
2664
2665     /* Always report IL offset 0 or some tests get confused.
2666        Probably a good idea anyway */
2667
2668     if (blockOffs == 0)
2669     {
2670         impCurStmtOffsSet(blockOffs);
2671     }
2672
2673     if (!info.compStmtOffsetsCount)
2674     {
2675         return ~0;
2676     }
2677
2678     /* Find the lowest explicit stmt boundary within the block */
2679
2680     /* Start looking at an entry that is based on our instr offset */
2681
2682     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2683
2684     if (index >= info.compStmtOffsetsCount)
2685     {
2686         index = info.compStmtOffsetsCount - 1;
2687     }
2688
2689     /* If we've guessed too far, back up */
2690
2691     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2692     {
2693         index--;
2694     }
2695
2696     /* If we guessed short, advance ahead */
2697
2698     while (info.compStmtOffsets[index] < blockOffs)
2699     {
2700         index++;
2701
2702         if (index == info.compStmtOffsetsCount)
2703         {
2704             return info.compStmtOffsetsCount;
2705         }
2706     }
2707
2708     assert(index < info.compStmtOffsetsCount);
2709
2710     if (info.compStmtOffsets[index] == blockOffs)
2711     {
2712         /* There is an explicit boundary for the start of this basic block.
2713            So we will start with bbCodeOffs. Else we will wait until we
2714            get to the next explicit boundary */
2715
2716         impCurStmtOffsSet(blockOffs);
2717
2718         index++;
2719     }
2720
2721     return index;
2722 }
2723
2724 /*****************************************************************************/
2725
2726 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2727 {
2728     switch (opcode)
2729     {
2730         case CEE_CALL:
2731         case CEE_CALLI:
2732         case CEE_CALLVIRT:
2733             return true;
2734
2735         default:
2736             return false;
2737     }
2738 }
2739
2740 /*****************************************************************************/
2741
2742 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2743 {
2744     switch (opcode)
2745     {
2746         case CEE_CALL:
2747         case CEE_CALLI:
2748         case CEE_CALLVIRT:
2749         case CEE_JMP:
2750         case CEE_NEWOBJ:
2751         case CEE_NEWARR:
2752             return true;
2753
2754         default:
2755             return false;
2756     }
2757 }
2758
2759 /*****************************************************************************/
2760
2761 // One might think it is worth caching these values, but results indicate
2762 // that it isn't.
2763 // In addition, caching them causes SuperPMI to be unable to completely
2764 // encapsulate an individual method context.
2765 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2766 {
2767     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2768     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2769     return refAnyClass;
2770 }
2771
2772 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2773 {
2774     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2775     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2776     return typeHandleClass;
2777 }
2778
2779 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2780 {
2781     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2782     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2783     return argIteratorClass;
2784 }
2785
2786 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2787 {
2788     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2789     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2790     return stringClass;
2791 }
2792
2793 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2794 {
2795     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2796     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2797     return objectClass;
2798 }
2799
2800 /*****************************************************************************
2801  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2802  *  set its type to TYP_BYREF when we create it. Whether it can be
2803  *  changed to TYP_I_IMPL is only known at the point where we use it.
2804  */
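// For illustration (a sketch, not part of the original comment): given IL such as
//
//     ldloca.s   V_0       // pushes &V_0, imported as a TYP_BYREF tree
//     ldc.i4.8
//     add                  // pointer arithmetic on the local's address
//
// the importer calls impBashVarAddrsToI on the operands of the add, so the
// address-of-local tree is retyped to TYP_I_IMPL at its point of use.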
2805
2806 /* static */
2807 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2808 {
2809     if (tree1->IsVarAddr())
2810     {
2811         tree1->gtType = TYP_I_IMPL;
2812     }
2813
2814     if (tree2 && tree2->IsVarAddr())
2815     {
2816         tree2->gtType = TYP_I_IMPL;
2817     }
2818 }
2819
2820 /*****************************************************************************
2821  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2822  *  to make that an explicit cast in our trees, so any implicit casts that
2823  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2824  *  turned into explicit casts here.
2825  *  We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2826  */
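// For illustration (a sketch, not part of the original comment): on a 64-bit target,
// IL such as
//
//     ldc.i4.1
//     stloc.0              // local declared as native int
//
// leaves a TYP_INT constant where a TYP_I_IMPL value is wanted; this helper either
// retypes the constant in place or wraps the value in an explicit cast to TYP_I_IMPL.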
2827
2828 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2829 {
2830     var_types currType   = genActualType(tree->gtType);
2831     var_types wantedType = genActualType(dstTyp);
2832
2833     if (wantedType != currType)
2834     {
2835         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2836         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2837         {
2838             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2839             {
2840                 tree->gtType = TYP_I_IMPL;
2841             }
2842         }
2843 #ifdef _TARGET_64BIT_
2844         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2845         {
2846             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2847             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2848         }
2849         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2850         {
2851             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2852             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2853         }
2854 #endif // _TARGET_64BIT_
2855     }
2856
2857     return tree;
2858 }
2859
2860 /*****************************************************************************
2861  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2862  *  but we want to make that an explicit cast in our trees, so any implicit casts
2863  *  that exist in the IL are turned into explicit casts here.
2864  */
2865
2866 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2867 {
2868 #ifndef LEGACY_BACKEND
2869     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2870     {
2871         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2872     }
2873 #endif // !LEGACY_BACKEND
2874
2875     return tree;
2876 }
2877
2878 //------------------------------------------------------------------------
2879 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2880 //    with a GT_COPYBLK node.
2881 //
2882 // Arguments:
2883 //    sig - The InitializeArray signature.
2884 //
2885 // Return Value:
2886 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2887 //    nullptr otherwise.
2888 //
2889 // Notes:
2890 //    The function recognizes the following IL pattern:
2891 //      ldc <length> or a list of ldc <lower bound>/<length>
2892 //      newarr or newobj
2893 //      dup
2894 //      ldtoken <field handle>
2895 //      call InitializeArray
2896 //    The lower bounds need not be constant except when the array rank is 1.
2897 //    The function recognizes all kinds of arrays thus enabling a small runtime
2898 //    such as CoreRT to skip providing an implementation for InitializeArray.
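//    For illustration (a sketch, not part of the original comment): a C# initializer such as
//
//        static readonly int[] Primes = { 2, 3, 5, 7, 11 };
//
//    is typically compiled by Roslyn into the pattern above, roughly
//
//        ldc.i4.5
//        newarr    int32
//        dup
//        ldtoken   <PrivateImplementationDetails> data field
//        call      System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray
//
//    and this function replaces the InitializeArray call with a single block copy
//    from that field's static data.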
2899
2900 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2901 {
2902     assert(sig->numArgs == 2);
2903
2904     GenTreePtr fieldTokenNode = impStackTop(0).val;
2905     GenTreePtr arrayLocalNode = impStackTop(1).val;
2906
2907     //
2908     // Verify that the field token is known and valid.  Note that it's also
2909     // possible for the token to come from reflection, in which case we cannot do
2910     // the optimization and must therefore revert to calling the helper.  You can
2911     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2912     //
2913
2914     // Check whether what we have here is the ldtoken helper call.
2915     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2916         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2917     {
2918         return nullptr;
2919     }
2920
2921     // Strip helper call away
2922     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2923
2924     if (fieldTokenNode->gtOper == GT_IND)
2925     {
2926         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2927     }
2928
2929     // Check for constant
2930     if (fieldTokenNode->gtOper != GT_CNS_INT)
2931     {
2932         return nullptr;
2933     }
2934
2935     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2936     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2937     {
2938         return nullptr;
2939     }
2940
2941     //
2942     // We need to get the number of elements in the array and the size of each element.
2943     // We verify that the newarr statement is exactly what we expect it to be.
2944     // If it's not, then we just return nullptr and don't optimize this call.
2945     //
2946
2947     //
2948     // It is possible that we don't have any statements in the block yet.
2949     //
2950     if (impTreeLast->gtOper != GT_STMT)
2951     {
2952         assert(impTreeLast->gtOper == GT_BEG_STMTS);
2953         return nullptr;
2954     }
2955
2956     //
2957     // We start by looking at the last statement, making sure it's an assignment, and
2958     // that the target of the assignment is the array passed to InitializeArray.
2959     //
2960     GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2961     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2962         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2963         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2964     {
2965         return nullptr;
2966     }
2967
2968     //
2969     // Make sure that the object being assigned is a helper call.
2970     //
2971
2972     GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2973     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2974     {
2975         return nullptr;
2976     }
2977
2978     //
2979     // Verify that it is one of the new array helpers.
2980     //
2981
2982     bool isMDArray = false;
2983
2984     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2985         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2986         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2987         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2988 #ifdef FEATURE_READYTORUN_COMPILER
2989         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
2990 #endif
2991             )
2992     {
2993         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
2994         {
2995             return nullptr;
2996         }
2997
2998         isMDArray = true;
2999     }
3000
3001     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3002
3003     //
3004     // Make sure we found a compile time handle to the array
3005     //
3006
3007     if (!arrayClsHnd)
3008     {
3009         return nullptr;
3010     }
3011
3012     unsigned rank = 0;
3013     S_UINT32 numElements;
3014
3015     if (isMDArray)
3016     {
3017         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3018
3019         if (rank == 0)
3020         {
3021             return nullptr;
3022         }
3023
3024         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3025         assert(tokenArg != nullptr);
3026         GenTreeArgList* numArgsArg = tokenArg->Rest();
3027         assert(numArgsArg != nullptr);
3028         GenTreeArgList* argsArg = numArgsArg->Rest();
3029         assert(argsArg != nullptr);
3030
3031         //
3032         // The number of arguments should be a constant between 1 and 64. The rank can't be 0,
3033         // so at least one length must be present, and the rank can't exceed 32, so there can
3034         // be at most 64 arguments - 32 lengths and 32 lower bounds.
3035         //
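        // For example (illustrative): a rank-3 array created with explicit lower bounds passes
        // numArgs == 6 (one lower bound and one length per dimension), while one created with
        // lengths only passes numArgs == 3, matching the rank.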
3036
3037         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3038             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3039         {
3040             return nullptr;
3041         }
3042
3043         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3044         bool     lowerBoundsSpecified;
3045
3046         if (numArgs == rank * 2)
3047         {
3048             lowerBoundsSpecified = true;
3049         }
3050         else if (numArgs == rank)
3051         {
3052             lowerBoundsSpecified = false;
3053
3054             //
3055             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3056             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3057             // we get a SDArray as well; see the for loop below.
3058             //
3059
3060             if (rank == 1)
3061             {
3062                 isMDArray = false;
3063             }
3064         }
3065         else
3066         {
3067             return nullptr;
3068         }
3069
3070         //
3071         // The rank is known to be at least 1 so we can start with numElements being 1
3072         // to avoid the need to special case the first dimension.
3073         //
3074
3075         numElements = S_UINT32(1);
3076
3077         struct Match
3078         {
3079             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3080             {
3081                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3082                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3083             }
3084
3085             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3086             {
3087                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3088                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3089                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3090             }
3091
3092             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3093             {
3094                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3095                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3096             }
3097
3098             static bool IsComma(GenTree* tree)
3099             {
3100                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3101             }
3102         };
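        // Sketch (derived from the Match helpers above, not part of the original comment): the
        // helper's args operand is expected to be a right-leaning chain of commas that stores
        // each dimension into the lvaNewObjArrayArgs temp and ends with that temp's address:
        //
        //     COMMA(ASG(IND(ADD(ADDR(LCL_VAR lvaNewObjArrayArgs), 0)), <arg0>),
        //       COMMA(ASG(IND(ADD(ADDR(LCL_VAR lvaNewObjArrayArgs), 4)), <arg1>),
        //         ...
        //           ADDR(LCL_VAR lvaNewObjArrayArgs)))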
3103
3104         unsigned argIndex = 0;
3105         GenTree* comma;
3106
3107         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3108         {
3109             if (lowerBoundsSpecified)
3110             {
3111                 //
3112                 // In general lower bounds can be ignored because they're not needed to
3113                 // calculate the total number of elements. But for single dimensional arrays
3114                 // we need to know if the lower bound is 0 because in this case the runtime
3115                 // creates a SDArray and this affects the way the array data offset is calculated.
3116                 //
3117
3118                 if (rank == 1)
3119                 {
3120                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3121                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3122                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3123
3124                     if (lowerBoundNode->IsIntegralConst(0))
3125                     {
3126                         isMDArray = false;
3127                     }
3128                 }
3129
3130                 comma = comma->gtGetOp2();
3131                 argIndex++;
3132             }
3133
3134             GenTree* lengthNodeAssign = comma->gtGetOp1();
3135             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3136             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3137
3138             if (!lengthNode->IsCnsIntOrI())
3139             {
3140                 return nullptr;
3141             }
3142
3143             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3144             argIndex++;
3145         }
3146
3147         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3148
3149         if (argIndex != numArgs)
3150         {
3151             return nullptr;
3152         }
3153     }
3154     else
3155     {
3156         //
3157         // Make sure there are exactly two arguments:  the array class and
3158         // the number of elements.
3159         //
3160
3161         GenTreePtr arrayLengthNode;
3162
3163         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3164 #ifdef FEATURE_READYTORUN_COMPILER
3165         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3166         {
3167             // Array length is 1st argument for readytorun helper
3168             arrayLengthNode = args->Current();
3169         }
3170         else
3171 #endif
3172         {
3173             // Array length is 2nd argument for regular helper
3174             arrayLengthNode = args->Rest()->Current();
3175         }
3176
3177         //
3178         // Make sure that the number of elements looks valid.
3179         //
3180         if (arrayLengthNode->gtOper != GT_CNS_INT)
3181         {
3182             return nullptr;
3183         }
3184
3185         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3186
3187         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3188         {
3189             return nullptr;
3190         }
3191     }
3192
3193     CORINFO_CLASS_HANDLE elemClsHnd;
3194     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3195
3196     //
3197     // Note that genTypeSize will return zero for non-primitive types, which is exactly
3198     // what we want (size will then be 0, and we will catch this in the conditional below).
3199     // Note that we don't expect this to fail for valid binaries, so we assert in the
3200     // non-verification case (the verification case should not assert but rather correctly
3201     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3202     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3203     // why.
3204     //
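    // For example (illustrative): for a 5-element int[] initializer, elementType is TYP_INT,
    // elemSize is 4 and size is 20, so 20 bytes will be copied from the static data blob.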
3205
3206     S_UINT32 elemSize(genTypeSize(elementType));
3207     S_UINT32 size = elemSize * S_UINT32(numElements);
3208
3209     if (size.IsOverflow())
3210     {
3211         return nullptr;
3212     }
3213
3214     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3215     {
3216         assert(verNeedsVerification());
3217         return nullptr;
3218     }
3219
3220     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3221     if (!initData)
3222     {
3223         return nullptr;
3224     }
3225
3226     //
3227     // At this point we are ready to commit to implementing the InitializeArray
3228     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3229     // return the struct assignment node.
3230     //
3231
3232     impPopStack();
3233     impPopStack();
3234
3235     const unsigned blkSize = size.Value();
3236     GenTreePtr     dst;
3237
3238     if (isMDArray)
3239     {
3240         unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3241
3242         dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3243     }
3244     else
3245     {
3246         dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3247     }
3248     GenTreePtr blk     = gtNewBlockVal(dst, blkSize);
3249     GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3250     GenTreePtr src     = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3251
3252     return gtNewBlkOpNode(blk,     // dst
3253                           src,     // src
3254                           blkSize, // size
3255                           false,   // volatil
3256                           true);   // copyBlock
3257 }
3258
3259 /*****************************************************************************/
3260 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3261 // Returns NULL if an intrinsic cannot be used
3262
3263 GenTreePtr Compiler::impIntrinsic(GenTreePtr            newobjThis,
3264                                   CORINFO_CLASS_HANDLE  clsHnd,
3265                                   CORINFO_METHOD_HANDLE method,
3266                                   CORINFO_SIG_INFO*     sig,
3267                                   int                   memberRef,
3268                                   bool                  readonlyCall,
3269                                   bool                  tailCall,
3270                                   CorInfoIntrinsics*    pIntrinsicID)
3271 {
3272     bool              mustExpand  = false;
3273     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3274     *pIntrinsicID                 = intrinsicID;
3275
3276 #ifndef _TARGET_ARM_
3277     genTreeOps interlockedOperator;
3278 #endif
3279
3280     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3281     {
3282         // must be done regardless of DbgCode and MinOpts
3283         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3284     }
3285 #ifdef _TARGET_64BIT_
3286     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3287     {
3288         // must be done regardless of DbgCode and MinOpts
3289         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3290     }
3291 #else
3292     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3293 #endif
3294
3295     GenTreePtr retNode = nullptr;
3296
3297     //
3298     // We disable the inlining of intrinsics for MinOpts.
3299     //
3300     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3301     {
3302         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3303         return retNode;
3304     }
3305
3306     // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3307     // seem to work properly for Infinity values; we don't do
3308     // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have.
3309
3310     var_types callType = JITtype2varType(sig->retType);
3311
3312     /* First do the intrinsics which are always smaller than a call */
3313
3314     switch (intrinsicID)
3315     {
3316         GenTreePtr op1, op2;
3317
3318         case CORINFO_INTRINSIC_Sin:
3319         case CORINFO_INTRINSIC_Sqrt:
3320         case CORINFO_INTRINSIC_Abs:
3321         case CORINFO_INTRINSIC_Cos:
3322         case CORINFO_INTRINSIC_Round:
3323         case CORINFO_INTRINSIC_Cosh:
3324         case CORINFO_INTRINSIC_Sinh:
3325         case CORINFO_INTRINSIC_Tan:
3326         case CORINFO_INTRINSIC_Tanh:
3327         case CORINFO_INTRINSIC_Asin:
3328         case CORINFO_INTRINSIC_Acos:
3329         case CORINFO_INTRINSIC_Atan:
3330         case CORINFO_INTRINSIC_Atan2:
3331         case CORINFO_INTRINSIC_Log10:
3332         case CORINFO_INTRINSIC_Pow:
3333         case CORINFO_INTRINSIC_Exp:
3334         case CORINFO_INTRINSIC_Ceiling:
3335         case CORINFO_INTRINSIC_Floor:
3336
3337             // These are math intrinsics
3338
3339             assert(callType != TYP_STRUCT);
3340
3341             op1 = nullptr;
3342
3343 #if defined(LEGACY_BACKEND)
3344             if (IsTargetIntrinsic(intrinsicID))
3345 #elif !defined(_TARGET_X86_)
3346             // Intrinsics that are not implemented directly by target instructions will
3347             // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3348             // don't do this optimization, because
3349             //  a) of backward compatibility reasons with desktop .NET 4.6 / 4.6.1, and
3350             //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
3351             //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3352             if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3353 #else
3354             // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3355             // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3356             // code generation for certain EH constructs.
3357             if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3358 #endif
3359             {
3360                 switch (sig->numArgs)
3361                 {
3362                     case 1:
3363                         op1 = impPopStack().val;
3364
3365 #if FEATURE_X87_DOUBLES
3366
3367                         // X87 stack doesn't differentiate between float/double
3368                         // so it doesn't need a cast, but everybody else does
3369                         // Just double check it is at least a FP type
3370                         noway_assert(varTypeIsFloating(op1));
3371
3372 #else // FEATURE_X87_DOUBLES
3373
3374                         if (op1->TypeGet() != callType)
3375                         {
3376                             op1 = gtNewCastNode(callType, op1, callType);
3377                         }
3378
3379 #endif // FEATURE_X87_DOUBLES
3380
3381                         op1 = new (this, GT_INTRINSIC)
3382                             GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3383                         break;
3384
3385                     case 2:
3386                         op2 = impPopStack().val;
3387                         op1 = impPopStack().val;
3388
3389 #if FEATURE_X87_DOUBLES
3390
3391                         // X87 stack doesn't differentiate between float/double
3392                         // so it doesn't need a cast, but everybody else does
3393                         // Just double check it is at least a FP type
3394                         noway_assert(varTypeIsFloating(op2));
3395                         noway_assert(varTypeIsFloating(op1));
3396
3397 #else // FEATURE_X87_DOUBLES
3398
3399                         if (op2->TypeGet() != callType)
3400                         {
3401                             op2 = gtNewCastNode(callType, op2, callType);
3402                         }
3403                         if (op1->TypeGet() != callType)
3404                         {
3405                             op1 = gtNewCastNode(callType, op1, callType);
3406                         }
3407
3408 #endif // FEATURE_X87_DOUBLES
3409
3410                         op1 = new (this, GT_INTRINSIC)
3411                             GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3412                         break;
3413
3414                     default:
3415                         NO_WAY("Unsupported number of args for Math Intrinsic");
3416                 }
3417
3418 #ifndef LEGACY_BACKEND
3419                 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3420                 {
3421                     op1->gtFlags |= GTF_CALL;
3422                 }
3423 #endif
3424             }
3425
3426             retNode = op1;
3427             break;
3428
3429 #ifdef _TARGET_XARCH_
3430         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3431         case CORINFO_INTRINSIC_InterlockedAdd32:
3432             interlockedOperator = GT_LOCKADD;
3433             goto InterlockedBinOpCommon;
3434         case CORINFO_INTRINSIC_InterlockedXAdd32:
3435             interlockedOperator = GT_XADD;
3436             goto InterlockedBinOpCommon;
3437         case CORINFO_INTRINSIC_InterlockedXchg32:
3438             interlockedOperator = GT_XCHG;
3439             goto InterlockedBinOpCommon;
3440
3441 #ifdef _TARGET_AMD64_
3442         case CORINFO_INTRINSIC_InterlockedAdd64:
3443             interlockedOperator = GT_LOCKADD;
3444             goto InterlockedBinOpCommon;
3445         case CORINFO_INTRINSIC_InterlockedXAdd64:
3446             interlockedOperator = GT_XADD;
3447             goto InterlockedBinOpCommon;
3448         case CORINFO_INTRINSIC_InterlockedXchg64:
3449             interlockedOperator = GT_XCHG;
3450             goto InterlockedBinOpCommon;
3451 #endif // _TARGET_AMD64_
3452
3453         InterlockedBinOpCommon:
3454             assert(callType != TYP_STRUCT);
3455             assert(sig->numArgs == 2);
3456
3457             op2 = impPopStack().val;
3458             op1 = impPopStack().val;
3459
3460             // This creates:
3461             //   val
3462             // XAdd
3463             //   addr
3464             //     field (for example)
3465             //
3466             // In the case where the first argument is the address of a local, we might
3467             // want to make this *not* make the var address-taken -- but atomic instructions
3468             // on a local are probably pretty useless anyway, so we probably don't care.
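            // Note (illustrative, not part of the original comment): GT_XADD atomically adds the
            // value to the location and yields the location's previous contents, GT_XCHG
            // atomically swaps in the new value and yields the old one, and GT_LOCKADD performs
            // the atomic add without making the previous value available.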
3469
3470             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3471             op1->gtFlags |= GTF_GLOB_EFFECT;
3472             retNode = op1;
3473             break;
3474 #endif // _TARGET_XARCH_
3475
3476         case CORINFO_INTRINSIC_MemoryBarrier:
3477
3478             assert(sig->numArgs == 0);
3479
3480             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3481             op1->gtFlags |= GTF_GLOB_EFFECT;
3482             retNode = op1;
3483             break;
3484
3485 #ifdef _TARGET_XARCH_
3486         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3487         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3488 #ifdef _TARGET_AMD64_
3489         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3490 #endif
3491         {
3492             assert(callType != TYP_STRUCT);
3493             assert(sig->numArgs == 3);
3494             GenTreePtr op3;
3495
3496             op3 = impPopStack().val; // comparand
3497             op2 = impPopStack().val; // value
3498             op1 = impPopStack().val; // location
3499
3500             GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3501
3502             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3503             retNode = node;
3504             break;
3505         }
3506 #endif
3507
3508         case CORINFO_INTRINSIC_StringLength:
3509             op1 = impPopStack().val;
3510             if (!opts.MinOpts() && !opts.compDbgCode)
3511             {
3512                 GenTreeArrLen* arrLen =
3513                     new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3514                 op1 = arrLen;
3515             }
3516             else
3517             {
3518                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3519                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3520                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3521                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3522             }
3523             retNode = op1;
3524             break;
3525
3526         case CORINFO_INTRINSIC_StringGetChar:
3527             op2 = impPopStack().val;
3528             op1 = impPopStack().val;
3529             op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3530             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3531             retNode = op1;
3532             break;
3533
3534         case CORINFO_INTRINSIC_InitializeArray:
3535             retNode = impInitializeArrayIntrinsic(sig);
3536             break;
3537
3538         case CORINFO_INTRINSIC_Array_Address:
3539         case CORINFO_INTRINSIC_Array_Get:
3540         case CORINFO_INTRINSIC_Array_Set:
3541             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3542             break;
3543
3544         case CORINFO_INTRINSIC_GetTypeFromHandle:
3545             op1 = impStackTop(0).val;
3546             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3547                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3548             {
3549                 op1 = impPopStack().val;
3550                 // Change call to return RuntimeType directly.
3551                 op1->gtType = TYP_REF;
3552                 retNode     = op1;
3553             }
3554             // Call the regular function.
3555             break;
3556
3557         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3558             op1 = impStackTop(0).val;
3559             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3560                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3561             {
3562                 // Old tree
3563                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3564                 //
3565                 // New tree
3566                 // TreeToGetNativeTypeHandle
3567
3568                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3569                 // to that helper.
3570
3571                 op1 = impPopStack().val;
3572
3573                 // Get native TypeHandle argument to old helper
3574                 op1 = op1->gtCall.gtCallArgs;
3575                 assert(op1->OperIsList());
3576                 assert(op1->gtOp.gtOp2 == nullptr);
3577                 op1     = op1->gtOp.gtOp1;
3578                 retNode = op1;
3579             }
3580             // Call the regular function.
3581             break;
3582
3583 #ifndef LEGACY_BACKEND
3584         case CORINFO_INTRINSIC_Object_GetType:
3585
3586             op1 = impPopStack().val;
3587             op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3588
3589             // Set the CALL flag to indicate that the operator is implemented by a call.
3590             // Set also the EXCEPTION flag because the native implementation of
3591             // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3592             op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3593             retNode = op1;
3594             break;
3595 #endif
3596         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3597         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3598         // substitution.  The parameter byref will be assigned into the newly allocated object.
3599         case CORINFO_INTRINSIC_ByReference_Ctor:
3600         {
3601             // Remove call to constructor and directly assign the byref passed
3602             // to the call to the first slot of the ByReference struct.
3603             op1                                    = impPopStack().val;
3604             GenTreePtr           thisptr           = newobjThis;
3605             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3606             GenTreePtr           field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3607             GenTreePtr           assign            = gtNewAssignNode(field, op1);
3608             GenTreePtr           byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3609             assert(byReferenceStruct != nullptr);
3610             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3611             retNode = assign;
3612             break;
3613         }
3614         // Implement ptr value getter for ByReference struct.
3615         case CORINFO_INTRINSIC_ByReference_Value:
3616         {
3617             op1                         = impPopStack().val;
3618             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3619             GenTreePtr           field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3620             retNode                     = field;
3621             break;
3622         }
3623         case CORINFO_INTRINSIC_Span_GetItem:
3624         case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3625         {
3626             // Have the index and a pointer-to-Span<T> 's' on the stack. Expand to:
3627             //
3628             // For Span<T>
3629             //   Comma
3630             //     BoundsCheck(index, s->_length)
3631             //     s->_pointer + index * sizeof(T)
3632             //
3633             // For ReadOnlySpan<T>
3634             //   Comma
3635             //     BoundsCheck(index, s->_length)
3636             //     *(s->_pointer + index * sizeof(T))
3637             //
3638             // Signature should show one class type parameter, which
3639             // we need to examine.
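            // Illustrative note (not part of the original comment): Span<T>'s indexer returns
            // 'ref T', so the computed byref address is itself the result, whereas
            // ReadOnlySpan<T>'s indexer here returns the element by value, which is why the
            // read-only case wraps the address in a GT_IND below.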
3640             assert(sig->sigInst.classInstCount == 1);
3641             CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3642             const unsigned       elemSize    = info.compCompHnd->getClassSize(spanElemHnd);
3643             assert(elemSize > 0);
3644
3645             const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3646
3647             JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3648                     info.compCompHnd->getClassName(spanElemHnd), elemSize);
3649
3650             GenTreePtr index          = impPopStack().val;
3651             GenTreePtr ptrToSpan      = impPopStack().val;
3652             GenTreePtr indexClone     = nullptr;
3653             GenTreePtr ptrToSpanClone = nullptr;
3654
3655 #if defined(DEBUG)
3656             if (verbose)
3657             {
3658                 printf("with ptr-to-span\n");
3659                 gtDispTree(ptrToSpan);
3660                 printf("and index\n");
3661                 gtDispTree(index);
3662             }
3663 #endif // defined(DEBUG)
3664
3665             // We need to use both index and ptr-to-span twice, so clone or spill.
3666             index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3667                                  nullptr DEBUGARG("Span.get_Item index"));
3668             ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3669                                      nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3670
3671             // Bounds check
3672             CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
3673             const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3674             GenTreePtr           length       = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
3675             GenTreePtr           boundsCheck  = new (this, GT_ARR_BOUNDS_CHECK)
3676                 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3677
3678             // Element access
3679             GenTreePtr           indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3680             GenTreePtr           sizeofNode  = gtNewIconNode(elemSize);
3681             GenTreePtr           mulNode     = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3682             CORINFO_FIELD_HANDLE ptrHnd      = info.compCompHnd->getFieldInClass(clsHnd, 0);
3683             const unsigned       ptrOffset   = info.compCompHnd->getFieldOffset(ptrHnd);
3684             GenTreePtr           data        = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
3685             GenTreePtr           result      = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3686
3687             // Prepare result
3688             var_types resultType = JITtype2varType(sig->retType);
3689
3690             if (isReadOnly)
3691             {
3692                 result = gtNewOperNode(GT_IND, resultType, result);
3693             }
3694             else
3695             {
3696                 assert(resultType == result->TypeGet());
3697             }
3698
3699             retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3700
3701             break;
3702         }
3703
3704         default:
3705             /* Unknown intrinsic */
3706             break;
3707     }
3708
3709     if (mustExpand)
3710     {
3711         if (retNode == nullptr)
3712         {
3713             NO_WAY("JIT must expand the intrinsic!");
3714         }
3715     }
3716
3717     return retNode;
3718 }
3719
3720 /*****************************************************************************/
3721
3722 GenTreePtr Compiler::impArrayAccessIntrinsic(
3723     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3724 {
3725     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3726        the following, as it generates fatter code.
3727     */
3728
3729     if (compCodeOpt() == SMALL_CODE)
3730     {
3731         return nullptr;
3732     }
3733
3734     /* These intrinsics generate fatter (but faster) code and are only
3735        done if we don't need SMALL_CODE */
3736
3737     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3738
3739     // The rank 1 case is special because it has to handle two array formats
3740     // (the SZ array layout and the general multi-dimensional layout); we simply don't handle that case.
3741     if (rank > GT_ARR_MAX_RANK || rank <= 1)
3742     {
3743         return nullptr;
3744     }
3745
3746     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3747     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3748
3749     // For the ref case, we will only be able to inline if the types match
3750     // (the verifier checks for this; we don't care about the non-verified case) and the
3751     // type is final (so we don't need to do the cast).
3752     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3753     {
3754         // Get the call site signature
3755         CORINFO_SIG_INFO LocalSig;
3756         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3757         assert(LocalSig.hasThis());
3758
3759         CORINFO_CLASS_HANDLE actualElemClsHnd;
3760
3761         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3762         {
3763             // Fetch the last argument, the one that indicates the type we are setting.
3764             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3765             for (unsigned r = 0; r < rank; r++)
3766             {
3767                 argType = info.compCompHnd->getArgNext(argType);
3768             }
3769
3770             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3771             actualElemClsHnd = argInfo.GetClassHandle();
3772         }
3773         else
3774         {
3775             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3776
3777             // Fetch the return type
3778             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3779             assert(retInfo.IsByRef());
3780             actualElemClsHnd = retInfo.GetClassHandle();
3781         }
3782
3783         // if it's not final, we can't do the optimization
3784         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3785         {
3786             return nullptr;
3787         }
3788     }
3789
3790     unsigned arrayElemSize;
3791     if (elemType == TYP_STRUCT)
3792     {
3793         assert(arrElemClsHnd);
3794
3795         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3796     }
3797     else
3798     {
3799         arrayElemSize = genTypeSize(elemType);
3800     }
3801
3802     if ((unsigned char)arrayElemSize != arrayElemSize)
3803     {
3804         // arrayElemSize would be truncated as an unsigned char.
3805         // This means the array element is too large. Don't do the optimization.
3806         return nullptr;
3807     }
3808
3809     GenTreePtr val = nullptr;
3810
3811     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3812     {
3813         // Assignment of a struct is more work, and there are more gets than sets.
3814         if (elemType == TYP_STRUCT)
3815         {
3816             return nullptr;
3817         }
3818
3819         val = impPopStack().val;
3820         assert(genActualType(elemType) == genActualType(val->gtType) ||
3821                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3822                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3823                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3824     }
3825
3826     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3827
3828     GenTreePtr inds[GT_ARR_MAX_RANK];
3829     for (unsigned k = rank; k > 0; k--)
3830     {
3831         inds[k - 1] = impPopStack().val;
3832     }
3833
3834     GenTreePtr arr = impPopStack().val;
3835     assert(arr->gtType == TYP_REF);
3836
3837     GenTreePtr arrElem =
3838         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3839                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3840
3841     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3842     {
3843         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3844     }
3845
3846     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3847     {
3848         assert(val != nullptr);
3849         return gtNewAssignNode(arrElem, val);
3850     }
3851     else
3852     {
3853         return arrElem;
3854     }
3855 }
3856
3857 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3858 {
3859     unsigned i;
3860
3861     // do some basic checks first
3862     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3863     {
3864         return FALSE;
3865     }
3866
3867     if (verCurrentState.esStackDepth > 0)
3868     {
3869         // merge stack types
3870         StackEntry* parentStack = block->bbStackOnEntry();
3871         StackEntry* childStack  = verCurrentState.esStack;
3872
3873         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3874         {
3875             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3876             {
3877                 return FALSE;
3878             }
3879         }
3880     }
3881
3882     // merge initialization status of this ptr
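    // Sketch of the merge (derived from the code below): TIS_Bottom means the successor's
    // 'this' init state is not yet known, so our state is simply copied in; if the states
    // disagree we widen to TIS_Top ("may or may not be initialized") and, for a
    // failed-verification block at the start of a try region, re-propagate that state to
    // the protecting handlers.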
3883
3884     if (verTrackObjCtorInitState)
3885     {
3886         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3887         assert(verCurrentState.thisInitialized != TIS_Bottom);
3888
3889         // If the successor block's thisInit state is unknown, copy it from the current state.
3890         if (block->bbThisOnEntry() == TIS_Bottom)
3891         {
3892             *changed = true;
3893             verSetThisInit(block, verCurrentState.thisInitialized);
3894         }
3895         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3896         {
3897             if (block->bbThisOnEntry() != TIS_Top)
3898             {
3899                 *changed = true;
3900                 verSetThisInit(block, TIS_Top);
3901
3902                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3903                 {
3904                     // The block is bad. Control can flow through the block to any handler that catches the
3905                     // verification exception, but the importer ignores bad blocks and therefore won't model
3906                     // this flow in the normal way. To complete the merge into the bad block, the new state
3907                     // needs to be manually pushed to the handlers that may be reached after the verification
3908                     // exception occurs.
3909                     //
3910                     // Usually, the new state was already propagated to the relevant handlers while processing
3911                     // the predecessors of the bad block. The exception is when the bad block is at the start
3912                     // of a try region, meaning it is protected by additional handlers that do not protect its
3913                     // predecessors.
3914                     //
3915                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3916                     {
3917                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3918                         // recursive calls back into this code path (if successors of the current bad block are
3919                         // also bad blocks).
3920                         //
3921                         ThisInitState origTIS           = verCurrentState.thisInitialized;
3922                         verCurrentState.thisInitialized = TIS_Top;
3923                         impVerifyEHBlock(block, true);
3924                         verCurrentState.thisInitialized = origTIS;
3925                     }
3926                 }
3927             }
3928         }
3929     }
3930     else
3931     {
3932         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3933     }
3934
3935     return TRUE;
3936 }
3937
3938 /*****************************************************************************
3939  * 'logMsg' is true if a log message needs to be logged; false if the caller has
3940  *   already logged it (presumably in a more detailed fashion than is done here).
3941  * 'bVerificationException' is true for a verification exception, false for a
3942  *   "call unauthorized by host" exception.
3943  */
3944
3945 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3946 {
3947     block->bbJumpKind = BBJ_THROW;
3948     block->bbFlags |= BBF_FAILED_VERIFICATION;
3949
3950     impCurStmtOffsSet(block->bbCodeOffs);
3951
3952 #ifdef DEBUG
3953     // we need this since BeginTreeList asserts otherwise
3954     impTreeList = impTreeLast = nullptr;
3955     block->bbFlags &= ~BBF_IMPORTED;
3956
3957     if (logMsg)
3958     {
3959         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3960                 block->bbCodeOffs, block->bbCodeOffsEnd));
3961         if (verbose)
3962         {
3963             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3964         }
3965     }
3966
3967     if (JitConfig.DebugBreakOnVerificationFailure())
3968     {
3969         DebugBreak();
3970     }
3971 #endif
3972
3973     impBeginTreeList();
3974
3975     // if the stack is non-empty, evaluate all the side-effects
3976     if (verCurrentState.esStackDepth > 0)
3977     {
3978         impEvalSideEffects();
3979     }
3980     assert(verCurrentState.esStackDepth == 0);
3981
3982     GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3983                                          gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3984     // verCurrentState.esStackDepth = 0;
3985     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3986
3987     // The inliner is not able to handle methods that require a throw block, so
3988     // make sure this method never gets inlined.
3989     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3990 }
3991
3992 /*****************************************************************************
3993  *
3994  */
3995 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3996
3997 {
3998     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3999     // slightly different mechanism in which it calls the JIT to perform IL verification:
4000     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4001     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4002     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4003     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
4004     // up the exception; instead it embeds a throw inside the offending basic block and lets it
4005     // fail at runtime when the jitted method executes.
4006     //
4007     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4008     // with the IMPORT_ONLY and IL Verification flags set) because this won't actually generate code;
4009     // it just tries to find out whether to fail this method before even actually jitting it.  So, in case
4010     // we detect these two conditions, instead of generating a throw statement inside the offending
4011     // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
4012     // to return false and make RyuJIT behave the same way JIT64 does.
4013     //
4014     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4015     // RyuJIT for the time being until we completely replace JIT64.
4016     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4017
4018     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4019     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
4020     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4021     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4022     // be turned off during importation).
4023     CLANG_FORMAT_COMMENT_ANCHOR;
4024
4025 #ifdef _TARGET_64BIT_
4026
4027 #ifdef DEBUG
4028     bool canSkipVerificationResult =
4029         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4030     assert(tiVerificationNeeded || canSkipVerificationResult);
4031 #endif // DEBUG
4032
4033     // Add the non verifiable flag to the compiler
4034     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4035     {
4036         tiIsVerifiableCode = FALSE;
4037     }
4038 #endif //_TARGET_64BIT_
4039     verResetCurrentState(block, &verCurrentState);
4040     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4041
4042 #ifdef DEBUG
4043     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4044 #endif                   // DEBUG
4045 }
4046
4047 /******************************************************************************/
4048 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4049 {
4050     assert(ciType < CORINFO_TYPE_COUNT);
4051
4052     typeInfo tiResult;
4053     switch (ciType)
4054     {
4055         case CORINFO_TYPE_STRING:
4056         case CORINFO_TYPE_CLASS:
4057             tiResult = verMakeTypeInfo(clsHnd);
4058             if (!tiResult.IsType(TI_REF))
4059             { // type must be consistent with element type
4060                 return typeInfo();
4061             }
4062             break;
4063
4064 #ifdef _TARGET_64BIT_
4065         case CORINFO_TYPE_NATIVEINT:
4066         case CORINFO_TYPE_NATIVEUINT:
4067             if (clsHnd)
4068             {
4069                 // If we have more precise information, use it
4070                 return verMakeTypeInfo(clsHnd);
4071             }
4072             else
4073             {
4074                 return typeInfo::nativeInt();
4075             }
4076             break;
4077 #endif // _TARGET_64BIT_
4078
4079         case CORINFO_TYPE_VALUECLASS:
4080         case CORINFO_TYPE_REFANY:
4081             tiResult = verMakeTypeInfo(clsHnd);
4082             // type must be consistent with element type;
4083             if (!tiResult.IsValueClass())
4084             {
4085                 return typeInfo();
4086             }
4087             break;
4088         case CORINFO_TYPE_VAR:
4089             return verMakeTypeInfo(clsHnd);
4090
4091         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4092         case CORINFO_TYPE_VOID:
4093             return typeInfo();
4094             break;
4095
4096         case CORINFO_TYPE_BYREF:
4097         {
4098             CORINFO_CLASS_HANDLE childClassHandle;
4099             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4100             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4101         }
4102         break;
4103
4104         default:
4105             if (clsHnd)
4106             { // If we have more precise information, use it
4107                 return typeInfo(TI_STRUCT, clsHnd);
4108             }
4109             else
4110             {
4111                 return typeInfo(JITtype2tiType(ciType));
4112             }
4113     }
4114     return tiResult;
4115 }
4116
4117 /******************************************************************************/
4118
4119 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4120 {
4121     if (clsHnd == nullptr)
4122     {
4123         return typeInfo();
4124     }
4125
4126     // Byrefs should only occur in method and local signatures, which are accessed
4127     // using ICorClassInfo and ICorClassInfo.getChildType.
4128     // So findClass() and getClassAttribs() should not be called for byrefs
4129
4130     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4131     {
4132         assert(!"Did findClass() return a Byref?");
4133         return typeInfo();
4134     }
4135
4136     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4137
4138     if (attribs & CORINFO_FLG_VALUECLASS)
4139     {
4140         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4141
4142         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4143         // not occur here, so we may want to change this to an assert instead.
4144         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4145         {
4146             return typeInfo();
4147         }
4148
4149 #ifdef _TARGET_64BIT_
4150         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4151         {
4152             return typeInfo::nativeInt();
4153         }
4154 #endif // _TARGET_64BIT_
4155
4156         if (t != CORINFO_TYPE_UNDEF)
4157         {
4158             return (typeInfo(JITtype2tiType(t)));
4159         }
4160         else if (bashStructToRef)
4161         {
4162             return (typeInfo(TI_REF, clsHnd));
4163         }
4164         else
4165         {
4166             return (typeInfo(TI_STRUCT, clsHnd));
4167         }
4168     }
4169     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4170     {
4171         // See comment in _typeInfo.h for why we do it this way.
4172         return (typeInfo(TI_REF, clsHnd, true));
4173     }
4174     else
4175     {
4176         return (typeInfo(TI_REF, clsHnd));
4177     }
4178 }
4179
4180 /******************************************************************************/
4181 BOOL Compiler::verIsSDArray(typeInfo ti)
4182 {
4183     if (ti.IsNullObjRef())
4184     { // nulls are SD arrays
4185         return TRUE;
4186     }
4187
4188     if (!ti.IsType(TI_REF))
4189     {
4190         return FALSE;
4191     }
4192
4193     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4194     {
4195         return FALSE;
4196     }
4197     return TRUE;
4198 }
4199
4200 /******************************************************************************/
4201 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4202 /* Returns an error type if anything goes wrong */
4203
4204 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4205 {
4206     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4207
4208     if (!verIsSDArray(arrayObjectType))
4209     {
4210         return typeInfo();
4211     }
4212
4213     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4214     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4215
4216     return verMakeTypeInfo(ciType, childClassHandle);
4217 }
4218
4219 /*****************************************************************************
4220  */
4221 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4222 {
4223     CORINFO_CLASS_HANDLE classHandle;
4224     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4225
4226     var_types type = JITtype2varType(ciType);
4227     if (varTypeIsGC(type))
4228     {
4229         // For efficiency, getArgType only returns something in classHandle for
4230         // value types.  For other types that have additional type info, you
4231         // have to call back explicitly.
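             // (Purely as an illustration: for a System.String argument, getArgType reports
             // CORINFO_TYPE_CLASS without filling in classHandle, so the getArgClass call
             // below is what actually retrieves the String class handle.)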
4232         classHandle = info.compCompHnd->getArgClass(sig, args);
4233         if (!classHandle)
4234         {
4235             NO_WAY("Could not figure out Class specified in argument or local signature");
4236         }
4237     }
4238
4239     return verMakeTypeInfo(ciType, classHandle);
4240 }
4241
4242 /*****************************************************************************/
4243
4244 // This does the expensive check to figure out whether the method
4245 // needs to be verified. It is called only when we fail verification,
4246 // just before throwing the verification exception.
4247
4248 BOOL Compiler::verNeedsVerification()
4249 {
4250     // If we have previously determined that verification is NOT needed
4251     // (for example in Compiler::compCompile), that means verification is really not needed.
4252     // Return the same decision we made before.
4253     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4254
4255     if (!tiVerificationNeeded)
4256     {
4257         return tiVerificationNeeded;
4258     }
4259
4260     assert(tiVerificationNeeded);
4261
4262     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4263     // obtain the answer.
4264     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4265         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4266
4267     // canSkipVerification will return one of the following three values:
4268     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4269     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4270     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4271     //     but need to insert a callout to the VM to ask during runtime
4272     //     whether to skip verification or not.
4273
4274     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4275     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4276     {
4277         tiRuntimeCalloutNeeded = true;
4278     }
4279
4280     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4281     {
4282         // Dev10 706080 - Testers don't like the assert, so just silence it
4283         // by not using the macros that invoke debugAssert.
4284         badCode();
4285     }
4286
4287     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4288     // The following line means we will NOT do jit time verification if canSkipVerification
4289     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4290     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4291     return tiVerificationNeeded;
4292 }
4293
4294 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4295 {
4296     if (ti.IsByRef())
4297     {
4298         return TRUE;
4299     }
4300     if (!ti.IsType(TI_STRUCT))
4301     {
4302         return FALSE;
4303     }
4304     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4305 }
4306
4307 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4308 {
4309     if (ti.IsPermanentHomeByRef())
4310     {
4311         return TRUE;
4312     }
4313     else
4314     {
4315         return FALSE;
4316     }
4317 }
4318
4319 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4320 {
4321     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4322             || ti.IsUnboxedGenericTypeVar() ||
4323             (ti.IsType(TI_STRUCT) &&
4324              // exclude byreflike structs
4325              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4326 }
4327
4328 // Is it a boxed value type?
4329 bool Compiler::verIsBoxedValueType(typeInfo ti)
4330 {
4331     if (ti.GetType() == TI_REF)
4332     {
4333         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4334         return !!eeIsValueClass(clsHnd);
4335     }
4336     else
4337     {
4338         return false;
4339     }
4340 }
4341
4342 /*****************************************************************************
4343  *
4344  *  Check if a TailCall is legal.
4345  */
4346
4347 bool Compiler::verCheckTailCallConstraint(
4348     OPCODE                  opcode,
4349     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4350     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4351     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4352                                                        // return false to the caller.
4353                                                        // If false, it will throw.
4354     )
4355 {
4356     DWORD            mflags;
4357     CORINFO_SIG_INFO sig;
4358     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4359                                    // this counter is used to keep track of how many items have been
4360                                    // virtually popped
4361
4362     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4363     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4364     unsigned              methodClassFlgs = 0;
4365
4366     assert(impOpcodeIsCallOpcode(opcode));
4367
4368     if (compIsForInlining())
4369     {
4370         return false;
4371     }
4372
4373     // for calli, VerifyOrReturn that this is not a virtual method
4374     if (opcode == CEE_CALLI)
4375     {
4376         /* Get the call sig */
4377         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4378
4379         // We don't know the target method, so we have to infer the flags, or
4380         // assume the worst-case.
4381         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4382     }
4383     else
4384     {
4385         methodHnd = pResolvedToken->hMethod;
4386
4387         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4388
4389         // When verifying generic code we pair the method handle with its
4390         // owning class to get the exact method signature.
4391         methodClassHnd = pResolvedToken->hClass;
4392         assert(methodClassHnd);
4393
4394         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4395
4396         // opcode specific check
4397         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4398     }
4399
4400     // We must have got the methodClassHnd if opcode is not CEE_CALLI
4401     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4402
4403     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4404     {
4405         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4406     }
4407
4408     // check compatibility of the arguments
4409     unsigned int argCount;
4410     argCount = sig.numArgs;
4411     CORINFO_ARG_LIST_HANDLE args;
4412     args = sig.args;
4413     while (argCount--)
4414     {
4415         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4416
4417         // check that the argument is not a byref for tailcalls
4418         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4419
4420         // For unsafe code, we might have parameters containing pointer to the stack location.
4421         // Disallow the tailcall for this kind.
4422         CORINFO_CLASS_HANDLE classHandle;
4423         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4424         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4425
4426         args = info.compCompHnd->getArgNext(args);
4427     }
4428
4429     // update popCount
4430     popCount += sig.numArgs;
4431
4432     // check for 'this' which is on non-static methods, not called via NEWOBJ
4433     if (!(mflags & CORINFO_FLG_STATIC))
4434     {
4435         // Always update the popCount.
4436         // This is crucial for the stack calculation to be correct.
4437         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4438         popCount++;
4439
4440         if (opcode == CEE_CALLI)
4441         {
4442             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4443             // on the stack.
4444             if (tiThis.IsValueClass())
4445             {
4446                 tiThis.MakeByRef();
4447             }
4448             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4449         }
4450         else
4451         {
4452             // Check type compatibility of the this argument
4453             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4454             if (tiDeclaredThis.IsValueClass())
4455             {
4456                 tiDeclaredThis.MakeByRef();
4457             }
4458
4459             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4460         }
4461     }
4462
4463     // Tail calls on constrained calls should be illegal too:
4464     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4465     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4466
4467     // Get the exact view of the signature for an array method
4468     if (sig.retType != CORINFO_TYPE_VOID)
4469     {
4470         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4471         {
4472             assert(opcode != CEE_CALLI);
4473             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4474         }
4475     }
4476
4477     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4478     typeInfo tiCallerRetType =
4479         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4480
4481     // void return type gets morphed into the error type, so we have to treat them specially here
4482     // the void return type gets morphed into the error type, so we have to treat this case specially here
4483     {
4484         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4485                                   speculative);
4486     }
4487     else
4488     {
4489         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4490                                                    NormaliseForStack(tiCallerRetType), true),
4491                                   "tailcall return mismatch", speculative);
4492     }
4493
4494     // for tailcall, stack must be empty
4495     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4496
4497     return true; // Yes, tailcall is legal
4498 }
4499
4500 /*****************************************************************************
4501  *
4502  *  Checks the IL verification rules for the call
4503  */
4504
4505 void Compiler::verVerifyCall(OPCODE                  opcode,
4506                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4507                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4508                              bool                    tailCall,
4509                              bool                    readonlyCall,
4510                              const BYTE*             delegateCreateStart,
4511                              const BYTE*             codeAddr,
4512                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4513 {
4514     DWORD             mflags;
4515     CORINFO_SIG_INFO* sig      = nullptr;
4516     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4517                                     // this counter is used to keep track of how many items have been
4518                                     // virtually popped
4519
4520     // for calli, VerifyOrReturn that this is not a virtual method
4521     if (opcode == CEE_CALLI)
4522     {
4523         Verify(false, "Calli not verifiable");
4524         return;
4525     }
4526
4527     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4528     mflags = callInfo->verMethodFlags;
4529
4530     sig = &callInfo->verSig;
4531
4532     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4533     {
4534         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4535     }
4536
4537     // opcode specific check
4538     unsigned methodClassFlgs = callInfo->classFlags;
4539     switch (opcode)
4540     {
4541         case CEE_CALLVIRT:
4542             // cannot do callvirt on valuetypes
4543             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4544             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4545             break;
4546
4547         case CEE_NEWOBJ:
4548         {
4549             assert(!tailCall); // Importer should not allow this
4550             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4551                            "newobj must be on instance");
4552
4553             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4554             {
4555                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4556                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4557                 typeInfo tiDeclaredFtn =
4558                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4559                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4560
4561                 assert(popCount == 0);
4562                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4563                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4564
4565                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4566                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4567                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4568                                "delegate object type mismatch");
4569
4570                 CORINFO_CLASS_HANDLE objTypeHandle =
4571                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4572
4573                 // the method signature must be compatible with the delegate's invoke method
4574
4575                 // check that for virtual functions, the type of the object used to get the
4576                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4577                 // since this is a bit of work to determine in general, we pattern match stylized
4578                 // code sequences
4579
4580                 // the delegate creation code check, which used to be done later, is now done here
4581                 // so we can read delegateMethodRef directly from the preceding
4582                 // LDFTN or CEE_LDVIRTFN instruction sequence;
4583                 // we then use it in our call to isCompatibleDelegate().
4584
4585                 mdMemberRef delegateMethodRef = mdMemberRefNil;
4586                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4587                                "must create delegates with certain IL");
4588
4589                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4590                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4591                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
4592                 delegateResolvedToken.token        = delegateMethodRef;
4593                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
4594                 info.compCompHnd->resolveToken(&delegateResolvedToken);
4595
4596                 CORINFO_CALL_INFO delegateCallInfo;
4597                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4598                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4599
4600                 BOOL isOpenDelegate = FALSE;
4601                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4602                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
4603                                                                       &isOpenDelegate),
4604                                "function incompatible with delegate");
4605
4606                 // check the constraints on the target method
4607                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4608                                "delegate target has unsatisfied class constraints");
4609                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4610                                                                             tiActualFtn.GetMethod()),
4611                                "delegate target has unsatisfied method constraints");
4612
4613                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4614                 // for additional verification rules for delegates
4615                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
4616                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4617                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4618                 {
4619
4620                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4621 #ifdef DEBUG
4622                         && StrictCheckForNonVirtualCallToVirtualMethod()
4623 #endif
4624                             )
4625                     {
4626                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4627                         {
4628                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4629                                                verIsBoxedValueType(tiActualObj),
4630                                            "The 'this' parameter to the call must be either the calling method's "
4631                                            "'this' parameter or "
4632                                            "a boxed value type.");
4633                         }
4634                     }
4635                 }
4636
4637                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4638                 {
4639                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4640
4641                     Verify(targetIsStatic || !isOpenDelegate,
4642                            "Unverifiable creation of an open instance delegate for a protected member.");
4643
4644                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4645                                                                 ? info.compClassHnd
4646                                                                 : tiActualObj.GetClassHandleForObjRef();
4647
4648                     // In the case of protected methods, it is a requirement that the 'this'
4649                     // pointer be a subclass of the current context.  Perform this check.
4650                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4651                            "Accessing protected method through wrong type.");
4652                 }
4653                 goto DONE_ARGS;
4654             }
4655         }
4656         // fall thru to default checks
4657         default:
4658             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4659     }
4660     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4661                    "can only newobj a delegate constructor");
4662
4663     // check compatibility of the arguments
4664     unsigned int argCount;
4665     argCount = sig->numArgs;
4666     CORINFO_ARG_LIST_HANDLE args;
4667     args = sig->args;
4668     while (argCount--)
4669     {
4670         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4671
4672         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4673         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4674
4675         args = info.compCompHnd->getArgNext(args);
4676     }
4677
4678 DONE_ARGS:
4679
4680     // update popCount
4681     popCount += sig->numArgs;
4682
4683     // check for 'this' which is on non-static methods, not called via NEWOBJ
4684     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4685     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4686     {
4687         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4688         popCount++;
4689
4690         // If it is null, we assume we can access it (since it will AV shortly)
4691         // If it is anything but a reference class, there is no hierarchy, so
4692         // again, we don't need the precise instance class to compute 'protected' access
4693         if (tiThis.IsType(TI_REF))
4694         {
4695             instanceClassHnd = tiThis.GetClassHandleForObjRef();
4696         }
4697
4698         // Check type compatibility of the this argument
4699         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4700         if (tiDeclaredThis.IsValueClass())
4701         {
4702             tiDeclaredThis.MakeByRef();
4703         }
4704
4705         // If this is a call to the base class .ctor, set thisPtr Init for
4706         // this block.
4707         if (mflags & CORINFO_FLG_CONSTRUCTOR)
4708         {
4709             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4710                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4711             {
4712                 assert(verCurrentState.thisInitialized !=
4713                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
4714                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4715                                "Call to base class constructor when 'this' is possibly initialized");
4716                 // Otherwise, 'this' is now initialized.
4717                 verCurrentState.thisInitialized = TIS_Init;
4718                 tiThis.SetInitialisedObjRef();
4719             }
4720             else
4721             {
4722                 // We allow direct calls to value type constructors
4723                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4724                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4725                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4726                                "Bad call to a constructor");
4727             }
4728         }
4729
4730         if (pConstrainedResolvedToken != nullptr)
4731         {
4732             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4733
4734             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4735
4736             // We just dereference this and test for equality
4737             tiThis.DereferenceByRef();
4738             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4739                            "this type mismatch with constrained type operand");
4740
4741             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4742             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4743         }
4744
4745         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4746         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4747         {
4748             tiDeclaredThis.SetIsReadonlyByRef();
4749         }
4750
4751         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4752
4753         if (tiThis.IsByRef())
4754         {
4755             // Find the actual type where the method exists (as opposed to what is declared
4756             // in the metadata). This is to prevent passing a byref as the "this" argument
4757             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4758
4759             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4760             VerifyOrReturn(eeIsValueClass(actualClassHnd),
4761                            "Call to base type of valuetype (which is never a valuetype)");
4762         }
4763
4764         // Rules for non-virtual call to a non-final virtual method:
4765
4766         // Define:
4767         // The "this" pointer is considered to be "possibly written" if
4768         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
4769         //   (or)
4770         //   2. It has been stored to (STARG.0) anywhere in the method.
4771
4772         // A non-virtual call to a non-final virtual method is only allowed if
4773         //   1. The this pointer passed to the callee is an instance of a boxed value type.
4774         //   (or)
4775         //   2. The this pointer passed to the callee is the current method's this pointer.
4776         //      (and) The current method's this pointer is not "possibly written".
4777
4778         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4779         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual).
4780         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4781         // harder and more error prone.
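             // For illustration only (hypothetical C# and IL, not anything generated here):
             // a 'base.Foo()' call compiles to roughly
             //     ldarg.0
             //     call instance void Base::Foo()
             // i.e. a CEE_CALL to a virtual method; it is accepted only because the 'this'
             // being passed is the caller's own, never-written 'this' parameter (or a boxed
             // value type).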
4782
4783         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4784 #ifdef DEBUG
4785             && StrictCheckForNonVirtualCallToVirtualMethod()
4786 #endif
4787                 )
4788         {
4789             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4790             {
4791                 VerifyOrReturn(
4792                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4793                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4794                     "a boxed value type.");
4795             }
4796         }
4797     }
4798
4799     // check any constraints on the callee's class and type parameters
4800     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4801                    "method has unsatisfied class constraints");
4802     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4803                    "method has unsatisfied method constraints");
4804
4805     if (mflags & CORINFO_FLG_PROTECTED)
4806     {
4807         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4808                        "Can't access protected method");
4809     }
4810
4811     // Get the exact view of the signature for an array method
4812     if (sig->retType != CORINFO_TYPE_VOID)
4813     {
4814         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4815     }
4816
4817     // "readonly." prefixed calls only allowed for the Address operation on arrays.
4818     // The methods supported by array types are under the control of the EE
4819     // so we can trust that only the Address operation returns a byref.
4820     if (readonlyCall)
4821     {
4822         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4823         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4824                        "unexpected use of readonly prefix");
4825     }
4826
4827     // Verify the tailcall
4828     if (tailCall)
4829     {
4830         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4831     }
4832 }
4833
4834 /*****************************************************************************
4835  *  Checks that a delegate creation is done using the following pattern:
4836  *     dup
4837  *     ldvirtftn targetMemberRef
4838  *  OR
4839  *     ldftn targetMemberRef
4840  *
4841  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4842  *  not in this basic block)
4843  *
4844  *  targetMemberRef is read from the code sequence.
4845  *  targetMemberRef is validated iff verificationNeeded.
4846  */
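/*  Illustrative byte layout (a sketch based on the standard ECMA-335 encodings, shown only
 *  to explain the fixed offsets read below):
 *      ldftn <tok>              ->  FE 06 <4-byte token>      token read at delegateCreateStart[2]
 *      dup ; ldvirtftn <tok>    ->  25 FE 07 <4-byte token>   token read at delegateCreateStart[3]
 */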
4847
4848 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
4849                                         const BYTE*  codeAddr,
4850                                         mdMemberRef& targetMemberRef)
4851 {
4852     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4853     {
4854         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4855         return TRUE;
4856     }
4857     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4858     {
4859         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4860         return TRUE;
4861     }
4862
4863     return FALSE;
4864 }
4865
4866 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4867 {
4868     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4869     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
4870     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4871     if (!tiCompatibleWith(value, normPtrVal, true))
4872     {
4873         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4874         compUnsafeCastUsed = true;
4875     }
4876     return ptrVal;
4877 }
4878
4879 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4880 {
4881     assert(!instrType.IsStruct());
4882
4883     typeInfo ptrVal;
4884     if (ptr.IsByRef())
4885     {
4886         ptrVal = DereferenceByRef(ptr);
4887         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4888         {
4889             Verify(false, "bad pointer");
4890             compUnsafeCastUsed = true;
4891         }
4892         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4893         {
4894             Verify(false, "pointer not consistent with instr");
4895             compUnsafeCastUsed = true;
4896         }
4897     }
4898     else
4899     {
4900         Verify(false, "pointer not byref");
4901         compUnsafeCastUsed = true;
4902     }
4903
4904     return ptrVal;
4905 }
4906
4907 // Verify that the field is used properly.  'tiThis' is NULL for statics,
4908 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4909 // ld*flda or a st*fld.
4910 // 'enclosingClass' is given if we are accessing a field in some specific type.
4911
4912 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
4913                               const CORINFO_FIELD_INFO& fieldInfo,
4914                               const typeInfo*           tiThis,
4915                               BOOL                      mutator,
4916                               BOOL                      allowPlainStructAsThis)
4917 {
4918     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4919     unsigned             fieldFlags     = fieldInfo.fieldFlags;
4920     CORINFO_CLASS_HANDLE instanceClass =
4921         info.compClassHnd; // for statics, we imagine the instance is the current class.
4922
4923     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4924     if (mutator)
4925     {
4926         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4927         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4928         {
4929             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4930                        info.compIsStatic == isStaticField,
4931                    "bad use of initonly field (set or address taken)");
4932         }
4933     }
4934
4935     if (tiThis == nullptr)
4936     {
4937         Verify(isStaticField, "used static opcode with non-static field");
4938     }
4939     else
4940     {
4941         typeInfo tThis = *tiThis;
4942
4943         if (allowPlainStructAsThis && tThis.IsValueClass())
4944         {
4945             tThis.MakeByRef();
4946         }
4947
4948         // If it is null, we assume we can access it (since it will AV shortly)
4949         // If it is anything but a reference class, there is no hierarchy, so
4950         // again, we don't need the precise instance class to compute 'protected' access
4951         if (tiThis->IsType(TI_REF))
4952         {
4953             instanceClass = tiThis->GetClassHandleForObjRef();
4954         }
4955
4956         // Note that even if the field is static, we require that the this pointer
4957         // satisfy the same constraints as a non-static field  This happens to
4958         // satisfy the same constraints as a non-static field.  This happens to
4959         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4960         if (tiDeclaredThis.IsValueClass())
4961         {
4962             tiDeclaredThis.MakeByRef();
4963
4964             // we allow read-only tThis, on any field access (even stores!), because if the
4965             // class implementor wants to prohibit stores he should make the field private.
4966             // we do this by setting the read-only bit on the type we compare tThis to.
4967             tiDeclaredThis.SetIsReadonlyByRef();
4968         }
4969         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4970         {
4971             // Any field access is legal on "uninitialized" this pointers.
4972             // The easiest way to implement this is to simply set the
4973             // initialized bit for the duration of the type check on the
4974             // field access only.  It does not change the state of the "this"
4975             // for the function as a whole. Note that the "tThis" is a copy
4976             // of the original "this" type (*tiThis) passed in.
4977             tThis.SetInitialisedObjRef();
4978         }
4979
4980         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4981     }
4982
4983     // Presently the JIT does not check that we don't store or take the address of init-only fields
4984     // since we cannot guarantee their immutability and it is not a security issue.
4985
4986     // check any constraints on the fields's class --- accessing the field might cause a class constructor to run.
4987     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4988                    "field has unsatisfied class constraints");
4989     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4990     {
4991         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4992                "Accessing protected method through wrong type.");
4993     }
4994 }
4995
4996 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4997 {
4998     if (tiOp1.IsNumberType())
4999     {
5000 #ifdef _TARGET_64BIT_
5001         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5002 #else  // !_TARGET_64BIT_
5003         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5004         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5005         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5006         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5007 #endif // !_TARGET_64BIT_
5008     }
5009     else if (tiOp1.IsObjRef())
5010     {
5011         switch (opcode)
5012         {
5013             case CEE_BEQ_S:
5014             case CEE_BEQ:
5015             case CEE_BNE_UN_S:
5016             case CEE_BNE_UN:
5017             case CEE_CEQ:
5018             case CEE_CGT_UN:
5019                 break;
5020             default:
5021                 Verify(FALSE, "Cond not allowed on object types");
5022         }
5023         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5024     }
5025     else if (tiOp1.IsByRef())
5026     {
5027         Verify(tiOp2.IsByRef(), "Cond type mismatch");
5028     }
5029     else
5030     {
5031         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5032     }
5033 }
5034
5035 void Compiler::verVerifyThisPtrInitialised()
5036 {
5037     if (verTrackObjCtorInitState)
5038     {
5039         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5040     }
5041 }
5042
5043 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5044 {
5045     // Either target == context, in this case calling an alternate .ctor
5046     // Or target is the immediate parent of context
5047
5048     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
5049 }
5050
5051 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr              thisPtr,
5052                                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
5053                                         CORINFO_CALL_INFO*      pCallInfo)
5054 {
5055     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5056     {
5057         NO_WAY("Virtual call to a function added via EnC is not supported");
5058     }
5059
5060     // CoreRT generic virtual method
5061     if (((pCallInfo->sig.callConv & CORINFO_CALLCONV_GENERIC) != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5062     {
5063         GenTreePtr runtimeMethodHandle = nullptr;
5064         if (pCallInfo->exactContextNeedsRuntimeLookup)
5065         {
5066             runtimeMethodHandle =
5067                 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5068         }
5069         else
5070         {
5071             runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5072         }
5073         return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, GTF_EXCEPT,
5074                                    gtNewArgList(thisPtr, runtimeMethodHandle));
5075     }
5076
5077 #ifdef FEATURE_READYTORUN_COMPILER
5078     if (opts.IsReadyToRun())
5079     {
5080         if (!pCallInfo->exactContextNeedsRuntimeLookup)
5081         {
5082             GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
5083                                                     gtNewArgList(thisPtr));
5084
5085             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5086
5087             return call;
5088         }
5089
5090         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5091         if (IsTargetAbi(CORINFO_CORERT_ABI))
5092         {
5093             GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5094
5095             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5096                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5097         }
5098     }
5099 #endif
5100
5101     // Get the exact descriptor for the static callsite
5102     GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5103     if (exactTypeDesc == nullptr)
5104     { // compDonotInline()
5105         return nullptr;
5106     }
5107
5108     GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5109     if (exactMethodDesc == nullptr)
5110     { // compDonotInline()
5111         return nullptr;
5112     }
5113
5114     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5115
5116     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5117
5118     helpArgs = gtNewListNode(thisPtr, helpArgs);
5119
5120     // Call helper function.  This gets the target address of the final destination callsite.
5121
5122     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
5123 }
5124
5125 /*****************************************************************************
5126  *
5127  *  Build and import a box node
5128  */
5129
5130 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5131 {
5132     // Get the tree for the type handle for the boxed object.  In the case
5133     // of shared generic code or ngen'd code this might be an embedded
5134     // computation.
5135     // Note we can only do this if the class constructor has been called;
5136     // we can always do it on primitive types.
5137
5138     GenTreePtr op1 = nullptr;
5139     GenTreePtr op2 = nullptr;
5140     var_types  lclTyp;
5141
5142     impSpillSpecialSideEff();
5143
5144     // Now get the expression to box from the stack.
5145     StackEntry           se        = impPopStack();
5146     CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
5147     GenTreePtr           exprToBox = se.val;
5148
5149     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5150     if (boxHelper == CORINFO_HELP_BOX)
5151     {
5152         // we are doing 'normal' boxing.  This means that we can inline the box operation
5153         // Box(expr) gets morphed into
5154         // temp = new(clsHnd)
5155         // cpobj(temp+4, expr, clsHnd)
5156         // push temp
5157         // The code paths differ slightly below for structs and primitives because
5158         // "cpobj" differs in these cases.  In one case you get
5159         //    impAssignStructPtr(temp+4, expr, clsHnd)
5160         // and the other you get
5161         //    *(temp+4) = expr
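             // (In the sketch above "+4" stands for "+ sizeof(void*)", i.e. skipping the boxed
             // object's method table pointer; the code below uses sizeof(void*), so the offset
             // is 8 on 64-bit targets.)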
5162
5163         if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5164         {
5165             impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5166         }
5167
5168         // The box temp needs to stay in use until this box expression is appended to
5169         // some other node.  We approximate this by keeping it alive until
5170         // the opcode stack becomes empty
5171         impBoxTempInUse = true;
5172
5173 #ifdef FEATURE_READYTORUN_COMPILER
5174         bool usingReadyToRunHelper = false;
5175
5176         if (opts.IsReadyToRun())
5177         {
5178             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5179             usingReadyToRunHelper = (op1 != nullptr);
5180         }
5181
5182         if (!usingReadyToRunHelper)
5183 #endif
5184         {
5185             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5186             // and the newfast call with a single call to a dynamic R2R cell that will:
5187             //      1) Load the context
5188             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5189             //      3) Allocate and return the new object for boxing
5190             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5191
5192             // Ensure that the value class is restored
5193             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5194             if (op2 == nullptr)
5195             { // compDonotInline()
5196                 return;
5197             }
5198
5199             op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5200                                       gtNewArgList(op2));
5201         }
5202
5203         /* Remember that this basic block contains 'new' of an object */
5204         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5205
5206         GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5207
5208         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5209
5210         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5211         op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5212         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5213
5214         if (varTypeIsStruct(exprToBox))
5215         {
5216             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5217             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5218         }
5219         else
5220         {
5221             lclTyp = exprToBox->TypeGet();
5222             if (lclTyp == TYP_BYREF)
5223             {
5224                 lclTyp = TYP_I_IMPL;
5225             }
5226             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5227             if (impIsPrimitive(jitType))
5228             {
5229                 lclTyp = JITtype2varType(jitType);
5230             }
5231             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5232                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5233             var_types srcTyp = exprToBox->TypeGet();
5234             var_types dstTyp = lclTyp;
5235
5236             if (srcTyp != dstTyp)
5237             {
5238                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5239                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5240                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5241             }
5242             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5243         }
5244
5245         op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5246         op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5247
5248         // Record that this is a "box" node.
5249         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5250
5251         // If it is a value class, mark the "box" node.  We can use this information
5252         // to optimise several cases:
5253         //    "box(x) == null" --> false
5254         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5255         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5256
5257         op1->gtFlags |= GTF_BOX_VALUE;
5258         assert(op1->IsBoxedValue());
5259         assert(asg->gtOper == GT_ASG);
5260     }
5261     else
5262     {
5263         // Don't optimize, just call the helper and be done with it
5264
5265         // Ensure that the value class is restored
5266         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5267         if (op2 == nullptr)
5268         { // compDonotInline()
5269             return;
5270         }
5271
5272         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5273         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5274     }
5275
5276     /* Push the result back on the stack, */
5277     /* even if clsHnd is a value class we want the TI_REF */
5278     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5279     impPushOnStack(op1, tiRetVal);
5280 }
5281
5282 //------------------------------------------------------------------------
5283 // impImportNewObjArray: Build and import `new` of a multi-dimensional array
5284 //
5285 // Arguments:
5286 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5287 //                     by a call to CEEInfo::resolveToken().
5288 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5289 //                by a call to CEEInfo::getCallInfo().
5290 //
5291 // Assumptions:
5292 //    The multi-dimensional array constructor arguments (array dimensions) are
5293 //    pushed on the IL stack on entry to this method.
5294 //
5295 // Notes:
5296 //    Multi-dimensional array constructors are imported as calls to a JIT
5297 //    helper, not as regular calls.
5298
5299 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5300 {
5301     GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5302     if (classHandle == nullptr)
5303     { // compDonotInline()
5304         return;
5305     }
5306
5307     assert(pCallInfo->sig.numArgs);
5308
5309     GenTreePtr      node;
5310     GenTreeArgList* args;
5311
5312     //
5313     // There are two different JIT helpers that can be used to allocate
5314     // multi-dimensional arrays:
5315     //
5316     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5317     //      This variant is deprecated. It should be eventually removed.
5318     //
5319     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5320     //      pointer to block of int32s. This variant is more portable.
5321     //
5322     // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5323     // unconditionally would require ReadyToRun version bump.
5324     //
5325     CLANG_FORMAT_COMMENT_ANCHOR;
5326
5327     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5328     {
5329         LclVarDsc* newObjArrayArgsVar;
5330
5331         // Reuse the temp used to pass the array dimensions to avoid bloating
5332         // the stack frame in case there are multiple calls to multi-dim array
5333         // constructors within a single method.
5334         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5335         {
5336             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5337             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5338             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5339         }
5340
5341         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5342         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5343         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5344             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5345
5346         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5347         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5348         // to one allocation at a time.
5349         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5350
5351         //
5352         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5353         //  - Array class handle
5354         //  - Number of dimension arguments
5355         //  - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5356         //
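             // For illustration (hypothetical C# source, assuming this non-varargs path is
             // taken): 'new int[2,3]' leaves 2 and 3 on the IL stack; the loop below copies
             // them into the lvaNewObjArrayArgs block as { 2, 3 }, and the helper is invoked
             // roughly as CORINFO_HELP_NEW_MDARR_NONVARARG(classHandle, 2, &block).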
5357
5358         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5359         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5360
5361         // Pop dimension arguments from the stack one at a time and store them
5362         // into the lvaNewObjArrayArgs temp.
5363         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5364         {
5365             GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5366
5367             GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5368             dest            = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5369             dest            = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5370                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5371             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5372
5373             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5374         }
5375
5376         args = gtNewArgList(node);
5377
5378         // pass number of arguments to the helper
5379         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5380
5381         args = gtNewListNode(classHandle, args);
5382
5383         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5384     }
5385     else
5386     {
5387         //
5388         // The varargs helper needs the type and method handles as last
5389         // and  last-1 param (this is a cdecl call, so args will be
5390         // pushed in reverse order on the CPU stack)
5391         //
5392
5393         args = gtNewArgList(classHandle);
5394
5395         // pass number of arguments to the helper
5396         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5397
5398         unsigned argFlags = 0;
5399         args              = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5400
5401         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5402
5403         // varargs, so we pop the arguments
5404         node->gtFlags |= GTF_CALL_POP_ARGS;
5405
5406 #ifdef DEBUG
5407         // At the present time we don't track Caller pop arguments
5408         // that have GC references in them
5409         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5410         {
5411             assert(temp->Current()->gtType != TYP_REF);
5412         }
5413 #endif
5414     }
5415
5416     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5417     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5418
5419     // Remember that this basic block contains 'new' of a md array
5420     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5421
5422     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5423 }
5424
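//------------------------------------------------------------------------
// impTransformThis: apply the EE-requested transform to the 'this' pointer
//    of a constrained call.
//
// Arguments:
//    thisPtr                   - tree for the 'this' pointer (byref or native int)
//    pConstrainedResolvedToken - resolved token for the constraint type
//    transform                 - transform requested by the EE
//
// Return Value:
//    The transformed 'this' tree: a dereference for CORINFO_DEREF_THIS, a
//    boxed copy for CORINFO_BOX_THIS, or the original tree for
//    CORINFO_NO_THIS_TRANSFORM. Returns nullptr if boxing aborts an inline.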
5425 GenTreePtr Compiler::impTransformThis(GenTreePtr              thisPtr,
5426                                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5427                                       CORINFO_THIS_TRANSFORM  transform)
5428 {
5429     switch (transform)
5430     {
5431         case CORINFO_DEREF_THIS:
5432         {
5433             GenTreePtr obj = thisPtr;
5434
5435             // This does a LDIND on the obj, which should be a byref pointing to a ref
5436             impBashVarAddrsToI(obj);
5437             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5438             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5439
5440             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5441             // The ldind could point anywhere, for example a boxed class static int
5442             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5443
5444             return obj;
5445         }
5446
5447         case CORINFO_BOX_THIS:
5448         {
5449             // Constraint calls where there might be no
5450             // unboxed entry point require us to implement the call via helper.
5451             // These only occur when a possible target of the call
5452             // may have inherited an implementation of an interface
5453             // method from System.Object or System.ValueType.  The EE does not provide us with
5454             // "unboxed" versions of these methods.
5455
5456             GenTreePtr obj = thisPtr;
5457
5458             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5459             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5460             obj->gtFlags |= GTF_EXCEPT;
5461
5462             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5463             var_types   objType = JITtype2varType(jitTyp);
5464             if (impIsPrimitive(jitTyp))
5465             {
5466                 if (obj->OperIsBlk())
5467                 {
5468                     obj->ChangeOperUnchecked(GT_IND);
5469
5470                     // Obj could point anywhere, for example a boxed class static int
5471                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5472                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5473                 }
5474
5475                 obj->gtType = JITtype2varType(jitTyp);
5476                 assert(varTypeIsArithmetic(obj->gtType));
5477             }
5478
5479             // This pushes on the dereferenced byref
5480             // This is then used immediately to box.
5481             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5482
5483             // This pops off the byref-to-a-value-type remaining on the stack and
5484             // replaces it with a boxed object.
5485             // This is then used as the object to the virtual call immediately below.
5486             impImportAndPushBox(pConstrainedResolvedToken);
5487             if (compDonotInline())
5488             {
5489                 return nullptr;
5490             }
5491
5492             obj = impPopStack().val;
5493             return obj;
5494         }
5495         case CORINFO_NO_THIS_TRANSFORM:
5496         default:
5497             return thisPtr;
5498     }
5499 }
5500
5501 //------------------------------------------------------------------------
5502 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5503 //
5504 // Return Value:
5505 //    true if PInvoke inlining should be enabled in the current method, false otherwise
5506 //
5507 // Notes:
5508 //    Checks a number of ambient conditions where we could pinvoke but choose not to
5509
5510 bool Compiler::impCanPInvokeInline()
5511 {
5512     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5513            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5514         ;
5515 }
5516
5517 //------------------------------------------------------------------------
5518 // impCanPInvokeInlineCallSite: basic legality checks using information
5519 // from a call to see if the call qualifies as an inline pinvoke.
5520 //
5521 // Arguments:
5522 //    block      - block containing the call, or for inlinees, block
5523 //                 containing the call being inlined
5524 //
5525 // Return Value:
5526 //    true if this call can legally qualify as an inline pinvoke, false otherwise
5527 //
5528 // Notes:
5529 //    For runtimes that support exception handling interop there are
5530 //    restrictions on using inline pinvoke in handler regions.
5531 //
5532 //    * We have to disable pinvoke inlining inside of filters because
5533 //    in case the main execution (i.e. in the try block) is inside
5534 //    unmanaged code, we cannot reuse the inlined stub (we still need
5535 //    the original state until we are in the catch handler)
5536 //
5537 //    * We disable pinvoke inlining inside handlers since the GSCookie
5538 //    is in the inlined Frame (see
5539 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5540 //    this would not protect framelets/return-address of handlers.
5541 //
5542 //    These restrictions are currently also in place for CoreCLR but
5543 //    can be relaxed when coreclr/#8459 is addressed.
5544
5545 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5546 {
5547     if (block->hasHndIndex())
5548     {
5549         return false;
5550     }
5551
5552     // The remaining limitations do not apply to CoreRT
5553     if (IsTargetAbi(CORINFO_CORERT_ABI))
5554     {
5555         return true;
5556     }
5557
5558 #ifdef _TARGET_AMD64_
5559     // On x64, we disable pinvoke inlining inside of try regions.
5560     // Here is the comment from JIT64 explaining why:
5561     //
5562     //   [VSWhidbey: 611015] - because the jitted code links in the
5563     //   Frame (instead of the stub) we rely on the Frame not being
5564     //   'active' until inside the stub.  This normally happens by the
5565     //   stub setting the return address pointer in the Frame object
5566     //   inside the stub.  On a normal return, the return address
5567     //   pointer is zeroed out so the Frame can be safely re-used, but
5568     //   if an exception occurs, nobody zeros out the return address
5569     //   pointer.  Thus if we re-used the Frame object, it would go
5570     //   'active' as soon as we link it into the Frame chain.
5571     //
5572     //   Technically we only need to disable PInvoke inlining if we're
5573     //   in a handler or if we're in a try body with a catch or
5574     //   filter/except where other non-handler code in this method
5575     //   might run and try to re-use the dirty Frame object.
5576     //
5577     //   A desktop test case where this seems to matter is
5578     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5579     if (block->hasTryIndex())
5580     {
5581         return false;
5582     }
5583 #endif // _TARGET_AMD64_
5584
5585     return true;
5586 }
5587
5588 //------------------------------------------------------------------------
5589 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
5590 // whether it can be expressed as an inline pinvoke.
5591 //
5592 // Arguments:
5593 //    call       - tree for the call
5594 //    methHnd    - handle for the method being called (may be null)
5595 //    sig        - signature of the method being called
5596 //    mflags     - method flags for the method being called
5597 //    block      - block containing the call, or for inlinees, block
5598 //                 containing the call being inlined
5599 //
5600 // Notes:
5601 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5602 //
5603 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5604 //   call passes a combination of legality and profitability checks.
5605 //
5606 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5607
5608 void Compiler::impCheckForPInvokeCall(
5609     GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5610 {
5611     CorInfoUnmanagedCallConv unmanagedCallConv;
5612
5613     // If VM flagged it as Pinvoke, flag the call node accordingly
5614     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5615     {
5616         call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5617     }
5618
5619     if (methHnd)
5620     {
5621         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5622         {
5623             return;
5624         }
5625
5626         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5627     }
5628     else
5629     {
5630         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5631         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5632         {
5633             // Used by the IL Stubs.
5634             callConv = CORINFO_CALLCONV_C;
5635         }
5636         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5637         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5638         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5639         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5640
5641         assert(!call->gtCallCookie);
5642     }
5643
5644     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5645         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5646     {
5647         return;
5648     }
5649     optNativeCallCount++;
5650
5651     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5652     {
5653         // PInvoke CALLI in IL stubs must be inlined
5654     }
5655     else
5656     {
5657         // Check legality
5658         if (!impCanPInvokeInlineCallSite(block))
5659         {
5660             return;
5661         }
5662
5663         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
5664         // profitability checks
5665         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5666         {
5667             if (!impCanPInvokeInline())
5668             {
5669                 return;
5670             }
5671
5672             // Size-speed tradeoff: don't use inline pinvoke at rarely
5673             // executed call sites.  The non-inline version is more
5674             // compact.
5675             if (block->isRunRarely())
5676             {
5677                 return;
5678             }
5679         }
5680
5681         // The expensive check should be last
5682         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5683         {
5684             return;
5685         }
5686     }
5687
5688     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5689
5690     call->gtFlags |= GTF_CALL_UNMANAGED;
5691     info.compCallUnmanaged++;
5692
5693     // AMD64 convention is the same for native and managed
5694     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5695     {
5696         call->gtFlags |= GTF_CALL_POP_ARGS;
5697     }
5698
5699     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5700     {
5701         call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5702     }
5703 }
5704
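//------------------------------------------------------------------------
// impImportIndirectCall: create the call node for a CALLI (indirect call).
//
// Arguments:
//    sig      - signature of the call site
//    ilOffset - IL offset to associate with the call
//
// Return Value:
//    The new indirect call node. The function pointer is popped from the
//    stack, spilling it to a temp first unless it is already a simple local.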
5705 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5706 {
5707     var_types callRetTyp = JITtype2varType(sig->retType);
5708
5709     /* The function pointer is on top of the stack - It may be a
5710      * complex expression. As it is evaluated after the args,
5711      * it may cause registered args to be spilled. Simply spill it.
5712      */
5713
5714     // Ignore this trivial case.
5715     if (impStackTop().val->gtOper != GT_LCL_VAR)
5716     {
5717         impSpillStackEntry(verCurrentState.esStackDepth - 1,
5718                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5719     }
5720
5721     /* Get the function pointer */
5722
5723     GenTreePtr fptr = impPopStack().val;
5724
5725     // The function pointer is typically sized to match the target pointer size
5726     // However, stubgen IL optimization can change LDC.I8 to LDC.I4
5727     // See ILCodeStream::LowerOpcode
5728     assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
5729
5730 #ifdef DEBUG
5731     // This temporary must never be converted to a double in stress mode,
5732     // because that can introduce a call to the cast helper after the
5733     // arguments have already been evaluated.
5734
5735     if (fptr->OperGet() == GT_LCL_VAR)
5736     {
5737         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5738     }
5739 #endif
5740
5741     /* Create the call node */
5742
5743     GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5744
5745     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5746
5747     return call;
5748 }
5749
5750 /*****************************************************************************/
5751
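//------------------------------------------------------------------------
// impPopArgsForUnmanagedCall: pop the stack arguments for an inline pinvoke.
//
// Arguments:
//    call - the unmanaged call node
//    sig  - signature of the unmanaged target
//
// Notes:
//    Spills any out-of-order side effects from the evaluation stack, then
//    pops the argument list in the order required by the native calling
//    convention and attaches it to the call.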
5752 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5753 {
5754     assert(call->gtFlags & GTF_CALL_UNMANAGED);
5755
5756     /* Since we push the arguments in reverse order (i.e. right -> left)
5757      * spill any side effects from the stack
5758      *
5759      * OBS: If there is only one side effect we do not need to spill it
5760      *      thus we have to spill all side-effects except the last one
5761      */
5762
5763     unsigned lastLevelWithSideEffects = UINT_MAX;
5764
5765     unsigned argsToReverse = sig->numArgs;
5766
5767     // For "thiscall", the first argument goes in a register. Since its
5768     // order does not need to be changed, we do not need to spill it
5769
5770     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5771     {
5772         assert(argsToReverse);
5773         argsToReverse--;
5774     }
5775
5776 #ifndef _TARGET_X86_
5777     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5778     argsToReverse = 0;
5779 #endif
5780
5781     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5782     {
5783         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5784         {
5785             assert(lastLevelWithSideEffects == UINT_MAX);
5786
5787             impSpillStackEntry(level,
5788                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5789         }
5790         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5791         {
5792             if (lastLevelWithSideEffects != UINT_MAX)
5793             {
5794                 /* We had a previous side effect - must spill it */
5795                 impSpillStackEntry(lastLevelWithSideEffects,
5796                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5797
5798                 /* Record the level for the current side effect in case we will spill it */
5799                 lastLevelWithSideEffects = level;
5800             }
5801             else
5802             {
5803                 /* This is the first side effect encountered - record its level */
5804
5805                 lastLevelWithSideEffects = level;
5806             }
5807         }
5808     }
5809
5810     /* The argument list is now "clean" - no out-of-order side effects
5811      * Pop the argument list in reverse order */
5812
5813     unsigned   argFlags = 0;
5814     GenTreePtr args     = call->gtCall.gtCallArgs =
5815         impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5816
5817     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5818     {
5819         GenTreePtr thisPtr = args->Current();
5820         impBashVarAddrsToI(thisPtr);
5821         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5822     }
5823
5824     if (args)
5825     {
5826         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5827     }
5828 }
5829
5830 //------------------------------------------------------------------------
5831 // impInitClass: Build a node to initialize the class before accessing the
5832 //               field if necessary
5833 //
5834 // Arguments:
5835 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5836 //                     by a call to CEEInfo::resolveToken().
5837 //
5838 // Return Value: If needed, a pointer to the node that will perform the class
5839 //               initialization.  Otherwise, nullptr.
5840 //
5841
5842 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5843 {
5844     CorInfoInitClassResult initClassResult =
5845         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5846
5847     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5848     {
5849         return nullptr;
5850     }
5851     BOOL runtimeLookup;
5852
5853     GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5854
5855     if (node == nullptr)
5856     {
5857         assert(compDonotInline());
5858         return nullptr;
5859     }
5860
5861     if (runtimeLookup)
5862     {
5863         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5864     }
5865     else
5866     {
5867         // Call the shared non-gc static helper, as it's the fastest
5868         node = fgGetSharedCCtor(pResolvedToken->hClass);
5869     }
5870
5871     return node;
5872 }
5873
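//------------------------------------------------------------------------
// impImportStaticReadOnlyField: import a static read-only field as a constant.
//
// Arguments:
//    fldAddr - address of the field's value
//    lclTyp  - type of the field
//
// Return Value:
//    A constant node (integer, long or floating-point) holding the value
//    currently stored in the field.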
5874 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5875 {
5876     GenTreePtr op1 = nullptr;
5877
5878     switch (lclTyp)
5879     {
5880         int     ival;
5881         __int64 lval;
5882         double  dval;
5883
5884         case TYP_BOOL:
5885             ival = *((bool*)fldAddr);
5886             goto IVAL_COMMON;
5887
5888         case TYP_BYTE:
5889             ival = *((signed char*)fldAddr);
5890             goto IVAL_COMMON;
5891
5892         case TYP_UBYTE:
5893             ival = *((unsigned char*)fldAddr);
5894             goto IVAL_COMMON;
5895
5896         case TYP_SHORT:
5897             ival = *((short*)fldAddr);
5898             goto IVAL_COMMON;
5899
5900         case TYP_CHAR:
5901         case TYP_USHORT:
5902             ival = *((unsigned short*)fldAddr);
5903             goto IVAL_COMMON;
5904
5905         case TYP_UINT:
5906         case TYP_INT:
5907             ival = *((int*)fldAddr);
5908         IVAL_COMMON:
5909             op1 = gtNewIconNode(ival);
5910             break;
5911
5912         case TYP_LONG:
5913         case TYP_ULONG:
5914             lval = *((__int64*)fldAddr);
5915             op1  = gtNewLconNode(lval);
5916             break;
5917
5918         case TYP_FLOAT:
5919             dval = *((float*)fldAddr);
5920             op1  = gtNewDconNode(dval);
5921 #if !FEATURE_X87_DOUBLES
5922             // X87 stack doesn't differentiate between float/double
5923             // so R4 is treated as R8, but everybody else does
5924             op1->gtType = TYP_FLOAT;
5925 #endif // FEATURE_X87_DOUBLES
5926             break;
5927
5928         case TYP_DOUBLE:
5929             dval = *((double*)fldAddr);
5930             op1  = gtNewDconNode(dval);
5931             break;
5932
5933         default:
5934             assert(!"Unexpected lclTyp");
5935             break;
5936     }
5937
5938     return op1;
5939 }
5940
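//------------------------------------------------------------------------
// impImportStaticFieldAccess: import an access to a static field.
//
// Arguments:
//    pResolvedToken - resolved token for the field
//    access         - flags; CORINFO_ACCESS_ADDRESS requests the field's address
//    pFieldInfo     - EE-supplied information about the field
//    lclTyp         - type of the field
//
// Return Value:
//    Tree yielding the field's value, or its address when
//    CORINFO_ACCESS_ADDRESS is requested, built using the appropriate
//    statics helper or the field's known address.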
5941 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5942                                                 CORINFO_ACCESS_FLAGS    access,
5943                                                 CORINFO_FIELD_INFO*     pFieldInfo,
5944                                                 var_types               lclTyp)
5945 {
5946     GenTreePtr op1;
5947
5948     switch (pFieldInfo->fieldAccessor)
5949     {
5950         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5951         {
5952             assert(!compIsForInlining());
5953
5954             // We first call a special helper to get the statics base pointer
5955             op1 = impParentClassTokenToHandle(pResolvedToken);
5956
5957             // compIsForInlining() is false so we should never get NULL here
5958             assert(op1 != nullptr);
5959
5960             var_types type = TYP_BYREF;
5961
5962             switch (pFieldInfo->helper)
5963             {
5964                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5965                     type = TYP_I_IMPL;
5966                     break;
5967                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5968                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5969                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5970                     break;
5971                 default:
5972                     assert(!"unknown generic statics helper");
5973                     break;
5974             }
5975
5976             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5977
5978             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5979             op1              = gtNewOperNode(GT_ADD, type, op1,
5980                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5981         }
5982         break;
5983
5984         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5985         {
5986 #ifdef FEATURE_READYTORUN_COMPILER
5987             if (opts.IsReadyToRun())
5988             {
5989                 unsigned callFlags = 0;
5990
5991                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5992                 {
5993                     callFlags |= GTF_CALL_HOISTABLE;
5994                 }
5995
5996                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5997
5998                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5999             }
6000             else
6001 #endif
6002             {
6003                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6004             }
6005
6006             {
6007                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6008                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6009                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6010             }
6011             break;
6012         }
6013
6014         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6015         {
6016 #ifdef FEATURE_READYTORUN_COMPILER
6017             noway_assert(opts.IsReadyToRun());
6018             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6019             assert(kind.needsRuntimeLookup);
6020
6021             GenTreePtr      ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6022             GenTreeArgList* args    = gtNewArgList(ctxTree);
6023
6024             unsigned callFlags = 0;
6025
6026             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6027             {
6028                 callFlags |= GTF_CALL_HOISTABLE;
6029             }
6030             var_types type = TYP_BYREF;
6031             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
6032
6033             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6034             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6035             op1              = gtNewOperNode(GT_ADD, type, op1,
6036                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6037 #else
6038             unreached();
6039 #endif // FEATURE_READYTORUN_COMPILER
6040         }
6041         break;
6042
6043         default:
6044         {
6045             if (!(access & CORINFO_ACCESS_ADDRESS))
6046             {
6047                 // In future, it may be better to just create the right tree here instead of folding it later.
6048                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6049
6050                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6051                 {
6052                     op1->gtType = TYP_REF; // points at boxed object
6053                     FieldSeqNode* firstElemFldSeq =
6054                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6055                     op1 =
6056                         gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6057                                       new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
6058
6059                     if (varTypeIsStruct(lclTyp))
6060                     {
6061                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
6062                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
6063                     }
6064                     else
6065                     {
6066                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6067                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6068                     }
6069                 }
6070
6071                 return op1;
6072             }
6073             else
6074             {
6075                 void** pFldAddr = nullptr;
6076                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6077
6078                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6079
6080                 /* Create the data member node */
6081                 if (pFldAddr == nullptr)
6082                 {
6083                     op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6084                 }
6085                 else
6086                 {
6087                     op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6088
6089                     // There are two cases here, either the static is RVA based,
6090                     // in which case the type of the FIELD node is not a GC type
6091                     // and the handle to the RVA is a TYP_I_IMPL.  Or the FIELD node is
6092                     // a GC type and the handle to it is a TYP_BYREF in the GC heap
6093                     // because handles to statics now go into the large object heap
6094
6095                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6096                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6097                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6098                 }
6099             }
6100             break;
6101         }
6102     }
6103
6104     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6105     {
6106         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6107
6108         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6109
6110         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6111                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6112     }
6113
6114     if (!(access & CORINFO_ACCESS_ADDRESS))
6115     {
6116         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6117         op1->gtFlags |= GTF_GLOB_REF;
6118     }
6119
6120     return op1;
6121 }
6122
6123 // In general, try to call this before most of the verification work.  Most people expect the access
6124 // exceptions before the verification exceptions.  If you do this afterwards, that usually doesn't happen.  It turns
6125 // out that if you can't access something, we also think that you're unverifiable for other reasons.
6126 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6127 {
6128     if (result != CORINFO_ACCESS_ALLOWED)
6129     {
6130         impHandleAccessAllowedInternal(result, helperCall);
6131     }
6132 }
6133
6134 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6135 {
6136     switch (result)
6137     {
6138         case CORINFO_ACCESS_ALLOWED:
6139             break;
6140         case CORINFO_ACCESS_ILLEGAL:
6141             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6142             // method is verifiable.  Otherwise, delay the exception to runtime.
6143             if (compIsForImportOnly())
6144             {
6145                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6146             }
6147             else
6148             {
6149                 impInsertHelperCall(helperCall);
6150             }
6151             break;
6152         case CORINFO_ACCESS_RUNTIME_CHECK:
6153             impInsertHelperCall(helperCall);
6154             break;
6155     }
6156 }
6157
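//------------------------------------------------------------------------
// impInsertHelperCall: append the helper call described by the EE.
//
// Arguments:
//    helperInfo - descriptor giving the helper number and its arguments
//
// Notes:
//    Builds the argument list from the descriptor and appends the helper
//    call to the current statement list.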
6158 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6159 {
6160     // Construct the argument list
6161     GenTreeArgList* args = nullptr;
6162     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6163     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6164     {
6165         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6166         GenTreePtr                currentArg = nullptr;
6167         switch (helperArg.argType)
6168         {
6169             case CORINFO_HELPER_ARG_TYPE_Field:
6170                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6171                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6172                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6173                 break;
6174             case CORINFO_HELPER_ARG_TYPE_Method:
6175                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6176                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6177                 break;
6178             case CORINFO_HELPER_ARG_TYPE_Class:
6179                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6180                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6181                 break;
6182             case CORINFO_HELPER_ARG_TYPE_Module:
6183                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6184                 break;
6185             case CORINFO_HELPER_ARG_TYPE_Const:
6186                 currentArg = gtNewIconNode(helperArg.constant);
6187                 break;
6188             default:
6189                 NO_WAY("Illegal helper arg type");
6190         }
6191         args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6192     }
6193
6194     /* TODO-Review:
6195      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6196      * Also, consider sticking this in the first basic block.
6197      */
6198     GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6199     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6200 }
6201
6202 // Checks whether the return types of caller and callee are compatible
6203 // so that the callee can be tail called. Note that here we don't check
6204 // compatibility in the IL Verifier sense, but only that the return type
6205 // sizes are equal and the values are returned in the same return register.
6206 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6207                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6208                                             var_types            calleeRetType,
6209                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6210 {
6211     // Note that we can not relax this condition with genActualType() as the
6212     // calling convention dictates that the caller of a function with a small
6213     // typed return value is responsible for normalizing the return val.
6214     if (callerRetType == calleeRetType)
6215     {
6216         return true;
6217     }
6218
6219 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6220     // Jit64 compat:
6221     if (callerRetType == TYP_VOID)
6222     {
6223         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6224         //     tail.call
6225         //     pop
6226         //     ret
6227         //
6228         // Note that the above IL pattern is not valid as per IL verification rules.
6229         // Therefore, only full trust code can take advantage of this pattern.
6230         return true;
6231     }
6232
6233     // These checks return true if the return value type sizes are the same and
6234     // get returned in the same return register i.e. caller doesn't need to normalize
6235     // return value. Some of the tail calls permitted by below checks would have
6236     // been rejected by IL Verifier before we reached here.  Therefore, only full
6237     // trust code can make those tail calls.
6238     unsigned callerRetTypeSize = 0;
6239     unsigned calleeRetTypeSize = 0;
6240     bool     isCallerRetTypMBEnreg =
6241         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6242     bool isCalleeRetTypMBEnreg =
6243         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6244
6245     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6246     {
6247         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6248     }
6249 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6250
6251     return false;
6252 }
6253
6254 // For prefixFlags
6255 enum
6256 {
6257     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6258     PREFIX_TAILCALL_IMPLICIT =
6259         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6260     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6261     PREFIX_VOLATILE    = 0x00000100,
6262     PREFIX_UNALIGNED   = 0x00001000,
6263     PREFIX_CONSTRAINED = 0x00010000,
6264     PREFIX_READONLY    = 0x00100000
6265 };
6266
6267 /********************************************************************************
6268  *
6269  * Returns true if the current opcode and the opcodes following it correspond
6270  * to a supported tail call IL pattern.
6271  *
6272  */
6273 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6274                                       OPCODE      curOpcode,
6275                                       const BYTE* codeAddrOfNextOpcode,
6276                                       const BYTE* codeEnd,
6277                                       bool        isRecursive,
6278                                       bool*       isCallPopAndRet /* = nullptr */)
6279 {
6280     // Bail out if the current opcode is not a call.
6281     if (!impOpcodeIsCallOpcode(curOpcode))
6282     {
6283         return false;
6284     }
6285
6286 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6287     // If shared ret tail opt is not enabled, we will enable
6288     // it for recursive methods.
6289     if (isRecursive)
6290 #endif
6291     {
6292         // we can actually handle the case where the ret is in a fallthrough block, as long as that is the only part of the
6293         // sequence. Make sure we don't go past the end of the IL however.
6294         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6295     }
6296
6297     // Bail out if there is no next opcode after call
6298     if (codeAddrOfNextOpcode >= codeEnd)
6299     {
6300         return false;
6301     }
6302
6303     // Scan the opcodes to look for the following IL patterns if either
6304     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6305     //  ii) if tail prefixed, IL verification is not needed for the method.
6306     //
6307     // Only in the above two cases we can allow the below tail call patterns
6308     // violating ECMA spec.
6309     //
6310     // Pattern1:
6311     //       call
6312     //       nop*
6313     //       ret
6314     //
6315     // Pattern2:
6316     //       call
6317     //       nop*
6318     //       pop
6319     //       nop*
6320     //       ret
6321     int    cntPop = 0;
6322     OPCODE nextOpcode;
6323
6324 #ifdef _TARGET_AMD64_
6325     do
6326     {
6327         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6328         codeAddrOfNextOpcode += sizeof(__int8);
6329     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6330              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6331              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6332                                                                                          // one pop seen so far.
6333 #else
6334     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6335 #endif
6336
6337     if (isCallPopAndRet)
6338     {
6339         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6340         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6341     }
6342
6343 #ifdef _TARGET_AMD64_
6344     // Jit64 Compat:
6345     // Tail call IL pattern could be either of the following
6346     // 1) call/callvirt/calli + ret
6347     // 2) call/callvirt/calli + pop + ret in a method returning void.
6348     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6349 #else //!_TARGET_AMD64_
6350     return (nextOpcode == CEE_RET) && (cntPop == 0);
6351 #endif
6352 }
6353
6354 /*****************************************************************************
6355  *
6356  * Determine whether the call could be converted to an implicit tail call
6357  *
6358  */
6359 bool Compiler::impIsImplicitTailCallCandidate(
6360     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6361 {
6362
6363 #if FEATURE_TAILCALL_OPT
6364     if (!opts.compTailCallOpt)
6365     {
6366         return false;
6367     }
6368
6369     if (opts.compDbgCode || opts.MinOpts())
6370     {
6371         return false;
6372     }
6373
6374     // must not be tail prefixed
6375     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6376     {
6377         return false;
6378     }
6379
6380 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6381     // the block containing call is marked as BBJ_RETURN
6382     // We allow shared ret tail call optimization on recursive calls even under
6383     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6384     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6385         return false;
6386 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6387
6388     // must be call+ret or call+pop+ret
6389     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6390     {
6391         return false;
6392     }
6393
6394     return true;
6395 #else
6396     return false;
6397 #endif // FEATURE_TAILCALL_OPT
6398 }
6399
6400 //------------------------------------------------------------------------
6401 // impImportCall: import a call-inspiring opcode
6402 //
6403 // Arguments:
6404 //    opcode                    - opcode that inspires the call
6405 //    pResolvedToken            - resolved token for the call target
6406 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6407 //    newobjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6408 //    prefixFlags               - IL prefix flags for the call
6409 //    callInfo                  - EE supplied info for the call
6410 //    rawILOffset               - IL offset of the opcode
6411 //
6412 // Returns:
6413 //    Type of the call's return value.
6414 //
6415 // Notes:
6416 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6417 //
6418 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6419 //    uninitialized object.
6420
6421 #ifdef _PREFAST_
6422 #pragma warning(push)
6423 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6424 #endif
6425
6426 var_types Compiler::impImportCall(OPCODE                  opcode,
6427                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6428                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6429                                   GenTreePtr              newobjThis,
6430                                   int                     prefixFlags,
6431                                   CORINFO_CALL_INFO*      callInfo,
6432                                   IL_OFFSET               rawILOffset)
6433 {
6434     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6435
6436     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6437     var_types              callRetTyp                     = TYP_COUNT;
6438     CORINFO_SIG_INFO*      sig                            = nullptr;
6439     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6440     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6441     unsigned               clsFlags                       = 0;
6442     unsigned               mflags                         = 0;
6443     unsigned               argFlags                       = 0;
6444     GenTreePtr             call                           = nullptr;
6445     GenTreeArgList*        args                           = nullptr;
6446     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6447     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6448     bool                   exactContextNeedsRuntimeLookup = false;
6449     bool                   canTailCall                    = true;
6450     const char*            szCanTailCallFailReason        = nullptr;
6451     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6452     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6453
6454     CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
6455
6456     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6457     // do that before tailcalls, but that is probably not the intended
6458     // semantic. So just disallow tailcalls from synchronized methods.
6459     // Also, popping arguments in a varargs function is more work and NYI
6460     // If we have a security object, we have to keep our frame around for callers
6461     // to see any imperative security.
6462     if (info.compFlags & CORINFO_FLG_SYNCH)
6463     {
6464         canTailCall             = false;
6465         szCanTailCallFailReason = "Caller is synchronized";
6466     }
6467 #if !FEATURE_FIXED_OUT_ARGS
6468     else if (info.compIsVarArgs)
6469     {
6470         canTailCall             = false;
6471         szCanTailCallFailReason = "Caller is varargs";
6472     }
6473 #endif // FEATURE_FIXED_OUT_ARGS
6474     else if (opts.compNeedSecurityCheck)
6475     {
6476         canTailCall             = false;
6477         szCanTailCallFailReason = "Caller requires a security check.";
6478     }
6479
6480     // We only need to cast the return value of pinvoke inlined calls that return small types
6481
6482     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6483     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6484     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6485     // the time being that the callee might be compiled by the other JIT and thus the return
6486     // value will need to be widened by us (or not widened at all...)
6487
6488     // ReadyToRun code sticks with default calling convention that does not widen small return types.
6489
6490     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6491     bool bIntrinsicImported = false;
6492
6493     CORINFO_SIG_INFO calliSig;
6494     GenTreeArgList*  extraArg = nullptr;
6495
6496     /*-------------------------------------------------------------------------
6497      * First create the call node
6498      */
6499
6500     if (opcode == CEE_CALLI)
6501     {
6502         /* Get the call site sig */
6503         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6504
6505         callRetTyp = JITtype2varType(calliSig.retType);
6506
6507         call = impImportIndirectCall(&calliSig, ilOffset);
6508
6509         // We don't know the target method, so we have to infer the flags, or
6510         // assume the worst-case.
6511         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6512
6513 #ifdef DEBUG
6514         if (verbose)
6515         {
6516             unsigned structSize =
6517                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6518             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6519                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6520         }
6521 #endif
6522         // This should be checked in impImportBlockCode.
6523         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6524
6525         sig = &calliSig;
6526
6527 #ifdef DEBUG
6528         // We cannot lazily obtain the signature of a CALLI call because it has no method
6529         // handle that we can use, so we need to save its full call signature here.
6530         assert(call->gtCall.callSig == nullptr);
6531         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6532         *call->gtCall.callSig = calliSig;
6533 #endif // DEBUG
6534
6535         if (IsTargetAbi(CORINFO_CORERT_ABI))
6536         {
6537             bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
6538                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
6539                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
6540                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
6541             if (managedCall)
6542             {
6543                 addFatPointerCandidate(call->AsCall());
6544             }
6545         }
6546     }
6547     else // (opcode != CEE_CALLI)
6548     {
6549         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6550
6551         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6552         // supply the instantiation parameters necessary to make direct calls to underlying
6553         // shared generic code, rather than calling through instantiating stubs.  If the
6554         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6555         // must indeed pass an instantiation parameter.
6556
6557         methHnd = callInfo->hMethod;
6558
6559         sig        = &(callInfo->sig);
6560         callRetTyp = JITtype2varType(sig->retType);
6561
6562         mflags = callInfo->methodFlags;
6563
6564 #ifdef DEBUG
6565         if (verbose)
6566         {
6567             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6568             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6569                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6570         }
6571 #endif
6572         if (compIsForInlining())
6573         {
6574             /* Does this call site have security boundary restrictions? */
6575
6576             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6577             {
6578                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6579                 return callRetTyp;
6580             }
6581
6582             /* Does the inlinee need a security check token on the frame */
6583
6584             if (mflags & CORINFO_FLG_SECURITYCHECK)
6585             {
6586                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6587                 return callRetTyp;
6588             }
6589
6590             /* Does the inlinee use StackCrawlMark */
6591
6592             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6593             {
6594                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6595                 return callRetTyp;
6596             }
6597
6598             /* For now ignore delegate invoke */
6599
6600             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6601             {
6602                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6603                 return callRetTyp;
6604             }
6605
6606             /* For now ignore varargs */
6607             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6608             {
6609                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6610                 return callRetTyp;
6611             }
6612
6613             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6614             {
6615                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6616                 return callRetTyp;
6617             }
6618
6619             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6620             {
6621                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6622                 return callRetTyp;
6623             }
6624         }
6625
6626         clsHnd = pResolvedToken->hClass;
6627
6628         clsFlags = callInfo->classFlags;
6629
6630 #ifdef DEBUG
6631         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6632
6633         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6634         // These should be in mscorlib.h, and available through a JIT/EE interface call.
6635         const char* modName;
6636         const char* className;
6637         const char* methodName;
6638         if ((className = eeGetClassName(clsHnd)) != nullptr &&
6639             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6640             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6641         {
6642             return impImportJitTestLabelMark(sig->numArgs);
6643         }
6644 #endif // DEBUG
6645
6646         // <NICE> Factor this into getCallInfo </NICE>
6647         if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6648         {
6649             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6650                                 (canTailCall && (tailCall != 0)), &intrinsicID);
6651
6652             if (call != nullptr)
6653             {
6654                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6655                        (clsFlags & CORINFO_FLG_FINAL));
6656
6657 #ifdef FEATURE_READYTORUN_COMPILER
6658                 if (call->OperGet() == GT_INTRINSIC)
6659                 {
6660                     if (opts.IsReadyToRun())
6661                     {
6662                         noway_assert(callInfo->kind == CORINFO_CALL);
6663                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6664                     }
6665                     else
6666                     {
6667                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6668                     }
6669                 }
6670 #endif
6671
6672                 bIntrinsicImported = true;
6673                 goto DONE_CALL;
6674             }
6675         }
6676
6677 #ifdef FEATURE_SIMD
6678         if (featureSIMD)
6679         {
6680             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6681             if (call != nullptr)
6682             {
6683                 bIntrinsicImported = true;
6684                 goto DONE_CALL;
6685             }
6686         }
6687 #endif // FEATURE_SIMD
6688
6689         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6690         {
6691             NO_WAY("Virtual call to a function added via EnC is not supported");
6692         }
6693
6694         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6695             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6696             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6697         {
6698             BADCODE("Bad calling convention");
6699         }
6700
6701         //-------------------------------------------------------------------------
6702         //  Construct the call node
6703         //
6704         // Work out what sort of call we're making.
6705         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6706
6707         constraintCallThisTransform    = callInfo->thisTransform;
6708         exactContextHnd                = callInfo->contextHandle;
6709         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
6710
6711         // A recursive call is treated as a loop back to the beginning of the method.
6712         if (methHnd == info.compMethodHnd)
6713         {
6714 #ifdef DEBUG
6715             if (verbose)
6716             {
6717                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6718                         fgFirstBB->bbNum, compCurBB->bbNum);
6719             }
6720 #endif
6721             fgMarkBackwardJump(fgFirstBB, compCurBB);
6722         }
6723
6724         switch (callInfo->kind)
6725         {
6726
6727             case CORINFO_VIRTUALCALL_STUB:
6728             {
6729                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6730                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6731                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6732                 {
6733
6734                     if (compIsForInlining())
6735                     {
6736                         // Don't import runtime lookups when inlining
6737                         // Inlining has to be aborted in such a case
6738                         /* XXX Fri 3/20/2009
6739                          * By the way, this would never succeed.  If the handle lookup is into the generic
6740                          * dictionary for a candidate, you'll generate different dictionary offsets and the
6741                          * inlined code will crash.
6742                          *
6743                          * To anyone reviewing this code, when could this ever succeed in the future?  It'll
6744                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
6745                          * failing here.
6746                          */
6747                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6748                         return callRetTyp;
6749                     }
6750
6751                     GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6752                     assert(!compDonotInline());
6753
6754                     // This is the rough code to set up an indirect stub call
6755                     assert(stubAddr != nullptr);
6756
6757                     // The stubAddr may be a
6758                     // complex expression. As it is evaluated after the args,
6759                     // it may cause registered args to be spilled. Simply spill it.
6760
6761                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6762                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6763                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6764
6765                     // Create the actual call node
6766
6767                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6768                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6769
6770                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6771
6772                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6773                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6774
6775 #ifdef _TARGET_X86_
6776                     // No tailcalls allowed for these yet...
6777                     canTailCall             = false;
6778                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
6779 #endif
6780                 }
6781                 else
6782                 {
6783                     // OK, the stub is available at compile time.
6784
6785                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6786                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6787                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6788                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6789                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6790                     {
6791                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6792                     }
6793                 }
6794
6795 #ifdef FEATURE_READYTORUN_COMPILER
6796                 if (opts.IsReadyToRun())
6797                 {
6798                     // Null check is sometimes needed for ready to run to handle
6799                     // non-virtual <-> virtual changes between versions
6800                     if (callInfo->nullInstanceCheck)
6801                     {
6802                         call->gtFlags |= GTF_CALL_NULLCHECK;
6803                     }
6804                 }
6805 #endif
6806
6807                 break;
6808             }
6809
6810             case CORINFO_VIRTUALCALL_VTABLE:
6811             {
6812                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6813                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6814                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6815                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6816                 break;
6817             }
6818
6819             case CORINFO_VIRTUALCALL_LDVIRTFTN:
6820             {
6821                 if (compIsForInlining())
6822                 {
6823                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6824                     return callRetTyp;
6825                 }
6826
6827                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6828                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6829                 // OK, we've been told to call via LDVIRTFTN, so just
6830                 // make the call now.
6831
6832                 args = impPopList(sig->numArgs, &argFlags, sig);
6833
6834                 GenTreePtr thisPtr = impPopStack().val;
6835                 thisPtr            = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6836                 if (compDonotInline())
6837                 {
6838                     return callRetTyp;
6839                 }
6840
6841                 // Clone the (possibly transformed) "this" pointer
6842                 GenTreePtr thisPtrCopy;
6843                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6844                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
6845
6846                 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6847
6848                 if (compDonotInline())
6849                 {
6850                     return callRetTyp;
6851                 }
6852
6853                 thisPtr = nullptr; // can't reuse it
6854
6855                 // Now make an indirect call through the function pointer
6856
6857                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6858                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6859                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6860
6861                 // Create the actual call node
6862
6863                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6864                 call->gtCall.gtCallObjp = thisPtrCopy;
6865                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6866
6867                 if (((sig->callConv & CORINFO_CALLCONV_GENERIC) != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
6868                 {
6869                     // CoreRT generic virtual method: need to handle potential fat function pointers
6870                     addFatPointerCandidate(call->AsCall());
6871                 }
6872 #ifdef FEATURE_READYTORUN_COMPILER
6873                 if (opts.IsReadyToRun())
6874                 {
6875                     // Null check is needed for ready to run to handle
6876                     // non-virtual <-> virtual changes between versions
6877                     call->gtFlags |= GTF_CALL_NULLCHECK;
6878                 }
6879 #endif
6880
6881                 // Since we are jumping over some code, check that it's OK to skip that code
6882                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6883                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6884                 goto DONE;
6885             }
6886
6887             case CORINFO_CALL:
6888             {
6889                 // This is for a non-virtual, non-interface etc. call
6890                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6891
6892                 // We remove the nullcheck for the GetType call intrinsic.
6893                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6894                 // and intrinsics.
6895                 if (callInfo->nullInstanceCheck &&
6896                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6897                 {
6898                     call->gtFlags |= GTF_CALL_NULLCHECK;
6899                 }
6900
6901 #ifdef FEATURE_READYTORUN_COMPILER
6902                 if (opts.IsReadyToRun())
6903                 {
6904                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6905                 }
6906 #endif
6907                 break;
6908             }
6909
6910             case CORINFO_CALL_CODE_POINTER:
6911             {
6912                 // The EE has asked us to call by computing a code pointer and then doing an
6913                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
6914
6915                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6916                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6917
6918                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6919                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6920
6921                 GenTreePtr fptr =
6922                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6923
6924                 if (compDonotInline())
6925                 {
6926                     return callRetTyp;
6927                 }
6928
6929                 // Now make an indirect call through the function pointer
6930
6931                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6932                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6933                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6934
6935                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6936                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6937                 if (callInfo->nullInstanceCheck)
6938                 {
6939                     call->gtFlags |= GTF_CALL_NULLCHECK;
6940                 }
6941
6942                 break;
6943             }
6944
6945             default:
6946                 assert(!"unknown call kind");
6947                 break;
6948         }
6949
6950         //-------------------------------------------------------------------------
6951         // Set more flags
6952
6953         PREFIX_ASSUME(call != nullptr);
6954
6955         if (mflags & CORINFO_FLG_NOGCCHECK)
6956         {
6957             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6958         }
6959
6960         // Mark the call if it's one of the ones we may treat as an intrinsic
6961         if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6962             intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6963             intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6964         {
6965             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6966         }
6967     }
6968     assert(sig);
6969     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6970
6971     /* Some sanity checks */
6972
6973     // CALL_VIRT and NEWOBJ must have a THIS pointer
6974     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6975     // static bit and hasThis are negations of one another
6976     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6977     assert(call != nullptr);
6978
6979     /*-------------------------------------------------------------------------
6980      * Check special-cases etc
6981      */
6982
6983     /* Special case - Check if it is a call to Delegate.Invoke(). */
6984
6985     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6986     {
6987         assert(!compIsForInlining());
6988         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6989         assert(mflags & CORINFO_FLG_FINAL);
6990
6991         /* Set the delegate flag */
6992         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6993
6994         if (callInfo->secureDelegateInvoke)
6995         {
6996             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6997         }
6998
6999         if (opcode == CEE_CALLVIRT)
7000         {
7001             assert(mflags & CORINFO_FLG_FINAL);
7002
7003             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7004             assert(call->gtFlags & GTF_CALL_NULLCHECK);
7005             call->gtFlags &= ~GTF_CALL_NULLCHECK;
7006         }
7007     }
7008
7009     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7010     actualMethodRetTypeSigClass = sig->retTypeSigClass;
7011     if (varTypeIsStruct(callRetTyp))
7012     {
7013         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
7014         call->gtType = callRetTyp;
7015     }
7016
7017 #if !FEATURE_VARARG
7018     /* Check for varargs */
7019     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7020         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7021     {
7022         BADCODE("Varargs not supported.");
7023     }
7024 #endif // !FEATURE_VARARG
7025
7026 #ifdef UNIX_X86_ABI
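    // Record a copy of the call site signature on the call node if one is not already present.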
7027     if (call->gtCall.callSig == nullptr)
7028     {
7029         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7030         *call->gtCall.callSig = *sig;
7031     }
7032 #endif // UNIX_X86_ABI
7033
7034     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7035         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7036     {
7037         assert(!compIsForInlining());
7038
7039         /* Set the right flags */
7040
7041         call->gtFlags |= GTF_CALL_POP_ARGS;
7042         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7043
7044         /* Can't allow tailcall for varargs as it is caller-pop. The caller
7045            will be expecting to pop a certain number of arguments, but if we
7046            tailcall to a function with a different number of arguments, we
7047            are hosed. There are ways around this (caller remembers esp value,
7048            varargs is not caller-pop, etc), but not worth it. */
7049         CLANG_FORMAT_COMMENT_ANCHOR;
7050
7051 #ifdef _TARGET_X86_
7052         if (canTailCall)
7053         {
7054             canTailCall             = false;
7055             szCanTailCallFailReason = "Callee is varargs";
7056         }
7057 #endif
7058
7059         /* Get the total number of arguments - this is already correct
7060          * for CALLI - for methods we have to get it from the call site */
7061
7062         if (opcode != CEE_CALLI)
7063         {
7064 #ifdef DEBUG
7065             unsigned numArgsDef = sig->numArgs;
7066 #endif
7067             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7068
7069 #ifdef DEBUG
7070             // We cannot lazily obtain the signature of a vararg call because using its method
7071             // handle will give us only the declared argument list, not the full argument list.
7072             assert(call->gtCall.callSig == nullptr);
7073             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7074             *call->gtCall.callSig = *sig;
7075 #endif
7076
7077             // For vararg calls we must be sure to load the return type of the
7078             // method actually being called, as well as the return types
7079             // specified in the vararg signature. With type equivalency, these types
7080             // may not be the same.
7081             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7082             {
7083                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7084                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7085                     sig->retType != CORINFO_TYPE_VAR)
7086                 {
7087                     // Make sure that all valuetypes (including enums) that we push are loaded.
7088                     // This is to guarantee that if a GC is triggered from the prestub of this method,
7089                     // all valuetypes in the method signature are already loaded.
7090                     // We need to be able to find the size of the valuetypes, but we cannot
7091                     // do a class-load from within GC.
7092                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7093                 }
7094             }
7095
7096             assert(numArgsDef <= sig->numArgs);
7097         }
7098
7099         /* We will have "cookie" as the last argument but we cannot push
7100          * it on the operand stack because we may overflow, so we append it
7101          * to the arg list after we pop the other arguments */
7102     }
7103
7104     if (mflags & CORINFO_FLG_SECURITYCHECK)
7105     {
7106         assert(!compIsForInlining());
7107
7108         // Need security prolog/epilog callouts when there is
7109         // imperative security in the method. This is to give security a
7110         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7111
7112         if (compIsForInlining())
7113         {
7114             // Cannot handle this if the method being imported is itself an inlinee,
7115             // because an inlinee method does not have its own frame.
7116
7117             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7118             return callRetTyp;
7119         }
7120         else
7121         {
7122             tiSecurityCalloutNeeded = true;
7123
7124             // If the current method calls a method which needs a security check,
7125             // (i.e. the method being compiled has imperative security)
7126             // we need to reserve a slot for the security object in
7127             // the current method's stack frame
7128             opts.compNeedSecurityCheck = true;
7129         }
7130     }
7131
7132     //--------------------------- Inline NDirect ------------------------------
7133
7134     // For inline cases we technically should look at both the current
7135     // block and the call site block (or just the latter if we've
7136     // fused the EH trees). However the block-related checks pertain to
7137     // EH and we currently won't inline a method with EH. So for
7138     // inlinees, just checking the call site block is sufficient.
7139     {
7140         // New lexical block here to avoid compilation errors because of GOTOs.
7141         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7142         impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
7143     }
7144
7145     if (call->gtFlags & GTF_CALL_UNMANAGED)
7146     {
7147         // We set up the unmanaged call by linking the frame, disabling GC, etc
7148         // This needs to be cleaned up on return
7149         if (canTailCall)
7150         {
7151             canTailCall             = false;
7152             szCanTailCallFailReason = "Callee is native";
7153         }
7154
7155         checkForSmallType = true;
7156
7157         impPopArgsForUnmanagedCall(call, sig);
7158
7159         goto DONE;
7160     }
7161     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7162                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7163                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7164                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7165     {
7166         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7167         {
7168             // Normally this only happens with inlining.
7169             // However, a generic method (or type) being NGENd into another module
7170             // can run into this issue as well.  There's no easy fall-back for NGEN,
7171             // so instead we fall back to JIT.
7172             if (compIsForInlining())
7173             {
7174                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7175             }
7176             else
7177             {
7178                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7179             }
7180
7181             return callRetTyp;
7182         }
7183
7184         GenTreePtr cookie = eeGetPInvokeCookie(sig);
7185
7186         // This cookie is required to be either a simple GT_CNS_INT or
7187         // an indirection of a GT_CNS_INT
7188         //
7189         GenTreePtr cookieConst = cookie;
7190         if (cookie->gtOper == GT_IND)
7191         {
7192             cookieConst = cookie->gtOp.gtOp1;
7193         }
7194         assert(cookieConst->gtOper == GT_CNS_INT);
7195
7196         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7197         // we won't allow this tree to participate in any CSE logic
7198         //
7199         cookie->gtFlags |= GTF_DONT_CSE;
7200         cookieConst->gtFlags |= GTF_DONT_CSE;
7201
7202         call->gtCall.gtCallCookie = cookie;
7203
7204         if (canTailCall)
7205         {
7206             canTailCall             = false;
7207             szCanTailCallFailReason = "PInvoke calli";
7208         }
7209     }
7210
7211     /*-------------------------------------------------------------------------
7212      * Create the argument list
7213      */
7214
7215     //-------------------------------------------------------------------------
7216     // Special case - for varargs we have an implicit last argument
7217
7218     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7219     {
7220         assert(!compIsForInlining());
7221
7222         void *varCookie, *pVarCookie;
7223         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7224         {
7225             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7226             return callRetTyp;
7227         }
7228
7229         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7230         assert((!varCookie) != (!pVarCookie));
7231         GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7232
7233         assert(extraArg == nullptr);
7234         extraArg = gtNewArgList(cookie);
7235     }
7236
7237     //-------------------------------------------------------------------------
7238     // Extra arg for shared generic code and array methods
7239     //
7240     // Extra argument containing instantiation information is passed in the
7241     // following circumstances:
7242     // (a) To the "Address" method on array classes; the extra parameter is
7243     //     the array's type handle (a TypeDesc)
7244     // (b) To shared-code instance methods in generic structs; the extra parameter
7245     //     is the struct's type handle (a vtable ptr)
7246     // (c) To shared-code per-instantiation non-generic static methods in generic
7247     //     classes and structs; the extra parameter is the type handle
7248     // (d) To shared-code generic methods; the extra parameter is an
7249     //     exact-instantiation MethodDesc
7250     //
7251     // We also set the exact type context associated with the call so we can
7252     // inline the call correctly later on.
7253
7254     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7255     {
7256         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7257         if (clsHnd == nullptr)
7258         {
7259             NO_WAY("CALLI on parameterized type");
7260         }
7261
7262         assert(opcode != CEE_CALLI);
7263
7264         GenTreePtr instParam;
7265         BOOL       runtimeLookup;
7266
7267         // Instantiated generic method
7268         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7269         {
7270             CORINFO_METHOD_HANDLE exactMethodHandle =
7271                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7272
7273             if (!exactContextNeedsRuntimeLookup)
7274             {
7275 #ifdef FEATURE_READYTORUN_COMPILER
7276                 if (opts.IsReadyToRun())
7277                 {
7278                     instParam =
7279                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7280                     if (instParam == nullptr)
7281                     {
7282                         return callRetTyp;
7283                     }
7284                 }
7285                 else
7286 #endif
7287                 {
7288                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7289                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7290                 }
7291             }
7292             else
7293             {
7294                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7295                 if (instParam == nullptr)
7296                 {
7297                     return callRetTyp;
7298                 }
7299             }
7300         }
7301
7302         // otherwise must be an instance method in a generic struct,
7303         // a static method in a generic type, or a runtime-generated array method
7304         else
7305         {
7306             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7307             CORINFO_CLASS_HANDLE exactClassHandle =
7308                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7309
7310             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7311             {
7312                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7313                 return callRetTyp;
7314             }
7315
7316             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7317             {
7318                 // We indicate "readonly" to the Address operation by using a null
7319                 // instParam.
7320                 instParam = gtNewIconNode(0, TYP_REF);
7321             }
7322
7323             if (!exactContextNeedsRuntimeLookup)
7324             {
7325 #ifdef FEATURE_READYTORUN_COMPILER
7326                 if (opts.IsReadyToRun())
7327                 {
7328                     instParam =
7329                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7330                     if (instParam == nullptr)
7331                     {
7332                         return callRetTyp;
7333                     }
7334                 }
7335                 else
7336 #endif
7337                 {
7338                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7339                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7340                 }
7341             }
7342             else
7343             {
7344                 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7345                 if (instParam == nullptr)
7346                 {
7347                     return callRetTyp;
7348                 }
7349             }
7350         }
7351
7352         assert(extraArg == nullptr);
7353         extraArg = gtNewArgList(instParam);
7354     }
7355
7356     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7357     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7358     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7359     // exactContextHnd is not currently required when inlining shared generic code into shared
7360     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7361     // (e.g. anything marked needsRuntimeLookup)
7362     if (exactContextNeedsRuntimeLookup)
7363     {
7364         exactContextHnd = nullptr;
7365     }
7366
7367     if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
7368     {
7369         // Only verifiable cases are supported.
7370         // dup; ldvirtftn; newobj; or ldftn; newobj.
7371         // IL test could contain unverifiable sequence, in this case optimization should not be done.
7372         if (impStackHeight() > 0)
7373         {
7374             typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
7375             if (delegateTypeInfo.IsToken())
7376             {
7377                 ldftnToken = delegateTypeInfo.GetToken();
7378             }
7379         }
7380     }
7381
7382     //-------------------------------------------------------------------------
7383     // The main group of arguments
7384
7385     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7386
7387     if (args)
7388     {
7389         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7390     }
7391
7392     //-------------------------------------------------------------------------
7393     // The "this" pointer
7394
7395     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7396     {
7397         GenTreePtr obj;
7398
7399         if (opcode == CEE_NEWOBJ)
7400         {
7401             obj = newobjThis;
7402         }
7403         else
7404         {
7405             obj = impPopStack().val;
7406             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7407             if (compDonotInline())
7408             {
7409                 return callRetTyp;
7410             }
7411         }
7412
7413         /* Is this a virtual or interface call? */
7414
7415         if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7416         {
7417             /* only true object pointers can be virtual */
7418             assert(obj->gtType == TYP_REF);
7419
7420             // See if we can devirtualize.
7421             impDevirtualizeCall(call->AsCall(), obj, callInfo, &exactContextHnd);
7422         }
7423         else
7424         {
7425             if (impIsThis(obj))
7426             {
7427                 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7428             }
7429         }
7430
7431         /* Store the "this" value in the call */
7432
7433         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7434         call->gtCall.gtCallObjp = obj;
7435     }
7436
7437     //-------------------------------------------------------------------------
7438     // The "this" pointer for "newobj"
7439
7440     if (opcode == CEE_NEWOBJ)
7441     {
7442         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7443         {
7444             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7445             // This is a 'new' of a variable-sized object, where
7446             // the constructor is to return the object.  In this case
7447             // the constructor claims to return VOID but we know it
7448             // actually returns the new object.
7449             assert(callRetTyp == TYP_VOID);
7450             callRetTyp   = TYP_REF;
7451             call->gtType = TYP_REF;
7452             impSpillSpecialSideEff();
7453
7454             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7455         }
7456         else
7457         {
7458             if (clsFlags & CORINFO_FLG_DELEGATE)
7459             {
7460                 // The new inliner morphs it in impImportCall.
7461                 // This will allow us to inline the call to the delegate constructor.
7462                 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
7463             }
7464
7465             if (!bIntrinsicImported)
7466             {
7467
7468 #if defined(DEBUG) || defined(INLINE_DATA)
7469
7470                 // Keep track of the raw IL offset of the call
7471                 call->gtCall.gtRawILOffset = rawILOffset;
7472
7473 #endif // defined(DEBUG) || defined(INLINE_DATA)
7474
7475                 // Is it an inline candidate?
7476                 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7477             }
7478
7479             // append the call node.
7480             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7481
7482             // Now push the value of the 'new' onto the stack
7483
7484             // This is a 'new' of a non-variable sized object.
7485             // Append the new node (op1) to the statement list,
7486             // and then push the local holding the value of this
7487             // new instruction on the stack.
7488
7489             if (clsFlags & CORINFO_FLG_VALUECLASS)
7490             {
7491                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7492
7493                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7494                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7495             }
7496             else
7497             {
7498                 if (newobjThis->gtOper == GT_COMMA)
7499                 {
7500                     // In coreclr the callout can be inserted even if verification is disabled
7501                     // so we cannot rely on tiVerificationNeeded alone
7502
7503                     // We must have inserted the callout. Get the real newobj.
7504                     newobjThis = newobjThis->gtOp.gtOp2;
7505                 }
7506
7507                 assert(newobjThis->gtOper == GT_LCL_VAR);
7508                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7509             }
7510         }
7511         return callRetTyp;
7512     }
7513
7514 DONE:
7515
7516     if (tailCall)
7517     {
7518         // This check cannot be performed for implicit tail calls for the reason
7519         // that impIsImplicitTailCallCandidate() is not checking whether return
7520         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7521         // As a result it is possible that in the following case, we find that
7522         // the type stack is non-empty if Callee() is considered for implicit
7523         // tail calling.
7524         //      int Caller(..) { .... void Callee(); ret val; ... }
7525         //
7526         // Note that we cannot check return type compatibility before impImportCall()
7527         // as we don't have the required info, or we would need to duplicate some of the
7528         // logic of impImportCall().
7529         //
7530         // For implicit tail calls, we perform this check after return types are
7531         // known to be compatible.
7532         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7533         {
7534             BADCODE("Stack should be empty after tailcall");
7535         }
7536
7537         // Note that we cannot relax this condition with genActualType() as
7538         // the calling convention dictates that the caller of a function with
7539         // a small-typed return value is responsible for normalizing the return value.
7540
7541         if (canTailCall &&
7542             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7543                                           callInfo->sig.retTypeClass))
7544         {
7545             canTailCall             = false;
7546             szCanTailCallFailReason = "Return types are not tail call compatible";
7547         }
7548
7549         // Stack empty check for implicit tail calls.
7550         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7551         {
7552 #ifdef _TARGET_AMD64_
7553             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
7554             // in JIT64, not an InvalidProgramException.
7555             Verify(false, "Stack should be empty after tailcall");
7556 #else  // _TARGET_64BIT_
7557             BADCODE("Stack should be empty after tailcall");
7558 #endif //!_TARGET_64BIT_
7559         }
7560
7561         // assert(compCurBB is not a catch, finally or filter block);
7562         // assert(compCurBB is not a try block protected by a finally block);
7563
7564         // Check for permission to tailcall
7565         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7566
7567         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7568
7569         if (canTailCall)
7570         {
7571             // For true virtual or indirect calls, don't pass in a callee handle.
7572             CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7573                                                     ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7574                                                        ? nullptr
7575                                                        : methHnd;
7576             GenTreePtr thisArg = call->gtCall.gtCallObjp;
7577
7578             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7579             {
7580                 canTailCall = true;
7581                 if (explicitTailCall)
7582                 {
7583                     // In case of explicit tail calls, mark it so that it is not considered
7584                     // for in-lining.
7585                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7586 #ifdef DEBUG
7587                     if (verbose)
7588                     {
7589                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7590                         printTreeID(call);
7591                         printf("\n");
7592                     }
7593 #endif
7594                 }
7595                 else
7596                 {
7597 #if FEATURE_TAILCALL_OPT
7598                     // Must be an implicit tail call.
7599                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7600
7601                     // It is possible that a call node is both an inline candidate and marked
7602                     // for opportunistic tail calling.  In-lining happens before morphing of
7603                     // trees.  If in-lining of an in-line candidate gets aborted for whatever
7604                     // reason, it will survive to the morphing stage at which point it will be
7605                     // transformed into a tail call after performing additional checks.
7606
7607                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7608 #ifdef DEBUG
7609                     if (verbose)
7610                     {
7611                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7612                         printTreeID(call);
7613                         printf("\n");
7614                     }
7615 #endif
7616
7617 #else //! FEATURE_TAILCALL_OPT
7618                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7619
7620 #endif // FEATURE_TAILCALL_OPT
7621                 }
7622
7623                 // we can't report success just yet...
7624             }
7625             else
7626             {
7627                 canTailCall = false;
7628 // canTailCall reported its reasons already
7629 #ifdef DEBUG
7630                 if (verbose)
7631                 {
7632                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7633                     printTreeID(call);
7634                     printf("\n");
7635                 }
7636 #endif
7637             }
7638         }
7639         else
7640         {
7641             // If this assert fires it means that canTailCall was set to false without setting a reason!
7642             assert(szCanTailCallFailReason != nullptr);
7643
7644 #ifdef DEBUG
7645             if (verbose)
7646             {
7647                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7648                 printTreeID(call);
7649                 printf(": %s\n", szCanTailCallFailReason);
7650             }
7651 #endif
7652             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7653                                                      szCanTailCallFailReason);
7654         }
7655     }
7656
7657     // Note: we assume that small return types are already normalized by the managed callee
7658     // or by the pinvoke stub for calls to unmanaged code.
7659
7660     if (!bIntrinsicImported)
7661     {
7662         //
7663         // Things needed to be checked when bIntrinsicImported is false.
7664         //
7665
7666         assert(call->gtOper == GT_CALL);
7667         assert(sig != nullptr);
7668
7669         // Tail calls require us to save the call site's sig info so we can obtain an argument
7670         // copying thunk from the EE later on.
7671         if (call->gtCall.callSig == nullptr)
7672         {
7673             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7674             *call->gtCall.callSig = *sig;
7675         }
7676
7677         if (compIsForInlining() && opcode == CEE_CALLVIRT)
7678         {
7679             GenTreePtr callObj = call->gtCall.gtCallObjp;
7680             assert(callObj != nullptr);
7681
7682             unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7683
7684             if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7685                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7686                                                                    impInlineInfo->inlArgInfo))
7687             {
7688                 impInlineInfo->thisDereferencedFirst = true;
7689             }
7690         }
7691
7692 #if defined(DEBUG) || defined(INLINE_DATA)
7693
7694         // Keep track of the raw IL offset of the call
7695         call->gtCall.gtRawILOffset = rawILOffset;
7696
7697 #endif // defined(DEBUG) || defined(INLINE_DATA)
7698
7699         // Is it an inline candidate?
7700         impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7701     }
7702
7703 DONE_CALL:
7704     // Push or append the result of the call
7705     if (callRetTyp == TYP_VOID)
7706     {
7707         if (opcode == CEE_NEWOBJ)
7708         {
7709             // we actually did push something, so don't spill the thing we just pushed.
7710             assert(verCurrentState.esStackDepth > 0);
7711             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7712         }
7713         else
7714         {
7715             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7716         }
7717     }
7718     else
7719     {
7720         impSpillSpecialSideEff();
7721
7722         if (clsFlags & CORINFO_FLG_ARRAY)
7723         {
7724             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7725         }
7726
7727         // Find the return type used for verification by interpreting the method signature.
7728         // NB: we are clobbering the already established sig.
7729         if (tiVerificationNeeded)
7730         {
7731             // Actually, we never get the sig for the original method.
7732             sig = &(callInfo->verSig);
7733         }
7734
7735         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7736         tiRetVal.NormaliseForStack();
7737
7738         // The CEE_READONLY prefix modifies the verification semantics of an Address
7739         // operation on an array type.
7740         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7741         {
7742             tiRetVal.SetIsReadonlyByRef();
7743         }
7744
7745         if (tiVerificationNeeded)
7746         {
7747             // We assume all calls return permanent home byrefs. If they
7748             // didn't they wouldn't be verifiable. This is also covering
7749             // the Address() helper for multidimensional arrays.
7750             if (tiRetVal.IsByRef())
7751             {
7752                 tiRetVal.SetIsPermanentHomeByRef();
7753             }
7754         }
7755
7756         if (call->IsCall())
7757         {
7758             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7759
7760             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
7761             if (varTypeIsStruct(callRetTyp))
7762             {
7763                 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
7764             }
7765
7766             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7767             {
7768                 assert(opts.OptEnabled(CLFLG_INLINING));
7769                 assert(!fatPointerCandidate); // We should not try to inline calli.
7770
7771                 // Make the call its own tree (spill the stack if needed).
7772                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7773
7774                 // TODO: Still using the widened type.
7775                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7776             }
7777             else
7778             {
7779                 if (fatPointerCandidate)
7780                 {
7781                     // fatPointer candidates should be in statements of the form call() or var = call().
7782                     // Such a form allows us to find statements with fat calls without walking through whole trees
7783                     // and avoids problems with cutting trees.
7784                     assert(!bIntrinsicImported);
7785                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
7786                     if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
7787                     {
7788                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
7789                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
7790                         varDsc->lvVerTypeInfo = tiRetVal;
7791                         impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
7792                         // impAssignTempGen can change src arg list and return type for call that returns struct.
7793                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
7794                         call           = gtNewLclvNode(calliSlot, type);
7795                     }
7796                 }
7797
7798                 // For non-candidates we must also spill, since we
7799                 // might have locals live on the eval stack that this
7800                 // call can modify.
7801                 //
7802                 // Suppress this for certain well-known call targets
7803                 // that we know won't modify locals, e.g. calls that are
7804                 // recognized in gtCanOptimizeTypeEquality. Otherwise
7805                 // we may break key fragile pattern matches later on.
7806                 bool spillStack = true;
7807                 if (call->IsCall())
7808                 {
7809                     GenTreeCall* callNode = call->AsCall();
7810                     if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
7811                     {
7812                         spillStack = false;
7813                     }
7814                     else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
7815                     {
7816                         spillStack = false;
7817                     }
7818                 }
7819
7820                 if (spillStack)
7821                 {
7822                     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7823                 }
7824             }
7825         }
7826
7827         if (!bIntrinsicImported)
7828         {
7829             //-------------------------------------------------------------------------
7830             //
7831             /* If the call is of a small type and the callee is managed, the callee will normalize the result
7832                 before returning.
7833                 However, we need to normalize small type values returned by unmanaged
7834                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7835                 if we use the shorter inlined pinvoke stub. */
7836
7837             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7838             {
7839                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7840             }
7841         }
7842
7843         impPushOnStack(call, tiRetVal);
7844     }
7845
7846     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7847     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7848     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7849     //  callInfoCache.uncacheCallInfo();
7850
7851     return callRetTyp;
7852 }
7853 #ifdef _PREFAST_
7854 #pragma warning(pop)
7855 #endif
7856
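//-----------------------------------------------------------------------------------
//  impMethodInfo_hasRetBuffArg: Determine whether the method described by methInfo
//  returns its value via a hidden return buffer argument.
//
//  Arguments:
//    methInfo   -  method info of the callee
//
//  Return Value:
//    Returns true when the return type is a struct (or refany) that is returned
//    by reference (SPK_ByReference); false otherwise.
//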
7857 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7858 {
7859     CorInfoType corType = methInfo->args.retType;
7860
7861     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7862     {
7863         // We have some kind of STRUCT being returned
7864
7865         structPassingKind howToReturnStruct = SPK_Unknown;
7866
7867         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7868
7869         if (howToReturnStruct == SPK_ByReference)
7870         {
7871             return true;
7872         }
7873     }
7874
7875     return false;
7876 }
7877
7878 #ifdef DEBUG
7879 //
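//-----------------------------------------------------------------------------------
//  impImportJitTestLabelMark: Consume the arguments of a JIT test-label call from
//  the importer stack and record the resulting annotation on the marked tree.
//
//  Arguments:
//    numArgs    -  number of arguments passed to the test-label call (2 or 3)
//
//  Return Value:
//    The type of the annotated expression, which is pushed back on the stack.
//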
7880 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7881 {
7882     TestLabelAndNum tlAndN;
7883     if (numArgs == 2)
7884     {
7885         tlAndN.m_num  = 0;
7886         StackEntry se = impPopStack();
7887         assert(se.seTypeInfo.GetType() == TI_INT);
7888         GenTreePtr val = se.val;
7889         assert(val->IsCnsIntOrI());
7890         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7891     }
7892     else if (numArgs == 3)
7893     {
7894         StackEntry se = impPopStack();
7895         assert(se.seTypeInfo.GetType() == TI_INT);
7896         GenTreePtr val = se.val;
7897         assert(val->IsCnsIntOrI());
7898         tlAndN.m_num = val->AsIntConCommon()->IconValue();
7899         se           = impPopStack();
7900         assert(se.seTypeInfo.GetType() == TI_INT);
7901         val = se.val;
7902         assert(val->IsCnsIntOrI());
7903         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7904     }
7905     else
7906     {
7907         assert(false);
7908     }
7909
7910     StackEntry expSe = impPopStack();
7911     GenTreePtr node  = expSe.val;
7912
7913     // There are a small number of special cases, where we actually put the annotation on a subnode.
7914     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7915     {
7916         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7917         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7918         // offset within the static field block whose address is returned by the helper call.
7919         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
7920         GenTreePtr helperCall = nullptr;
7921         assert(node->OperGet() == GT_IND);
7922         tlAndN.m_num -= 100;
7923         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7924         GetNodeTestData()->Remove(node);
7925     }
7926     else
7927     {
7928         GetNodeTestData()->Set(node, tlAndN);
7929     }
7930
7931     impPushOnStack(node, expSe.seTypeInfo);
7932     return node->TypeGet();
7933 }
7934 #endif // DEBUG
7935
7936 //-----------------------------------------------------------------------------------
7937 //  impFixupCallStructReturn: For a call node that returns a struct type either
7938 //  adjust the return type to an enregisterable type, or set the flag to indicate
7939 //  struct return via retbuf arg.
7940 //
7941 //  Arguments:
7942 //    call       -  GT_CALL GenTree node
7943 //    retClsHnd  -  Class handle of return type of the call
7944 //
7945 //  Return Value:
7946 //    Returns new GenTree node after fixing struct return of call node
7947 //
7948 GenTreePtr Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
7949 {
7950     if (!varTypeIsStruct(call))
7951     {
7952         return call;
7953     }
7954
7955     call->gtRetClsHnd = retClsHnd;
7956
7957 #if FEATURE_MULTIREG_RET
7958     // Initialize Return type descriptor of call node
7959     ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
7960     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7961 #endif // FEATURE_MULTIREG_RET
7962
7963 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7964
7965     // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
7966     assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
7967
7968     // The return type will remain as the incoming struct type unless normalized to a
7969     // single eightbyte return type below.
7970     call->gtReturnType = call->gtType;
7971
7972     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7973     if (retRegCount != 0)
7974     {
7975         if (retRegCount == 1)
7976         {
7977             // struct returned in a single register
7978             call->gtReturnType = retTypeDesc->GetReturnRegType(0);
7979         }
7980         else
7981         {
7982             // must be a struct returned in two registers
7983             assert(retRegCount == 2);
7984
7985             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
7986             {
7987                 // Force a call returning multi-reg struct to be always of the IR form
7988                 //   tmp = call
7989                 //
7990                 // No need to assign a multi-reg struct to a local var if:
7991                 //  - It is a tail call or
7992                 //  - The call is marked for in-lining later
7993                 return impAssignMultiRegTypeToVar(call, retClsHnd);
7994             }
7995         }
7996     }
7997     else
7998     {
7999         // struct not returned in registers, i.e. returned via the hidden retbuf arg.
8000         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8001     }
8002
8003 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8004
8005 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8006     // There is no fixup necessary if the return type is an HFA struct.
8007     // HFA structs are returned in registers for ARM32 and ARM64.
8008     //
8009     if (!call->IsVarargs() && IsHfa(retClsHnd))
8010     {
8011         if (call->CanTailCall())
8012         {
8013             if (info.compIsVarArgs)
8014             {
8015                 // We cannot tail call because control needs to return to fix up the calling
8016                 // convention for result return.
8017                 call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8018             }
8019             else
8020             {
8021                 // If we can tail call returning HFA, then don't assign it to
8022                 // a variable back and forth.
8023                 return call;
8024             }
8025         }
8026
8027         if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
8028         {
8029             return call;
8030         }
8031
8032         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8033         if (retRegCount >= 2)
8034         {
8035             return impAssignMultiRegTypeToVar(call, retClsHnd);
8036         }
8037     }
8038 #endif // _TARGET_ARM_
8039
8040     // Check for TYP_STRUCT type that wraps a primitive type
8041     // Such structs are returned using a single register
8042     // and we change the return type on those calls here.
8043     //
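    // For illustration (a sketch of the common cases, not an exhaustive list): a struct
    // such as "struct WrappedInt { int x; }" comes back with a by-value kind and the
    // return type is retyped to TYP_INT below, while a struct too large to fit in the
    // return registers comes back as SPK_ByReference, i.e. via a hidden retbuf arg.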
8044     structPassingKind howToReturnStruct;
8045     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8046
8047     if (howToReturnStruct == SPK_ByReference)
8048     {
8049         assert(returnType == TYP_UNKNOWN);
8050         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8051     }
8052     else
8053     {
8054         assert(returnType != TYP_UNKNOWN);
8055         call->gtReturnType = returnType;
8056
8057         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
8058         if ((returnType == TYP_LONG) && (compLongUsed == false))
8059         {
8060             compLongUsed = true;
8061         }
8062         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
8063         {
8064             compFloatingPointUsed = true;
8065         }
8066
8067 #if FEATURE_MULTIREG_RET
8068         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8069         assert(retRegCount != 0);
8070
8071         if (retRegCount >= 2)
8072         {
8073             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8074             {
8075                 // Force a call returning a multi-reg struct to always have the IR form
8076                 //   tmp = call
8077                 //
8078                 // No need to assign a multi-reg struct to a local var if:
8079                 //  - It is a tail call or
8080                 //  - The call is marked for in-lining later
8081                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8082             }
8083         }
8084 #endif // FEATURE_MULTIREG_RET
8085     }
8086
8087 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8088
8089     return call;
8090 }
8091
8092 /*****************************************************************************
8093    For struct return values, re-type the operand in the case where the ABI
8094    does not use a struct return buffer.
8095    Note that this method is only called for !_TARGET_X86_.
8096  */
8097
8098 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
8099 {
8100     assert(varTypeIsStruct(info.compRetType));
8101     assert(info.compRetBuffArg == BAD_VAR_NUM);
8102
8103 #if defined(_TARGET_XARCH_)
8104
8105 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8106     // No VarArgs for CoreCLR on x64 Unix
8107     assert(!info.compIsVarArgs);
8108
8109     // Is method returning a multi-reg struct?
8110     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8111     {
8112         // In case of multi-reg struct return, we force IR to be one of the following:
8113         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
8114         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
8115
8116         if (op->gtOper == GT_LCL_VAR)
8117         {
8118             // Make sure that this struct stays in memory and doesn't get promoted.
8119             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
8120             lvaTable[lclNum].lvIsMultiRegRet = true;
8121
8122             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8123             op->gtFlags |= GTF_DONT_CSE;
8124
8125             return op;
8126         }
8127
8128         if (op->gtOper == GT_CALL)
8129         {
8130             return op;
8131         }
8132
8133         return impAssignMultiRegTypeToVar(op, retClsHnd);
8134     }
8135 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8136     assert(info.compRetNativeType != TYP_STRUCT);
8137 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8138
8139 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8140
8141     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8142     {
8143         if (op->gtOper == GT_LCL_VAR)
8144         {
8145             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8146             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8147             // Make sure this struct type stays as struct so that we can return it as an HFA
8148             lvaTable[lclNum].lvIsMultiRegRet = true;
8149
8150             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8151             op->gtFlags |= GTF_DONT_CSE;
8152
8153             return op;
8154         }
8155
8156         if (op->gtOper == GT_CALL)
8157         {
8158             if (op->gtCall.IsVarargs())
8159             {
8160                 // We cannot tail call because control needs to return to fixup the calling
8161                 // convention for result return.
8162                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8163                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8164             }
8165             else
8166             {
8167                 return op;
8168             }
8169         }
8170         return impAssignMultiRegTypeToVar(op, retClsHnd);
8171     }
8172
8173 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8174
8175     // Is method returning a multi-reg struct?
8176     if (IsMultiRegReturnedType(retClsHnd))
8177     {
8178         if (op->gtOper == GT_LCL_VAR)
8179         {
8180             // This LCL_VAR stays as a TYP_STRUCT
8181             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8182
8183             // Make sure this struct type is not struct promoted
8184             lvaTable[lclNum].lvIsMultiRegRet = true;
8185
8186             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8187             op->gtFlags |= GTF_DONT_CSE;
8188
8189             return op;
8190         }
8191
8192         if (op->gtOper == GT_CALL)
8193         {
8194             if (op->gtCall.IsVarargs())
8195             {
8196                 // We cannot tail call because control needs to return to fixup the calling
8197                 // convention for result return.
8198                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8199                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8200             }
8201             else
8202             {
8203                 return op;
8204             }
8205         }
8206         return impAssignMultiRegTypeToVar(op, retClsHnd);
8207     }
8208
8209 #endif //  FEATURE_MULTIREG_RET && FEATURE_HFA
8210
8211 REDO_RETURN_NODE:
8212     // Adjust the type away from struct to integral,
8213     // and do not normalize it.
8214     if (op->gtOper == GT_LCL_VAR)
8215     {
8216         op->ChangeOper(GT_LCL_FLD);
8217     }
8218     else if (op->gtOper == GT_OBJ)
8219     {
8220         GenTreePtr op1 = op->AsObj()->Addr();
8221
8222         // We will fold away OBJ/ADDR
8223         // except for OBJ/ADDR/INDEX
8224         //     as the array type influences the array element's offset
8225         //     Later in this method we change op->gtType to info.compRetNativeType
8226         //     This is not correct when op is a GT_INDEX as the starting offset
8227         //     for the array elements 'elemOffs' is different for an array of
8228         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8229         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8230         //
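        // For illustration: GT_OBJ(GT_ADDR(lclVar)) collapses to the bare lclVar here, which
        // the code at REDO_RETURN_NODE then retypes via GT_LCL_FLD; an OBJ/ADDR/INDEX shape
        // is deliberately left alone for the reason described above.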
8231         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8232         {
8233             // Change '*(&X)' to 'X' and see if we can do better
8234             op = op1->gtOp.gtOp1;
8235             goto REDO_RETURN_NODE;
8236         }
8237         op->gtObj.gtClass = NO_CLASS_HANDLE;
8238         op->ChangeOperUnchecked(GT_IND);
8239         op->gtFlags |= GTF_IND_TGTANYWHERE;
8240     }
8241     else if (op->gtOper == GT_CALL)
8242     {
8243         if (op->AsCall()->TreatAsHasRetBufArg(this))
8244         {
8245             // This must be one of those 'special' helpers that don't
8246             // really have a return buffer, but instead use it as a way
8247             // to keep the trees cleaner with fewer address-taken temps.
8248             //
8249             // Well, now we have to materialize the return buffer as
8250             // an address-taken temp. Then we can return the temp.
8251             //
8252             // NOTE: this code assumes that since the call directly
8253             // feeds the return, then the call must be returning the
8254             // same structure/class/type.
8255             //
8256             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8257
8258             // No need to spill anything as we're about to return.
8259             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8260
8261             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8262             // jump directly to a GT_LCL_FLD.
8263             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8264             op->ChangeOper(GT_LCL_FLD);
8265         }
8266         else
8267         {
8268             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8269
8270             // Don't change the gtType of the node just yet, it will get changed later.
8271             return op;
8272         }
8273     }
8274     else if (op->gtOper == GT_COMMA)
8275     {
8276         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8277     }
8278
8279     op->gtType = info.compRetNativeType;
8280
8281     return op;
8282 }
8283
8284 /*****************************************************************************
8285    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8286    finally-protected try. We find the finally blocks protecting the current
8287    offset (in order) by walking over the complete exception table and
8288    finding enclosing clauses. This assumes that the table is sorted.
8289    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8290
8291    If we are leaving a catch handler, we need to attach the
8292    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8293
8294    After this function, the BBJ_LEAVE block has been converted to a different type.
8295  */
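/*
   For illustration only (a hand-written sketch, not taken from any particular test),
   C# along the lines of

       try {
           try {
               ...
               goto Done;          // emitted as CEE_LEAVE Done
           } finally { Inner(); }
       } finally { Outer(); }
       Done: ...

   is imported as BBJ_CALLFINALLY(Inner) -> BBJ_CALLFINALLY(Outer) -> BBJ_ALWAYS(Done),
   with the GT_END_LFIN bookkeeping (and CORINFO_HELP_ENDCATCH calls, when leaving a
   catch) handled as described below.
*/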
8296
8297 #if !FEATURE_EH_FUNCLETS
8298
8299 void Compiler::impImportLeave(BasicBlock* block)
8300 {
8301 #ifdef DEBUG
8302     if (verbose)
8303     {
8304         printf("\nBefore import CEE_LEAVE:\n");
8305         fgDispBasicBlocks();
8306         fgDispHandlerTab();
8307     }
8308 #endif // DEBUG
8309
8310     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8311     unsigned    blkAddr         = block->bbCodeOffs;
8312     BasicBlock* leaveTarget     = block->bbJumpDest;
8313     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8314
8315     // LEAVE clears the stack: spill side effects and set the stack depth to 0
8316
8317     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8318     verCurrentState.esStackDepth = 0;
8319
8320     assert(block->bbJumpKind == BBJ_LEAVE);
8321     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8322
8323     BasicBlock* step         = DUMMY_INIT(NULL);
8324     unsigned    encFinallies = 0; // Number of enclosing finallies.
8325     GenTreePtr  endCatches   = NULL;
8326     GenTreePtr  endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8327
8328     unsigned  XTnum;
8329     EHblkDsc* HBtab;
8330
8331     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8332     {
8333         // Grab the handler offsets
8334
8335         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8336         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8337         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8338         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8339
8340         /* Is this a catch-handler we are CEE_LEAVEing out of?
8341          * If so, we need to call CORINFO_HELP_ENDCATCH.
8342          */
8343
8344         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8345         {
8346             // Can't CEE_LEAVE out of a finally/fault handler
8347             if (HBtab->HasFinallyOrFaultHandler())
8348                 BADCODE("leave out of fault/finally block");
8349
8350             // Create the call to CORINFO_HELP_ENDCATCH
8351             GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8352
8353             // Make a list of all the currently pending endCatches
8354             if (endCatches)
8355                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8356             else
8357                 endCatches = endCatch;
8358
8359 #ifdef DEBUG
8360             if (verbose)
8361             {
8362                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8363                        "CORINFO_HELP_ENDCATCH\n",
8364                        block->bbNum, XTnum);
8365             }
8366 #endif
8367         }
8368         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8369                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8370         {
8371             /* This is a finally-protected try we are jumping out of */
8372
8373             /* If there are any pending endCatches, and we have already
8374                jumped out of a finally-protected try, then the endCatches
8375                have to be put in a block in an outer try for async
8376                exceptions to work correctly.
8377                Else, just append them to the original block */
8378
8379             BasicBlock* callBlock;
8380
8381             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8382
8383             if (encFinallies == 0)
8384             {
8385                 assert(step == DUMMY_INIT(NULL));
8386                 callBlock             = block;
8387                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8388
8389                 if (endCatches)
8390                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8391
8392 #ifdef DEBUG
8393                 if (verbose)
8394                 {
8395                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8396                            "block BB%02u [%08p]\n",
8397                            callBlock->bbNum, dspPtr(callBlock));
8398                 }
8399 #endif
8400             }
8401             else
8402             {
8403                 assert(step != DUMMY_INIT(NULL));
8404
8405                 /* Calling the finally block */
8406                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8407                 assert(step->bbJumpKind == BBJ_ALWAYS);
8408                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8409                                               // finally in the chain)
8410                 step->bbJumpDest->bbRefs++;
8411
8412                 /* The new block will inherit this block's weight */
8413                 callBlock->setBBWeight(block->bbWeight);
8414                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8415
8416 #ifdef DEBUG
8417                 if (verbose)
8418                 {
8419                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8420                            "[%08p]\n",
8421                            callBlock->bbNum, dspPtr(callBlock));
8422                 }
8423 #endif
8424
8425                 GenTreePtr lastStmt;
8426
8427                 if (endCatches)
8428                 {
8429                     lastStmt         = gtNewStmt(endCatches);
8430                     endLFin->gtNext  = lastStmt;
8431                     lastStmt->gtPrev = endLFin;
8432                 }
8433                 else
8434                 {
8435                     lastStmt = endLFin;
8436                 }
8437
8438                 // note that this sets BBF_IMPORTED on the block
8439                 impEndTreeList(callBlock, endLFin, lastStmt);
8440             }
8441
8442             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8443             /* The new block will inherit this block's weight */
8444             step->setBBWeight(block->bbWeight);
8445             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8446
8447 #ifdef DEBUG
8448             if (verbose)
8449             {
8450                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8451                        "BB%02u [%08p]\n",
8452                        step->bbNum, dspPtr(step));
8453             }
8454 #endif
8455
8456             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8457             assert(finallyNesting <= compHndBBtabCount);
8458
8459             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8460             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8461             endLFin               = gtNewStmt(endLFin);
8462             endCatches            = NULL;
8463
8464             encFinallies++;
8465
8466             invalidatePreds = true;
8467         }
8468     }
8469
8470     /* Append any remaining endCatches, if any */
8471
8472     assert(!encFinallies == !endLFin);
8473
8474     if (encFinallies == 0)
8475     {
8476         assert(step == DUMMY_INIT(NULL));
8477         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8478
8479         if (endCatches)
8480             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8481
8482 #ifdef DEBUG
8483         if (verbose)
8484         {
8485             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8486                    "block BB%02u [%08p]\n",
8487                    block->bbNum, dspPtr(block));
8488         }
8489 #endif
8490     }
8491     else
8492     {
8493         // If leaveTarget is the start of another try block, we want to make sure that
8494         // we do not insert finalStep into that try block. Hence, we find the enclosing
8495         // try block.
8496         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8497
8498         // Insert a new BB either in the try region indicated by tryIndex or
8499         // the handler region indicated by leaveTarget->bbHndIndex,
8500         // depending on which is the inner region.
8501         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8502         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8503         step->bbJumpDest = finalStep;
8504
8505         /* The new block will inherit this block's weight */
8506         finalStep->setBBWeight(block->bbWeight);
8507         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8508
8509 #ifdef DEBUG
8510         if (verbose)
8511         {
8512             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8513                    encFinallies, finalStep->bbNum, dspPtr(finalStep));
8514         }
8515 #endif
8516
8517         GenTreePtr lastStmt;
8518
8519         if (endCatches)
8520         {
8521             lastStmt         = gtNewStmt(endCatches);
8522             endLFin->gtNext  = lastStmt;
8523             lastStmt->gtPrev = endLFin;
8524         }
8525         else
8526         {
8527             lastStmt = endLFin;
8528         }
8529
8530         impEndTreeList(finalStep, endLFin, lastStmt);
8531
8532         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8533
8534         // Queue up the jump target for importing
8535
8536         impImportBlockPending(leaveTarget);
8537
8538         invalidatePreds = true;
8539     }
8540
8541     if (invalidatePreds && fgComputePredsDone)
8542     {
8543         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8544         fgRemovePreds();
8545     }
8546
8547 #ifdef DEBUG
8548     fgVerifyHandlerTab();
8549
8550     if (verbose)
8551     {
8552         printf("\nAfter import CEE_LEAVE:\n");
8553         fgDispBasicBlocks();
8554         fgDispHandlerTab();
8555     }
8556 #endif // DEBUG
8557 }
8558
8559 #else // FEATURE_EH_FUNCLETS
8560
8561 void Compiler::impImportLeave(BasicBlock* block)
8562 {
8563 #ifdef DEBUG
8564     if (verbose)
8565     {
8566         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8567         fgDispBasicBlocks();
8568         fgDispHandlerTab();
8569     }
8570 #endif // DEBUG
8571
8572     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8573     unsigned    blkAddr         = block->bbCodeOffs;
8574     BasicBlock* leaveTarget     = block->bbJumpDest;
8575     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8576
8577     // LEAVE clears the stack: spill side effects and set the stack depth to 0
8578
8579     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8580     verCurrentState.esStackDepth = 0;
8581
8582     assert(block->bbJumpKind == BBJ_LEAVE);
8583     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8584
8585     BasicBlock* step = nullptr;
8586
8587     enum StepType
8588     {
8589         // No step type; step == NULL.
8590         ST_None,
8591
8592         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8593         // That is, is step->bbJumpDest where a finally will return to?
8594         ST_FinallyReturn,
8595
8596         // The step block is a catch return.
8597         ST_Catch,
8598
8599         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8600         ST_Try
8601     };
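
    // For illustration of how stepType evolves below: leaving a catch yields ST_Catch,
    // leaving a finally-protected try yields ST_FinallyReturn, and the step block created
    // inside a catch-protected try (the ThreadAbortException case) yields ST_Try.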
8602     StepType stepType = ST_None;
8603
8604     unsigned  XTnum;
8605     EHblkDsc* HBtab;
8606
8607     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8608     {
8609         // Grab the handler offsets
8610
8611         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8612         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8613         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8614         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8615
8616         /* Is this a catch-handler we are CEE_LEAVEing out of?
8617          */
8618
8619         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8620         {
8621             // Can't CEE_LEAVE out of a finally/fault handler
8622             if (HBtab->HasFinallyOrFaultHandler())
8623             {
8624                 BADCODE("leave out of fault/finally block");
8625             }
8626
8627             /* We are jumping out of a catch */
8628
8629             if (step == nullptr)
8630             {
8631                 step             = block;
8632                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8633                 stepType         = ST_Catch;
8634
8635 #ifdef DEBUG
8636                 if (verbose)
8637                 {
8638                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8639                            "block\n",
8640                            XTnum, step->bbNum);
8641                 }
8642 #endif
8643             }
8644             else
8645             {
8646                 BasicBlock* exitBlock;
8647
8648                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8649                  * scope */
8650                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8651
8652                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8653                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8654                                               // exit) returns to this block
8655                 step->bbJumpDest->bbRefs++;
8656
8657 #if defined(_TARGET_ARM_)
8658                 if (stepType == ST_FinallyReturn)
8659                 {
8660                     assert(step->bbJumpKind == BBJ_ALWAYS);
8661                     // Mark the target of a finally return
8662                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8663                 }
8664 #endif // defined(_TARGET_ARM_)
8665
8666                 /* The new block will inherit this block's weight */
8667                 exitBlock->setBBWeight(block->bbWeight);
8668                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8669
8670                 /* This exit block is the new step */
8671                 step     = exitBlock;
8672                 stepType = ST_Catch;
8673
8674                 invalidatePreds = true;
8675
8676 #ifdef DEBUG
8677                 if (verbose)
8678                 {
8679                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8680                            exitBlock->bbNum);
8681                 }
8682 #endif
8683             }
8684         }
8685         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8686                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8687         {
8688             /* We are jumping out of a finally-protected try */
8689
8690             BasicBlock* callBlock;
8691
8692             if (step == nullptr)
8693             {
8694 #if FEATURE_EH_CALLFINALLY_THUNKS
8695
8696                 // Put the call to the finally in the enclosing region.
8697                 unsigned callFinallyTryIndex =
8698                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8699                 unsigned callFinallyHndIndex =
8700                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8701                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8702
8703                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8704                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8705                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8706                 // next block, and flow optimizations will remove it.
8707                 block->bbJumpKind = BBJ_ALWAYS;
8708                 block->bbJumpDest = callBlock;
8709                 block->bbJumpDest->bbRefs++;
8710
8711                 /* The new block will inherit this block's weight */
8712                 callBlock->setBBWeight(block->bbWeight);
8713                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8714
8715 #ifdef DEBUG
8716                 if (verbose)
8717                 {
8718                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8719                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8720                            XTnum, block->bbNum, callBlock->bbNum);
8721                 }
8722 #endif
8723
8724 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8725
8726                 callBlock             = block;
8727                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8728
8729 #ifdef DEBUG
8730                 if (verbose)
8731                 {
8732                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8733                            "BBJ_CALLFINALLY block\n",
8734                            XTnum, callBlock->bbNum);
8735                 }
8736 #endif
8737
8738 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8739             }
8740             else
8741             {
8742                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8743                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8744                 // a 'finally'), or the step block is the return from a catch.
8745                 //
8746                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8747                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8748                 // automatically re-raise the exception, using the return address of the catch (that is, the target
8749                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8750                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8751                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8752                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8753                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8754                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8755                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8756                 // stack walks.)
8757
8758                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8759
8760 #if FEATURE_EH_CALLFINALLY_THUNKS
8761                 if (step->bbJumpKind == BBJ_EHCATCHRET)
8762                 {
8763                     // Need to create another step block in the 'try' region that will actually branch to the
8764                     // call-to-finally thunk.
8765                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8766                     step->bbJumpDest  = step2;
8767                     step->bbJumpDest->bbRefs++;
8768                     step2->setBBWeight(block->bbWeight);
8769                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8770
8771 #ifdef DEBUG
8772                     if (verbose)
8773                     {
8774                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8775                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8776                                XTnum, step->bbNum, step2->bbNum);
8777                     }
8778 #endif
8779
8780                     step = step2;
8781                     assert(stepType == ST_Catch); // Leave it as catch type for now.
8782                 }
8783 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8784
8785 #if FEATURE_EH_CALLFINALLY_THUNKS
8786                 unsigned callFinallyTryIndex =
8787                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8788                 unsigned callFinallyHndIndex =
8789                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8790 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
8791                 unsigned callFinallyTryIndex = XTnum + 1;
8792                 unsigned callFinallyHndIndex = 0; // don't care
8793 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8794
8795                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8796                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8797                                               // finally in the chain)
8798                 step->bbJumpDest->bbRefs++;
8799
8800 #if defined(_TARGET_ARM_)
8801                 if (stepType == ST_FinallyReturn)
8802                 {
8803                     assert(step->bbJumpKind == BBJ_ALWAYS);
8804                     // Mark the target of a finally return
8805                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8806                 }
8807 #endif // defined(_TARGET_ARM_)
8808
8809                 /* The new block will inherit this block's weight */
8810                 callBlock->setBBWeight(block->bbWeight);
8811                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8812
8813 #ifdef DEBUG
8814                 if (verbose)
8815                 {
8816                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8817                            "BB%02u\n",
8818                            XTnum, callBlock->bbNum);
8819                 }
8820 #endif
8821             }
8822
8823             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8824             stepType = ST_FinallyReturn;
8825
8826             /* The new block will inherit this block's weight */
8827             step->setBBWeight(block->bbWeight);
8828             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8829
8830 #ifdef DEBUG
8831             if (verbose)
8832             {
8833                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8834                        "block BB%02u\n",
8835                        XTnum, step->bbNum);
8836             }
8837 #endif
8838
8839             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8840
8841             invalidatePreds = true;
8842         }
8843         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8844                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8845         {
8846             // We are jumping out of a catch-protected try.
8847             //
8848             // If we are returning from a call to a finally, then we must have a step block within a try
8849             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8850             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8851             // and invoke the appropriate catch.
8852             //
8853             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8854             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8855             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8856             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8857             // address of the catch return as the new exception address. That is, the re-raised exception appears to
8858             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8859             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8860             // For example:
8861             //
8862             // try {
8863             //    try {
8864             //       // something here raises ThreadAbortException
8865             //       LEAVE LABEL_1; // no need to stop at LABEL_2
8866             //    } catch (Exception) {
8867             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8868             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8869             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8870             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8871             //       // need to do this transformation if the current EH block is a try/catch that catches
8872             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
8873             //       // information, so currently we do it for all catch types.
8874             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
8875             //    }
8876             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8877             // } catch (ThreadAbortException) {
8878             // }
8879             // LABEL_1:
8880             //
8881             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
8882             // compiler.
8883
8884             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8885             {
8886                 BasicBlock* catchStep;
8887
8888                 assert(step);
8889
8890                 if (stepType == ST_FinallyReturn)
8891                 {
8892                     assert(step->bbJumpKind == BBJ_ALWAYS);
8893                 }
8894                 else
8895                 {
8896                     assert(stepType == ST_Catch);
8897                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
8898                 }
8899
8900                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8901                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8902                 step->bbJumpDest = catchStep;
8903                 step->bbJumpDest->bbRefs++;
8904
8905 #if defined(_TARGET_ARM_)
8906                 if (stepType == ST_FinallyReturn)
8907                 {
8908                     // Mark the target of a finally return
8909                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8910                 }
8911 #endif // defined(_TARGET_ARM_)
8912
8913                 /* The new block will inherit this block's weight */
8914                 catchStep->setBBWeight(block->bbWeight);
8915                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8916
8917 #ifdef DEBUG
8918                 if (verbose)
8919                 {
8920                     if (stepType == ST_FinallyReturn)
8921                     {
8922                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8923                                "BBJ_ALWAYS block BB%02u\n",
8924                                XTnum, catchStep->bbNum);
8925                     }
8926                     else
8927                     {
8928                         assert(stepType == ST_Catch);
8929                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8930                                "BBJ_ALWAYS block BB%02u\n",
8931                                XTnum, catchStep->bbNum);
8932                     }
8933                 }
8934 #endif // DEBUG
8935
8936                 /* This block is the new step */
8937                 step     = catchStep;
8938                 stepType = ST_Try;
8939
8940                 invalidatePreds = true;
8941             }
8942         }
8943     }
8944
8945     if (step == nullptr)
8946     {
8947         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8948
8949 #ifdef DEBUG
8950         if (verbose)
8951         {
8952             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8953                    "block BB%02u to BBJ_ALWAYS\n",
8954                    block->bbNum);
8955         }
8956 #endif
8957     }
8958     else
8959     {
8960         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8961
8962 #if defined(_TARGET_ARM_)
8963         if (stepType == ST_FinallyReturn)
8964         {
8965             assert(step->bbJumpKind == BBJ_ALWAYS);
8966             // Mark the target of a finally return
8967             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8968         }
8969 #endif // defined(_TARGET_ARM_)
8970
8971 #ifdef DEBUG
8972         if (verbose)
8973         {
8974             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8975         }
8976 #endif
8977
8978         // Queue up the jump target for importing
8979
8980         impImportBlockPending(leaveTarget);
8981     }
8982
8983     if (invalidatePreds && fgComputePredsDone)
8984     {
8985         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8986         fgRemovePreds();
8987     }
8988
8989 #ifdef DEBUG
8990     fgVerifyHandlerTab();
8991
8992     if (verbose)
8993     {
8994         printf("\nAfter import CEE_LEAVE:\n");
8995         fgDispBasicBlocks();
8996         fgDispHandlerTab();
8997     }
8998 #endif // DEBUG
8999 }
9000
9001 #endif // FEATURE_EH_FUNCLETS
9002
9003 /*****************************************************************************/
9004 // This is called when reimporting a leave block. It resets the JumpKind,
9005 // JumpDest, and bbNext to the original values
9006
9007 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9008 {
9009 #if FEATURE_EH_FUNCLETS
9010     // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
9011     // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY.   Say for some reason we reimport B0,
9012     // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
9013     // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
9014     // only predecessor are also considered orphans and attempted to be deleted.
9015     //
9016     //  try  {
9017     //     ....
9018     //     try
9019     //     {
9020     //         ....
9021     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
9022     //     } finally { }
9023     //  } finally { }
9024     //  OUTSIDE:
9025     //
9026     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
9027     // where a finally would branch to (and such a block is marked as a finally target).  Block B1 branches to the step block.
9028     // Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed.  To
9029     // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
9030     // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
9031     // will be treated as pair and handled correctly.
9032     if (block->bbJumpKind == BBJ_CALLFINALLY)
9033     {
9034         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
9035         dupBlock->bbFlags    = block->bbFlags;
9036         dupBlock->bbJumpDest = block->bbJumpDest;
9037         dupBlock->copyEHRegion(block);
9038         dupBlock->bbCatchTyp = block->bbCatchTyp;
9039
9040         // Mark this block as
9041         //  a) not referenced by any other block to make sure that it gets deleted
9042         //  b) weight zero
9043     //  c) prevented from being imported
9044         //  d) as internal
9045         //  e) as rarely run
9046         dupBlock->bbRefs   = 0;
9047         dupBlock->bbWeight = 0;
9048         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
9049
9050         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
9051         // will be next to each other.
9052         fgInsertBBafter(block, dupBlock);
9053
9054 #ifdef DEBUG
9055         if (verbose)
9056         {
9057             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
9058         }
9059 #endif
9060     }
9061 #endif // FEATURE_EH_FUNCLETS
9062
9063     block->bbJumpKind = BBJ_LEAVE;
9064     fgInitBBLookup();
9065     block->bbJumpDest = fgLookupBB(jmpAddr);
9066
9067     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
9068     // the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
9069     // reason we don't want to remove the block at this point is that if we call
9070     // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
9071     // added and the linked list length will be different from fgBBcount.
9072 }
9073
9074 /*****************************************************************************/
9075 // Get the first non-prefix opcode. Used for verification of valid combinations
9076 // of prefixes and actual opcodes.
9077
9078 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9079 {
9080     while (codeAddr < codeEndp)
9081     {
9082         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9083         codeAddr += sizeof(__int8);
9084
9085         if (opcode == CEE_PREFIX1)
9086         {
9087             if (codeAddr >= codeEndp)
9088             {
9089                 break;
9090             }
9091             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9092             codeAddr += sizeof(__int8);
9093         }
9094
9095         switch (opcode)
9096         {
9097             case CEE_UNALIGNED:
9098             case CEE_VOLATILE:
9099             case CEE_TAILCALL:
9100             case CEE_CONSTRAINED:
9101             case CEE_READONLY:
9102                 break;
9103             default:
9104                 return opcode;
9105         }
9106
9107         codeAddr += opcodeSizes[opcode];
9108     }
9109
9110     return CEE_ILLEGAL;
9111 }
9112
9113 /*****************************************************************************/
9114 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
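//
// For illustration: IL such as "volatile. ldind.i4" or "unaligned. 4 cpblk" passes this check,
// while something like "volatile. add" hits the BADCODE path below; "volatile." on
// ldsfld/stsfld is accepted only when volatilePrefix is true.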
9115
9116 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9117 {
9118     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9119
9120     if (!(
9121             // The opcodes of all ldind and stind instructions happen to be contiguous, except stind.i.
9122             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9123             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9124             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9125             // volatile. prefix is allowed with the ldsfld and stsfld
9126             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9127     {
9128         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9129     }
9130 }
9131
9132 /*****************************************************************************/
9133
9134 #ifdef DEBUG
9135
9136 #undef RETURN // undef contracts RETURN macro
9137
9138 enum controlFlow_t
9139 {
9140     NEXT,
9141     CALL,
9142     RETURN,
9143     THROW,
9144     BRANCH,
9145     COND_BRANCH,
9146     BREAK,
9147     PHI,
9148     META,
9149 };
9150
9151 const static controlFlow_t controlFlow[] = {
9152 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9153 #include "opcode.def"
9154 #undef OPDEF
9155 };
9156
9157 #endif // DEBUG
9158
9159 /*****************************************************************************
9160  *  Determine the result type of an arithmetic operation
9161  *  On 64-bit targets, inserts upcasts when native int is mixed with int32
9162  */
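//
// For illustration, the cases handled below give, roughly:
//   byref - byref                     => TYP_I_IMPL (native int)
//   [native] int - byref              => TYP_I_IMPL
//   byref - [native] int              => TYP_BYREF
//   byref + [native] int (either way) => TYP_BYREF
//   int mixed with native int (64-bit targets) => TYP_I_IMPL, with an explicit upcast inserted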
9163 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9164 {
9165     var_types  type = TYP_UNDEF;
9166     GenTreePtr op1 = *pOp1, op2 = *pOp2;
9167
9168     // Arithmetic operations are generally only allowed with
9169     // primitive types, but certain operations are allowed
9170     // with byrefs
9171
9172     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9173     {
9174         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9175         {
9176             // byref1-byref2 => gives a native int
9177             type = TYP_I_IMPL;
9178         }
9179         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9180         {
9181             // [native] int - byref => gives a native int
9182
9183             //
9184             // The reason is that it is possible, in managed C++,
9185             // to have a tree like this:
9186             //
9187             //              -
9188             //             / \
9189             //            /   \
9190             //           /     \
9191             //          /       \
9192             // const(h) int     addr byref
9193             //
9194             // <BUGNUM> VSW 318822 </BUGNUM>
9195             //
9196             // So here we decide to make the resulting type to be a native int.
9197             CLANG_FORMAT_COMMENT_ANCHOR;
9198
9199 #ifdef _TARGET_64BIT_
9200             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9201             {
9202                 // insert an explicit upcast
9203                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9204             }
9205 #endif // _TARGET_64BIT_
9206
9207             type = TYP_I_IMPL;
9208         }
9209         else
9210         {
9211             // byref - [native] int => gives a byref
9212             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9213
9214 #ifdef _TARGET_64BIT_
9215             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9216             {
9217                 // insert an explicit upcast
9218                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9219             }
9220 #endif // _TARGET_64BIT_
9221
9222             type = TYP_BYREF;
9223         }
9224     }
9225     else if ((oper == GT_ADD) &&
9226              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9227     {
9228         // byref + [native] int => gives a byref
9229         // (or)
9230         // [native] int + byref => gives a byref
9231
9232         // only one can be a byref : byref op byref not allowed
9233         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9234         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9235
9236 #ifdef _TARGET_64BIT_
9237         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9238         {
9239             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9240             {
9241                 // insert an explicit upcast
9242                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9243             }
9244         }
9245         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9246         {
9247             // insert an explicit upcast
9248             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9249         }
9250 #endif // _TARGET_64BIT_
9251
9252         type = TYP_BYREF;
9253     }
9254 #ifdef _TARGET_64BIT_
9255     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9256     {
9257         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9258
9259         // int + long => gives long
9260         // long + int => gives long
9261         // we get this because in the IL the long isn't Int64, it's just IntPtr
9262
9263         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9264         {
9265             // insert an explicit upcast
9266             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9267         }
9268         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9269         {
9270             // insert an explicit upcast
9271             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9272         }
9273
9274         type = TYP_I_IMPL;
9275     }
9276 #else  // 32-bit TARGET
9277     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9278     {
9279         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9280
9281         // int + long => gives long
9282         // long + int => gives long
9283
9284         type = TYP_LONG;
9285     }
9286 #endif // _TARGET_64BIT_
9287     else
9288     {
9289         // int + int => gives an int
9290         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9291
9292         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9293                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9294
9295         type = genActualType(op1->gtType);
9296
9297 #if FEATURE_X87_DOUBLES
9298
9299         // For x87, since we only have 1 size of registers, prefer double
9300         // For everybody else, be more precise
9301         if (type == TYP_FLOAT)
9302             type = TYP_DOUBLE;
9303
9304 #else // !FEATURE_X87_DOUBLES
9305
9306         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9307         // Otherwise, turn floats into doubles
9308         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9309         {
9310             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9311             type = TYP_DOUBLE;
9312         }
9313
9314 #endif // FEATURE_X87_DOUBLES
9315     }
9316
9317 #if FEATURE_X87_DOUBLES
9318     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9319 #else  // FEATURE_X87_DOUBLES
9320     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9321 #endif // FEATURE_X87_DOUBLES
9322
9323     return type;
9324 }
9325
9326 /*****************************************************************************
9327  * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9328  *
9329  * typeRef contains the token, op1 to contain the value being cast,
9330  * and op2 to contain code that creates the type handle corresponding to typeRef
9331  * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
9332  */
9333 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr              op1,
9334                                                 GenTreePtr              op2,
9335                                                 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9336                                                 bool                    isCastClass)
9337 {
9338     bool expandInline;
9339
9340     assert(op1->TypeGet() == TYP_REF);
9341
9342     CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9343
9344     if (isCastClass)
9345     {
9346         // We only want to expand inline the normal CHKCASTCLASS helper;
9347         expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9348     }
9349     else
9350     {
9351         if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9352         {
9353             // Get the Class Handle and class attributes for the type we are casting to
9354             //
9355             DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9356
9357             //
9358             // If the class handle is marked as final we can also expand the IsInst check inline
9359             //
9360             expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9361
9362             //
9363             // But don't expand these two cases inline
9364             //
9365             if (flags & CORINFO_FLG_MARSHAL_BYREF)
9366             {
9367                 expandInline = false;
9368             }
9369             else if (flags & CORINFO_FLG_CONTEXTFUL)
9370             {
9371                 expandInline = false;
9372             }
9373         }
9374         else
9375         {
9376             //
9377             // We can't expand inline any other helpers
9378             //
9379             expandInline = false;
9380         }
9381     }
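    // To summarize the selection above: only CORINFO_HELP_CHKCASTCLASS (for castclass) and
    // CORINFO_HELP_ISINSTANCEOFCLASS on a final, non-MarshalByRef, non-Contextful class
    // (for isinst) are candidates for inline expansion; any other helper is simply called.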
9382
9383     if (expandInline)
9384     {
9385         if (compCurBB->isRunRarely())
9386         {
9387             expandInline = false; // not worth the code expansion in a rarely run block
9388         }
9389
9390         if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9391         {
9392             expandInline = false; // not worth creating an untracked local variable
9393         }
9394     }
9395
9396     if (!expandInline)
9397     {
9398         // If we CSE this class handle we prevent assertionProp from making SubType assertions,
9399         // so instead we force the CSE logic not to consider CSE-ing this class handle.
9400         //
9401         op2->gtFlags |= GTF_DONT_CSE;
9402
9403         return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9404     }
9405
9406     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9407
9408     GenTreePtr temp;
9409     GenTreePtr condMT;
9410     //
9411     // expand the methodtable match:
9412     //
9413     //  condMT ==>   GT_NE
9414     //               /    \
9415     //           GT_IND   op2 (typically CNS_INT)
9416     //              |
9417     //           op1Copy
9418     //
9419
9420     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9421     //
9422     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9423     //
9424     // op1 is now known to be a non-complex tree
9425     // thus we can use gtClone(op1) from now on
9426     //
9427
9428     GenTreePtr op2Var = op2;
9429     if (isCastClass)
9430     {
9431         op2Var                                                  = fgInsertCommaFormTemp(&op2);
9432         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9433     }
9434     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9435     temp->gtFlags |= GTF_EXCEPT;
9436     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
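    // Note: the GT_IND above reads the first pointer-sized field of the object, which holds
    // its method table pointer; comparing that against op2 (code producing the target type's
    // handle, typically a constant) is the fast-path exact-type check from the diagram above.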
9437
9438     GenTreePtr condNull;
9439     //
9440     // expand the null check:
9441     //
9442     //  condNull ==>   GT_EQ
9443     //                 /    \
9444     //             op1Copy CNS_INT
9445     //                      null
9446     //
9447     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9448
9449     //
9450     // expand the true and false trees for the condMT
9451     //
9452     GenTreePtr condFalse = gtClone(op1);
9453     GenTreePtr condTrue;
9454     if (isCastClass)
9455     {
9456         //
9457         // use the special helper that skips the cases checked by our inlined cast
9458         //
9459         helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9460
9461         condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9462     }
9463     else
9464     {
9465         condTrue = gtNewIconNode(0, TYP_REF);
9466     }
9467
9468 #define USE_QMARK_TREES
9469
9470 #ifdef USE_QMARK_TREES
9471     GenTreePtr qmarkMT;
9472     //
9473     // Generate first QMARK - COLON tree
9474     //
9475     //  qmarkMT ==>   GT_QMARK
9476     //                 /     \
9477     //            condMT   GT_COLON
9478     //                      /     \
9479     //                condFalse  condTrue
9480     //
9481     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9482     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9483     condMT->gtFlags |= GTF_RELOP_QMARK;
9484
9485     GenTreePtr qmarkNull;
9486     //
9487     // Generate second QMARK - COLON tree
9488     //
9489     //  qmarkNull ==>  GT_QMARK
9490     //                 /     \
9491     //           condNull  GT_COLON
9492     //                      /     \
9493     //                qmarkMT   op1Copy
9494     //
9495     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9496     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9497     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9498     condNull->gtFlags |= GTF_RELOP_QMARK;
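    // Taken together, the two QMARKs behave roughly like the following pseudocode
    // (a sketch of the semantics, not literal JIT IR):
    //
    //   result = (op1 == null) ? op1                                  // null input stays null
    //            : (methodTableOf(op1) != op2) ? condTrue             // helper call (castclass) or null (isinst)
    //                                          : op1;                 // fast path: exact type match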
9499
9500     // Make QMark node a top level node by spilling it.
9501     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9502     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9503
9504     // TODO: Is it possible op1 has a better type?
9505     lvaSetClass(tmp, pResolvedToken->hClass);
9506     return gtNewLclvNode(tmp, TYP_REF);
9507 #endif
9508 }
9509
9510 #ifndef DEBUG
9511 #define assertImp(cond) ((void)0)
9512 #else
9513 #define assertImp(cond)                                                                                                \
9514     do                                                                                                                 \
9515     {                                                                                                                  \
9516         if (!(cond))                                                                                                   \
9517         {                                                                                                              \
9518             const int cchAssertImpBuf = 600;                                                                           \
9519             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
9520             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
9521                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
9522                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
9523                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
9524             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
9525         }                                                                                                              \
9526     } while (0)
9527 #endif // DEBUG
9528
9529 #ifdef _PREFAST_
9530 #pragma warning(push)
9531 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9532 #endif
9533 /*****************************************************************************
9534  *  Import the instr for the given basic block
9535  */
9536 void Compiler::impImportBlockCode(BasicBlock* block)
9537 {
9538 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9539
9540 #ifdef DEBUG
9541
9542     if (verbose)
9543     {
9544         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9545     }
9546 #endif
9547
9548     unsigned  nxtStmtIndex = impInitBlockLineInfo();
9549     IL_OFFSET nxtStmtOffs;
9550
9551     GenTreePtr                   arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9552     bool                         expandInline;
9553     CorInfoHelpFunc              helper;
9554     CorInfoIsAccessAllowedResult accessAllowedResult;
9555     CORINFO_HELPER_DESC          calloutHelper;
9556     const BYTE*                  lastLoadToken = nullptr;
9557
9558     // reject cyclic constraints
9559     if (tiVerificationNeeded)
9560     {
9561         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9562         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9563     }
9564
9565     /* Get the tree list started */
9566
9567     impBeginTreeList();
9568
9569     /* Walk the opcodes that comprise the basic block */
9570
9571     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9572     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9573
9574     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
9575     IL_OFFSET lastSpillOffs = opcodeOffs;
9576
9577     signed jmpDist;
9578
9579     /* remember the start of the delegate creation sequence (used for verification) */
9580     const BYTE* delegateCreateStart = nullptr;
9581
9582     int  prefixFlags = 0;
9583     bool explicitTailCall, constraintCall, readonlyCall;
9584
9585     typeInfo tiRetVal;
9586
9587     unsigned numArgs = info.compArgsCount;
9588
9589     /* Now process all the opcodes in the block */
9590
9591     var_types callTyp    = TYP_COUNT;
9592     OPCODE    prevOpcode = CEE_ILLEGAL;
9593
9594     if (block->bbCatchTyp)
9595     {
9596         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9597         {
9598             impCurStmtOffsSet(block->bbCodeOffs);
9599         }
9600
9601         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9602         // to a temp. This is a trade-off for code simplicity.
9603         impSpillSpecialSideEff();
9604     }
9605
9606     while (codeAddr < codeEndp)
9607     {
9608         bool                   usingReadyToRunHelper = false;
9609         CORINFO_RESOLVED_TOKEN resolvedToken;
9610         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9611         CORINFO_CALL_INFO      callInfo;
9612         CORINFO_FIELD_INFO     fieldInfo;
9613
9614         tiRetVal = typeInfo(); // Default type info
9615
9616         //---------------------------------------------------------------------
9617
9618         /* We need to restrict the max tree depth as many of the Compiler
9619            functions are recursive. We do this by spilling the stack */
9620
9621         if (verCurrentState.esStackDepth)
9622         {
9623             /* Has it been a while since the stack was last empty or spilled (which
9624                would guarantee that the tree depth isn't accumulating)? */
9625
9626             if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
9627             {
9628                 impSpillStackEnsure();
9629                 lastSpillOffs = opcodeOffs;
9630             }
9631         }
9632         else
9633         {
9634             lastSpillOffs   = opcodeOffs;
9635             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9636         }
9637
9638         /* Compute the current instr offset */
9639
9640         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9641
9642 #ifndef DEBUG
9643         if (opts.compDbgInfo)
9644 #endif
9645         {
9646             if (!compIsForInlining())
9647             {
9648                 nxtStmtOffs =
9649                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9650
9651                 /* Have we reached the next stmt boundary ? */
9652
9653                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9654                 {
9655                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9656
9657                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9658                     {
9659                         /* We need to provide accurate IP-mapping at this point.
9660                            So spill anything on the stack so that it will form
9661                            gtStmts with the correct stmt offset noted */
9662
9663                         impSpillStackEnsure(true);
9664                     }
9665
9666                     // Has impCurStmtOffs been reported in any tree?
9667
9668                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9669                     {
9670                         GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9671                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9672
9673                         assert(impCurStmtOffs == BAD_IL_OFFSET);
9674                     }
9675
9676                     if (impCurStmtOffs == BAD_IL_OFFSET)
9677                     {
9678                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9679                            If opcodeOffs has gone past nxtStmtIndex, catch up */
9680
9681                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9682                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9683                         {
9684                             nxtStmtIndex++;
9685                         }
9686
9687                         /* Go to the new stmt */
9688
9689                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9690
9691                         /* Update the stmt boundary index */
9692
9693                         nxtStmtIndex++;
9694                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9695
9696                         /* Are there any more line# entries after this one? */
9697
9698                         if (nxtStmtIndex < info.compStmtOffsetsCount)
9699                         {
9700                             /* Remember where the next line# starts */
9701
9702                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9703                         }
9704                         else
9705                         {
9706                             /* No more line# entries */
9707
9708                             nxtStmtOffs = BAD_IL_OFFSET;
9709                         }
9710                     }
9711                 }
9712                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9713                          (verCurrentState.esStackDepth == 0))
9714                 {
9715                     /* At stack-empty locations, we have already added the tree to
9716                        the stmt list with the last offset. We just need to update
9717                        impCurStmtOffs
9718                      */
9719
9720                     impCurStmtOffsSet(opcodeOffs);
9721                 }
9722                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9723                          impOpcodeIsCallSiteBoundary(prevOpcode))
9724                 {
9725                     /* Make sure we have a type cached */
9726                     assert(callTyp != TYP_COUNT);
9727
9728                     if (callTyp == TYP_VOID)
9729                     {
9730                         impCurStmtOffsSet(opcodeOffs);
9731                     }
9732                     else if (opts.compDbgCode)
9733                     {
9734                         impSpillStackEnsure(true);
9735                         impCurStmtOffsSet(opcodeOffs);
9736                     }
9737                 }
9738                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9739                 {
9740                     if (opts.compDbgCode)
9741                     {
9742                         impSpillStackEnsure(true);
9743                     }
9744
9745                     impCurStmtOffsSet(opcodeOffs);
9746                 }
9747
9748                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9749                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9750             }
9751         }
9752
9753         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
9754         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9755         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9756
9757         var_types       lclTyp, ovflType = TYP_UNKNOWN;
9758         GenTreePtr      op1           = DUMMY_INIT(NULL);
9759         GenTreePtr      op2           = DUMMY_INIT(NULL);
9760         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
9761         GenTreePtr      newObjThisPtr = DUMMY_INIT(NULL);
9762         bool            uns           = DUMMY_INIT(false);
9763         bool            isLocal       = false;
9764
9765         /* Get the next opcode and the size of its parameters */
9766
9767         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9768         codeAddr += sizeof(__int8);
9769
9770 #ifdef DEBUG
9771         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9772         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9773 #endif
9774
9775     DECODE_OPCODE:
9776
9777         // Return if any previous code has caused inline to fail.
9778         if (compDonotInline())
9779         {
9780             return;
9781         }
9782
9783         /* Get the size of additional parameters */
9784
9785         signed int sz = opcodeSizes[opcode];
9786
9787 #ifdef DEBUG
9788         clsHnd  = NO_CLASS_HANDLE;
9789         lclTyp  = TYP_COUNT;
9790         callTyp = TYP_COUNT;
9791
9792         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9793         impCurOpcName = opcodeNames[opcode];
9794
9795         if (verbose && (opcode != CEE_PREFIX1))
9796         {
9797             printf("%s", impCurOpcName);
9798         }
9799
9800         /* Use assertImp() to display the opcode */
9801
9802         op1 = op2 = nullptr;
9803 #endif
9804
9805         /* See what kind of opcode we have, then handle it accordingly */
9806
9807         unsigned mflags   = 0;
9808         unsigned clsFlags = 0;
9809
9810         switch (opcode)
9811         {
9812             unsigned  lclNum;
9813             var_types type;
9814
9815             GenTreePtr op3;
9816             genTreeOps oper;
9817             unsigned   size;
9818
9819             int val;
9820
9821             CORINFO_SIG_INFO     sig;
9822             unsigned             flags;
9823             IL_OFFSET            jmpAddr;
9824             bool                 ovfl, unordered, callNode;
9825             bool                 ldstruct;
9826             CORINFO_CLASS_HANDLE tokenType;
9827
9828             union {
9829                 int     intVal;
9830                 float   fltVal;
9831                 __int64 lngVal;
9832                 double  dblVal;
9833             } cval;
9834
9835             case CEE_PREFIX1:
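                // Two-byte opcodes are encoded as the 0xFE (CEE_PREFIX1) byte followed by a
                // second byte; the OPCODE enumeration maps them to 256 + that second byte.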
9836                 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9837                 codeAddr += sizeof(__int8);
9838                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9839                 goto DECODE_OPCODE;
9840
9841             SPILL_APPEND:
9842
9843                 // We need to call impSpillLclRefs() for a struct type lclVar.
9844                 // This is done for non-block assignments in the handling of stloc.
9845                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9846                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9847                 {
9848                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9849                 }
9850
9851                 /* Append 'op1' to the list of statements */
9852                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9853                 goto DONE_APPEND;
9854
9855             APPEND:
9856
9857                 /* Append 'op1' to the list of statements */
9858
9859                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9860                 goto DONE_APPEND;
9861
9862             DONE_APPEND:
9863
9864 #ifdef DEBUG
9865                 // Remember at which BC offset the tree was finished
9866                 impNoteLastILoffs();
9867 #endif
9868                 break;
9869
9870             case CEE_LDNULL:
9871                 impPushNullObjRefOnStack();
9872                 break;
9873
9874             case CEE_LDC_I4_M1:
9875             case CEE_LDC_I4_0:
9876             case CEE_LDC_I4_1:
9877             case CEE_LDC_I4_2:
9878             case CEE_LDC_I4_3:
9879             case CEE_LDC_I4_4:
9880             case CEE_LDC_I4_5:
9881             case CEE_LDC_I4_6:
9882             case CEE_LDC_I4_7:
9883             case CEE_LDC_I4_8:
9884                 cval.intVal = (opcode - CEE_LDC_I4_0);
9885                 assert(-1 <= cval.intVal && cval.intVal <= 8);
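                // (CEE_LDC_I4_M1 immediately precedes CEE_LDC_I4_0 in the opcode numbering,
                // so the subtraction above yields -1 for it, which the assert allows.)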
9886                 goto PUSH_I4CON;
9887
9888             case CEE_LDC_I4_S:
9889                 cval.intVal = getI1LittleEndian(codeAddr);
9890                 goto PUSH_I4CON;
9891             case CEE_LDC_I4:
9892                 cval.intVal = getI4LittleEndian(codeAddr);
9893                 goto PUSH_I4CON;
9894             PUSH_I4CON:
9895                 JITDUMP(" %d", cval.intVal);
9896                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9897                 break;
9898
9899             case CEE_LDC_I8:
9900                 cval.lngVal = getI8LittleEndian(codeAddr);
9901                 JITDUMP(" 0x%016llx", cval.lngVal);
9902                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9903                 break;
9904
9905             case CEE_LDC_R8:
9906                 cval.dblVal = getR8LittleEndian(codeAddr);
9907                 JITDUMP(" %#.17g", cval.dblVal);
9908                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9909                 break;
9910
9911             case CEE_LDC_R4:
9912                 cval.dblVal = getR4LittleEndian(codeAddr);
9913                 JITDUMP(" %#.17g", cval.dblVal);
9914                 {
9915                     GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9916 #if !FEATURE_X87_DOUBLES
9917                     // The x87 stack doesn't differentiate between float and double, so there
9918                     // R4 is treated as R8; everybody else keeps the float/double distinction
9919                     cnsOp->gtType = TYP_FLOAT;
9920 #endif // FEATURE_X87_DOUBLES
9921                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9922                 }
9923                 break;
9924
9925             case CEE_LDSTR:
9926
9927                 if (compIsForInlining())
9928                 {
9929                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9930                     {
9931                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9932                         return;
9933                     }
9934                 }
9935
9936                 val = getU4LittleEndian(codeAddr);
9937                 JITDUMP(" %08X", val);
9938                 if (tiVerificationNeeded)
9939                 {
9940                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9941                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
9942                 }
9943                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9944
9945                 break;
9946
9947             case CEE_LDARG:
9948                 lclNum = getU2LittleEndian(codeAddr);
9949                 JITDUMP(" %u", lclNum);
9950                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9951                 break;
9952
9953             case CEE_LDARG_S:
9954                 lclNum = getU1LittleEndian(codeAddr);
9955                 JITDUMP(" %u", lclNum);
9956                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9957                 break;
9958
9959             case CEE_LDARG_0:
9960             case CEE_LDARG_1:
9961             case CEE_LDARG_2:
9962             case CEE_LDARG_3:
9963                 lclNum = (opcode - CEE_LDARG_0);
9964                 assert(lclNum >= 0 && lclNum < 4);
9965                 impLoadArg(lclNum, opcodeOffs + sz + 1);
9966                 break;
9967
9968             case CEE_LDLOC:
9969                 lclNum = getU2LittleEndian(codeAddr);
9970                 JITDUMP(" %u", lclNum);
9971                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9972                 break;
9973
9974             case CEE_LDLOC_S:
9975                 lclNum = getU1LittleEndian(codeAddr);
9976                 JITDUMP(" %u", lclNum);
9977                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9978                 break;
9979
9980             case CEE_LDLOC_0:
9981             case CEE_LDLOC_1:
9982             case CEE_LDLOC_2:
9983             case CEE_LDLOC_3:
9984                 lclNum = (opcode - CEE_LDLOC_0);
9985                 assert(lclNum >= 0 && lclNum < 4);
9986                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9987                 break;
9988
9989             case CEE_STARG:
9990                 lclNum = getU2LittleEndian(codeAddr);
9991                 goto STARG;
9992
9993             case CEE_STARG_S:
9994                 lclNum = getU1LittleEndian(codeAddr);
9995             STARG:
9996                 JITDUMP(" %u", lclNum);
9997
9998                 if (tiVerificationNeeded)
9999                 {
10000                     Verify(lclNum < info.compILargsCount, "bad arg num");
10001                 }
10002
10003                 if (compIsForInlining())
10004                 {
10005                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10006                     noway_assert(op1->gtOper == GT_LCL_VAR);
10007                     lclNum = op1->AsLclVar()->gtLclNum;
10008
10009                     goto VAR_ST_VALID;
10010                 }
10011
10012                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10013                 assertImp(lclNum < numArgs);
10014
10015                 if (lclNum == info.compThisArg)
10016                 {
10017                     lclNum = lvaArg0Var;
10018                 }
10019
10020                 // We should have seen this arg write in the prescan
10021                 assert(lvaTable[lclNum].lvHasILStoreOp);
10022
10023                 if (tiVerificationNeeded)
10024                 {
10025                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
10026                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
10027                            "type mismatch");
10028
10029                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10030                     {
10031                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
10032                     }
10033                 }
10034
10035                 goto VAR_ST;
10036
10037             case CEE_STLOC:
10038                 lclNum  = getU2LittleEndian(codeAddr);
10039                 isLocal = true;
10040                 JITDUMP(" %u", lclNum);
10041                 goto LOC_ST;
10042
10043             case CEE_STLOC_S:
10044                 lclNum  = getU1LittleEndian(codeAddr);
10045                 isLocal = true;
10046                 JITDUMP(" %u", lclNum);
10047                 goto LOC_ST;
10048
10049             case CEE_STLOC_0:
10050             case CEE_STLOC_1:
10051             case CEE_STLOC_2:
10052             case CEE_STLOC_3:
10053                 isLocal = true;
10054                 lclNum  = (opcode - CEE_STLOC_0);
10055                 assert(lclNum >= 0 && lclNum < 4);
10056
10057             LOC_ST:
10058                 if (tiVerificationNeeded)
10059                 {
10060                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10061                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
10062                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
10063                            "type mismatch");
10064                 }
10065
10066                 if (compIsForInlining())
10067                 {
10068                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10069
10070                     /* Have we allocated a temp for this local? */
10071
10072                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
10073
10074                     goto _PopValue;
10075                 }
10076
10077                 lclNum += numArgs;
10078
10079             VAR_ST:
10080
10081                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10082                 {
10083                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10084                     BADCODE("Bad IL");
10085                 }
10086
10087             VAR_ST_VALID:
10088
10089                 /* if it is a struct assignment, make certain we don't overflow the buffer */
10090                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10091
10092                 if (lvaTable[lclNum].lvNormalizeOnLoad())
10093                 {
10094                     lclTyp = lvaGetRealType(lclNum);
10095                 }
10096                 else
10097                 {
10098                     lclTyp = lvaGetActualType(lclNum);
10099                 }
10100
10101             _PopValue:
10102                 /* Pop the value being assigned */
10103
10104                 {
10105                     StackEntry se = impPopStack();
10106                     clsHnd        = se.seTypeInfo.GetClassHandle();
10107                     op1           = se.val;
10108                     tiRetVal      = se.seTypeInfo;
10109                 }
10110
10111 #ifdef FEATURE_SIMD
10112                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10113                 {
10114                     assert(op1->TypeGet() == TYP_STRUCT);
10115                     op1->gtType = lclTyp;
10116                 }
10117 #endif // FEATURE_SIMD
10118
10119                 op1 = impImplicitIorI4Cast(op1, lclTyp);
10120
10121 #ifdef _TARGET_64BIT_
10122                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10123                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10124                 {
10125                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10126                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
10127                 }
10128 #endif // _TARGET_64BIT_
10129
10130                 // We had better assign it a value of the correct type
10131                 assertImp(
10132                     genActualType(lclTyp) == genActualType(op1->gtType) ||
10133                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
10134                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10135                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10136                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10137                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10138
10139                 /* If op1 is "&var" then its type is the transient "*" and it can
10140                    be used either as TYP_BYREF or TYP_I_IMPL */
10141
10142                 if (op1->IsVarAddr())
10143                 {
10144                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10145
10146                     /* When "&var" is created, we assume it is a byref. If it is
10147                        being assigned to a TYP_I_IMPL var, change the type to
10148                        prevent unnecessary GC info */
10149
10150                     if (genActualType(lclTyp) == TYP_I_IMPL)
10151                     {
10152                         op1->gtType = TYP_I_IMPL;
10153                     }
10154                 }
10155
10156                 // If this is a local and the local is a ref type, see
10157                 // if we can improve type information based on the
10158                 // value being assigned.
10159                 if (isLocal && (lclTyp == TYP_REF))
10160                 {
10161                     // We should have seen a stloc in our IL prescan.
10162                     assert(lvaTable[lclNum].lvHasILStoreOp);
10163
10164                     const bool isSingleILStoreLocal =
10165                         !lvaTable[lclNum].lvHasMultipleILStoreOp && !lvaTable[lclNum].lvHasLdAddrOp;
10166
10167                     // Conservative check that there is just one
10168                     // definition that reaches this store.
10169                     const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
10170
10171                     if (isSingleILStoreLocal && hasSingleReachingDef)
10172                     {
10173                         lvaUpdateClass(lclNum, op1, clsHnd);
10174                     }
10175                 }
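                // (Keeping the local's class handle up to date here can, for example, allow later
                // phases to devirtualize calls made through this local - a general motivation for
                // the update, not something guaranteed for this particular store.)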
10176
10177                 /* Filter out simple assignments to itself */
10178
10179                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10180                 {
10181                     if (opts.compDbgCode)
10182                     {
10183                         op1 = gtNewNothingNode();
10184                         goto SPILL_APPEND;
10185                     }
10186                     else
10187                     {
10188                         break;
10189                     }
10190                 }
10191
10192                 /* Create the assignment node */
10193
10194                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10195
10196                 /* If the local is aliased, we need to spill calls and
10197                    indirections from the stack. */
10198
10199                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10200                     verCurrentState.esStackDepth > 0)
10201                 {
10202                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10203                 }
10204
10205                 /* Spill any refs to the local from the stack */
10206
10207                 impSpillLclRefs(lclNum);
10208
10209 #if !FEATURE_X87_DOUBLES
10210                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10211                 // We insert a cast to the dest 'op2' type
10212                 //
10213                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10214                     varTypeIsFloating(op2->gtType))
10215                 {
10216                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10217                 }
10218 #endif // !FEATURE_X87_DOUBLES
10219
10220                 if (varTypeIsStruct(lclTyp))
10221                 {
10222                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10223                 }
10224                 else
10225                 {
10226                     // The code generator generates GC tracking information
10227                     // based on the RHS of the assignment.  Later the LHS (which
10228                     // is a BYREF) gets used and the emitter checks that that variable
10229                     // is being tracked.  It is not (since the RHS was an int and did
10230                     // not need tracking).  To keep this assert happy, we change the RHS
10231                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10232                     {
10233                         op1->gtType = TYP_BYREF;
10234                     }
10235                     op1 = gtNewAssignNode(op2, op1);
10236                 }
10237
10238                 goto SPILL_APPEND;
10239
10240             case CEE_LDLOCA:
10241                 lclNum = getU2LittleEndian(codeAddr);
10242                 goto LDLOCA;
10243
10244             case CEE_LDLOCA_S:
10245                 lclNum = getU1LittleEndian(codeAddr);
10246             LDLOCA:
10247                 JITDUMP(" %u", lclNum);
10248                 if (tiVerificationNeeded)
10249                 {
10250                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10251                     Verify(info.compInitMem, "initLocals not set");
10252                 }
10253
10254                 if (compIsForInlining())
10255                 {
10256                     // Get the local type
10257                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10258
10259                     /* Have we allocated a temp for this local? */
10260
10261                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10262
10263                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10264
10265                     goto _PUSH_ADRVAR;
10266                 }
10267
10268                 lclNum += numArgs;
10269                 assertImp(lclNum < info.compLocalsCount);
10270                 goto ADRVAR;
10271
10272             case CEE_LDARGA:
10273                 lclNum = getU2LittleEndian(codeAddr);
10274                 goto LDARGA;
10275
10276             case CEE_LDARGA_S:
10277                 lclNum = getU1LittleEndian(codeAddr);
10278             LDARGA:
10279                 JITDUMP(" %u", lclNum);
10280                 Verify(lclNum < info.compILargsCount, "bad arg num");
10281
10282                 if (compIsForInlining())
10283                 {
10284                     // In IL, LDARGA(_S) is used to load a byref managed pointer to a struct argument,
10285                     // followed by a ldfld to load the field.
10286
10287                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10288                     if (op1->gtOper != GT_LCL_VAR)
10289                     {
10290                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10291                         return;
10292                     }
10293
10294                     assert(op1->gtOper == GT_LCL_VAR);
10295
10296                     goto _PUSH_ADRVAR;
10297                 }
10298
10299                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10300                 assertImp(lclNum < numArgs);
10301
10302                 if (lclNum == info.compThisArg)
10303                 {
10304                     lclNum = lvaArg0Var;
10305                 }
10306
10307                 goto ADRVAR;
10308
10309             ADRVAR:
10310
10311                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10312
10313             _PUSH_ADRVAR:
10314                 assert(op1->gtOper == GT_LCL_VAR);
10315
10316                 /* Note that this is supposed to create the transient type "*"
10317                    which may be used as a TYP_I_IMPL. However we catch places
10318                    where it is used as a TYP_I_IMPL and change the node if needed.
10319                    Thus we are pessimistic and may report byrefs in the GC info
10320                    where it was not absolutely needed, but it is safer this way.
10321                  */
10322                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10323
10324                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10325                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10326
10327                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10328                 if (tiVerificationNeeded)
10329                 {
10330                     // Don't allow taking address of uninit this ptr.
10331                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10332                     {
10333                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10334                     }
10335
10336                     if (!tiRetVal.IsByRef())
10337                     {
10338                         tiRetVal.MakeByRef();
10339                     }
10340                     else
10341                     {
10342                         Verify(false, "byref to byref");
10343                     }
10344                 }
10345
10346                 impPushOnStack(op1, tiRetVal);
10347                 break;
10348
10349             case CEE_ARGLIST:
10350
10351                 if (!info.compIsVarArgs)
10352                 {
10353                     BADCODE("arglist in non-vararg method");
10354                 }
10355
10356                 if (tiVerificationNeeded)
10357                 {
10358                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10359                 }
10360                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10361
10362                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10363                    adjusted the arg count because this is like fetching the last param */
10364                 assertImp(0 < numArgs);
10365                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10366                 lclNum = lvaVarargsHandleArg;
10367                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10368                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10369                 impPushOnStack(op1, tiRetVal);
10370                 break;
10371
10372             case CEE_ENDFINALLY:
10373
10374                 if (compIsForInlining())
10375                 {
10376                     assert(!"Shouldn't have exception handlers in the inliner!");
10377                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10378                     return;
10379                 }
10380
10381                 if (verCurrentState.esStackDepth > 0)
10382                 {
10383                     impEvalSideEffects();
10384                 }
10385
10386                 if (info.compXcptnsCount == 0)
10387                 {
10388                     BADCODE("endfinally outside finally");
10389                 }
10390
10391                 assert(verCurrentState.esStackDepth == 0);
10392
10393                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10394                 goto APPEND;
10395
10396             case CEE_ENDFILTER:
10397
10398                 if (compIsForInlining())
10399                 {
10400                     assert(!"Shouldn't have exception handlers in the inliner!");
10401                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10402                     return;
10403                 }
10404
10405                 block->bbSetRunRarely(); // filters are rare
10406
10407                 if (info.compXcptnsCount == 0)
10408                 {
10409                     BADCODE("endfilter outside filter");
10410                 }
10411
10412                 if (tiVerificationNeeded)
10413                 {
10414                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10415                 }
10416
10417                 op1 = impPopStack().val;
10418                 assertImp(op1->gtType == TYP_INT);
10419                 if (!bbInFilterILRange(block))
10420                 {
10421                     BADCODE("EndFilter outside a filter handler");
10422                 }
10423
10424                 /* Mark current bb as end of filter */
10425
10426                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10427                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10428
10429                 /* Mark catch handler as successor */
10430
10431                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10432                 if (verCurrentState.esStackDepth != 0)
10433                 {
10434                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10435                                                 DEBUGARG(__LINE__));
10436                 }
10437                 goto APPEND;
10438
10439             case CEE_RET:
10440                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10441             RET:
10442                 if (!impReturnInstruction(block, prefixFlags, opcode))
10443                 {
10444                     return; // abort
10445                 }
10446                 else
10447                 {
10448                     break;
10449                 }
10450
10451             case CEE_JMP:
10452
10453                 assert(!compIsForInlining());
10454
10455                 if (tiVerificationNeeded)
10456                 {
10457                     Verify(false, "Invalid opcode: CEE_JMP");
10458                 }
10459
10460                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10461                 {
10462                     /* CEE_JMP does not make sense in some "protected" regions. */
10463
10464                     BADCODE("Jmp not allowed in protected region");
10465                 }
10466
10467                 if (verCurrentState.esStackDepth != 0)
10468                 {
10469                     BADCODE("Stack must be empty after CEE_JMPs");
10470                 }
10471
10472                 _impResolveToken(CORINFO_TOKENKIND_Method);
10473
10474                 JITDUMP(" %08X", resolvedToken.token);
10475
10476                 /* The signature of the target has to be identical to ours.
10477                    At least check that argCnt and returnType match */
10478
10479                 eeGetMethodSig(resolvedToken.hMethod, &sig);
10480                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10481                     sig.retType != info.compMethodInfo->args.retType ||
10482                     sig.callConv != info.compMethodInfo->args.callConv)
10483                 {
10484                     BADCODE("Incompatible target for CEE_JMPs");
10485                 }
10486
10487 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10488
10489                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10490
10491                 /* Mark the basic block as being a JUMP instead of RETURN */
10492
10493                 block->bbFlags |= BBF_HAS_JMP;
10494
10495                 /* Set this flag to make sure register arguments have a location assigned
10496                  * even if we don't use them inside the method */
10497
10498                 compJmpOpUsed = true;
10499
10500                 fgNoStructPromotion = true;
10501
10502                 goto APPEND;
10503
10504 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10505
10506                 // Import this just like a series of LDARGs + tail. + call + ret
10507
10508                 if (info.compIsVarArgs)
10509                 {
10510                     // For now we don't implement true tail calls, so this breaks varargs.
10511                     // So warn the user instead of generating bad code.
10512                     // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10513                     // implement true tail calls.
10514                     IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10515                 }
10516
10517                 // First load up the arguments (0 - N)
10518                 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10519                 {
10520                     impLoadArg(argNum, opcodeOffs + sz + 1);
10521                 }
10522
10523                 // Now generate the tail call
10524                 noway_assert(prefixFlags == 0);
10525                 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10526                 opcode      = CEE_CALL;
10527
10528                 eeGetCallInfo(&resolvedToken, NULL,
10529                               combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10530
10531                 // All calls and delegates need a security callout.
10532                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10533
10534                 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10535                                         opcodeOffs);
10536
10537                 // And finish with the ret
10538                 goto RET;
10539
10540 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
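            // (Summary of the two strategies above: on xarch/armarch targets CEE_JMP becomes a
            // GT_JMP node in the current block; elsewhere it is rewritten as a series of ldarg
            // instructions followed by an explicit tail call and a ret.)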
10541
10542             case CEE_LDELEMA:
10543                 assertImp(sz == sizeof(unsigned));
10544
10545                 _impResolveToken(CORINFO_TOKENKIND_Class);
10546
10547                 JITDUMP(" %08X", resolvedToken.token);
10548
10549                 ldelemClsHnd = resolvedToken.hClass;
10550
10551                 if (tiVerificationNeeded)
10552                 {
10553                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10554                     typeInfo tiIndex = impStackTop().seTypeInfo;
10555
10556                     // As per ECMA, the 'index' operand can be either int32 or native int.
10557                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10558
10559                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10560                     Verify(tiArray.IsNullObjRef() ||
10561                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10562                            "bad array");
10563
10564                     tiRetVal = arrayElemType;
10565                     tiRetVal.MakeByRef();
10566                     if (prefixFlags & PREFIX_READONLY)
10567                     {
10568                         tiRetVal.SetIsReadonlyByRef();
10569                     }
10570
10571                     // an array interior pointer is always in the heap
10572                     tiRetVal.SetIsPermanentHomeByRef();
10573                 }
10574
10575                 // If it's a value class array we just do a simple address-of
10576                 if (eeIsValueClass(ldelemClsHnd))
10577                 {
10578                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10579                     if (cit == CORINFO_TYPE_UNDEF)
10580                     {
10581                         lclTyp = TYP_STRUCT;
10582                     }
10583                     else
10584                     {
10585                         lclTyp = JITtype2varType(cit);
10586                     }
10587                     goto ARR_LD_POST_VERIFY;
10588                 }
10589
10590                 // Similarly, if it's a readonly access, we can do a simple address-of
10591                 // without doing a runtime type-check
10592                 if (prefixFlags & PREFIX_READONLY)
10593                 {
10594                     lclTyp = TYP_REF;
10595                     goto ARR_LD_POST_VERIFY;
10596                 }
10597
10598                 // Otherwise we need the full helper function with run-time type check
10599                 op1 = impTokenToHandle(&resolvedToken);
10600                 if (op1 == nullptr)
10601                 { // compDonotInline()
10602                     return;
10603                 }
10604
10605                 args = gtNewArgList(op1);                      // Type
10606                 args = gtNewListNode(impPopStack().val, args); // index
10607                 args = gtNewListNode(impPopStack().val, args); // array
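                // (gtNewListNode prepends, so after the two pops the argument list reads
                // array, index, type handle - the stack operands first, the type token last.)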
10608                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10609
10610                 impPushOnStack(op1, tiRetVal);
10611                 break;
10612
10613             // ldelem for reference and value types
10614             case CEE_LDELEM:
10615                 assertImp(sz == sizeof(unsigned));
10616
10617                 _impResolveToken(CORINFO_TOKENKIND_Class);
10618
10619                 JITDUMP(" %08X", resolvedToken.token);
10620
10621                 ldelemClsHnd = resolvedToken.hClass;
10622
10623                 if (tiVerificationNeeded)
10624                 {
10625                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10626                     typeInfo tiIndex = impStackTop().seTypeInfo;
10627
10628                     // As per ECMA, the 'index' operand can be either int32 or native int.
10629                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10630                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10631
10632                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10633                            "type of array incompatible with type operand");
10634                     tiRetVal.NormaliseForStack();
10635                 }
10636
10637                 // If it's a reference type or generic variable type
10638                 // then just generate code as though it's a ldelem.ref instruction
10639                 if (!eeIsValueClass(ldelemClsHnd))
10640                 {
10641                     lclTyp = TYP_REF;
10642                     opcode = CEE_LDELEM_REF;
10643                 }
10644                 else
10645                 {
10646                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10647                     lclTyp             = JITtype2varType(jitTyp);
10648                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10649                     tiRetVal.NormaliseForStack();
10650                 }
10651                 goto ARR_LD_POST_VERIFY;
10652
10653             case CEE_LDELEM_I1:
10654                 lclTyp = TYP_BYTE;
10655                 goto ARR_LD;
10656             case CEE_LDELEM_I2:
10657                 lclTyp = TYP_SHORT;
10658                 goto ARR_LD;
10659             case CEE_LDELEM_I:
10660                 lclTyp = TYP_I_IMPL;
10661                 goto ARR_LD;
10662
10663             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10664             // and treating it as TYP_INT avoids other asserts.
10665             case CEE_LDELEM_U4:
10666                 lclTyp = TYP_INT;
10667                 goto ARR_LD;
10668
10669             case CEE_LDELEM_I4:
10670                 lclTyp = TYP_INT;
10671                 goto ARR_LD;
10672             case CEE_LDELEM_I8:
10673                 lclTyp = TYP_LONG;
10674                 goto ARR_LD;
10675             case CEE_LDELEM_REF:
10676                 lclTyp = TYP_REF;
10677                 goto ARR_LD;
10678             case CEE_LDELEM_R4:
10679                 lclTyp = TYP_FLOAT;
10680                 goto ARR_LD;
10681             case CEE_LDELEM_R8:
10682                 lclTyp = TYP_DOUBLE;
10683                 goto ARR_LD;
10684             case CEE_LDELEM_U1:
10685                 lclTyp = TYP_UBYTE;
10686                 goto ARR_LD;
10687             case CEE_LDELEM_U2:
10688                 lclTyp = TYP_CHAR;
10689                 goto ARR_LD;
10690
10691             ARR_LD:
10692
10693                 if (tiVerificationNeeded)
10694                 {
10695                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10696                     typeInfo tiIndex = impStackTop().seTypeInfo;
10697
10698                     // As per ECMA, the 'index' operand can be either int32 or native int.
10699                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10700                     if (tiArray.IsNullObjRef())
10701                     {
10702                         if (lclTyp == TYP_REF)
10703                         { // we will say a deref of a null array yields a null ref
10704                             tiRetVal = typeInfo(TI_NULL);
10705                         }
10706                         else
10707                         {
10708                             tiRetVal = typeInfo(lclTyp);
10709                         }
10710                     }
10711                     else
10712                     {
10713                         tiRetVal             = verGetArrayElemType(tiArray);
10714                         typeInfo arrayElemTi = typeInfo(lclTyp);
10715 #ifdef _TARGET_64BIT_
10716                         if (opcode == CEE_LDELEM_I)
10717                         {
10718                             arrayElemTi = typeInfo::nativeInt();
10719                         }
10720
10721                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10722                         {
10723                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10724                         }
10725                         else
10726 #endif // _TARGET_64BIT_
10727                         {
10728                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10729                         }
10730                     }
10731                     tiRetVal.NormaliseForStack();
10732                 }
10733             ARR_LD_POST_VERIFY:
10734
10735                 /* Pull the index value and array address */
10736                 op2 = impPopStack().val;
10737                 op1 = impPopStack().val;
10738                 assertImp(op1->gtType == TYP_REF);
10739
10740                 /* Check for null pointer - in the inliner case we simply abort */
10741
10742                 if (compIsForInlining())
10743                 {
10744                     if (op1->gtOper == GT_CNS_INT)
10745                     {
10746                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10747                         return;
10748                     }
10749                 }
10750
10751                 op1 = impCheckForNullPointer(op1);
10752
10753                 /* Mark the block as containing an index expression */
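                      // (The flag is a hint for later phases, e.g. early propagation of array
                      // lengths, so they can quickly find blocks that contain index expressions.)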
10754
10755                 if (op1->gtOper == GT_LCL_VAR)
10756                 {
10757                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10758                     {
10759                         block->bbFlags |= BBF_HAS_IDX_LEN;
10760                         optMethodFlags |= OMF_HAS_ARRAYREF;
10761                     }
10762                 }
10763
10764                 /* Create the index node and push it on the stack */
10765
10766                 op1 = gtNewIndexRef(lclTyp, op1, op2);
10767
10768                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10769
10770                 if ((opcode == CEE_LDELEMA) || ldstruct ||
10771                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10772                 {
10773                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
10774
10775                     // remember the element size
10776                     if (lclTyp == TYP_REF)
10777                     {
10778                         op1->gtIndex.gtIndElemSize = sizeof(void*);
10779                     }
10780                     else
10781                     {
10782                         // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
10783                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10784                         {
10785                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10786                         }
10787                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10788                         if (lclTyp == TYP_STRUCT)
10789                         {
10790                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
10791                             op1->gtIndex.gtIndElemSize = size;
10792                             op1->gtType                = lclTyp;
10793                         }
10794                     }
10795
10796                     if ((opcode == CEE_LDELEMA) || ldstruct)
10797                     {
10798                         // wrap it in a &
10799                         lclTyp = TYP_BYREF;
10800
10801                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10802                     }
10803                     else
10804                     {
10805                         assert(lclTyp != TYP_STRUCT);
10806                     }
10807                 }
10808
10809                 if (ldstruct)
10810                 {
10811                     // Create an OBJ for the result
10812                     op1 = gtNewObjNode(ldelemClsHnd, op1);
10813                     op1->gtFlags |= GTF_EXCEPT;
10814                 }
10815                 impPushOnStack(op1, tiRetVal);
10816                 break;
10817
10818             // stelem for reference and value types
10819             case CEE_STELEM:
10820
10821                 assertImp(sz == sizeof(unsigned));
10822
10823                 _impResolveToken(CORINFO_TOKENKIND_Class);
10824
10825                 JITDUMP(" %08X", resolvedToken.token);
10826
10827                 stelemClsHnd = resolvedToken.hClass;
10828
10829                 if (tiVerificationNeeded)
10830                 {
10831                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10832                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10833                     typeInfo tiValue = impStackTop().seTypeInfo;
10834
10835                     // As per ECMA, the specified 'index' can be either int32 or native int.
10836                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10837                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10838
10839                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10840                            "type operand incompatible with array element type");
10841                     arrayElem.NormaliseForStack();
10842                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10843                 }
10844
10845                 // If it's a reference type, just behave as though it's a stelem.ref instruction
10846                 if (!eeIsValueClass(stelemClsHnd))
10847                 {
10848                     goto STELEM_REF_POST_VERIFY;
10849                 }
10850
10851                 // Otherwise extract the type
10852                 {
10853                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10854                     lclTyp             = JITtype2varType(jitTyp);
10855                     goto ARR_ST_POST_VERIFY;
10856                 }
10857
10858             case CEE_STELEM_REF:
10859
10860                 if (tiVerificationNeeded)
10861                 {
10862                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10863                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10864                     typeInfo tiValue = impStackTop().seTypeInfo;
10865
10866                     // As per ECMA, the specified 'index' can be either int32 or native int.
10867                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10868                     Verify(tiValue.IsObjRef(), "bad value");
10869
10870                     // we only check that it is an object reference; the helper does additional checks
10871                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10872                 }
10873
10874                 arrayNodeTo      = impStackTop(2).val;
10875                 arrayNodeToIndex = impStackTop(1).val;
10876                 arrayNodeFrom    = impStackTop().val;
10877
10878                 //
10879                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10880                 // lot of cases because of covariance. ie. foo[] can be cast to object[].
10881                 //
10882
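                      // As an illustration (C# sketch): given
                      //
                      //     object[] arr = new string[1];
                      //     arr[0] = new object();   // must throw ArrayTypeMismatchException
                      //
                      // the static element type (object) permits the store but the runtime element
                      // type (string) does not, so the helper re-checks the element type at run time.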
10883                 // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j]
10884                 // This does not need CORINFO_HELP_ARRADDR_ST
10885
10886                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10887                     arrayNodeTo->gtOper == GT_LCL_VAR &&
10888                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10889                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10890                 {
10891                     lclTyp = TYP_REF;
10892                     goto ARR_ST_POST_VERIFY;
10893                 }
10894
10895                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10896
10897                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10898                 {
10899                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10900
10901                     lclTyp = TYP_REF;
10902                     goto ARR_ST_POST_VERIFY;
10903                 }
10904
10905             STELEM_REF_POST_VERIFY:
10906
10907                 /* Call a helper function to do the assignment */
10908                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10909
10910                 goto SPILL_APPEND;
10911
10912             case CEE_STELEM_I1:
10913                 lclTyp = TYP_BYTE;
10914                 goto ARR_ST;
10915             case CEE_STELEM_I2:
10916                 lclTyp = TYP_SHORT;
10917                 goto ARR_ST;
10918             case CEE_STELEM_I:
10919                 lclTyp = TYP_I_IMPL;
10920                 goto ARR_ST;
10921             case CEE_STELEM_I4:
10922                 lclTyp = TYP_INT;
10923                 goto ARR_ST;
10924             case CEE_STELEM_I8:
10925                 lclTyp = TYP_LONG;
10926                 goto ARR_ST;
10927             case CEE_STELEM_R4:
10928                 lclTyp = TYP_FLOAT;
10929                 goto ARR_ST;
10930             case CEE_STELEM_R8:
10931                 lclTyp = TYP_DOUBLE;
10932                 goto ARR_ST;
10933
10934             ARR_ST:
10935
10936                 if (tiVerificationNeeded)
10937                 {
10938                     typeInfo tiArray = impStackTop(2).seTypeInfo;
10939                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
10940                     typeInfo tiValue = impStackTop().seTypeInfo;
10941
10942                     // As per ECMA, the specified 'index' can be either int32 or native int.
10943                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10944                     typeInfo arrayElem = typeInfo(lclTyp);
10945 #ifdef _TARGET_64BIT_
10946                     if (opcode == CEE_STELEM_I)
10947                     {
10948                         arrayElem = typeInfo::nativeInt();
10949                     }
10950 #endif // _TARGET_64BIT_
10951                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10952                            "bad array");
10953
10954                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10955                            "bad value");
10956                 }
10957
10958             ARR_ST_POST_VERIFY:
10959                 /* The strict order of evaluation is LHS-operands, RHS-operands,
10960                    range-check, and then assignment. However, codegen currently
10961                    does the range-check before evaluating the RHS-operands. So to
10962                    maintain strict ordering, we spill the stack. */
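                      // For example (illustrative C#): in "a[i] = F()" a side-effecting call F()
                      // must execute before any IndexOutOfRangeException from the range check can
                      // be raised, so pending side effects are spilled to temps first.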
10963
10964                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10965                 {
10966                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10967                                                    "Strict ordering of exceptions for Array store"));
10968                 }
10969
10970                 /* Pull the new value from the stack */
10971                 op2 = impPopStack().val;
10972
10973                 /* Pull the index value */
10974                 op1 = impPopStack().val;
10975
10976                 /* Pull the array address */
10977                 op3 = impPopStack().val;
10978
10979                 assertImp(op3->gtType == TYP_REF);
10980                 if (op2->IsVarAddr())
10981                 {
10982                     op2->gtType = TYP_I_IMPL;
10983                 }
10984
10985                 op3 = impCheckForNullPointer(op3);
10986
10987                 // Mark the block as containing an index expression
10988
10989                 if (op3->gtOper == GT_LCL_VAR)
10990                 {
10991                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10992                     {
10993                         block->bbFlags |= BBF_HAS_IDX_LEN;
10994                         optMethodFlags |= OMF_HAS_ARRAYREF;
10995                     }
10996                 }
10997
10998                 /* Create the index node */
10999
11000                 op1 = gtNewIndexRef(lclTyp, op3, op1);
11001
11002                 /* Create the assignment node and append it */
11003
11004                 if (lclTyp == TYP_STRUCT)
11005                 {
11006                     assert(stelemClsHnd != DUMMY_INIT(NULL));
11007
11008                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
11009                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
11010                 }
11011                 if (varTypeIsStruct(op1))
11012                 {
11013                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
11014                 }
11015                 else
11016                 {
11017                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
11018                     op1 = gtNewAssignNode(op1, op2);
11019                 }
11020
11021                 /* Mark the expression as containing an assignment */
11022
11023                 op1->gtFlags |= GTF_ASG;
11024
11025                 goto SPILL_APPEND;
11026
11027             case CEE_ADD:
11028                 oper = GT_ADD;
11029                 goto MATH_OP2;
11030
11031             case CEE_ADD_OVF:
11032                 uns = false;
11033                 goto ADD_OVF;
11034             case CEE_ADD_OVF_UN:
11035                 uns = true;
11036                 goto ADD_OVF;
11037
11038             ADD_OVF:
11039                 ovfl     = true;
11040                 callNode = false;
11041                 oper     = GT_ADD;
11042                 goto MATH_OP2_FLAGS;
11043
11044             case CEE_SUB:
11045                 oper = GT_SUB;
11046                 goto MATH_OP2;
11047
11048             case CEE_SUB_OVF:
11049                 uns = false;
11050                 goto SUB_OVF;
11051             case CEE_SUB_OVF_UN:
11052                 uns = true;
11053                 goto SUB_OVF;
11054
11055             SUB_OVF:
11056                 ovfl     = true;
11057                 callNode = false;
11058                 oper     = GT_SUB;
11059                 goto MATH_OP2_FLAGS;
11060
11061             case CEE_MUL:
11062                 oper = GT_MUL;
11063                 goto MATH_MAYBE_CALL_NO_OVF;
11064
11065             case CEE_MUL_OVF:
11066                 uns = false;
11067                 goto MUL_OVF;
11068             case CEE_MUL_OVF_UN:
11069                 uns = true;
11070                 goto MUL_OVF;
11071
11072             MUL_OVF:
11073                 ovfl = true;
11074                 oper = GT_MUL;
11075                 goto MATH_MAYBE_CALL_OVF;
11076
11077             // Other binary math operations
11078
11079             case CEE_DIV:
11080                 oper = GT_DIV;
11081                 goto MATH_MAYBE_CALL_NO_OVF;
11082
11083             case CEE_DIV_UN:
11084                 oper = GT_UDIV;
11085                 goto MATH_MAYBE_CALL_NO_OVF;
11086
11087             case CEE_REM:
11088                 oper = GT_MOD;
11089                 goto MATH_MAYBE_CALL_NO_OVF;
11090
11091             case CEE_REM_UN:
11092                 oper = GT_UMOD;
11093                 goto MATH_MAYBE_CALL_NO_OVF;
11094
11095             MATH_MAYBE_CALL_NO_OVF:
11096                 ovfl = false;
11097             MATH_MAYBE_CALL_OVF:
11098                 // Morpher has some complex logic about when to turn differently
11099                 // typed nodes on different platforms into helper calls. We
11100                 // need to either duplicate that logic here, or just
11101                 // pessimistically make all the nodes large enough to become
11102                 // call nodes.  Since call nodes aren't that much larger and
11103                 // these opcodes are infrequent enough I chose the latter.
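                      // For instance, a TYP_LONG division on a 32-bit target is typically morphed
                      // into a helper call (e.g. CORINFO_HELP_LDIV), so the node built below may
                      // later have to become a GT_CALL.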
11104                 callNode = true;
11105                 goto MATH_OP2_FLAGS;
11106
11107             case CEE_AND:
11108                 oper = GT_AND;
11109                 goto MATH_OP2;
11110             case CEE_OR:
11111                 oper = GT_OR;
11112                 goto MATH_OP2;
11113             case CEE_XOR:
11114                 oper = GT_XOR;
11115                 goto MATH_OP2;
11116
11117             MATH_OP2: // For default values of 'ovfl' and 'callNode'
11118
11119                 ovfl     = false;
11120                 callNode = false;
11121
11122             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11123
11124                 /* Pull two values and push back the result */
11125
11126                 if (tiVerificationNeeded)
11127                 {
11128                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11129                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11130
11131                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11132                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11133                     {
11134                         Verify(tiOp1.IsNumberType(), "not number");
11135                     }
11136                     else
11137                     {
11138                         Verify(tiOp1.IsIntegerType(), "not integer");
11139                     }
11140
11141                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11142
11143                     tiRetVal = tiOp1;
11144
11145 #ifdef _TARGET_64BIT_
11146                     if (tiOp2.IsNativeIntType())
11147                     {
11148                         tiRetVal = tiOp2;
11149                     }
11150 #endif // _TARGET_64BIT_
11151                 }
11152
11153                 op2 = impPopStack().val;
11154                 op1 = impPopStack().val;
11155
11156 #if !CPU_HAS_FP_SUPPORT
11157                 if (varTypeIsFloating(op1->gtType))
11158                 {
11159                     callNode = true;
11160                 }
11161 #endif
11162                 /* Can't do arithmetic with references */
11163                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11164
11165                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if it's a true byref, only
11166                 // if it is in the stack)
11167                 impBashVarAddrsToI(op1, op2);
11168
11169                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11170
11171                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11172
11173                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
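                      // e.g. for "ldloc.0; ldc.i4.0; add" the local's value is pushed back
                      // unchanged and no GT_ADD node is created.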
11174
11175                 if (op2->gtOper == GT_CNS_INT)
11176                 {
11177                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11178                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11179
11180                     {
11181                         impPushOnStack(op1, tiRetVal);
11182                         break;
11183                     }
11184                 }
11185
11186 #if !FEATURE_X87_DOUBLES
11187                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11188                 //
11189                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11190                 {
11191                     if (op1->TypeGet() != type)
11192                     {
11193                         // We insert a cast of op1 to 'type'
11194                         op1 = gtNewCastNode(type, op1, type);
11195                     }
11196                     if (op2->TypeGet() != type)
11197                     {
11198                         // We insert a cast of op2 to 'type'
11199                         op2 = gtNewCastNode(type, op2, type);
11200                     }
11201                 }
11202 #endif // !FEATURE_X87_DOUBLES
11203
11204 #if SMALL_TREE_NODES
11205                 if (callNode)
11206                 {
11207                     /* These operators can later be transformed into 'GT_CALL' */
11208
11209                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11210 #ifndef _TARGET_ARM_
11211                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11212                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11213                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11214                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11215 #endif
11216                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11217                     // that we'll need to transform into a general large node, but rather specifically
11218                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11219                     // and a CALL is no longer the largest.
11220                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11221                     // than an "if".
11222                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11223                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11224                 }
11225                 else
11226 #endif // SMALL_TREE_NODES
11227                 {
11228                     op1 = gtNewOperNode(oper, type, op1, op2);
11229                 }
11230
11231                 /* Special case: integer/long division may throw an exception */
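                      // e.g. a zero divisor raises DivideByZeroException, and dividing the minimum
                      // signed value by -1 overflows, so the node must be flagged as throwing.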
11232
11233                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11234                 {
11235                     op1->gtFlags |= GTF_EXCEPT;
11236                 }
11237
11238                 if (ovfl)
11239                 {
11240                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11241                     if (ovflType != TYP_UNKNOWN)
11242                     {
11243                         op1->gtType = ovflType;
11244                     }
11245                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11246                     if (uns)
11247                     {
11248                         op1->gtFlags |= GTF_UNSIGNED;
11249                     }
11250                 }
11251
11252                 impPushOnStack(op1, tiRetVal);
11253                 break;
11254
11255             case CEE_SHL:
11256                 oper = GT_LSH;
11257                 goto CEE_SH_OP2;
11258
11259             case CEE_SHR:
11260                 oper = GT_RSH;
11261                 goto CEE_SH_OP2;
11262             case CEE_SHR_UN:
11263                 oper = GT_RSZ;
11264                 goto CEE_SH_OP2;
11265
11266             CEE_SH_OP2:
11267                 if (tiVerificationNeeded)
11268                 {
11269                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11270                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11271                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11272                     tiRetVal = tiVal;
11273                 }
11274                 op2 = impPopStack().val;
11275                 op1 = impPopStack().val; // operand to be shifted
11276                 impBashVarAddrsToI(op1, op2);
11277
11278                 type = genActualType(op1->TypeGet());
11279                 op1  = gtNewOperNode(oper, type, op1, op2);
11280
11281                 impPushOnStack(op1, tiRetVal);
11282                 break;
11283
11284             case CEE_NOT:
11285                 if (tiVerificationNeeded)
11286                 {
11287                     tiRetVal = impStackTop().seTypeInfo;
11288                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11289                 }
11290
11291                 op1 = impPopStack().val;
11292                 impBashVarAddrsToI(op1, nullptr);
11293                 type = genActualType(op1->TypeGet());
11294                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11295                 break;
11296
11297             case CEE_CKFINITE:
11298                 if (tiVerificationNeeded)
11299                 {
11300                     tiRetVal = impStackTop().seTypeInfo;
11301                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11302                 }
11303                 op1  = impPopStack().val;
11304                 type = op1->TypeGet();
11305                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11306                 op1->gtFlags |= GTF_EXCEPT;
11307
11308                 impPushOnStack(op1, tiRetVal);
11309                 break;
11310
11311             case CEE_LEAVE:
11312
11313                 val     = getI4LittleEndian(codeAddr); // jump distance
11314                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11315                 goto LEAVE;
11316
11317             case CEE_LEAVE_S:
11318                 val     = getI1LittleEndian(codeAddr); // jump distance
11319                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11320
11321             LEAVE:
11322
11323                 if (compIsForInlining())
11324                 {
11325                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11326                     return;
11327                 }
11328
11329                 JITDUMP(" %04X", jmpAddr);
11330                 if (block->bbJumpKind != BBJ_LEAVE)
11331                 {
11332                     impResetLeaveBlock(block, jmpAddr);
11333                 }
11334
11335                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11336                 impImportLeave(block);
11337                 impNoteBranchOffs();
11338
11339                 break;
11340
11341             case CEE_BR:
11342             case CEE_BR_S:
11343                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11344
11345                 if (compIsForInlining() && jmpDist == 0)
11346                 {
11347                     break; /* NOP */
11348                 }
11349
11350                 impNoteBranchOffs();
11351                 break;
11352
11353             case CEE_BRTRUE:
11354             case CEE_BRTRUE_S:
11355             case CEE_BRFALSE:
11356             case CEE_BRFALSE_S:
11357
11358                 /* Pop the comparand (now there's a neat term) from the stack */
11359                 if (tiVerificationNeeded)
11360                 {
11361                     typeInfo& tiVal = impStackTop().seTypeInfo;
11362                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11363                            "bad value");
11364                 }
11365
11366                 op1  = impPopStack().val;
11367                 type = op1->TypeGet();
11368
11369                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11370                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11371                 {
11372                     block->bbJumpKind = BBJ_NONE;
11373
11374                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11375                     {
11376                         op1 = gtUnusedValNode(op1);
11377                         goto SPILL_APPEND;
11378                     }
11379                     else
11380                     {
11381                         break;
11382                     }
11383                 }
11384
11385                 if (op1->OperIsCompare())
11386                 {
11387                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11388                     {
11389                         // Flip the sense of the compare
11390
11391                         op1 = gtReverseCond(op1);
11392                     }
11393                 }
11394                 else
11395                 {
11396                     /* We'll compare against an equally-sized integer 0 */
11397                     /* For small types, we always compare against int   */
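                          // e.g. "brtrue" on an int value v is imported as JTRUE(NE(v, 0)), and
                          // "brfalse" as JTRUE(EQ(v, 0)), once the GT_JTRUE is added below.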
11398                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11399
11400                     /* Create the comparison operator and try to fold it */
11401
11402                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11403                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11404                 }
11405
11406             // fall through
11407
11408             COND_JUMP:
11409
11410                 /* Fold comparison if we can */
11411
11412                 op1 = gtFoldExpr(op1);
11413
11414                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11415                 /* Don't make any blocks unreachable in import only mode */
11416
11417                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11418                 {
11419                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11420                        unreachable under compDbgCode */
11421                     assert(!opts.compDbgCode);
11422
11423                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11424                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11425                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11426                                                                          // block for the second time
11427
11428                     block->bbJumpKind = foldedJumpKind;
11429 #ifdef DEBUG
11430                     if (verbose)
11431                     {
11432                         if (op1->gtIntCon.gtIconVal)
11433                         {
11434                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11435                                    block->bbJumpDest->bbNum);
11436                         }
11437                         else
11438                         {
11439                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11440                         }
11441                     }
11442 #endif
11443                     break;
11444                 }
11445
11446                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11447
11448                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11449                    in impImportBlock(block). For correct line numbers, spill stack. */
11450
11451                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11452                 {
11453                     impSpillStackEnsure(true);
11454                 }
11455
11456                 goto SPILL_APPEND;
11457
11458             case CEE_CEQ:
11459                 oper = GT_EQ;
11460                 uns  = false;
11461                 goto CMP_2_OPs;
11462             case CEE_CGT_UN:
11463                 oper = GT_GT;
11464                 uns  = true;
11465                 goto CMP_2_OPs;
11466             case CEE_CGT:
11467                 oper = GT_GT;
11468                 uns  = false;
11469                 goto CMP_2_OPs;
11470             case CEE_CLT_UN:
11471                 oper = GT_LT;
11472                 uns  = true;
11473                 goto CMP_2_OPs;
11474             case CEE_CLT:
11475                 oper = GT_LT;
11476                 uns  = false;
11477                 goto CMP_2_OPs;
11478
11479             CMP_2_OPs:
11480                 if (tiVerificationNeeded)
11481                 {
11482                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11483                     tiRetVal = typeInfo(TI_INT);
11484                 }
11485
11486                 op2 = impPopStack().val;
11487                 op1 = impPopStack().val;
11488
11489 #ifdef _TARGET_64BIT_
11490                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11491                 {
11492                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11493                 }
11494                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11495                 {
11496                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11497                 }
11498 #endif // _TARGET_64BIT_
11499
11500                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11501                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11502                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11503
11504                 /* Create the comparison node */
11505
11506                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11507
11508                 /* TODO: setting both flags when only one is appropriate */
11509                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11510                 {
11511                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11512                 }
11513
11514                 impPushOnStack(op1, tiRetVal);
11515                 break;
11516
11517             case CEE_BEQ_S:
11518             case CEE_BEQ:
11519                 oper = GT_EQ;
11520                 goto CMP_2_OPs_AND_BR;
11521
11522             case CEE_BGE_S:
11523             case CEE_BGE:
11524                 oper = GT_GE;
11525                 goto CMP_2_OPs_AND_BR;
11526
11527             case CEE_BGE_UN_S:
11528             case CEE_BGE_UN:
11529                 oper = GT_GE;
11530                 goto CMP_2_OPs_AND_BR_UN;
11531
11532             case CEE_BGT_S:
11533             case CEE_BGT:
11534                 oper = GT_GT;
11535                 goto CMP_2_OPs_AND_BR;
11536
11537             case CEE_BGT_UN_S:
11538             case CEE_BGT_UN:
11539                 oper = GT_GT;
11540                 goto CMP_2_OPs_AND_BR_UN;
11541
11542             case CEE_BLE_S:
11543             case CEE_BLE:
11544                 oper = GT_LE;
11545                 goto CMP_2_OPs_AND_BR;
11546
11547             case CEE_BLE_UN_S:
11548             case CEE_BLE_UN:
11549                 oper = GT_LE;
11550                 goto CMP_2_OPs_AND_BR_UN;
11551
11552             case CEE_BLT_S:
11553             case CEE_BLT:
11554                 oper = GT_LT;
11555                 goto CMP_2_OPs_AND_BR;
11556
11557             case CEE_BLT_UN_S:
11558             case CEE_BLT_UN:
11559                 oper = GT_LT;
11560                 goto CMP_2_OPs_AND_BR_UN;
11561
11562             case CEE_BNE_UN_S:
11563             case CEE_BNE_UN:
11564                 oper = GT_NE;
11565                 goto CMP_2_OPs_AND_BR_UN;
11566
11567             CMP_2_OPs_AND_BR_UN:
11568                 uns       = true;
11569                 unordered = true;
11570                 goto CMP_2_OPs_AND_BR_ALL;
11571             CMP_2_OPs_AND_BR:
11572                 uns       = false;
11573                 unordered = false;
11574                 goto CMP_2_OPs_AND_BR_ALL;
11575             CMP_2_OPs_AND_BR_ALL:
11576
11577                 if (tiVerificationNeeded)
11578                 {
11579                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11580                 }
11581
11582                 /* Pull two values */
11583                 op2 = impPopStack().val;
11584                 op1 = impPopStack().val;
11585
11586 #ifdef _TARGET_64BIT_
11587                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11588                 {
11589                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11590                 }
11591                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11592                 {
11593                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11594                 }
11595 #endif // _TARGET_64BIT_
11596
11597                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11598                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11599                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11600
11601                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11602                 {
11603                     block->bbJumpKind = BBJ_NONE;
11604
11605                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11606                     {
11607                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11608                                                        "Branch to next Optimization, op1 side effect"));
11609                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11610                     }
11611                     if (op2->gtFlags & GTF_GLOB_EFFECT)
11612                     {
11613                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11614                                                        "Branch to next Optimization, op2 side effect"));
11615                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11616                     }
11617
11618 #ifdef DEBUG
11619                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11620                     {
11621                         impNoteLastILoffs();
11622                     }
11623 #endif
11624                     break;
11625                 }
11626 #if !FEATURE_X87_DOUBLES
11627                 // We can generate a compare of differently sized floating point op1 and op2
11628                 // We insert a cast
11629                 //
11630                 if (varTypeIsFloating(op1->TypeGet()))
11631                 {
11632                     if (op1->TypeGet() != op2->TypeGet())
11633                     {
11634                         assert(varTypeIsFloating(op2->TypeGet()));
11635
11636                         // say op1=double, op2=float. To avoid loss of precision
11637                         // while comparing, op2 is converted to double and a double
11638                         // comparison is done.
11639                         if (op1->TypeGet() == TYP_DOUBLE)
11640                         {
11641                             // We insert a cast of op2 to TYP_DOUBLE
11642                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11643                         }
11644                         else if (op2->TypeGet() == TYP_DOUBLE)
11645                         {
11646                             // We insert a cast of op1 to TYP_DOUBLE
11647                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11648                         }
11649                     }
11650                 }
11651 #endif // !FEATURE_X87_DOUBLES
11652
11653                 /* Create and append the operator */
11654
11655                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11656
11657                 if (uns)
11658                 {
11659                     op1->gtFlags |= GTF_UNSIGNED;
11660                 }
11661
11662                 if (unordered)
11663                 {
11664                     op1->gtFlags |= GTF_RELOP_NAN_UN;
11665                 }
11666
11667                 goto COND_JUMP;
11668
11669             case CEE_SWITCH:
11670                 assert(!compIsForInlining());
11671
11672                 if (tiVerificationNeeded)
11673                 {
11674                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11675                 }
11676                 /* Pop the switch value off the stack */
11677                 op1 = impPopStack().val;
11678                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11679
11680                 /* We can create a switch node */
11681
11682                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11683
11684                 val = (int)getU4LittleEndian(codeAddr);
11685                 codeAddr += 4 + val * 4; // skip over the switch-table
11686
11687                 goto SPILL_APPEND;
11688
11689             /************************** Casting OPCODES ***************************/
11690
11691             case CEE_CONV_OVF_I1:
11692                 lclTyp = TYP_BYTE;
11693                 goto CONV_OVF;
11694             case CEE_CONV_OVF_I2:
11695                 lclTyp = TYP_SHORT;
11696                 goto CONV_OVF;
11697             case CEE_CONV_OVF_I:
11698                 lclTyp = TYP_I_IMPL;
11699                 goto CONV_OVF;
11700             case CEE_CONV_OVF_I4:
11701                 lclTyp = TYP_INT;
11702                 goto CONV_OVF;
11703             case CEE_CONV_OVF_I8:
11704                 lclTyp = TYP_LONG;
11705                 goto CONV_OVF;
11706
11707             case CEE_CONV_OVF_U1:
11708                 lclTyp = TYP_UBYTE;
11709                 goto CONV_OVF;
11710             case CEE_CONV_OVF_U2:
11711                 lclTyp = TYP_CHAR;
11712                 goto CONV_OVF;
11713             case CEE_CONV_OVF_U:
11714                 lclTyp = TYP_U_IMPL;
11715                 goto CONV_OVF;
11716             case CEE_CONV_OVF_U4:
11717                 lclTyp = TYP_UINT;
11718                 goto CONV_OVF;
11719             case CEE_CONV_OVF_U8:
11720                 lclTyp = TYP_ULONG;
11721                 goto CONV_OVF;
11722
11723             case CEE_CONV_OVF_I1_UN:
11724                 lclTyp = TYP_BYTE;
11725                 goto CONV_OVF_UN;
11726             case CEE_CONV_OVF_I2_UN:
11727                 lclTyp = TYP_SHORT;
11728                 goto CONV_OVF_UN;
11729             case CEE_CONV_OVF_I_UN:
11730                 lclTyp = TYP_I_IMPL;
11731                 goto CONV_OVF_UN;
11732             case CEE_CONV_OVF_I4_UN:
11733                 lclTyp = TYP_INT;
11734                 goto CONV_OVF_UN;
11735             case CEE_CONV_OVF_I8_UN:
11736                 lclTyp = TYP_LONG;
11737                 goto CONV_OVF_UN;
11738
11739             case CEE_CONV_OVF_U1_UN:
11740                 lclTyp = TYP_UBYTE;
11741                 goto CONV_OVF_UN;
11742             case CEE_CONV_OVF_U2_UN:
11743                 lclTyp = TYP_CHAR;
11744                 goto CONV_OVF_UN;
11745             case CEE_CONV_OVF_U_UN:
11746                 lclTyp = TYP_U_IMPL;
11747                 goto CONV_OVF_UN;
11748             case CEE_CONV_OVF_U4_UN:
11749                 lclTyp = TYP_UINT;
11750                 goto CONV_OVF_UN;
11751             case CEE_CONV_OVF_U8_UN:
11752                 lclTyp = TYP_ULONG;
11753                 goto CONV_OVF_UN;
11754
11755             CONV_OVF_UN:
11756                 uns = true;
11757                 goto CONV_OVF_COMMON;
11758             CONV_OVF:
11759                 uns = false;
11760                 goto CONV_OVF_COMMON;
11761
11762             CONV_OVF_COMMON:
11763                 ovfl = true;
11764                 goto _CONV;
11765
11766             case CEE_CONV_I1:
11767                 lclTyp = TYP_BYTE;
11768                 goto CONV;
11769             case CEE_CONV_I2:
11770                 lclTyp = TYP_SHORT;
11771                 goto CONV;
11772             case CEE_CONV_I:
11773                 lclTyp = TYP_I_IMPL;
11774                 goto CONV;
11775             case CEE_CONV_I4:
11776                 lclTyp = TYP_INT;
11777                 goto CONV;
11778             case CEE_CONV_I8:
11779                 lclTyp = TYP_LONG;
11780                 goto CONV;
11781
11782             case CEE_CONV_U1:
11783                 lclTyp = TYP_UBYTE;
11784                 goto CONV;
11785             case CEE_CONV_U2:
11786                 lclTyp = TYP_CHAR;
11787                 goto CONV;
11788 #if (REGSIZE_BYTES == 8)
11789             case CEE_CONV_U:
11790                 lclTyp = TYP_U_IMPL;
11791                 goto CONV_UN;
11792 #else
11793             case CEE_CONV_U:
11794                 lclTyp = TYP_U_IMPL;
11795                 goto CONV;
11796 #endif
11797             case CEE_CONV_U4:
11798                 lclTyp = TYP_UINT;
11799                 goto CONV;
11800             case CEE_CONV_U8:
11801                 lclTyp = TYP_ULONG;
11802                 goto CONV_UN;
11803
11804             case CEE_CONV_R4:
11805                 lclTyp = TYP_FLOAT;
11806                 goto CONV;
11807             case CEE_CONV_R8:
11808                 lclTyp = TYP_DOUBLE;
11809                 goto CONV;
11810
11811             case CEE_CONV_R_UN:
11812                 lclTyp = TYP_DOUBLE;
11813                 goto CONV_UN;
11814
11815             CONV_UN:
11816                 uns  = true;
11817                 ovfl = false;
11818                 goto _CONV;
11819
11820             CONV:
11821                 uns  = false;
11822                 ovfl = false;
11823                 goto _CONV;
11824
11825             _CONV:
11826                 // just check that we have a number on the stack
11827                 if (tiVerificationNeeded)
11828                 {
11829                     const typeInfo& tiVal = impStackTop().seTypeInfo;
11830                     Verify(tiVal.IsNumberType(), "bad arg");
11831
11832 #ifdef _TARGET_64BIT_
11833                     bool isNative = false;
11834
11835                     switch (opcode)
11836                     {
11837                         case CEE_CONV_OVF_I:
11838                         case CEE_CONV_OVF_I_UN:
11839                         case CEE_CONV_I:
11840                         case CEE_CONV_OVF_U:
11841                         case CEE_CONV_OVF_U_UN:
11842                         case CEE_CONV_U:
11843                             isNative = true;
11844                         default:
11845                             // leave 'isNative' = false;
11846                             break;
11847                     }
11848                     if (isNative)
11849                     {
11850                         tiRetVal = typeInfo::nativeInt();
11851                     }
11852                     else
11853 #endif // _TARGET_64BIT_
11854                     {
11855                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11856                     }
11857                 }
11858
11859                 // Only conversions from FLOAT or DOUBLE to an integer type,
11860                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
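                      // For instance, converting a ulong value to double is commonly expanded via a
                      // helper (e.g. CORINFO_HELP_ULNG2DBL) rather than as an inline cast.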
11861
11862                 if (varTypeIsFloating(lclTyp))
11863                 {
11864                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11865 #ifdef _TARGET_64BIT_
11866                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11867                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
11868                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11869                                // and generate SSE2 code instead of going through helper calls.
11870                                || (impStackTop().val->TypeGet() == TYP_BYREF)
11871 #endif
11872                         ;
11873                 }
11874                 else
11875                 {
11876                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11877                 }
11878
11879                 // At this point uns, ovf, callNode all set
11880
11881                 op1 = impPopStack().val;
11882                 impBashVarAddrsToI(op1);
11883
11884                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11885                 {
11886                     op2 = op1->gtOp.gtOp2;
11887
11888                     if (op2->gtOper == GT_CNS_INT)
11889                     {
11890                         ssize_t ival = op2->gtIntCon.gtIconVal;
11891                         ssize_t mask, umask;
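                              // 'mask' keeps every bit of the small type, while 'umask' keeps only
                              // the bits that survive sign extension, e.g. for TYP_BYTE a value
                              // already masked with 0x7F is unchanged by the byte cast.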
11892
11893                         switch (lclTyp)
11894                         {
11895                             case TYP_BYTE:
11896                             case TYP_UBYTE:
11897                                 mask  = 0x00FF;
11898                                 umask = 0x007F;
11899                                 break;
11900                             case TYP_CHAR:
11901                             case TYP_SHORT:
11902                                 mask  = 0xFFFF;
11903                                 umask = 0x7FFF;
11904                                 break;
11905
11906                             default:
11907                                 assert(!"unexpected type");
11908                                 return;
11909                         }
11910
11911                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11912                         {
11913                             /* Toss the cast, it's a waste of time */
11914
11915                             impPushOnStack(op1, tiRetVal);
11916                             break;
11917                         }
11918                         else if (ival == mask)
11919                         {
11920                             /* Toss the masking, it's a waste of time, since
11921                                we sign-extend from the small value anyway */
11922
11923                             op1 = op1->gtOp.gtOp1;
11924                         }
11925                     }
11926                 }
11927
11928                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
11929                     since the result of a cast to one of the 'small' integer
11930                     types is an integer.
11931                  */
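                      // e.g. for "conv.i2" the cast node's type is TYP_INT (the stack type) while
                      // TYP_SHORT is recorded as the cast's target type.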
11932
11933                 type = genActualType(lclTyp);
11934
11935 #if SMALL_TREE_NODES
11936                 if (callNode)
11937                 {
11938                     op1 = gtNewCastNodeL(type, op1, lclTyp);
11939                 }
11940                 else
11941 #endif // SMALL_TREE_NODES
11942                 {
11943                     op1 = gtNewCastNode(type, op1, lclTyp);
11944                 }
11945
11946                 if (ovfl)
11947                 {
11948                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11949                 }
11950                 if (uns)
11951                 {
11952                     op1->gtFlags |= GTF_UNSIGNED;
11953                 }
11954                 impPushOnStack(op1, tiRetVal);
11955                 break;
11956
11957             case CEE_NEG:
11958                 if (tiVerificationNeeded)
11959                 {
11960                     tiRetVal = impStackTop().seTypeInfo;
11961                     Verify(tiRetVal.IsNumberType(), "Bad arg");
11962                 }
11963
11964                 op1 = impPopStack().val;
11965                 impBashVarAddrsToI(op1, nullptr);
11966                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11967                 break;
11968
11969             case CEE_POP:
11970             {
11971                 /* Pull the top value from the stack */
11972
11973                 StackEntry se = impPopStack();
11974                 clsHnd        = se.seTypeInfo.GetClassHandle();
11975                 op1           = se.val;
11976
11977                 /* Get hold of the type of the value being duplicated */
11978
11979                 lclTyp = genActualType(op1->gtType);
11980
11981                 /* Does the value have any side effects? */
11982
11983                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11984                 {
11985                     // Since we are throwing away the value, just normalize
11986                     // it to its address.  This is more efficient.
11987
11988                     if (varTypeIsStruct(op1))
11989                     {
11990 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11991                         // Non-calls, such as obj or ret_expr, have to go through this.
11992                         // Calls with large struct return value have to go through this.
11993                         // Helper calls with small struct return value also have to go
11994                         // through this since they do not follow Unix calling convention.
11995                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11996                             op1->AsCall()->gtCallType == CT_HELPER)
11997 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11998                         {
11999                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
12000                         }
12001                     }
12002
12003                     // If op1 is a non-overflow cast, throw it away since it is useless.
12004                     // Another reason for throwing away the useless cast is in the context of
12005                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
12006                     // The cast gets added as part of importing GT_CALL, which gets in the way
12007                     // of fgMorphCall() on the forms of tail call nodes that we assert.
12008                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
12009                     {
12010                         op1 = op1->gtOp.gtOp1;
12011                     }
12012
12013                     // If 'op1' is an expression, create an assignment node.
12014                     // This helps analyses (like CSE) work correctly.
12015
12016                     if (op1->gtOper != GT_CALL)
12017                     {
12018                         op1 = gtUnusedValNode(op1);
12019                     }
12020
12021                     /* Append the value to the tree list */
12022                     goto SPILL_APPEND;
12023                 }
12024
12025                 /* No side effects - just throw the <BEEP> thing away */
12026             }
12027             break;
12028
12029             case CEE_DUP:
12030             {
12031                 if (tiVerificationNeeded)
12032                 {
12033                     // Dup could start the beginning of a delegate creation sequence; remember that
12034                     delegateCreateStart = codeAddr - 1;
12035                     impStackTop(0);
12036                 }
12037
12038                 // If the expression to dup is simple, just clone it.
12039                 // Otherwise spill it to a temp, and reload the temp
12040                 // twice.
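                      // e.g. a dup of a local just clones the GT_LCL_VAR, while a dup of a call is
                      // first assigned to a fresh temp and two reads of that temp are pushed.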
12041                 StackEntry se = impPopStack();
12042                 tiRetVal      = se.seTypeInfo;
12043                 op1           = se.val;
12044
12045                 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
12046                 {
12047                     const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
12048                     impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
12049                     var_types type = genActualType(lvaTable[tmpNum].TypeGet());
12050                     op1            = gtNewLclvNode(tmpNum, type);
12051
12052                     // Propagate type info to the temp
12053                     if (type == TYP_REF)
12054                     {
12055                         lvaSetClass(tmpNum, op1, tiRetVal.GetClassHandle());
12056                     }
12057                 }
12058
12059                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12060                                    nullptr DEBUGARG("DUP instruction"));
12061
12062                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12063                 impPushOnStack(op1, tiRetVal);
12064                 impPushOnStack(op2, tiRetVal);
12065             }
12066             break;
12067
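            // The stind.* opcodes store an integer, float, native int, or object reference through a
            // pointer. As an illustration (a sketch, not taken from any particular method), the IL
            //     ldloca.s 0      // push the address of local 0 (an int32)
            //     ldc.i4   42     // push the value to store
            //     stind.i4        // *(int32*)addr = 42
            // pops the value and then the address, matching the pop order in the importer code below.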
12068             case CEE_STIND_I1:
12069                 lclTyp = TYP_BYTE;
12070                 goto STIND;
12071             case CEE_STIND_I2:
12072                 lclTyp = TYP_SHORT;
12073                 goto STIND;
12074             case CEE_STIND_I4:
12075                 lclTyp = TYP_INT;
12076                 goto STIND;
12077             case CEE_STIND_I8:
12078                 lclTyp = TYP_LONG;
12079                 goto STIND;
12080             case CEE_STIND_I:
12081                 lclTyp = TYP_I_IMPL;
12082                 goto STIND;
12083             case CEE_STIND_REF:
12084                 lclTyp = TYP_REF;
12085                 goto STIND;
12086             case CEE_STIND_R4:
12087                 lclTyp = TYP_FLOAT;
12088                 goto STIND;
12089             case CEE_STIND_R8:
12090                 lclTyp = TYP_DOUBLE;
12091                 goto STIND;
12092             STIND:
12093
12094                 if (tiVerificationNeeded)
12095                 {
12096                     typeInfo instrType(lclTyp);
12097 #ifdef _TARGET_64BIT_
12098                     if (opcode == CEE_STIND_I)
12099                     {
12100                         instrType = typeInfo::nativeInt();
12101                     }
12102 #endif // _TARGET_64BIT_
12103                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12104                 }
12105                 else
12106                 {
12107                     compUnsafeCastUsed = true; // Have to go conservative
12108                 }
12109
12110             STIND_POST_VERIFY:
12111
12112                 op2 = impPopStack().val; // value to store
12113                 op1 = impPopStack().val; // address to store to
12114
12115                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12116                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12117
12118                 impBashVarAddrsToI(op1, op2);
12119
12120                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12121
12122 #ifdef _TARGET_64BIT_
12123                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12124                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12125                 {
12126                     op2->gtType = TYP_I_IMPL;
12127                 }
12128                 else
12129                 {
12130                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12131                     //
12132                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12133                     {
12134                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12135                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12136                     }
12137                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12138                     //
12139                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12140                     {
12141                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12142                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12143                     }
12144                 }
12145 #endif // _TARGET_64BIT_
12146
12147                 if (opcode == CEE_STIND_REF)
12148                 {
12149                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12150                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12151                     lclTyp = genActualType(op2->TypeGet());
12152                 }
12153
12154 // Check target type.
12155 #ifdef DEBUG
12156                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12157                 {
12158                     if (op2->gtType == TYP_BYREF)
12159                     {
12160                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12161                     }
12162                     else if (lclTyp == TYP_BYREF)
12163                     {
12164                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12165                     }
12166                 }
12167                 else
12168                 {
12169                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12170                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12171                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12172                 }
12173 #endif
12174
12175                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12176
12177                 // stind could point anywhere, e.g. a boxed class static int
12178                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12179
12180                 if (prefixFlags & PREFIX_VOLATILE)
12181                 {
12182                     assert(op1->OperGet() == GT_IND);
12183                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12184                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12185                     op1->gtFlags |= GTF_IND_VOLATILE;
12186                 }
12187
12188                 if (prefixFlags & PREFIX_UNALIGNED)
12189                 {
12190                     assert(op1->OperGet() == GT_IND);
12191                     op1->gtFlags |= GTF_IND_UNALIGNED;
12192                 }
12193
12194                 op1 = gtNewAssignNode(op1, op2);
12195                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12196
12197                 // Spill side-effects AND global-data-accesses
12198                 if (verCurrentState.esStackDepth > 0)
12199                 {
12200                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12201                 }
12202
12203                 goto APPEND;
12204
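            // The ldind.* opcodes pop an address (a native int or byref) and push the value read through
            // it, widened to its stack type: small signed types sign-extend and small unsigned types
            // zero-extend to int32 (e.g. ldind.u1 pushes a zero-extended int32).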
12205             case CEE_LDIND_I1:
12206                 lclTyp = TYP_BYTE;
12207                 goto LDIND;
12208             case CEE_LDIND_I2:
12209                 lclTyp = TYP_SHORT;
12210                 goto LDIND;
12211             case CEE_LDIND_U4:
12212             case CEE_LDIND_I4:
12213                 lclTyp = TYP_INT;
12214                 goto LDIND;
12215             case CEE_LDIND_I8:
12216                 lclTyp = TYP_LONG;
12217                 goto LDIND;
12218             case CEE_LDIND_REF:
12219                 lclTyp = TYP_REF;
12220                 goto LDIND;
12221             case CEE_LDIND_I:
12222                 lclTyp = TYP_I_IMPL;
12223                 goto LDIND;
12224             case CEE_LDIND_R4:
12225                 lclTyp = TYP_FLOAT;
12226                 goto LDIND;
12227             case CEE_LDIND_R8:
12228                 lclTyp = TYP_DOUBLE;
12229                 goto LDIND;
12230             case CEE_LDIND_U1:
12231                 lclTyp = TYP_UBYTE;
12232                 goto LDIND;
12233             case CEE_LDIND_U2:
12234                 lclTyp = TYP_CHAR;
12235                 goto LDIND;
12236             LDIND:
12237
12238                 if (tiVerificationNeeded)
12239                 {
12240                     typeInfo lclTiType(lclTyp);
12241 #ifdef _TARGET_64BIT_
12242                     if (opcode == CEE_LDIND_I)
12243                     {
12244                         lclTiType = typeInfo::nativeInt();
12245                     }
12246 #endif // _TARGET_64BIT_
12247                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12248                     tiRetVal.NormaliseForStack();
12249                 }
12250                 else
12251                 {
12252                     compUnsafeCastUsed = true; // Have to go conservative
12253                 }
12254
12255             LDIND_POST_VERIFY:
12256
12257                 op1 = impPopStack().val; // address to load from
12258                 impBashVarAddrsToI(op1);
12259
12260 #ifdef _TARGET_64BIT_
12261                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12262                 //
12263                 if (genActualType(op1->gtType) == TYP_INT)
12264                 {
12265                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12266                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12267                 }
12268 #endif
12269
12270                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12271
12272                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12273
12274                 // ldind could point anywhere, e.g. a boxed class static int
12275                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12276
12277                 if (prefixFlags & PREFIX_VOLATILE)
12278                 {
12279                     assert(op1->OperGet() == GT_IND);
12280                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12281                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12282                     op1->gtFlags |= GTF_IND_VOLATILE;
12283                 }
12284
12285                 if (prefixFlags & PREFIX_UNALIGNED)
12286                 {
12287                     assert(op1->OperGet() == GT_IND);
12288                     op1->gtFlags |= GTF_IND_UNALIGNED;
12289                 }
12290
12291                 impPushOnStack(op1, tiRetVal);
12292
12293                 break;
12294
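            // The unaligned. prefix carries an immediate alignment operand (1, 2, or 4) and applies to the
            // memory-access instruction that follows it, indicating the address may not be naturally
            // aligned. For example (illustrative IL only):
            //     unaligned. 1
            //     ldind.i4
            // marks the load as potentially byte-aligned, which the importer records via GTF_IND_UNALIGNED.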
12295             case CEE_UNALIGNED:
12296
12297                 assert(sz == 1);
12298                 val = getU1LittleEndian(codeAddr);
12299                 ++codeAddr;
12300                 JITDUMP(" %u", val);
12301                 if ((val != 1) && (val != 2) && (val != 4))
12302                 {
12303                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12304                 }
12305
12306                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12307                 prefixFlags |= PREFIX_UNALIGNED;
12308
12309                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12310
12311             PREFIX:
12312                 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12313                 codeAddr += sizeof(__int8);
12314                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12315                 goto DECODE_OPCODE;
12316
12317             case CEE_VOLATILE:
12318
12319                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12320                 prefixFlags |= PREFIX_VOLATILE;
12321
12322                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12323
12324                 assert(sz == 0);
12325                 goto PREFIX;
12326
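            // ldftn (or dup + ldvirtftn for virtual targets) typically begins a delegate creation
            // sequence. An illustrative IL shape (not taken from this method) for 'new Action(obj.M)'
            // where M is virtual is:
            //     ldloc.0
            //     dup
            //     ldvirtftn  instance void C::M()
            //     newobj     instance void [mscorlib]System.Action::.ctor(object, native int)
            // which is why the verification code below records delegateCreateStart at these opcodes.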
12327             case CEE_LDFTN:
12328             {
12329                 // Need to do a lookup here so that we perform an access check
12330                 // and do a NOWAY if protections are violated
12331                 _impResolveToken(CORINFO_TOKENKIND_Method);
12332
12333                 JITDUMP(" %08X", resolvedToken.token);
12334
12335                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12336                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12337                               &callInfo);
12338
12339                 // This check really only applies to intrinsic Array.Address methods
12340                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12341                 {
12342                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12343                 }
12344
12345                 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12346                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12347
12348                 if (tiVerificationNeeded)
12349                 {
12350                     // LDFTN could start the beginning of a delegate creation sequence, remember that
12351                     delegateCreateStart = codeAddr - 2;
12352
12353                     // check any constraints on the callee's class and type parameters
12354                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12355                                    "method has unsatisfied class constraints");
12356                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12357                                                                                 resolvedToken.hMethod),
12358                                    "method has unsatisfied method constraints");
12359
12360                     mflags = callInfo.verMethodFlags;
12361                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12362                 }
12363
12364             DO_LDFTN:
12365                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12366                 if (compDonotInline())
12367                 {
12368                     return;
12369                 }
12370
12371                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12372                 impPushOnStack(op1, typeInfo(heapToken));
12373
12374                 break;
12375             }
12376
12377             case CEE_LDVIRTFTN:
12378             {
12379                 /* Get the method token */
12380
12381                 _impResolveToken(CORINFO_TOKENKIND_Method);
12382
12383                 JITDUMP(" %08X", resolvedToken.token);
12384
12385                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12386                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12387                                                     CORINFO_CALLINFO_CALLVIRT)),
12388                               &callInfo);
12389
12390                 // This check really only applies to intrinsic Array.Address methods
12391                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12392                 {
12393                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12394                 }
12395
12396                 mflags = callInfo.methodFlags;
12397
12398                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12399
12400                 if (compIsForInlining())
12401                 {
12402                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12403                     {
12404                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12405                         return;
12406                     }
12407                 }
12408
12409                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12410
12411                 if (tiVerificationNeeded)
12412                 {
12413
12414                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12415                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12416
12417                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12418                     typeInfo declType =
12419                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12420
12421                     typeInfo arg = impStackTop().seTypeInfo;
12422                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12423                            "bad ldvirtftn");
12424
12425                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12426                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12427                     {
12428                         instanceClassHnd = arg.GetClassHandleForObjRef();
12429                     }
12430
12431                     // check any constraints on the method's class and type parameters
12432                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12433                                    "method has unsatisfied class constraints");
12434                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12435                                                                                 resolvedToken.hMethod),
12436                                    "method has unsatisfied method constraints");
12437
12438                     if (mflags & CORINFO_FLG_PROTECTED)
12439                     {
12440                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12441                                "Accessing protected method through wrong type.");
12442                     }
12443                 }
12444
12445                 /* Get the object-ref */
12446                 op1 = impPopStack().val;
12447                 assertImp(op1->gtType == TYP_REF);
12448
12449                 if (opts.IsReadyToRun())
12450                 {
12451                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12452                     {
12453                         if (op1->gtFlags & GTF_SIDE_EFFECT)
12454                         {
12455                             op1 = gtUnusedValNode(op1);
12456                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12457                         }
12458                         goto DO_LDFTN;
12459                     }
12460                 }
12461                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12462                 {
12463                     if (op1->gtFlags & GTF_SIDE_EFFECT)
12464                     {
12465                         op1 = gtUnusedValNode(op1);
12466                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12467                     }
12468                     goto DO_LDFTN;
12469                 }
12470
12471                 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12472                 if (compDonotInline())
12473                 {
12474                     return;
12475                 }
12476
12477                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12478                 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
12479                 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
12480                 impPushOnStack(fptr, typeInfo(heapToken));
12481
12482                 break;
12483             }
12484
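            // The constrained. prefix is emitted for calls made through a generic type parameter, e.g.
            // (illustrative IL only):
            //     constrained. !!T
            //     callvirt     instance string [mscorlib]System.Object::ToString()
            // It lets the runtime call the method directly on a managed pointer to T when T is a value
            // type that provides the method, boxing only when necessary. The importer just validates
            // and records the prefix here; it is consumed when the following callvirt is imported.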
12485             case CEE_CONSTRAINED:
12486
12487                 assertImp(sz == sizeof(unsigned));
12488                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12489                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12490                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12491
12492                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12493                 prefixFlags |= PREFIX_CONSTRAINED;
12494
12495                 {
12496                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12497                     if (actualOpcode != CEE_CALLVIRT)
12498                     {
12499                         BADCODE("constrained. has to be followed by callvirt");
12500                     }
12501                 }
12502
12503                 goto PREFIX;
12504
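            // The readonly. prefix applies to ldelema (and to the Array.Address intrinsic reached via a
            // call), e.g. (illustrative IL only):
            //     readonly.
            //     ldelema    !!T
            // It promises that the resulting managed pointer is only read from, which allows the array
            // covariance type check to be skipped.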
12505             case CEE_READONLY:
12506                 JITDUMP(" readonly.");
12507
12508                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12509                 prefixFlags |= PREFIX_READONLY;
12510
12511                 {
12512                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12513                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12514                     {
12515                         BADCODE("readonly. has to be followed by ldelema or call");
12516                     }
12517                 }
12518
12519                 assert(sz == 0);
12520                 goto PREFIX;
12521
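            // The tail. prefix must immediately precede a call, calli, or callvirt, and that call must be
            // immediately followed by ret, e.g. (illustrative IL only):
            //     tail.
            //     call   int32 C::M(int32)
            //     ret
            // The check below enforces that a call-class opcode follows the prefix.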
12522             case CEE_TAILCALL:
12523                 JITDUMP(" tail.");
12524
12525                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12526                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12527
12528                 {
12529                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12530                     if (!impOpcodeIsCallOpcode(actualOpcode))
12531                     {
12532                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
12533                     }
12534                 }
12535                 assert(sz == 0);
12536                 goto PREFIX;
12537
12538             case CEE_NEWOBJ:
12539
12540                 /* Since we will implicitly insert newObjThisPtr at the start of the
12541                    argument list, spill any GTF_ORDER_SIDEEFF */
12542                 impSpillSpecialSideEff();
12543
12544                 /* NEWOBJ does not respond to TAIL */
12545                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12546
12547                 /* NEWOBJ does not respond to CONSTRAINED */
12548                 prefixFlags &= ~PREFIX_CONSTRAINED;
12549
12550                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12551
12552                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12553                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12554                               &callInfo);
12555
12556                 if (compIsForInlining())
12557                 {
12558                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12559                     {
12560                         // Check to see if this call violates the boundary.
12561                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12562                         return;
12563                     }
12564                 }
12565
12566                 mflags = callInfo.methodFlags;
12567
12568                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12569                 {
12570                     BADCODE("newobj on static or abstract method");
12571                 }
12572
12573                 // Insert the security callout before any actual code is generated
12574                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12575
12576                 // There are three different cases for new:
12577                 //      1) Object is an array (arrays are treated specially by the EE)
12578                 //      2) Object is some other variable-sized object (e.g. String);
12579                 //         in both 1) and 2) the object size depends on the arguments
12580                 //      3) Class size can be determined beforehand (normal case)
12581                 // In the first case, we need to call a NEWOBJ helper (multinewarray),
12582                 // in the second case we call the constructor with a '0' this pointer,
12583                 // and in the third case we allocate the memory and then call the constructor.
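                // Illustrative examples of the three cases (not taken from this method's IL):
                //      1) newobj instance void int32[,]::.ctor(int32, int32)      -- multi-dimensional array
                //      2) newobj instance void System.String::.ctor(char, int32)  -- variable-sized, non-array
                //      3) newobj instance void C::.ctor()                         -- ordinary fixed-size object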
12584
12585                 clsFlags = callInfo.classFlags;
12586                 if (clsFlags & CORINFO_FLG_ARRAY)
12587                 {
12588                     if (tiVerificationNeeded)
12589                     {
12590                         CORINFO_CLASS_HANDLE elemTypeHnd;
12591                         INDEBUG(CorInfoType corType =)
12592                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12593                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12594                         Verify(elemTypeHnd == nullptr ||
12595                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12596                                "newarr of byref-like objects");
12597                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12598                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12599                                       &callInfo DEBUGARG(info.compFullName));
12600                     }
12601                     // Arrays need to call the NEWOBJ helper.
12602                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12603
12604                     impImportNewObjArray(&resolvedToken, &callInfo);
12605                     if (compDonotInline())
12606                     {
12607                         return;
12608                     }
12609
12610                     callTyp = TYP_REF;
12611                     break;
12612                 }
12613                 // At present this can only be String
12614                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12615                 {
12616                     if (IsTargetAbi(CORINFO_CORERT_ABI))
12617                     {
12618                         // The dummy argument does not exist in CoreRT
12619                         newObjThisPtr = nullptr;
12620                     }
12621                     else
12622                     {
12623                         // This is the case for variable-sized objects that are not
12624                         // arrays.  In this case, call the constructor with a null 'this'
12625                         // pointer
12626                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
12627                     }
12628
12629                     /* Remember that this basic block contains 'new' of an object */
12630                     block->bbFlags |= BBF_HAS_NEWOBJ;
12631                     optMethodFlags |= OMF_HAS_NEWOBJ;
12632                 }
12633                 else
12634                 {
12635                     // This is the normal case where the size of the object is
12636                     // fixed.  Allocate the memory and call the constructor.
12637
12638                     // Note: We cannot add a peep to avoid use of temp here
12639                     // because we don't have enough interference info to detect when
12640                     // sources and destination interfere, example: s = new S(ref);
12641
12642                     // TODO: Find the correct place to introduce a general
12643                     // reverse copy prop for struct return values from newobj or
12644                     // any function returning structs.
12645
12646                     /* get a temporary for the new object */
12647                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12648
12649                     // In the value class case we only need clsHnd for size calcs.
12650                     //
12651                     // The lookup of the code pointer will be handled by CALL in this case
12652                     if (clsFlags & CORINFO_FLG_VALUECLASS)
12653                     {
12654                         if (compIsForInlining())
12655                         {
12656                             // If value class has GC fields, inform the inliner. It may choose to
12657                             // bail out on the inline.
12658                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12659                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12660                             {
12661                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12662                                 if (compInlineResult->IsFailure())
12663                                 {
12664                                     return;
12665                                 }
12666
12667                                 // Do further notification in the case where the call site is rare;
12668                                 // some policies do not track the relative hotness of call sites for
12669                                 // "always" inline cases.
12670                                 if (impInlineInfo->iciBlock->isRunRarely())
12671                                 {
12672                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12673                                     if (compInlineResult->IsFailure())
12674                                     {
12675                                         return;
12676                                     }
12677                                 }
12678                             }
12679                         }
12680
12681                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12682                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
12683
12684                         if (impIsPrimitive(jitTyp))
12685                         {
12686                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12687                         }
12688                         else
12689                         {
12690                             // The local variable itself is the allocated space.
12691                             // Here we need the unsafe value cls check, since the address of the struct is taken for further
12692                             // use and is potentially exploitable.
12693                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12694                         }
12695
12696                         // Append a tree to zero-out the temp
12697                         newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12698
12699                         newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
12700                                                        gtNewIconNode(0), // Value
12701                                                        size,             // Size
12702                                                        false,            // isVolatile
12703                                                        false);           // not copyBlock
12704                         impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12705
12706                         // Obtain the address of the temp
12707                         newObjThisPtr =
12708                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
12709                     }
12710                     else
12711                     {
12712 #ifdef FEATURE_READYTORUN_COMPILER
12713                         if (opts.IsReadyToRun())
12714                         {
12715                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12716                             usingReadyToRunHelper = (op1 != nullptr);
12717                         }
12718
12719                         if (!usingReadyToRunHelper)
12720 #endif
12721                         {
12722                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12723                             if (op1 == nullptr)
12724                             { // compDonotInline()
12725                                 return;
12726                             }
12727
12728                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12729                             // and the newfast call with a single call to a dynamic R2R cell that will:
12730                             //      1) Load the context
12731                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
12732                             //      stub
12733                             //      3) Allocate and return the new object
12734                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12735
12736                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12737                                                     resolvedToken.hClass, TYP_REF, op1);
12738                         }
12739
12740                         // Remember that this basic block contains 'new' of an object
12741                         block->bbFlags |= BBF_HAS_NEWOBJ;
12742                         optMethodFlags |= OMF_HAS_NEWOBJ;
12743
12744                         // Append the assignment to the temp/local. Don't need to spill
12745                         // at all as we are just calling an EE-Jit helper which can only
12746                         // cause an (async) OutOfMemoryException.
12747
12748                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12749                         // to a temp. Note that the pattern "temp = allocObj" is required
12750                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12751                         // without exhaustive walk over all expressions.
12752
12753                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12754                         lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
12755
12756                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
12757                     }
12758                 }
12759                 goto CALL;
12760
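            // calli expects its arguments on the stack with a function pointer (typically produced by
            // ldftn/ldvirtftn, or obtained from native code) on top, and calls through that pointer using
            // the signature given by its token. Since there is no method handle, the importer has much
            // less information to work with here than for call/callvirt.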
12761             case CEE_CALLI:
12762
12763                 /* CALLI does not respond to CONSTRAINED */
12764                 prefixFlags &= ~PREFIX_CONSTRAINED;
12765
12766                 if (compIsForInlining())
12767                 {
12768                     // CALLI doesn't have a method handle, so assume the worst.
12769                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12770                     {
12771                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12772                         return;
12773                     }
12774                 }
12775
12776             // fall through
12777
12778             case CEE_CALLVIRT:
12779             case CEE_CALL:
12780
12781                 // We can't call getCallInfo on the token from a CALLI, but we need it in
12782                 // many other places.  We unfortunately embed that knowledge here.
12783                 if (opcode != CEE_CALLI)
12784                 {
12785                     _impResolveToken(CORINFO_TOKENKIND_Method);
12786
12787                     eeGetCallInfo(&resolvedToken,
12788                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12789                                   // this is how impImportCall invokes getCallInfo
12790                                   addVerifyFlag(
12791                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12792                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12793                                                                        : CORINFO_CALLINFO_NONE)),
12794                                   &callInfo);
12795                 }
12796                 else
12797                 {
12798                     // Suppress uninitialized use warning.
12799                     memset(&resolvedToken, 0, sizeof(resolvedToken));
12800                     memset(&callInfo, 0, sizeof(callInfo));
12801
12802                     resolvedToken.token = getU4LittleEndian(codeAddr);
12803                 }
12804
12805             CALL: // memberRef should be set.
12806                 // newObjThisPtr should be set for CEE_NEWOBJ
12807
12808                 JITDUMP(" %08X", resolvedToken.token);
12809                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12810
12811                 bool newBBcreatedForTailcallStress;
12812
12813                 newBBcreatedForTailcallStress = false;
12814
12815                 if (compIsForInlining())
12816                 {
12817                     if (compDonotInline())
12818                     {
12819                         return;
12820                     }
12821                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12822                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12823                 }
12824                 else
12825                 {
12826                     if (compTailCallStress())
12827                     {
12828                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12829                         // Tail call stress only recognizes call+ret patterns and forces them to be
12830                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
12831                         // doesn't import the 'ret' opcode following the call into the basic block containing
12832                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
12833                         // is already checking that there is an opcode following call and hence it is
12834                         // safe here to read next opcode without bounds check.
12835                         newBBcreatedForTailcallStress =
12836                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12837                                                              // make it jump to RET.
12838                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12839
12840                         if (newBBcreatedForTailcallStress &&
12841                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12842                             verCheckTailCallConstraint(opcode, &resolvedToken,
12843                                                        constraintCall ? &constrainedResolvedToken : nullptr,
12844                                                        true) // Is it legal to do a tailcall?
12845                             )
12846                         {
12847                             // Stress the tailcall.
12848                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12849                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12850                         }
12851                     }
12852                 }
12853
12854                 // This is split up to avoid goto flow warnings.
12855                 bool isRecursive;
12856                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
12857
12858                 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
12859                 // hence will not be considered for implicit tail calling.
12860                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12861                 {
12862                     if (compIsForInlining())
12863                     {
12864 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
12865                         // Are we inlining at an implicit tail call site? If so then we can flag
12866                         // implicit tail call sites in the inline body. These call sites
12867                         // often end up in non BBJ_RETURN blocks, so only flag them when
12868                         // we're able to handle shared returns.
12869                         if (impInlineInfo->iciCall->IsImplicitTailCall())
12870                         {
12871                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12872                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12873                         }
12874 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
12875                     }
12876                     else
12877                     {
12878                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12879                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12880                     }
12881                 }
12882
12883                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12884                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12885                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
12886
12887                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12888                 {
12889                     // All calls and delegates need a security callout.
12890                     // For delegates, this is the call to the delegate constructor, not the access check on the
12891                     // LD(virt)FTN.
12892                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12893
12894 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12895
12896                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12897                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
12898                 // ldtoken <field token>, and we now check accessibility
12899                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12900                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12901                 {
12902                     if (prevOpcode != CEE_LDTOKEN)
12903                     {
12904                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12905                     }
12906                     else
12907                     {
12908                         assert(lastLoadToken != NULL);
12909                         // Now that we know we have a token, verify that it is accessible for loading
12910                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
12911                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12912                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12913                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12914                     }
12915                 }
12916
12917 #endif // DevDiv 410397
12918                 }
12919
12920                 if (tiVerificationNeeded)
12921                 {
12922                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12923                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12924                                   &callInfo DEBUGARG(info.compFullName));
12925                 }
12926
12927                 // Insert delegate callout here.
12928                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12929                 {
12930 #ifdef DEBUG
12931                     // We should do this only if verification is enabled
12932                     // If verification is disabled, delegateCreateStart will not be initialized correctly
12933                     if (tiVerificationNeeded)
12934                     {
12935                         mdMemberRef delegateMethodRef = mdMemberRefNil;
12936                         // We should get here only for well formed delegate creation.
12937                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12938                     }
12939 #endif
12940                 }
12941
12942                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12943                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12944                 if (compDonotInline())
12945                 {
12946                     return;
12947                 }
12948
12949                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12950                                                                        // have created a new BB after the "call"
12951                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12952                 {
12953                     assert(!compIsForInlining());
12954                     goto RET;
12955                 }
12956
12957                 break;
12958
12959             case CEE_LDFLD:
12960             case CEE_LDSFLD:
12961             case CEE_LDFLDA:
12962             case CEE_LDSFLDA:
12963             {
12964
12965                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12966                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12967
12968                 /* Get the CP_Fieldref index */
12969                 assertImp(sz == sizeof(unsigned));
12970
12971                 _impResolveToken(CORINFO_TOKENKIND_Field);
12972
12973                 JITDUMP(" %08X", resolvedToken.token);
12974
12975                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12976
12977                 GenTreePtr           obj     = nullptr;
12978                 typeInfo*            tiObj   = nullptr;
12979                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12980
12981                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12982                 {
12983                     tiObj         = &impStackTop().seTypeInfo;
12984                     StackEntry se = impPopStack();
12985                     objType       = se.seTypeInfo.GetClassHandle();
12986                     obj           = se.val;
12987
12988                     if (impIsThis(obj))
12989                     {
12990                         aflags |= CORINFO_ACCESS_THIS;
12991
12992                         // An optimization for Contextful classes:
12993                         // we unwrap the proxy when we have a 'this reference'
12994
12995                         if (info.compUnwrapContextful)
12996                         {
12997                             aflags |= CORINFO_ACCESS_UNWRAP;
12998                         }
12999                     }
13000                 }
13001
13002                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13003
13004                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13005                 // handle
13006                 CorInfoType ciType = fieldInfo.fieldType;
13007                 clsHnd             = fieldInfo.structType;
13008
13009                 lclTyp = JITtype2varType(ciType);
13010
13011 #ifdef _TARGET_AMD64_
13012                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
13013 #endif // _TARGET_AMD64_
13014
13015                 if (compIsForInlining())
13016                 {
13017                     switch (fieldInfo.fieldAccessor)
13018                     {
13019                         case CORINFO_FIELD_INSTANCE_HELPER:
13020                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13021                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13022                         case CORINFO_FIELD_STATIC_TLS:
13023
13024                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
13025                             return;
13026
13027                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13028                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13029                             /* We may be able to inline the field accessors in specific instantiations of generic
13030                              * methods */
13031                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
13032                             return;
13033
13034                         default:
13035                             break;
13036                     }
13037
13038                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
13039                         clsHnd)
13040                     {
13041                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
13042                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
13043                         {
13044                             // Loading a static valuetype field usually will cause a JitHelper to be called
13045                             // for the static base. This will bloat the code.
13046                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
13047
13048                             if (compInlineResult->IsFailure())
13049                             {
13050                                 return;
13051                             }
13052                         }
13053                     }
13054                 }
13055
13056                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
13057                 if (isLoadAddress)
13058                 {
13059                     tiRetVal.MakeByRef();
13060                 }
13061                 else
13062                 {
13063                     tiRetVal.NormaliseForStack();
13064                 }
13065
13066                 // Perform this check always to ensure that we get field access exceptions even with
13067                 // SkipVerification.
13068                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13069
13070                 if (tiVerificationNeeded)
13071                 {
13072                     // You can also pass the unboxed struct to LDFLD
13073                     BOOL bAllowPlainValueTypeAsThis = FALSE;
13074                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13075                     {
13076                         bAllowPlainValueTypeAsThis = TRUE;
13077                     }
13078
13079                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13080
13081                     // If we're doing this on a heap object or from a 'safe' byref
13082                     // then the result is a safe byref too
13083                     if (isLoadAddress) // load address
13084                     {
13085                         if (fieldInfo.fieldFlags &
13086                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
13087                         {
13088                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13089                             {
13090                                 tiRetVal.SetIsPermanentHomeByRef();
13091                             }
13092                         }
13093                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13094                         {
13095                             // ldflda of byref is safe if done on a gc object or on  a
13096                             // safe byref
13097                             tiRetVal.SetIsPermanentHomeByRef();
13098                         }
13099                     }
13100                 }
13101                 else
13102                 {
13103                     // tiVerificationNeeded is false.
13104                     // Raise InvalidProgramException if static load accesses non-static field
13105                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13106                     {
13107                         BADCODE("static access on an instance field");
13108                     }
13109                 }
13110
13111                 // We are using ldfld/a on a static field. We allow it, but need to evaluate obj for its side effects.
13112                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13113                 {
13114                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13115                     {
13116                         obj = gtUnusedValNode(obj);
13117                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13118                     }
13119                     obj = nullptr;
13120                 }
13121
13122                 /* Preserve 'small' int types */
13123                 if (lclTyp > TYP_INT)
13124                 {
13125                     lclTyp = genActualType(lclTyp);
13126                 }
13127
13128                 bool usesHelper = false;
13129
13130                 switch (fieldInfo.fieldAccessor)
13131                 {
13132                     case CORINFO_FIELD_INSTANCE:
13133 #ifdef FEATURE_READYTORUN_COMPILER
13134                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13135 #endif
13136                     {
13137                         bool nullcheckNeeded = false;
13138
13139                         obj = impCheckForNullPointer(obj);
13140
13141                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13142                         {
13143                             nullcheckNeeded = true;
13144                         }
13145
13146                         // If the object is a struct, what we really want is
13147                         // for the field to operate on the address of the struct.
13148                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13149                         {
13150                             assert(opcode == CEE_LDFLD && objType != nullptr);
13151
13152                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13153                         }
13154
13155                         /* Create the data member node */
13156                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13157
13158 #ifdef FEATURE_READYTORUN_COMPILER
13159                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13160                         {
13161                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13162                         }
13163 #endif
13164
13165                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13166
13167                         if (fgAddrCouldBeNull(obj))
13168                         {
13169                             op1->gtFlags |= GTF_EXCEPT;
13170                         }
13171
13172                         // If gtFldObj is a BYREF then our target is a value class and
13173                         // it could point anywhere, e.g. a boxed class static int
13174                         if (obj->gtType == TYP_BYREF)
13175                         {
13176                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13177                         }
13178
13179                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13180                         if (StructHasOverlappingFields(typeFlags))
13181                         {
13182                             op1->gtField.gtFldMayOverlap = true;
13183                         }
13184
13185                         // wrap it in an address-of operator if necessary
13186                         if (isLoadAddress)
13187                         {
13188                             op1 = gtNewOperNode(GT_ADDR,
13189                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13190                         }
13191                         else
13192                         {
13193                             if (compIsForInlining() &&
13194                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13195                                                                                    impInlineInfo->inlArgInfo))
13196                             {
13197                                 impInlineInfo->thisDereferencedFirst = true;
13198                             }
13199                         }
13200                     }
13201                     break;
13202
13203                     case CORINFO_FIELD_STATIC_TLS:
13204 #ifdef _TARGET_X86_
13205                         // Legacy TLS access is implemented as intrinsic on x86 only
13206
13207                         /* Create the data member node */
13208                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13209                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13210
13211                         if (isLoadAddress)
13212                         {
13213                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13214                         }
13215                         break;
13216 #else
13217                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13218
13219                         __fallthrough;
13220 #endif
13221
13222                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13223                     case CORINFO_FIELD_INSTANCE_HELPER:
13224                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13225                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13226                                                clsHnd, nullptr);
13227                         usesHelper = true;
13228                         break;
13229
13230                     case CORINFO_FIELD_STATIC_ADDRESS:
13231                         // Replace static read-only fields with constant if possible
13232                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13233                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13234                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13235                         {
13236                             CorInfoInitClassResult initClassResult =
13237                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13238                                                             impTokenLookupContextHandle);
13239
13240                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13241                             {
13242                                 void** pFldAddr = nullptr;
13243                                 void*  fldAddr =
13244                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13245
13246                                 // We should always be able to access this static's address directly
13247                                 assert(pFldAddr == nullptr);
13248
13249                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13250                                 goto FIELD_DONE;
13251                             }
13252                         }
13253
13254                         __fallthrough;
13255
13256                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13257                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13258                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13259                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13260                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13261                                                          lclTyp);
13262                         break;
13263
13264                     case CORINFO_FIELD_INTRINSIC_ZERO:
13265                     {
13266                         assert(aflags & CORINFO_ACCESS_GET);
13267                         op1 = gtNewIconNode(0, lclTyp);
13268                         goto FIELD_DONE;
13269                     }
13270                     break;
13271
13272                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13273                     {
13274                         assert(aflags & CORINFO_ACCESS_GET);
13275
13276                         LPVOID         pValue;
13277                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13278                         op1                = gtNewStringLiteralNode(iat, pValue);
13279                         goto FIELD_DONE;
13280                     }
13281                     break;
13282
13283                     case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
13284                     {
13285                         assert(aflags & CORINFO_ACCESS_GET);
13286 #if BIGENDIAN
13287                         op1 = gtNewIconNode(0, lclTyp);
13288 #else
13289                         op1                     = gtNewIconNode(1, lclTyp);
13290 #endif
13291                         goto FIELD_DONE;
13292                     }
13293                     break;
13294
13295                     default:
13296                         assert(!"Unexpected fieldAccessor");
13297                 }
13298
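                      // Apply the IL 'volatile.' and 'unaligned.' prefix semantics to the load; the
                      // indirection flags are only set when the access did not go through a helper call.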
13299                 if (!isLoadAddress)
13300                 {
13301
13302                     if (prefixFlags & PREFIX_VOLATILE)
13303                     {
13304                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13305                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13306
13307                         if (!usesHelper)
13308                         {
13309                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13310                                    (op1->OperGet() == GT_OBJ));
13311                             op1->gtFlags |= GTF_IND_VOLATILE;
13312                         }
13313                     }
13314
13315                     if (prefixFlags & PREFIX_UNALIGNED)
13316                     {
13317                         if (!usesHelper)
13318                         {
13319                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13320                                    (op1->OperGet() == GT_OBJ));
13321                             op1->gtFlags |= GTF_IND_UNALIGNED;
13322                         }
13323                     }
13324                 }
13325
13326                 /* Check if the class needs explicit initialization */
13327
13328                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13329                 {
13330                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13331                     if (compDonotInline())
13332                     {
13333                         return;
13334                     }
13335                     if (helperNode != nullptr)
13336                     {
13337                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13338                     }
13339                 }
13340
13341             FIELD_DONE:
13342                 impPushOnStack(op1, tiRetVal);
13343             }
13344             break;
13345
13346             case CEE_STFLD:
13347             case CEE_STSFLD:
13348             {
13349
13350                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13351
13352                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13353
13354                 /* Get the CP_Fieldref index */
13355
13356                 assertImp(sz == sizeof(unsigned));
13357
13358                 _impResolveToken(CORINFO_TOKENKIND_Field);
13359
13360                 JITDUMP(" %08X", resolvedToken.token);
13361
13362                 int        aflags = CORINFO_ACCESS_SET;
13363                 GenTreePtr obj    = nullptr;
13364                 typeInfo*  tiObj  = nullptr;
13365                 typeInfo   tiVal;
13366
13367                 /* Pull the value from the stack */
13368                 StackEntry se = impPopStack();
13369                 op2           = se.val;
13370                 tiVal         = se.seTypeInfo;
13371                 clsHnd        = tiVal.GetClassHandle();
13372
13373                 if (opcode == CEE_STFLD)
13374                 {
13375                     tiObj = &impStackTop().seTypeInfo;
13376                     obj   = impPopStack().val;
13377
13378                     if (impIsThis(obj))
13379                     {
13380                         aflags |= CORINFO_ACCESS_THIS;
13381
13382                         // An optimization for Contextful classes:
13383                         // we unwrap the proxy when we have a 'this reference'
13384
13385                         if (info.compUnwrapContextful)
13386                         {
13387                             aflags |= CORINFO_ACCESS_UNWRAP;
13388                         }
13389                     }
13390                 }
13391
13392                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13393
13394                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13395                 // handle
13396                 CorInfoType ciType = fieldInfo.fieldType;
13397                 fieldClsHnd        = fieldInfo.structType;
13398
13399                 lclTyp = JITtype2varType(ciType);
13400
13401                 if (compIsForInlining())
13402                 {
13403                     /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap,
13404                      * or a per-inst static? */
13405
13406                     switch (fieldInfo.fieldAccessor)
13407                     {
13408                         case CORINFO_FIELD_INSTANCE_HELPER:
13409                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13410                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13411                         case CORINFO_FIELD_STATIC_TLS:
13412
13413                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13414                             return;
13415
13416                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13417                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13418                             /* We may be able to inline the field accessors in specific instantiations of generic
13419                              * methods */
13420                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13421                             return;
13422
13423                         default:
13424                             break;
13425                     }
13426                 }
13427
13428                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13429
13430                 if (tiVerificationNeeded)
13431                 {
13432                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13433                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13434                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13435                 }
13436                 else
13437                 {
13438                     // tiVerificationNeeded is false.
13439                     // Raise InvalidProgramException if static store accesses non-static field
13440                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13441                     {
13442                         BADCODE("static access on an instance field");
13443                     }
13444                 }
13445
13446                 // We are using stfld on a static field.
13447                 // We allow it, but need to evaluate any side effects from obj
13448                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13449                 {
13450                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13451                     {
13452                         obj = gtUnusedValNode(obj);
13453                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13454                     }
13455                     obj = nullptr;
13456                 }
13457
13458                 /* Preserve 'small' int types */
13459                 if (lclTyp > TYP_INT)
13460                 {
13461                     lclTyp = genActualType(lclTyp);
13462                 }
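                      // As on the load path, shape the store according to the accessor the EE reported:
                      // direct instance field, legacy TLS static, accessor helper call, or static address.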
13463
13464                 switch (fieldInfo.fieldAccessor)
13465                 {
13466                     case CORINFO_FIELD_INSTANCE:
13467 #ifdef FEATURE_READYTORUN_COMPILER
13468                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13469 #endif
13470                     {
13471                         obj = impCheckForNullPointer(obj);
13472
13473                         /* Create the data member node */
13474                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13475                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13476                         if (StructHasOverlappingFields(typeFlags))
13477                         {
13478                             op1->gtField.gtFldMayOverlap = true;
13479                         }
13480
13481 #ifdef FEATURE_READYTORUN_COMPILER
13482                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13483                         {
13484                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13485                         }
13486 #endif
13487
13488                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13489
13490                         if (fgAddrCouldBeNull(obj))
13491                         {
13492                             op1->gtFlags |= GTF_EXCEPT;
13493                         }
13494
13495                         // If gtFldObj is a BYREF then our target is a value class and
13496                         // it could point anywhere, for example a boxed class static int
13497                         if (obj->gtType == TYP_BYREF)
13498                         {
13499                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13500                         }
13501
13502                         if (compIsForInlining() &&
13503                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13504                         {
13505                             impInlineInfo->thisDereferencedFirst = true;
13506                         }
13507                     }
13508                     break;
13509
13510                     case CORINFO_FIELD_STATIC_TLS:
13511 #ifdef _TARGET_X86_
13512                         // Legacy TLS access is implemented as intrinsic on x86 only
13513
13514                         /* Create the data member node */
13515                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13516                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13517
13518                         break;
13519 #else
13520                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13521
13522                         __fallthrough;
13523 #endif
13524
13525                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13526                     case CORINFO_FIELD_INSTANCE_HELPER:
13527                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13528                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13529                                                clsHnd, op2);
13530                         goto SPILL_APPEND;
13531
13532                     case CORINFO_FIELD_STATIC_ADDRESS:
13533                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13534                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13535                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13536                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13537                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13538                                                          lclTyp);
13539                         break;
13540
13541                     default:
13542                         assert(!"Unexpected fieldAccessor");
13543                 }
13544
13545                 // Create the member assignment, unless we have a struct.
13546                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13547                 bool deferStructAssign = varTypeIsStruct(lclTyp);
13548
13549                 if (!deferStructAssign)
13550                 {
13551                     if (prefixFlags & PREFIX_VOLATILE)
13552                     {
13553                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13554                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13555                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13556                         op1->gtFlags |= GTF_IND_VOLATILE;
13557                     }
13558                     if (prefixFlags & PREFIX_UNALIGNED)
13559                     {
13560                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13561                         op1->gtFlags |= GTF_IND_UNALIGNED;
13562                     }
13563
13564                     /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is
13565                        bypassed (full-trust apps). The reason this works is that the JIT stores an i4 constant in
13566                        the GenTree union during importation and reads from the union as if it were a long during
13567                        code generation. Though this can potentially read garbage, one can get lucky to have this
13568                        working correctly.
13569
13570                        This code pattern is generated by the Dev10 MC++ compiler while storing to fields when
13571                        compiled with the /O2 switch (the default when compiling retail configs in Dev10), and a
13572                        customer app has taken a dependency on it. To be backward compatible, we explicitly add an
13573                        upward cast here so that it always works correctly.
13574
13575                        Note that this is limited to x86 alone, as there is no back compat to be addressed for the
13576                        Arm JIT for V4.0.
13577                     */
13582                     CLANG_FORMAT_COMMENT_ANCHOR;
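                          // For illustration only (hypothetical field name): IL such as
                          //     ldc.i4 42
                          //     stfld  int64 SomeClass::someField
                          // reaches this point with op2 as a TYP_INT constant and op1 typed as long; the cast
                          // inserted below widens op2 so that codegen does not read garbage from the union.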
13583
13584 #ifdef _TARGET_X86_
13585                     if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13586                         varTypeIsLong(op1->TypeGet()))
13587                     {
13588                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13589                     }
13590 #endif
13591
13592 #ifdef _TARGET_64BIT_
13593                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13594                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13595                     {
13596                         op2->gtType = TYP_I_IMPL;
13597                     }
13598                     else
13599                     {
13600                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13601                         //
13602                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13603                         {
13604                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13605                         }
13606                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13607                         //
13608                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13609                         {
13610                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13611                         }
13612                     }
13613 #endif
13614
13615 #if !FEATURE_X87_DOUBLES
13616                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13617                     // We insert a cast to the dest 'op1' type
13618                     //
13619                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13620                         varTypeIsFloating(op2->gtType))
13621                     {
13622                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13623                     }
13624 #endif // !FEATURE_X87_DOUBLES
13625
13626                     op1 = gtNewAssignNode(op1, op2);
13627
13628                     /* Mark the expression as containing an assignment */
13629
13630                     op1->gtFlags |= GTF_ASG;
13631                 }
13632
13633                 /* Check if the class needs explicit initialization */
13634
13635                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13636                 {
13637                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13638                     if (compDonotInline())
13639                     {
13640                         return;
13641                     }
13642                     if (helperNode != nullptr)
13643                     {
13644                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13645                     }
13646                 }
13647
13648                 /* stfld can interfere with value classes (consider the sequence
13649                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
13650                    spill all value class references from the stack. */
13651
13652                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13653                 {
13654                     assert(tiObj);
13655
13656                     if (impIsValueType(tiObj))
13657                     {
13658                         impSpillEvalStack();
13659                     }
13660                     else
13661                     {
13662                         impSpillValueClasses();
13663                     }
13664                 }
13665
13666                 /* Spill any refs to the same member from the stack */
13667
13668                 impSpillLclRefs((ssize_t)resolvedToken.hField);
13669
13670                 /* stsfld also interferes with indirect accesses (for aliased
13671                    statics) and calls. But we don't need to spill other statics
13672                    as we have explicitly spilled this particular static field. */
13673
13674                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13675
13676                 if (deferStructAssign)
13677                 {
13678                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13679                 }
13680             }
13681                 goto APPEND;
13682
13683             case CEE_NEWARR:
13684             {
13685
13686                 /* Get the class type index operand */
13687
13688                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13689
13690                 JITDUMP(" %08X", resolvedToken.token);
13691
13692                 if (!opts.IsReadyToRun())
13693                 {
13694                     // Need to restore array classes before creating array objects on the heap
13695                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13696                     if (op1 == nullptr)
13697                     { // compDonotInline()
13698                         return;
13699                     }
13700                 }
13701
13702                 if (tiVerificationNeeded)
13703                 {
13704                     // As per ECMA 'numElems' specified can be either int32 or native int.
13705                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13706
13707                     CORINFO_CLASS_HANDLE elemTypeHnd;
13708                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13709                     Verify(elemTypeHnd == nullptr ||
13710                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13711                            "array of byref-like type");
13712                 }
13713
13714                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13715
13716                 accessAllowedResult =
13717                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13718                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13719
13720                 /* Form the arglist: array class handle, size */
13721                 op2 = impPopStack().val;
13722                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13723
13724 #ifdef FEATURE_READYTORUN_COMPILER
13725                 if (opts.IsReadyToRun())
13726                 {
13727                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13728                                                     gtNewArgList(op2));
13729                     usingReadyToRunHelper = (op1 != nullptr);
13730
13731                     if (!usingReadyToRunHelper)
13732                     {
13733                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13734                         // and the newarr call with a single call to a dynamic R2R cell that will:
13735                         //      1) Load the context
13736                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13737                         //      3) Allocate the new array
13738                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13739
13740                         // Need to restore array classes before creating array objects on the heap
13741                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13742                         if (op1 == nullptr)
13743                         { // compDonotInline()
13744                             return;
13745                         }
13746                     }
13747                 }
13748
13749                 if (!usingReadyToRunHelper)
13750 #endif
13751                 {
13752                     args = gtNewArgList(op1, op2);
13753
13754                     /* Create a call to 'new' */
13755
13756                     // Note that this only works for shared generic code because the same helper is used for all
13757                     // reference array types
13758                     op1 =
13759                         gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13760                 }
13761
13762                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
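                      // The assignment above records the array class handle on the helper call node,
                      // presumably so that later phases can recover the array type directly from this call.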
13763
13764                 /* Remember that this basic block contains 'new' of an sd array */
13765
13766                 block->bbFlags |= BBF_HAS_NEWARRAY;
13767                 optMethodFlags |= OMF_HAS_NEWARRAY;
13768
13769                 /* Push the result of the call on the stack */
13770
13771                 impPushOnStack(op1, tiRetVal);
13772
13773                 callTyp = TYP_REF;
13774             }
13775             break;
13776
13777             case CEE_LOCALLOC:
13778                 assert(!compIsForInlining());
13779
13780                 if (tiVerificationNeeded)
13781                 {
13782                     Verify(false, "bad opcode");
13783                 }
13784
13785                 // We don't allow locallocs inside handlers
13786                 if (block->hasHndIndex())
13787                 {
13788                     BADCODE("Localloc can't be inside handler");
13789                 }
13790
13791                 /* The FP register may not be back to the original value at the end
13792                    of the method, even if the frame size is 0, as localloc may
13793                    have modified it. So we will HAVE to reset it */
13794
13795                 compLocallocUsed = true;
13796                 setNeedsGSSecurityCookie();
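                      // The GS security cookie is requested above because a dynamically sized stack
                      // allocation makes buffer overruns on this frame easier to exploit.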
13797
13798                 // Get the size to allocate
13799
13800                 op2 = impPopStack().val;
13801                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13802
13803                 if (verCurrentState.esStackDepth != 0)
13804                 {
13805                     BADCODE("Localloc can only be used when the stack is empty");
13806                 }
13807
13808                 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13809
13810                 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13811
13812                 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13813
13814                 impPushOnStack(op1, tiRetVal);
13815                 break;
13816
13817             case CEE_ISINST:
13818
13819                 /* Get the type token */
13820                 assertImp(sz == sizeof(unsigned));
13821
13822                 _impResolveToken(CORINFO_TOKENKIND_Casting);
13823
13824                 JITDUMP(" %08X", resolvedToken.token);
13825
13826                 if (!opts.IsReadyToRun())
13827                 {
13828                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13829                     if (op2 == nullptr)
13830                     { // compDonotInline()
13831                         return;
13832                     }
13833                 }
13834
13835                 if (tiVerificationNeeded)
13836                 {
13837                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13838                     // Even if this is a value class, we know it is boxed.
13839                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13840                 }
13841                 accessAllowedResult =
13842                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13843                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13844
13845                 op1 = impPopStack().val;
13846
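                      // op1 now holds the object to test. Under ReadyToRun, prefer the precomputed
                      // isinstanceof helper; otherwise fall back to the generic expansion in
                      // impCastClassOrIsInstToTree below.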
13847 #ifdef FEATURE_READYTORUN_COMPILER
13848                 if (opts.IsReadyToRun())
13849                 {
13850                     GenTreeCall* opLookup =
13851                         impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13852                                                   gtNewArgList(op1));
13853                     usingReadyToRunHelper = (opLookup != nullptr);
13854                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
13855
13856                     if (!usingReadyToRunHelper)
13857                     {
13858                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13859                         // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13860                         //      1) Load the context
13861                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13862                         //      3) Perform the 'is instance' check on the input object
13863                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13864
13865                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13866                         if (op2 == nullptr)
13867                         { // compDonotInline()
13868                             return;
13869                         }
13870                     }
13871                 }
13872
13873                 if (!usingReadyToRunHelper)
13874 #endif
13875                 {
13876                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13877                 }
13878                 if (compDonotInline())
13879                 {
13880                     return;
13881                 }
13882
13883                 impPushOnStack(op1, tiRetVal);
13884
13885                 break;
13886
13887             case CEE_REFANYVAL:
13888
13889                 // get the class handle and make an ICON node out of it
13890
13891                 _impResolveToken(CORINFO_TOKENKIND_Class);
13892
13893                 JITDUMP(" %08X", resolvedToken.token);
13894
13895                 op2 = impTokenToHandle(&resolvedToken);
13896                 if (op2 == nullptr)
13897                 { // compDonotInline()
13898                     return;
13899                 }
13900
13901                 if (tiVerificationNeeded)
13902                 {
13903                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13904                            "need refany");
13905                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13906                 }
13907
13908                 op1 = impPopStack().val;
13909                 // make certain it is normalized;
13910                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13911
13912                 // Call helper GETREFANY(classHandle, op1);
13913                 args = gtNewArgList(op2, op1);
13914                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13915
13916                 impPushOnStack(op1, tiRetVal);
13917                 break;
13918
13919             case CEE_REFANYTYPE:
13920
13921                 if (tiVerificationNeeded)
13922                 {
13923                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13924                            "need refany");
13925                 }
13926
13927                 op1 = impPopStack().val;
13928
13929                 // make certain it is normalized;
13930                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13931
13932                 if (op1->gtOper == GT_OBJ)
13933                 {
13934                     // Get the address of the refany
13935                     op1 = op1->gtOp.gtOp1;
13936
13937                     // Fetch the type from the correct slot
13938                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13939                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13940                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13941                 }
13942                 else
13943                 {
13944                     assertImp(op1->gtOper == GT_MKREFANY);
13945
13946                     // The pointer may have side-effects
13947                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13948                     {
13949                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13950 #ifdef DEBUG
13951                         impNoteLastILoffs();
13952 #endif
13953                     }
13954
13955                     // We already have the class handle
13956                     op1 = op1->gtOp.gtOp2;
13957                 }
13958
13959                 // convert native TypeHandle to RuntimeTypeHandle
13960                 {
13961                     GenTreeArgList* helperArgs = gtNewArgList(op1);
13962
13963                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13964                                               helperArgs);
13965
13966                     // The handle struct is returned in register
13967                     op1->gtCall.gtReturnType = TYP_REF;
13968
13969                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13970                 }
13971
13972                 impPushOnStack(op1, tiRetVal);
13973                 break;
13974
13975             case CEE_LDTOKEN:
13976             {
13977                 /* Get the Class index */
13978                 assertImp(sz == sizeof(unsigned));
13979                 lastLoadToken = codeAddr;
13980                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13981
13982                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13983
13984                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13985                 if (op1 == nullptr)
13986                 { // compDonotInline()
13987                     return;
13988                 }
13989
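                      // Pick the conversion helper based on the kind of token: a plain type handle becomes
                      // a RuntimeType, while method and field tokens are wrapped as stub runtime
                      // method/field handles.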
13990                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13991                 assert(resolvedToken.hClass != nullptr);
13992
13993                 if (resolvedToken.hMethod != nullptr)
13994                 {
13995                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13996                 }
13997                 else if (resolvedToken.hField != nullptr)
13998                 {
13999                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
14000                 }
14001
14002                 GenTreeArgList* helperArgs = gtNewArgList(op1);
14003
14004                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
14005
14006                 // The handle struct is returned in register
14007                 op1->gtCall.gtReturnType = TYP_REF;
14008
14009                 tiRetVal = verMakeTypeInfo(tokenType);
14010                 impPushOnStack(op1, tiRetVal);
14011             }
14012             break;
14013
14014             case CEE_UNBOX:
14015             case CEE_UNBOX_ANY:
14016             {
14017                 /* Get the Class index */
14018                 assertImp(sz == sizeof(unsigned));
14019
14020                 _impResolveToken(CORINFO_TOKENKIND_Class);
14021
14022                 JITDUMP(" %08X", resolvedToken.token);
14023
14024                 BOOL runtimeLookup;
14025                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
14026                 if (op2 == nullptr)
14027                 { // compDonotInline()
14028                     return;
14029                 }
14030
14031                 // Run this always so we can get access exceptions even with SkipVerification.
14032                 accessAllowedResult =
14033                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14034                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14035
14036                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
14037                 {
14038                     if (tiVerificationNeeded)
14039                     {
14040                         typeInfo tiUnbox = impStackTop().seTypeInfo;
14041                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
14042                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14043                         tiRetVal.NormaliseForStack();
14044                     }
14045                     op1 = impPopStack().val;
14046                     goto CASTCLASS;
14047                 }
14048
14049                 /* Pop the object and create the unbox helper call */
14050                 /* You might think that for UNBOX_ANY we need to push a different */
14051                 /* (non-byref) type, but here we're making the tiRetVal that is used */
14052                 /* for the intermediate pointer which we then transfer onto the OBJ */
14053                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
14054                 if (tiVerificationNeeded)
14055                 {
14056                     typeInfo tiUnbox = impStackTop().seTypeInfo;
14057                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
14058
14059                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14060                     Verify(tiRetVal.IsValueClass(), "not value class");
14061                     tiRetVal.MakeByRef();
14062
14063                     // We always come from an objref, so this is safe byref
14064                     tiRetVal.SetIsPermanentHomeByRef();
14065                     tiRetVal.SetIsReadonlyByRef();
14066                 }
14067
14068                 op1 = impPopStack().val;
14069                 assertImp(op1->gtType == TYP_REF);
14070
14071                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
14072                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
14073
14074                 // We only want to expand inline the normal UNBOX helper;
14075                 expandInline = (helper == CORINFO_HELP_UNBOX);
14076
14077                 if (expandInline)
14078                 {
14079                     if (compCurBB->isRunRarely())
14080                     {
14081                         expandInline = false; // not worth the code expansion
14082                     }
14083                 }
14084
14085                 if (expandInline)
14086                 {
14087                     // we are doing normal unboxing
14088                     // inline the common case of the unbox helper
14089                     // UNBOX(exp) morphs into
14090                     // clone = pop(exp);
14091                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14092                     // push(clone + sizeof(void*))
14093                     //
14094                     GenTreePtr cloneOperand;
14095                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14096                                        nullptr DEBUGARG("inline UNBOX clone1"));
14097                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
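                          // The indirection above loads the object's method table pointer, which condBox
                          // below compares against the class handle in op2.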
14098
14099                     GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14100
14101                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14102                                        nullptr DEBUGARG("inline UNBOX clone2"));
14103                     op2 = impTokenToHandle(&resolvedToken);
14104                     if (op2 == nullptr)
14105                     { // compDonotInline()
14106                         return;
14107                     }
14108                     args = gtNewArgList(op2, op1);
14109                     op1  = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
14110
14111                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14112                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14113                     condBox->gtFlags |= GTF_RELOP_QMARK;
14114
14115                     // QMARK nodes cannot reside on the evaluation stack. Because there
14116                     // may be other trees on the evaluation stack that side-effect the
14117                     // sources of the UNBOX operation we must spill the stack.
14118
14119                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14120
14121                     // Create the address-expression to reference past the object header
14122                     // to the beginning of the value-type. Today this means adjusting
14123                     // past the base of the object's vtable field, which is pointer sized.
14124
14125                     op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
14126                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14127                 }
14128                 else
14129                 {
14130                     unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
14131
14132                     // Don't optimize, just call the helper and be done with it
14133                     args = gtNewArgList(op2, op1);
14134                     op1  = gtNewHelperCallNode(helper,
14135                                               (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
14136                                               callFlags, args);
14137                 }
14138
14139                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
14140                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
14141                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
14142                        );
14143
14144                 /*
14145                   ----------------------------------------------------------------------
14146                   | \ helper  |                         |                              |
14147                   |   \       |                         |                              |
14148                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14149                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14150                   | opcode  \ |                         |                              |
14151                   |---------------------------------------------------------------------
14152                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14153                   |           |                         | push the BYREF to this local |
14154                   |---------------------------------------------------------------------
14155                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14156                   |           | the BYREF               | For Linux when the           |
14157                   |           |                         |  struct is returned in two   |
14158                   |           |                         |  registers create a temp     |
14159                   |           |                         |  which address is passed to  |
14160                   |           |                         |  the unbox_nullable helper.  |
14161                   |---------------------------------------------------------------------
14162                 */
14163
14164                 if (opcode == CEE_UNBOX)
14165                 {
14166                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14167                     {
14168                         // Unbox nullable helper returns a struct type.
14169                         // We need to spill it to a temp so that we can take the address of it.
14170                         // Here we need the unsafe value cls check, since the address of the struct is taken to
14171                         // be used further along and could potentially be exploited.
14172
14173                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14174                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14175
14176                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14177                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14178                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14179
14180                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14181                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14182                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14183                     }
14184
14185                     assert(op1->gtType == TYP_BYREF);
14186                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14187                 }
14188                 else
14189                 {
14190                     assert(opcode == CEE_UNBOX_ANY);
14191
14192                     if (helper == CORINFO_HELP_UNBOX)
14193                     {
14194                         // Normal unbox helper returns a TYP_BYREF.
14195                         impPushOnStack(op1, tiRetVal);
14196                         oper = GT_OBJ;
14197                         goto OBJ;
14198                     }
14199
14200                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14201
14202 #if FEATURE_MULTIREG_RET
14203
14204                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14205                     {
14206                         // Unbox nullable helper returns a TYP_STRUCT.
14207                         // For the multi-reg case we need to spill it to a temp so that
14208                         // we can pass the address to the unbox_nullable jit helper.
14209
14210                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14211                         lvaTable[tmp].lvIsMultiRegArg = true;
14212                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14213
14214                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14215                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14216                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14217
14218                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14219                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14220                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14221
14222                         // In this case the return value of the unbox helper is TYP_BYREF.
14223                         // Make sure the right type is placed on the operand type stack.
14224                         impPushOnStack(op1, tiRetVal);
14225
14226                         // Load the struct.
14227                         oper = GT_OBJ;
14228
14229                         assert(op1->gtType == TYP_BYREF);
14230                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14231
14232                         goto OBJ;
14233                     }
14234                     else
14235
14236 #endif // !FEATURE_MULTIREG_RET
14237
14238                     {
14239                         // If the struct is not register passable, we have it materialized in the RetBuf.
14240                         assert(op1->gtType == TYP_STRUCT);
14241                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14242                         assert(tiRetVal.IsValueClass());
14243                     }
14244                 }
14245
14246                 impPushOnStack(op1, tiRetVal);
14247             }
14248             break;
14249
14250             case CEE_BOX:
14251             {
14252                 /* Get the Class index */
14253                 assertImp(sz == sizeof(unsigned));
14254
14255                 _impResolveToken(CORINFO_TOKENKIND_Box);
14256
14257                 JITDUMP(" %08X", resolvedToken.token);
14258
14259                 if (tiVerificationNeeded)
14260                 {
14261                     typeInfo tiActual = impStackTop().seTypeInfo;
14262                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14263
14264                     Verify(verIsBoxable(tiBox), "boxable type expected");
14265
14266                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14267                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14268                            "boxed type has unsatisfied class constraints");
14269
14270                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14271
14272                     // Observation: the following code introduces a boxed value class on the stack, but,
14273                     // according to the ECMA spec, one would simply expect: tiRetVal =
14274                     // typeInfo(TI_REF,impGetObjectClass());
14275
14276                     // Push the result back on the stack:
14277                     // even if clsHnd is a value class we want the TI_REF;
14278                     // we call back to the EE to find out what type we should push (for nullable<T> we push T)
14279                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14280                 }
14281
14282                 accessAllowedResult =
14283                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14284                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14285
14286                 // Note BOX can be used on things that are not value classes, in which
14287                 // case we get a NOP.  However the verifier's view of the type on the
14288                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14289                 if (!eeIsValueClass(resolvedToken.hClass))
14290                 {
14291                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14292                     break;
14293                 }
14294
14295                 // Look ahead for unbox.any
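                      // If the next instruction is unbox.any of the same (non-shared-instantiation) class,
                      // the box/unbox.any pair is a no-op: leave the value on the stack, skip importing the
                      // box, and advance past the unbox.any as well.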
14296                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14297                 {
14298                     DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14299                     if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14300                     {
14301                         CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14302
14303                         impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14304
14305                         if (unboxResolvedToken.hClass == resolvedToken.hClass)
14306                         {
14307                             // Skip the next unbox.any instruction
14308                             sz += sizeof(mdToken) + 1;
14309                             break;
14310                         }
14311                     }
14312                 }
14313
14314                 impImportAndPushBox(&resolvedToken);
14315                 if (compDonotInline())
14316                 {
14317                     return;
14318                 }
14319             }
14320             break;
14321
14322             case CEE_SIZEOF:
14323
14324                 /* Get the Class index */
14325                 assertImp(sz == sizeof(unsigned));
14326
14327                 _impResolveToken(CORINFO_TOKENKIND_Class);
14328
14329                 JITDUMP(" %08X", resolvedToken.token);
14330
14331                 if (tiVerificationNeeded)
14332                 {
14333                     tiRetVal = typeInfo(TI_INT);
14334                 }
14335
14336                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14337                 impPushOnStack(op1, tiRetVal);
14338                 break;
14339
14340             case CEE_CASTCLASS:
14341
14342                 /* Get the Class index */
14343
14344                 assertImp(sz == sizeof(unsigned));
14345
14346                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14347
14348                 JITDUMP(" %08X", resolvedToken.token);
14349
14350                 if (!opts.IsReadyToRun())
14351                 {
14352                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14353                     if (op2 == nullptr)
14354                     { // compDonotInline()
14355                         return;
14356                     }
14357                 }
14358
14359                 if (tiVerificationNeeded)
14360                 {
14361                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14362                     // box it
14363                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14364                 }
14365
14366                 accessAllowedResult =
14367                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14368                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14369
14370                 op1 = impPopStack().val;
14371
14372             /* Pop the address and create the 'checked cast' helper call */
14373
14374             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14375             // and op2 to contain code that creates the type handle corresponding to typeRef
14376             CASTCLASS:
14377
14378 #ifdef FEATURE_READYTORUN_COMPILER
14379                 if (opts.IsReadyToRun())
14380                 {
14381                     GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14382                                                                       TYP_REF, gtNewArgList(op1));
14383                     usingReadyToRunHelper = (opLookup != nullptr);
14384                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
14385
14386                     if (!usingReadyToRunHelper)
14387                     {
14388                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14389                         // and the chkcastany call with a single call to a dynamic R2R cell that will:
14390                         //      1) Load the context
14391                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14392                         //      3) Check the object on the stack for the type-cast
14393                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14394
14395                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14396                         if (op2 == nullptr)
14397                         { // compDonotInline()
14398                             return;
14399                         }
14400                     }
14401                 }
14402
14403                 if (!usingReadyToRunHelper)
14404 #endif
14405                 {
14406                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14407                 }
14408                 if (compDonotInline())
14409                 {
14410                     return;
14411                 }
14412
14413                 /* Push the result back on the stack */
14414                 impPushOnStack(op1, tiRetVal);
14415                 break;
14416
14417             case CEE_THROW:
14418
14419                 if (compIsForInlining())
14420                 {
14421                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14422                     // TODO: Will this be too strict, given that we will inline many basic blocks?
14423                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14424
14425                     /* Do we have just the exception on the stack ?*/
14426
14427                     if (verCurrentState.esStackDepth != 1)
14428                     {
14429                         /* if not, just don't inline the method */
14430
14431                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14432                         return;
14433                     }
14434                 }
14435
14436                 if (tiVerificationNeeded)
14437                 {
14438                     tiRetVal = impStackTop().seTypeInfo;
14439                     Verify(tiRetVal.IsObjRef(), "object ref expected");
14440                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14441                     {
14442                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14443                     }
14444                 }
14445
14446                 block->bbSetRunRarely(); // any block with a throw is rare
14447                 /* Pop the exception object and create the 'throw' helper call */
14448
14449                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14450
14451             EVAL_APPEND:
14452                 if (verCurrentState.esStackDepth > 0)
14453                 {
14454                     impEvalSideEffects();
14455                 }
14456
14457                 assert(verCurrentState.esStackDepth == 0);
14458
14459                 goto APPEND;
14460
14461             case CEE_RETHROW:
14462
14463                 assert(!compIsForInlining());
14464
14465                 if (info.compXcptnsCount == 0)
14466                 {
14467                     BADCODE("rethrow outside catch");
14468                 }
14469
14470                 if (tiVerificationNeeded)
14471                 {
14472                     Verify(block->hasHndIndex(), "rethrow outside catch");
14473                     if (block->hasHndIndex())
14474                     {
14475                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14476                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14477                         if (HBtab->HasFilter())
14478                         {
14479                             // we better be in the handler clause part, not the filter part
14480                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14481                                    "rethrow in filter");
14482                         }
14483                     }
14484                 }
14485
14486                 /* Create the 'rethrow' helper call */
14487
14488                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14489
14490                 goto EVAL_APPEND;
14491
14492             case CEE_INITOBJ:
14493
14494                 assertImp(sz == sizeof(unsigned));
14495
14496                 _impResolveToken(CORINFO_TOKENKIND_Class);
14497
14498                 JITDUMP(" %08X", resolvedToken.token);
14499
14500                 if (tiVerificationNeeded)
14501                 {
14502                     typeInfo tiTo    = impStackTop().seTypeInfo;
14503                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14504
14505                     Verify(tiTo.IsByRef(), "byref expected");
14506                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14507
14508                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14509                            "type operand incompatible with type of address");
14510                 }
14511
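                // initobj is materialized as a block init: zero-fill 'size' bytes at the
                // destination address (a sketch of the tree built just below).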
14512                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14513                 op2  = gtNewIconNode(0);                                     // Value
14514                 op1  = impPopStack().val;                                    // Dest
14515                 op1  = gtNewBlockVal(op1, size);
14516                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14517                 goto SPILL_APPEND;
14518
14519             case CEE_INITBLK:
14520
14521                 if (tiVerificationNeeded)
14522                 {
14523                     Verify(false, "bad opcode");
14524                 }
14525
14526                 op3 = impPopStack().val; // Size
14527                 op2 = impPopStack().val; // Value
14528                 op1 = impPopStack().val; // Dest
14529
14530                 if (op3->IsCnsIntOrI())
14531                 {
14532                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14533                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14534                 }
14535                 else
14536                 {
14537                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14538                     size = 0;
14539                 }
14540                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14541
14542                 goto SPILL_APPEND;
14543
14544             case CEE_CPBLK:
14545
14546                 if (tiVerificationNeeded)
14547                 {
14548                     Verify(false, "bad opcode");
14549                 }
14550                 op3 = impPopStack().val; // Size
14551                 op2 = impPopStack().val; // Src
14552                 op1 = impPopStack().val; // Dest
14553
14554                 if (op3->IsCnsIntOrI())
14555                 {
14556                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14557                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14558                 }
14559                 else
14560                 {
14561                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14562                     size = 0;
14563                 }
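                // If the source is already an address-of node, copy directly from the underlying
                // location; otherwise treat the source address as a struct indirection.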
14564                 if (op2->OperGet() == GT_ADDR)
14565                 {
14566                     op2 = op2->gtOp.gtOp1;
14567                 }
14568                 else
14569                 {
14570                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14571                 }
14572
14573                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14574                 goto SPILL_APPEND;
14575
14576             case CEE_CPOBJ:
14577
14578                 assertImp(sz == sizeof(unsigned));
14579
14580                 _impResolveToken(CORINFO_TOKENKIND_Class);
14581
14582                 JITDUMP(" %08X", resolvedToken.token);
14583
14584                 if (tiVerificationNeeded)
14585                 {
14586                     typeInfo tiFrom  = impStackTop().seTypeInfo;
14587                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
14588                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14589
14590                     Verify(tiFrom.IsByRef(), "expected byref source");
14591                     Verify(tiTo.IsByRef(), "expected byref destination");
14592
14593                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14594                            "type of source address incompatible with type operand");
14595                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14596                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14597                            "type operand incompatible with type of destination address");
14598                 }
14599
14600                 if (!eeIsValueClass(resolvedToken.hClass))
14601                 {
14602                     op1 = impPopStack().val; // address to load from
14603
14604                     impBashVarAddrsToI(op1);
14605
14606                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14607
14608                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14609                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14610
14611                     impPushOnStack(op1, typeInfo());
14612                     opcode = CEE_STIND_REF;
14613                     lclTyp = TYP_REF;
14614                     goto STIND_POST_VERIFY;
14615                 }
14616
14617                 op2 = impPopStack().val; // Src
14618                 op1 = impPopStack().val; // Dest
14619                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14620                 goto SPILL_APPEND;
14621
14622             case CEE_STOBJ:
14623             {
14624                 assertImp(sz == sizeof(unsigned));
14625
14626                 _impResolveToken(CORINFO_TOKENKIND_Class);
14627
14628                 JITDUMP(" %08X", resolvedToken.token);
14629
14630                 if (eeIsValueClass(resolvedToken.hClass))
14631                 {
14632                     lclTyp = TYP_STRUCT;
14633                 }
14634                 else
14635                 {
14636                     lclTyp = TYP_REF;
14637                 }
14638
14639                 if (tiVerificationNeeded)
14640                 {
14641
14642                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
14643
14644                     // Make sure we have a good looking byref
14645                     Verify(tiPtr.IsByRef(), "pointer not byref");
14646                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14647                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14648                     {
14649                         compUnsafeCastUsed = true;
14650                     }
14651
14652                     typeInfo ptrVal = DereferenceByRef(tiPtr);
14653                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14654
14655                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14656                     {
14657                         Verify(false, "type of value incompatible with type operand");
14658                         compUnsafeCastUsed = true;
14659                     }
14660
14661                     if (!tiCompatibleWith(argVal, ptrVal, false))
14662                     {
14663                         Verify(false, "type operand incompatible with type of address");
14664                         compUnsafeCastUsed = true;
14665                     }
14666                 }
14667                 else
14668                 {
14669                     compUnsafeCastUsed = true;
14670                 }
14671
14672                 if (lclTyp == TYP_REF)
14673                 {
14674                     opcode = CEE_STIND_REF;
14675                     goto STIND_POST_VERIFY;
14676                 }
14677
14678                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14679                 if (impIsPrimitive(jitTyp))
14680                 {
14681                     lclTyp = JITtype2varType(jitTyp);
14682                     goto STIND_POST_VERIFY;
14683                 }
14684
14685                 op2 = impPopStack().val; // Value
14686                 op1 = impPopStack().val; // Ptr
14687
14688                 assertImp(varTypeIsStruct(op2));
14689
14690                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14691                 goto SPILL_APPEND;
14692             }
14693
14694             case CEE_MKREFANY:
14695
14696                 assert(!compIsForInlining());
14697
14698                 // Being lazy here. Refanys are tricky in terms of gc tracking.
14699                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14700
14701                 JITDUMP("disabling struct promotion because of mkrefany\n");
14702                 fgNoStructPromotion = true;
14703
14704                 oper = GT_MKREFANY;
14705                 assertImp(sz == sizeof(unsigned));
14706
14707                 _impResolveToken(CORINFO_TOKENKIND_Class);
14708
14709                 JITDUMP(" %08X", resolvedToken.token);
14710
14711                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14712                 if (op2 == nullptr)
14713                 { // compDonotInline()
14714                     return;
14715                 }
14716
14717                 if (tiVerificationNeeded)
14718                 {
14719                     typeInfo tiPtr   = impStackTop().seTypeInfo;
14720                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14721
14722                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14723                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14724                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14725                 }
14726
14727                 accessAllowedResult =
14728                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14729                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14730
14731                 op1 = impPopStack().val;
14732
14733                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14734                 // But JIT32 allowed it, so we continue to allow it.
14735                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14736
14737                 // MKREFANY returns a struct.  op2 is the class token.
14738                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14739
14740                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14741                 break;
14742
14743             case CEE_LDOBJ:
14744             {
14745                 oper = GT_OBJ;
14746                 assertImp(sz == sizeof(unsigned));
14747
14748                 _impResolveToken(CORINFO_TOKENKIND_Class);
14749
14750                 JITDUMP(" %08X", resolvedToken.token);
14751
14752             OBJ:
14753
14754                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14755
14756                 if (tiVerificationNeeded)
14757                 {
14758                     typeInfo tiPtr = impStackTop().seTypeInfo;
14759
14760                     // Make sure we have a byref
14761                     if (!tiPtr.IsByRef())
14762                     {
14763                         Verify(false, "pointer not byref");
14764                         compUnsafeCastUsed = true;
14765                     }
14766                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14767
14768                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14769                     {
14770                         Verify(false, "type of address incompatible with type operand");
14771                         compUnsafeCastUsed = true;
14772                     }
14773                     tiRetVal.NormaliseForStack();
14774                 }
14775                 else
14776                 {
14777                     compUnsafeCastUsed = true;
14778                 }
14779
14780                 if (eeIsValueClass(resolvedToken.hClass))
14781                 {
14782                     lclTyp = TYP_STRUCT;
14783                 }
14784                 else
14785                 {
14786                     lclTyp = TYP_REF;
14787                     opcode = CEE_LDIND_REF;
14788                     goto LDIND_POST_VERIFY;
14789                 }
14790
14791                 op1 = impPopStack().val;
14792
14793                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14794
14795                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14796                 if (impIsPrimitive(jitTyp))
14797                 {
14798                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14799
14800                     // Could point anywhere, for example a boxed class static int
14801                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14802                     assertImp(varTypeIsArithmetic(op1->gtType));
14803                 }
14804                 else
14805                 {
14806                     // OBJ returns a struct
14807                     // and an inline argument which is the class token of the loaded obj
14808                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
14809                 }
14810                 op1->gtFlags |= GTF_EXCEPT;
14811
14812                 impPushOnStack(op1, tiRetVal);
14813                 break;
14814             }
14815
14816             case CEE_LDLEN:
14817                 if (tiVerificationNeeded)
14818                 {
14819                     typeInfo tiArray = impStackTop().seTypeInfo;
14820                     Verify(verIsSDArray(tiArray), "bad array");
14821                     tiRetVal = typeInfo(TI_INT);
14822                 }
14823
14824                 op1 = impPopStack().val;
14825                 if (!opts.MinOpts() && !opts.compDbgCode)
14826                 {
14827                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
14828                     GenTreeArrLen* arrLen =
14829                         new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14830
14831                     /* Mark the block as containing a length expression */
14832
14833                     if (op1->gtOper == GT_LCL_VAR)
14834                     {
14835                         block->bbFlags |= BBF_HAS_IDX_LEN;
14836                     }
14837
14838                     op1 = arrLen;
14839                 }
14840                 else
14841                 {
14842                     /* Create the expression "*(array_addr + ArrLenOffs)" */
14843                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14844                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14845                     op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14846                     op1->gtFlags |= GTF_IND_ARR_LEN;
14847                 }
14848
14849                 /* An indirection will cause a GPF if the address is null */
14850                 op1->gtFlags |= GTF_EXCEPT;
14851
14852                 /* Push the result back on the stack */
14853                 impPushOnStack(op1, tiRetVal);
14854                 break;
14855
14856             case CEE_BREAK:
14857                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14858                 goto SPILL_APPEND;
14859
14860             case CEE_NOP:
14861                 if (opts.compDbgCode)
14862                 {
14863                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14864                     goto SPILL_APPEND;
14865                 }
14866                 break;
14867
14868             /******************************** NYI *******************************/
14869
14870             case 0xCC:
14871                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14872
14873             case CEE_ILLEGAL:
14874             case CEE_MACRO_END:
14875
14876             default:
14877                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14878         }
14879
14880         codeAddr += sz;
14881         prevOpcode = opcode;
14882
14883         prefixFlags = 0;
14884     }
14885
14886     return;
14887 #undef _impResolveToken
14888 }
14889 #ifdef _PREFAST_
14890 #pragma warning(pop)
14891 #endif
14892
14893 // Push a local/argument tree on the operand stack
14894 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14895 {
14896     tiRetVal.NormaliseForStack();
14897
14898     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14899     {
14900         tiRetVal.SetUninitialisedObjRef();
14901     }
14902
14903     impPushOnStack(op, tiRetVal);
14904 }
14905
14906 // Load a local/argument on the operand stack
14907 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14908 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14909 {
14910     var_types lclTyp;
14911
14912     if (lvaTable[lclNum].lvNormalizeOnLoad())
14913     {
14914         lclTyp = lvaGetRealType(lclNum);
14915     }
14916     else
14917     {
14918         lclTyp = lvaGetActualType(lclNum);
14919     }
14920
14921     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14922 }
14923
14924 // Load an argument on the operand stack
14925 // Shared by the various CEE_LDARG opcodes
14926 // ilArgNum is the argument index as specified in IL.
14927 // It will be mapped to the correct lvaTable index
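// For example (illustrative): in an instance method IL arg 0 is the 'this' pointer, and
// compMapILargNum also skips hidden parameters such as the return buffer, so the resulting
// lvaTable index may differ from ilArgNum.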
14928 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14929 {
14930     Verify(ilArgNum < info.compILargsCount, "bad arg num");
14931
14932     if (compIsForInlining())
14933     {
14934         if (ilArgNum >= info.compArgsCount)
14935         {
14936             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14937             return;
14938         }
14939
14940         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14941                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14942     }
14943     else
14944     {
14945         if (ilArgNum >= info.compArgsCount)
14946         {
14947             BADCODE("Bad IL");
14948         }
14949
14950         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14951
14952         if (lclNum == info.compThisArg)
14953         {
14954             lclNum = lvaArg0Var;
14955         }
14956
14957         impLoadVar(lclNum, offset);
14958     }
14959 }
14960
14961 // Load a local on the operand stack
14962 // Shared by the various CEE_LDLOC opcodes
14963 // ilLclNum is the local index as specified in IL.
14964 // It will be mapped to the correct lvaTable index
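// In the non-inline case the mapping is simply lclNum = info.compArgsCount + ilLclNum,
// since locals follow the arguments in lvaTable (see below).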
14965 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14966 {
14967     if (tiVerificationNeeded)
14968     {
14969         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14970         Verify(info.compInitMem, "initLocals not set");
14971     }
14972
14973     if (compIsForInlining())
14974     {
14975         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14976         {
14977             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14978             return;
14979         }
14980
14981         // Get the local type
14982         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14983
14984         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14985
14986         /* Have we allocated a temp for this local? */
14987
14988         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14989
14990         // All vars of inlined methods should be !lvNormalizeOnLoad()
14991
14992         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14993         lclTyp = genActualType(lclTyp);
14994
14995         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14996     }
14997     else
14998     {
14999         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15000         {
15001             BADCODE("Bad IL");
15002         }
15003
15004         unsigned lclNum = info.compArgsCount + ilLclNum;
15005
15006         impLoadVar(lclNum, offset);
15007     }
15008 }
15009
15010 #ifdef _TARGET_ARM_
15011 /**************************************************************************************
15012  *
15013  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
15014  *  dst struct, because struct promotion will turn it into a float/double variable while
15015  *  the rhs will be an int/long variable. We don't generate code for assigning an int
15016  *  into a float, but nothing would otherwise prevent such a tree from being created.
15017  *  The tree would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
15018  *
15019  *  tmpNum - the lcl dst variable num that is a struct.
15020  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
15021  *  hClass - the type handle for the struct variable.
15022  *
15023  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
15024  *        however, we could generate code to transfer from int to float registers
15025  *        (transfer, not a cast.)
15026  *
15027  */
15028 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
15029 {
15030     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15031     {
15032         int       hfaSlots = GetHfaCount(hClass);
15033         var_types hfaType  = GetHfaType(hClass);
15034
15035         // If we have varargs, the importer morphs the method's return type to "int" irrespective of its
15036         // original struct/float type, because the ABI calls for the return to be in integer registers.
15037         // We don't want struct promotion to replace an expression like this:
15038         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
15039         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
15040         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15041             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15042         {
15043             // Make sure this struct type stays as struct so we can receive the call in a struct.
15044             lvaTable[tmpNum].lvIsMultiRegRet = true;
15045         }
15046     }
15047 }
15048 #endif // _TARGET_ARM_
15049
15050 #if FEATURE_MULTIREG_RET
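// Assign a multi-register return value 'op' to a freshly grabbed temp and return a use of that
// temp. The temp is marked lvIsMultiRegRet so that its fields are not promoted apart.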
15051 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
15052 {
15053     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
15054     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
15055     GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
15056
15057     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15058     ret->gtFlags |= GTF_DONT_CSE;
15059
15060     assert(IsMultiRegReturnedType(hClass));
15061
15062     // Mark the var so that fields are not promoted and stay together.
15063     lvaTable[tmpNum].lvIsMultiRegRet = true;
15064
15065     return ret;
15066 }
15067 #endif // FEATURE_MULTIREG_RET
15068
15069 // Do the import for a return instruction.
15070 // Returns false if inlining was aborted.
15071 // opcode can be ret, or a call opcode in the case of a tail.call.
15072 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15073 {
15074     if (tiVerificationNeeded)
15075     {
15076         verVerifyThisPtrInitialised();
15077
15078         unsigned expectedStack = 0;
15079         if (info.compRetType != TYP_VOID)
15080         {
15081             typeInfo tiVal = impStackTop().seTypeInfo;
15082             typeInfo tiDeclared =
15083                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15084
15085             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15086
15087             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15088             expectedStack = 1;
15089         }
15090         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15091     }
15092
15093 #ifdef DEBUG
15094     // If we are importing an inlinee and have GC ref locals we always
15095     // need to have a spill temp for the return value.  This temp
15096     // should have been set up in advance, over in fgFindBasicBlocks.
15097     if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
15098     {
15099         assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
15100     }
15101 #endif // DEBUG
15102
15103     GenTree*             op2       = nullptr;
15104     GenTree*             op1       = nullptr;
15105     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15106
15107     if (info.compRetType != TYP_VOID)
15108     {
15109         StackEntry se = impPopStack();
15110         retClsHnd     = se.seTypeInfo.GetClassHandle();
15111         op2           = se.val;
15112
15113         if (!compIsForInlining())
15114         {
15115             impBashVarAddrsToI(op2);
15116             op2 = impImplicitIorI4Cast(op2, info.compRetType);
15117             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15118             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15119                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15120                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15121                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15122                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15123
15124 #ifdef DEBUG
15125             if (opts.compGcChecks && info.compRetType == TYP_REF)
15126             {
15127                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
15128                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15129                 // one-return BB.
15130
15131                 assert(op2->gtType == TYP_REF);
15132
15133                 // confirm that the argument is a GC pointer (for debugging (GC stress))
15134                 GenTreeArgList* args = gtNewArgList(op2);
15135                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
15136
15137                 if (verbose)
15138                 {
15139                     printf("\ncompGcChecks tree:\n");
15140                     gtDispTree(op2);
15141                 }
15142             }
15143 #endif
15144         }
15145         else
15146         {
15147             // inlinee's stack should be empty now.
15148             assert(verCurrentState.esStackDepth == 0);
15149
15150 #ifdef DEBUG
15151             if (verbose)
15152             {
15153                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15154                 gtDispTree(op2);
15155             }
15156 #endif
15157
15158             // Make sure the type matches the original call.
15159
15160             var_types returnType       = genActualType(op2->gtType);
15161             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15162             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15163             {
15164                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15165             }
15166
15167             if (returnType != originalCallType)
15168             {
15169                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15170                 return false;
15171             }
15172
15173             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15174             // expression. At this point, retExpr could already be set if there are multiple
15175             // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15176             // the other blocks already set it. If there is only a single return block,
15177             // retExpr shouldn't be set. However, this is not true if we reimport a block
15178             // with a return. In that case, retExpr will be set, then the block will be
15179             // reimported, but retExpr won't get cleared as part of setting the block to
15180             // be reimported. The reimported retExpr value should be the same, so even if
15181             // we don't unconditionally overwrite it, it shouldn't matter.
15182             if (info.compRetNativeType != TYP_STRUCT)
15183             {
15184                 // compRetNativeType is not TYP_STRUCT.
15185                 // This implies it could be either a scalar type or SIMD vector type or
15186                 // a struct type that can be normalized to a scalar type.
15187
15188                 if (varTypeIsStruct(info.compRetType))
15189                 {
15190                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15191                     // adjust the type away from struct to integral
15192                     // and no normalizing
15193                     op2 = impFixupStructReturnType(op2, retClsHnd);
15194                 }
15195                 else
15196                 {
15197                     // Do we have to normalize?
15198                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15199                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15200                         fgCastNeeded(op2, fncRealRetType))
15201                     {
15202                         // Small-typed return values are normalized by the callee
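                        // Illustrative case (an assumption about a typical caller): a method declared
                        // to return 'byte' whose return expression is a full-width TYP_INT computation
                        // gets an explicit down-cast here so the caller observes a normalized small value.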
15203                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15204                     }
15205                 }
15206
15207                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15208                 {
15209                     assert(info.compRetNativeType != TYP_VOID &&
15210                            (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
15211
15212                     // This is a bit of a workaround...
15213                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15214                     // not a struct (for example, the struct is composed of exactly one int, and the native
15215                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15216                     // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15217                     // to the *native* return type), and at least one of the return blocks is the result of
15218                     // a call, then we have a problem. The situation is like this (from a failed test case):
15219                     //
15220                     // inliner:
15221                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15222                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15223                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15224                     //
15225                     // inlinee:
15226                     //      ...
15227                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15228                     //      ret
15229                     //      ...
15230                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15231                     //      object&, class System.Func`1<!!0>)
15232                     //      ret
15233                     //
15234                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15235                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15236                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15237                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15238                     //
15239                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15240                     // native return type, which is what it will be set to eventually. We generate the
15241                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15242                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15243
15244                     bool restoreType = false;
15245                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15246                     {
15247                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15248                         op2->gtType = info.compRetNativeType;
15249                         restoreType = true;
15250                     }
15251
15252                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15253                                      (unsigned)CHECK_SPILL_ALL);
15254
15255                     GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15256
15257                     if (restoreType)
15258                     {
15259                         op2->gtType = TYP_STRUCT; // restore it to what it was
15260                     }
15261
15262                     op2 = tmpOp2;
15263
15264 #ifdef DEBUG
15265                     if (impInlineInfo->retExpr)
15266                     {
15267                         // Some other block(s) have seen the CEE_RET first.
15268                         // Better they spilled to the same temp.
15269                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15270                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15271                     }
15272 #endif
15273                 }
15274
15275 #ifdef DEBUG
15276                 if (verbose)
15277                 {
15278                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15279                     gtDispTree(op2);
15280                 }
15281 #endif
15282
15283                 // Report the return expression
15284                 impInlineInfo->retExpr = op2;
15285             }
15286             else
15287             {
15288                 // compRetNativeType is TYP_STRUCT.
15289                 // This implies that struct return via RetBuf arg or multi-reg struct return
15290
15291                 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
15292
15293                 // Assign the inlinee return into a spill temp.
15294                 // spill temp only exists if there are multiple return points
15295                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15296                 {
15297                     // in this case we have to insert multiple struct copies to the temp
15298                     // and the retexpr is just the temp.
15299                     assert(info.compRetNativeType != TYP_VOID);
15300                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
15301
15302                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15303                                      (unsigned)CHECK_SPILL_ALL);
15304                 }
15305
15306 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15307 #if defined(_TARGET_ARM_)
15308                 // TODO-ARM64-NYI: HFA
15309                 // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
15310                 // next ifdefs could be refactored in a single method with the ifdef inside.
15311                 if (IsHfa(retClsHnd))
15312                 {
15313 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15314 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15315                 ReturnTypeDesc retTypeDesc;
15316                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15317                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15318
15319                 if (retRegCount != 0)
15320                 {
15321                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15322                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15323                     // max allowed.)
15324                     assert(retRegCount == MAX_RET_REG_COUNT);
15325                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15326                     CLANG_FORMAT_COMMENT_ANCHOR;
15327 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15328
15329                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15330                     {
15331                         if (!impInlineInfo->retExpr)
15332                         {
15333 #if defined(_TARGET_ARM_)
15334                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15335 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15336                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15337                             impInlineInfo->retExpr =
15338                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15339 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15340                         }
15341                     }
15342                     else
15343                     {
15344                         impInlineInfo->retExpr = op2;
15345                     }
15346                 }
15347                 else
15348 #elif defined(_TARGET_ARM64_)
15349                 ReturnTypeDesc retTypeDesc;
15350                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15351                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15352
15353                 if (retRegCount != 0)
15354                 {
15355                     assert(!iciCall->HasRetBufArg());
15356                     assert(retRegCount >= 2);
15357                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15358                     {
15359                         if (!impInlineInfo->retExpr)
15360                         {
15361                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15362                             impInlineInfo->retExpr =
15363                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15364                         }
15365                     }
15366                     else
15367                     {
15368                         impInlineInfo->retExpr = op2;
15369                     }
15370                 }
15371                 else
15372 #endif // defined(_TARGET_ARM64_)
15373                 {
15374                     assert(iciCall->HasRetBufArg());
15375                     GenTreePtr dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
15376                     // spill temp only exists if there are multiple return points
15377                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15378                     {
15379                         // if this is the first return we have seen set the retExpr
15380                         if (!impInlineInfo->retExpr)
15381                         {
15382                             impInlineInfo->retExpr =
15383                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15384                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
15385                         }
15386                     }
15387                     else
15388                     {
15389                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15390                     }
15391                 }
15392             }
15393         }
15394     }
15395
15396     if (compIsForInlining())
15397     {
15398         return true;
15399     }
15400
15401     if (info.compRetType == TYP_VOID)
15402     {
15403         // return void
15404         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15405     }
15406     else if (info.compRetBuffArg != BAD_VAR_NUM)
15407     {
15408         // Assign value to return buff (first param)
15409         GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15410
15411         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15412         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15413
15414         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15415         CLANG_FORMAT_COMMENT_ANCHOR;
15416
15417 #if defined(_TARGET_AMD64_)
15418
15419         // The x64 (System V and Win64) calling conventions require us to
15420         // return the implicit return buffer explicitly (in RAX).
15421         // Change the return type to be BYREF.
15422         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15423 #else  // !defined(_TARGET_AMD64_)
15424         // On non-AMD64 targets the profiler hook requires us to return the implicit RetBuf explicitly (in RAX).
15425         // In that case the return type of the function is changed to BYREF.
15426         // If profiler hook is not needed the return type of the function is TYP_VOID.
15427         if (compIsProfilerHookNeeded())
15428         {
15429             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15430         }
15431         else
15432         {
15433             // return void
15434             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15435         }
15436 #endif // !defined(_TARGET_AMD64_)
15437     }
15438     else if (varTypeIsStruct(info.compRetType))
15439     {
15440 #if !FEATURE_MULTIREG_RET
15441         // For both ARM architectures the HFA native types are maintained as structs.
15442         // On System V AMD64, multi-reg struct returns are likewise left as structs.
15443         noway_assert(info.compRetNativeType != TYP_STRUCT);
15444 #endif
15445         op2 = impFixupStructReturnType(op2, retClsHnd);
15446         // return op2
15447         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15448     }
15449     else
15450     {
15451         // return op2
15452         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15453     }
15454
15455     // We must have imported a tailcall and jumped to RET
15456     if (prefixFlags & PREFIX_TAILCALL)
15457     {
15458 #ifndef _TARGET_AMD64_
15459         // Jit64 compat:
15460         // This cannot be asserted on Amd64 since we permit the following IL pattern:
15461         //      tail.call
15462         //      pop
15463         //      ret
15464         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15465 #endif
15466
15467         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15468
15469         // impImportCall() would have already appended TYP_VOID calls
15470         if (info.compRetType == TYP_VOID)
15471         {
15472             return true;
15473         }
15474     }
15475
15476     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15477 #ifdef DEBUG
15478     // Remember at which BC offset the tree was finished
15479     impNoteLastILoffs();
15480 #endif
15481     return true;
15482 }
15483
15484 /*****************************************************************************
15485  *  Mark the block as unimported.
15486  *  Note that the caller is responsible for calling impImportBlockPending(),
15487  *  with the appropriate stack-state
15488  */
15489
15490 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15491 {
15492 #ifdef DEBUG
15493     if (verbose && (block->bbFlags & BBF_IMPORTED))
15494     {
15495         printf("\nBB%02u will be reimported\n", block->bbNum);
15496     }
15497 #endif
15498
15499     block->bbFlags &= ~BBF_IMPORTED;
15500 }
15501
15502 /*****************************************************************************
15503  *  Mark the successors of the given block as unimported.
15504  *  Note that the caller is responsible for calling impImportBlockPending()
15505  *  for all the successors, with the appropriate stack-state.
15506  */
15507
15508 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15509 {
15510     const unsigned numSuccs = block->NumSucc();
15511     for (unsigned i = 0; i < numSuccs; i++)
15512     {
15513         impReimportMarkBlock(block->GetSucc(i));
15514     }
15515 }
15516
15517 /*****************************************************************************
15518  *
15519  *  Filter wrapper that handles only the verification exception code
15520  *  (all other exceptions continue the search).
15521  */
15522
15523 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15524 {
15525     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15526     {
15527         return EXCEPTION_EXECUTE_HANDLER;
15528     }
15529
15530     return EXCEPTION_CONTINUE_SEARCH;
15531 }
15532
15533 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15534 {
15535     assert(block->hasTryIndex());
15536     assert(!compIsForInlining());
15537
15538     unsigned  tryIndex = block->getTryIndex();
15539     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
15540
15541     if (isTryStart)
15542     {
15543         assert(block->bbFlags & BBF_TRY_BEG);
15544
15545         // The Stack must be empty
15546         //
15547         if (block->bbStkDepth != 0)
15548         {
15549             BADCODE("Evaluation stack must be empty on entry into a try block");
15550         }
15551     }
15552
15553     // Save the stack contents, we'll need to restore it later
15554     //
15555     SavedStack blockState;
15556     impSaveStackState(&blockState, false);
15557
15558     while (HBtab != nullptr)
15559     {
15560         if (isTryStart)
15561         {
15562             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15563             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15564             //
15565             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15566             {
15567                 // We trigger an invalid program exception here unless we have a try/fault region.
15568                 //
15569                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15570                 {
15571                     BADCODE(
15572                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
15573                 }
15574                 else
15575                 {
15576                     // Allow a try/fault region to proceed.
15577                     assert(HBtab->HasFaultHandler());
15578                 }
15579             }
15580
15581             /* Recursively process the handler block */
15582             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15583
15584             //  Construct the proper verification stack state
15585             //   either empty or one that contains just
15586             //   the Exception Object that we are dealing with
15587             //
15588             verCurrentState.esStackDepth = 0;
15589
15590             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15591             {
15592                 CORINFO_CLASS_HANDLE clsHnd;
15593
15594                 if (HBtab->HasFilter())
15595                 {
15596                     clsHnd = impGetObjectClass();
15597                 }
15598                 else
15599                 {
15600                     CORINFO_RESOLVED_TOKEN resolvedToken;
15601
15602                     resolvedToken.tokenContext = impTokenLookupContextHandle;
15603                     resolvedToken.tokenScope   = info.compScopeHnd;
15604                     resolvedToken.token        = HBtab->ebdTyp;
15605                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
15606                     info.compCompHnd->resolveToken(&resolvedToken);
15607
15608                     clsHnd = resolvedToken.hClass;
15609                 }
15610
15611                 // push the catch arg on the stack, spilling to a temp if necessary
15612                 // Note: can update HBtab->ebdHndBeg!
15613                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15614             }
15615
15616             // Queue up the handler for importing
15617             //
15618             impImportBlockPending(hndBegBB);
15619
15620             if (HBtab->HasFilter())
15621             {
15622                 /* @VERIFICATION : Ideally the end of filter state should get
15623                    propagated to the catch handler, this is an incompleteness,
15624                    but is not a security/compliance issue, since the only
15625                    interesting state is the 'thisInit' state.
15626                    */
15627
15628                 verCurrentState.esStackDepth = 0;
15629
15630                 BasicBlock* filterBB = HBtab->ebdFilter;
15631
15632                 // push the catch arg on the stack, spilling to a temp if necessary
15633                 // Note: can update HBtab->ebdFilter!
15634                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15635
15636                 impImportBlockPending(filterBB);
15637             }
15638         }
15639         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15640         {
15641             /* Recursively process the handler block */
15642
15643             verCurrentState.esStackDepth = 0;
15644
15645             // Queue up the fault handler for importing
15646             //
15647             impImportBlockPending(HBtab->ebdHndBeg);
15648         }
15649
15650         // Now process our enclosing try index (if any)
15651         //
15652         tryIndex = HBtab->ebdEnclosingTryIndex;
15653         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15654         {
15655             HBtab = nullptr;
15656         }
15657         else
15658         {
15659             HBtab = ehGetDsc(tryIndex);
15660         }
15661     }
15662
15663     // Restore the stack contents
15664     impRestoreStackState(&blockState);
15665 }
15666
15667 //***************************************************************
15668 // Import the instructions for the given basic block. Perform verification,
15669 // throwing an exception on failure. Push any successor blocks that are enabled
15670 // for the first time, or whose verification pre-state is changed.
15671
15672 #ifdef _PREFAST_
15673 #pragma warning(push)
15674 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15675 #endif
15676 void Compiler::impImportBlock(BasicBlock* block)
15677 {
15678     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15679     // handle them specially. In particular, there is no IL to import for them, but we do need
15680     // to mark them as imported and put their successors on the pending import list.
15681     if (block->bbFlags & BBF_INTERNAL)
15682     {
15683         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15684         block->bbFlags |= BBF_IMPORTED;
15685
15686         const unsigned numSuccs = block->NumSucc();
15687         for (unsigned i = 0; i < numSuccs; i++)
15688         {
15689             impImportBlockPending(block->GetSucc(i));
15690         }
15691
15692         return;
15693     }
15694
15695     bool markImport;
15696
15697     assert(block);
15698
15699     /* Make the block globally available */
15700
15701     compCurBB = block;
15702
15703 #ifdef DEBUG
15704     /* Initialize the debug variables */
15705     impCurOpcName = "unknown";
15706     impCurOpcOffs = block->bbCodeOffs;
15707 #endif
15708
15709     /* Set the current stack state to the merged result */
15710     verResetCurrentState(block, &verCurrentState);
15711
15712     /* Now walk the code and import the IL into GenTrees */
15713
15714     struct FilterVerificationExceptionsParam
15715     {
15716         Compiler*   pThis;
15717         BasicBlock* block;
15718     };
15719     FilterVerificationExceptionsParam param;
15720
15721     param.pThis = this;
15722     param.block = block;
15723
15724     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
15725     {
15726         /* @VERIFICATION : For now, the only state propagation from try
15727            to its handler is "thisInit" state (stack is empty at start of try).
15728            In general, for state that we track in verification, we need to
15729            model the possibility that an exception might happen at any IL
15730            instruction, so we really need to merge all states that obtain
15731            between IL instructions in a try block into the start states of
15732            all handlers.
15733
15734            However we do not allow the 'this' pointer to be uninitialized when
15735            entering most kinds of try regions (only try/fault are allowed to have
15736            an uninitialized this pointer on entry to the try)
15737
15738            Fortunately, the stack is thrown away when an exception
15739            leads to a handler, so we don't have to worry about that.
15740            We DO, however, have to worry about the "thisInit" state.
15741            But only for the try/fault case.
15742
15743            The only allowed transition is from TIS_Uninit to TIS_Init.
15744
15745            So for a try/fault region for the fault handler block
15746            we will merge the start state of the try begin
15747            and the post-state of each block that is part of this try region
15748         */
15749
15750         // merge the start state of the try begin
15751         //
15752         if (pParam->block->bbFlags & BBF_TRY_BEG)
15753         {
15754             pParam->pThis->impVerifyEHBlock(pParam->block, true);
15755         }
15756
15757         pParam->pThis->impImportBlockCode(pParam->block);
15758
15759         // As discussed above:
15760         // merge the post-state of each block that is part of this try region
15761         //
15762         if (pParam->block->hasTryIndex())
15763         {
15764             pParam->pThis->impVerifyEHBlock(pParam->block, false);
15765         }
15766     }
15767     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15768     {
15769         verHandleVerificationFailure(block DEBUGARG(false));
15770     }
15771     PAL_ENDTRY
15772
15773     if (compDonotInline())
15774     {
15775         return;
15776     }
15777
15778     assert(!compDonotInline());
15779
15780     markImport = false;
15781
15782 SPILLSTACK:
15783
15784     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
15785     bool        reimportSpillClique = false;
15786     BasicBlock* tgtBlock            = nullptr;
15787
15788     /* If the stack is non-empty, we might have to spill its contents */
15789
15790     if (verCurrentState.esStackDepth != 0)
15791     {
15792         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15793                                   // on the stack, its lifetime is hard to determine, simply
15794                                   // don't reuse such temps.
15795
15796         GenTreePtr addStmt = nullptr;
15797
15798         /* Do the successors of 'block' have any other predecessors ?
15799            We do not want to do some of the optimizations related to multiRef
15800            if we can reimport blocks */
15801
15802         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15803
15804         switch (block->bbJumpKind)
15805         {
15806             case BBJ_COND:
15807
15808                 /* Temporarily remove the 'jtrue' from the end of the tree list */
15809
15810                 assert(impTreeLast);
15811                 assert(impTreeLast->gtOper == GT_STMT);
15812                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15813
15814                 addStmt     = impTreeLast;
15815                 impTreeLast = impTreeLast->gtPrev;
15816
15817                 /* Note if the next block has more than one ancestor */
15818
15819                 multRef |= block->bbNext->bbRefs;
15820
15821                 /* Does the next block have temps assigned? */
15822
15823                 baseTmp  = block->bbNext->bbStkTempsIn;
15824                 tgtBlock = block->bbNext;
15825
15826                 if (baseTmp != NO_BASE_TMP)
15827                 {
15828                     break;
15829                 }
15830
15831                 /* Try the target of the jump then */
15832
15833                 multRef |= block->bbJumpDest->bbRefs;
15834                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15835                 tgtBlock = block->bbJumpDest;
15836                 break;
15837
15838             case BBJ_ALWAYS:
15839                 multRef |= block->bbJumpDest->bbRefs;
15840                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
15841                 tgtBlock = block->bbJumpDest;
15842                 break;
15843
15844             case BBJ_NONE:
15845                 multRef |= block->bbNext->bbRefs;
15846                 baseTmp  = block->bbNext->bbStkTempsIn;
15847                 tgtBlock = block->bbNext;
15848                 break;
15849
15850             case BBJ_SWITCH:
15851
15852                 BasicBlock** jmpTab;
15853                 unsigned     jmpCnt;
15854
15855                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15856
15857                 assert(impTreeLast);
15858                 assert(impTreeLast->gtOper == GT_STMT);
15859                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15860
15861                 addStmt     = impTreeLast;
15862                 impTreeLast = impTreeLast->gtPrev;
15863
15864                 jmpCnt = block->bbJumpSwt->bbsCount;
15865                 jmpTab = block->bbJumpSwt->bbsDstTab;
15866
15867                 do
15868                 {
15869                     tgtBlock = (*jmpTab);
15870
15871                     multRef |= tgtBlock->bbRefs;
15872
15873                     // Thanks to spill cliques, we should have assigned all or none
15874                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15875                     baseTmp = tgtBlock->bbStkTempsIn;
15876                     if (multRef > 1)
15877                     {
15878                         break;
15879                     }
15880                 } while (++jmpTab, --jmpCnt);
15881
15882                 break;
15883
15884             case BBJ_CALLFINALLY:
15885             case BBJ_EHCATCHRET:
15886             case BBJ_RETURN:
15887             case BBJ_EHFINALLYRET:
15888             case BBJ_EHFILTERRET:
15889             case BBJ_THROW:
15890                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15891                 break;
15892
15893             default:
15894                 noway_assert(!"Unexpected bbJumpKind");
15895                 break;
15896         }
15897
15898         assert(multRef >= 1);
15899
15900         /* Do we have a base temp number? */
15901
15902         bool newTemps = (baseTmp == NO_BASE_TMP);
15903
15904         if (newTemps)
15905         {
15906             /* Grab enough temps for the whole stack */
15907             baseTmp = impGetSpillTmpBase(block);
15908         }
15909
15910         /* Spill all stack entries into temps */
15911         unsigned level, tempNum;
15912
15913         JITDUMP("\nSpilling stack entries into temps\n");
15914         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15915         {
15916             GenTreePtr tree = verCurrentState.esStack[level].val;
15917
15918             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15919                the other. This should merge to a byref in unverifiable code.
15920                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15921                successor would be imported assuming there was a TYP_I_IMPL on
15922                the stack. Thus the value would not get GC-tracked. Hence,
15923                change the temp to TYP_BYREF and reimport the successors.
15924                Note: We should only allow this in unverifiable code.
15925             */
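            // Illustrative sketch (hypothetical IL, not taken from any particular test case):
            // a pattern of roughly this shape can leave a byref on one edge and a zero
            // constant on the other:
            //
            //         brtrue    TAKE_ADDR
            //         ldc.i4.0              // pushes a constant zero
            //         br        JOIN
            //     TAKE_ADDR:
            //         ldloca.s  0           // pushes a byref (managed pointer)
            //     JOIN:                     // both edges arrive with one stack slot
            //
            // The retyping below forces the shared spill temp to TYP_BYREF so the value
            // stays GC-tracked, and reimports the successors that assumed TYP_I_IMPL.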
15926             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15927             {
15928                 lvaTable[tempNum].lvType = TYP_BYREF;
15929                 impReimportMarkSuccessors(block);
15930                 markImport = true;
15931             }
15932
15933 #ifdef _TARGET_64BIT_
15934             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15935             {
15936                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15937                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15938                 {
15939                     // Merge the current state into the entry state of block;
15940                     // the call to verMergeEntryStates must have changed
15941                     // the entry state of the block by merging the int local var
15942                     // and the native-int stack entry.
15943                     bool changed = false;
15944                     if (verMergeEntryStates(tgtBlock, &changed))
15945                     {
15946                         impRetypeEntryStateTemps(tgtBlock);
15947                         impReimportBlockPending(tgtBlock);
15948                         assert(changed);
15949                     }
15950                     else
15951                     {
15952                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15953                         break;
15954                     }
15955                 }
15956
15957                 // Some other block in the spill clique set this to "int", but now we have "native int".
15958                 // Change the type and go back to re-import any blocks that used the wrong type.
15959                 lvaTable[tempNum].lvType = TYP_I_IMPL;
15960                 reimportSpillClique      = true;
15961             }
15962             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15963             {
15964                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15965                 // Insert a sign-extension to "native int" so we match the clique.
15966                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15967             }
15968
15969             // Consider the case where one branch left a 'byref' on the stack and the other leaves
15970             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15971             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15972             // behavior instead of asserting and then generating bad code (where we save/restore the
15973             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15974             // imported already, we need to change the type of the local and reimport the spill clique.
15975             // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
15976             // the 'byref' size.
15977             if (!tiVerificationNeeded)
15978             {
15979                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15980                 {
15981                     // Some other block in the spill clique set this to "int", but now we have "byref".
15982                     // Change the type and go back to re-import any blocks that used the wrong type.
15983                     lvaTable[tempNum].lvType = TYP_BYREF;
15984                     reimportSpillClique      = true;
15985                 }
15986                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15987                 {
15988                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
15989                     // Insert a sign-extension to "native int" so we match the clique size.
15990                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15991                 }
15992             }
15993 #endif // _TARGET_64BIT_
15994
15995 #if FEATURE_X87_DOUBLES
15996             // X87 stack doesn't differentiate between float/double
15997             // so promoting is no big deal.
15998             // For everybody else keep it as float until we have a collision and then promote
15999             // Just like for x64's TYP_INT<->TYP_I_IMPL
16000
16001             if (multRef > 1 && tree->gtType == TYP_FLOAT)
16002             {
16003                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16004             }
16005
16006 #else // !FEATURE_X87_DOUBLES
16007
16008             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
16009             {
16010                 // Some other block in the spill clique set this to "float", but now we have "double".
16011                 // Change the type and go back to re-import any blocks that used the wrong type.
16012                 lvaTable[tempNum].lvType = TYP_DOUBLE;
16013                 reimportSpillClique      = true;
16014             }
16015             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
16016             {
16017                 // Spill clique has decided this should be "double", but this block only pushes a "float".
16018                 // Insert a cast to "double" so we match the clique.
16019                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16020             }
16021
16022 #endif // FEATURE_X87_DOUBLES
16023
16024             /* If addStmt has a reference to tempNum (can only happen if we
16025                are spilling to the temps already used by a previous block),
16026                we need to spill addStmt */
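            // For example (informal sketch): if addStmt is JTRUE(LT(op1, op2)) and op1
            // reads the lclVar numbered 'tempNum', the code below first stores op1 into
            // a fresh temp and rewrites the compare to read that temp, so the branch
            // still observes the value that was live before the spill assignment below
            // overwrites tempNum.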
16027
16028             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
16029             {
16030                 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
16031
16032                 if (addTree->gtOper == GT_JTRUE)
16033                 {
16034                     GenTreePtr relOp = addTree->gtOp.gtOp1;
16035                     assert(relOp->OperIsCompare());
16036
16037                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
16038
16039                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
16040                     {
16041                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
16042                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
16043                         type              = genActualType(lvaTable[temp].TypeGet());
16044                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
16045                     }
16046
16047                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
16048                     {
16049                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
16050                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
16051                         type              = genActualType(lvaTable[temp].TypeGet());
16052                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
16053                     }
16054                 }
16055                 else
16056                 {
16057                     assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
16058
16059                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
16060                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
16061                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
16062                 }
16063             }
16064
16065             /* Spill the stack entry, and replace with the temp */
16066
16067             if (!impSpillStackEntry(level, tempNum
16068 #ifdef DEBUG
16069                                     ,
16070                                     true, "Spill Stack Entry"
16071 #endif
16072                                     ))
16073             {
16074                 if (markImport)
16075                 {
16076                     BADCODE("bad stack state");
16077                 }
16078
16079                 // Oops. Something went wrong when spilling. Bad code.
16080                 verHandleVerificationFailure(block DEBUGARG(true));
16081
16082                 goto SPILLSTACK;
16083             }
16084         }
16085
16086         /* Put back the 'jtrue'/'switch' if we removed it earlier */
16087
16088         if (addStmt)
16089         {
16090             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16091         }
16092     }
16093
16094     // Some of the append/spill logic works on compCurBB
16095
16096     assert(compCurBB == block);
16097
16098     /* Save the tree list in the block */
16099     impEndTreeList(block);
16100
16101     // impEndTreeList sets BBF_IMPORTED on the block
16102     // We do *NOT* want to set it later than this because
16103     // impReimportSpillClique might clear it if this block is both a
16104     // predecessor and successor in the current spill clique
16105     assert(block->bbFlags & BBF_IMPORTED);
16106
16107     // If we had a int/native int, or float/double collision, we need to re-import
16108     if (reimportSpillClique)
16109     {
16110         // This will re-import all the successors of block (as well as each of their predecessors)
16111         impReimportSpillClique(block);
16112
16113         // For blocks that haven't been imported yet, we still need to mark them as pending import.
16114         const unsigned numSuccs = block->NumSucc();
16115         for (unsigned i = 0; i < numSuccs; i++)
16116         {
16117             BasicBlock* succ = block->GetSucc(i);
16118             if ((succ->bbFlags & BBF_IMPORTED) == 0)
16119             {
16120                 impImportBlockPending(succ);
16121             }
16122         }
16123     }
16124     else // the normal case
16125     {
16126         // otherwise just import the successors of block
16127
16128         /* Does this block jump to any other blocks? */
16129         const unsigned numSuccs = block->NumSucc();
16130         for (unsigned i = 0; i < numSuccs; i++)
16131         {
16132             impImportBlockPending(block->GetSucc(i));
16133         }
16134     }
16135 }
16136 #ifdef _PREFAST_
16137 #pragma warning(pop)
16138 #endif
16139
16140 /*****************************************************************************/
16141 //
16142 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16143 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16144 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16145 // (its "pre-state").
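// For example (informal): a BBJ_COND block ends up here for both its fall-through
// block and its jump target. A target that has already been imported is only put
// back on the pending list when verification is enabled and merging the incoming
// state either fails or changes the target's recorded pre-state.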
16146
16147 void Compiler::impImportBlockPending(BasicBlock* block)
16148 {
16149 #ifdef DEBUG
16150     if (verbose)
16151     {
16152         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16153     }
16154 #endif
16155
16156     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16157     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16158     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16159
16160     // If the block has not been imported, add to pending set.
16161     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16162
16163     // Initialize bbEntryState just the first time we try to add this block to the pending list
16164     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set.
16165     // We use NULL to indicate the 'common' state to avoid memory allocation
16166     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16167         (impGetPendingBlockMember(block) == 0))
16168     {
16169         verInitBBEntryState(block, &verCurrentState);
16170         assert(block->bbStkDepth == 0);
16171         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16172         assert(addToPending);
16173         assert(impGetPendingBlockMember(block) == 0);
16174     }
16175     else
16176     {
16177         // The stack should have the same height on entry to the block from all its predecessors.
16178         if (block->bbStkDepth != verCurrentState.esStackDepth)
16179         {
16180 #ifdef DEBUG
16181             char buffer[400];
16182             sprintf_s(buffer, sizeof(buffer),
16183                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16184                       "Previous depth was %d, current depth is %d",
16185                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16186                       verCurrentState.esStackDepth);
16187             buffer[400 - 1] = 0;
16188             NO_WAY(buffer);
16189 #else
16190             NO_WAY("Block entered with different stack depths");
16191 #endif
16192         }
16193
16194         // Additionally, if we need to verify, merge the verification state.
16195         if (tiVerificationNeeded)
16196         {
16197             // Merge the current state into the entry state of block; if this does not change the entry state
16198             // by merging, do not add the block to the pending-list.
16199             bool changed = false;
16200             if (!verMergeEntryStates(block, &changed))
16201             {
16202                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16203                 addToPending = true; // We will pop it off, and check the flag set above.
16204             }
16205             else if (changed)
16206             {
16207                 addToPending = true;
16208
16209                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16210             }
16211         }
16212
16213         if (!addToPending)
16214         {
16215             return;
16216         }
16217
16218         if (block->bbStkDepth > 0)
16219         {
16220             // We need to fix the types of any spill temps that might have changed:
16221             //   int->native int, float->double, int->byref, etc.
16222             impRetypeEntryStateTemps(block);
16223         }
16224
16225         // OK, we must add to the pending list, if it's not already in it.
16226         if (impGetPendingBlockMember(block) != 0)
16227         {
16228             return;
16229         }
16230     }
16231
16232     // Get an entry to add to the pending list
16233
16234     PendingDsc* dsc;
16235
16236     if (impPendingFree)
16237     {
16238         // We can reuse one of the freed up dscs.
16239         dsc            = impPendingFree;
16240         impPendingFree = dsc->pdNext;
16241     }
16242     else
16243     {
16244         // We have to create a new dsc
16245         dsc = new (this, CMK_Unknown) PendingDsc;
16246     }
16247
16248     dsc->pdBB                 = block;
16249     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16250     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16251
16252     // Save the stack trees for later
16253
16254     if (verCurrentState.esStackDepth)
16255     {
16256         impSaveStackState(&dsc->pdSavedStack, false);
16257     }
16258
16259     // Add the entry to the pending list
16260
16261     dsc->pdNext    = impPendingList;
16262     impPendingList = dsc;
16263     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16264
16265     // Various assertions require us to now consider the block as not imported (at least for
16266     // the final time...)
16267     block->bbFlags &= ~BBF_IMPORTED;
16268
16269 #ifdef DEBUG
16270     if (verbose && 0)
16271     {
16272         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16273     }
16274 #endif
16275 }
16276
16277 /*****************************************************************************/
16278 //
16279 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16280 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16281 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16282
16283 void Compiler::impReimportBlockPending(BasicBlock* block)
16284 {
16285     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16286
16287     assert(block->bbFlags & BBF_IMPORTED);
16288
16289     // OK, we must add to the pending list, if it's not already in it.
16290     if (impGetPendingBlockMember(block) != 0)
16291     {
16292         return;
16293     }
16294
16295     // Get an entry to add to the pending list
16296
16297     PendingDsc* dsc;
16298
16299     if (impPendingFree)
16300     {
16301         // We can reuse one of the freed up dscs.
16302         dsc            = impPendingFree;
16303         impPendingFree = dsc->pdNext;
16304     }
16305     else
16306     {
16307         // We have to create a new dsc
16308         dsc = new (this, CMK_ImpStack) PendingDsc;
16309     }
16310
16311     dsc->pdBB = block;
16312
16313     if (block->bbEntryState)
16314     {
16315         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16316         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16317         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16318     }
16319     else
16320     {
16321         dsc->pdThisPtrInit        = TIS_Bottom;
16322         dsc->pdSavedStack.ssDepth = 0;
16323         dsc->pdSavedStack.ssTrees = nullptr;
16324     }
16325
16326     // Add the entry to the pending list
16327
16328     dsc->pdNext    = impPendingList;
16329     impPendingList = dsc;
16330     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16331
16332     // Various assertions require us to now consider the block as not imported (at least for
16333     // the final time...)
16334     block->bbFlags &= ~BBF_IMPORTED;
16335
16336 #ifdef DEBUG
16337     if (verbose && 0)
16338     {
16339         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16340     }
16341 #endif
16342 }
16343
16344 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16345 {
16346     if (comp->impBlockListNodeFreeList == nullptr)
16347     {
16348         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16349     }
16350     else
16351     {
16352         BlockListNode* res             = comp->impBlockListNodeFreeList;
16353         comp->impBlockListNodeFreeList = res->m_next;
16354         return res;
16355     }
16356 }
16357
16358 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16359 {
16360     node->m_next             = impBlockListNodeFreeList;
16361     impBlockListNodeFreeList = node;
16362 }
16363
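// A spill clique, informally: the set of blocks that must agree on the spill temps
// used for stack values that are live across block boundaries. For example, if B1
// and B2 both branch to B3 with one value on the stack, then B1 and B2 are "pred"
// members (they spill into the temps) and B3 is a "succ" member (it reloads from
// them), so all three must use the same base temp. The walk below alternates between
// successors-of-preds and preds-of-succs until the clique is closed.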
16364 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16365 {
16366     bool toDo = true;
16367
16368     noway_assert(!fgComputePredsDone);
16369     if (!fgCheapPredsValid)
16370     {
16371         fgComputeCheapPreds();
16372     }
16373
16374     BlockListNode* succCliqueToDo = nullptr;
16375     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16376     while (toDo)
16377     {
16378         toDo = false;
16379         // Look at the successors of every member of the predecessor to-do list.
16380         while (predCliqueToDo != nullptr)
16381         {
16382             BlockListNode* node = predCliqueToDo;
16383             predCliqueToDo      = node->m_next;
16384             BasicBlock* blk     = node->m_blk;
16385             FreeBlockListNode(node);
16386
16387             const unsigned numSuccs = blk->NumSucc();
16388             for (unsigned succNum = 0; succNum < numSuccs; succNum++)
16389             {
16390                 BasicBlock* succ = blk->GetSucc(succNum);
16391                 // If it's not already in the clique, add it, and also add it
16392                 // as a member of the successor "toDo" set.
16393                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16394                 {
16395                     callback->Visit(SpillCliqueSucc, succ);
16396                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16397                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16398                     toDo           = true;
16399                 }
16400             }
16401         }
16402         // Look at the predecessors of every member of the successor to-do list.
16403         while (succCliqueToDo != nullptr)
16404         {
16405             BlockListNode* node = succCliqueToDo;
16406             succCliqueToDo      = node->m_next;
16407             BasicBlock* blk     = node->m_blk;
16408             FreeBlockListNode(node);
16409
16410             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16411             {
16412                 BasicBlock* predBlock = pred->block;
16413                 // If it's not already in the clique, add it, and also add it
16414                 // as a member of the predecessor "toDo" set.
16415                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16416                 {
16417                     callback->Visit(SpillCliquePred, predBlock);
16418                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16419                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16420                     toDo           = true;
16421                 }
16422             }
16423         }
16424     }
16425
16426     // If this fails, it means we didn't walk the spill clique properly and somehow managed
16427     // to miss walking back to include the predecessor we started from.
16428     // The most likely cause: missing or out-of-date bbPreds.
16429     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16430 }
16431
16432 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16433 {
16434     if (predOrSucc == SpillCliqueSucc)
16435     {
16436         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16437         blk->bbStkTempsIn = m_baseTmp;
16438     }
16439     else
16440     {
16441         assert(predOrSucc == SpillCliquePred);
16442         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16443         blk->bbStkTempsOut = m_baseTmp;
16444     }
16445 }
16446
16447 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16448 {
16449     // For Preds we could be a little smarter and just find the existing store
16450     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16451     // just re-import the whole block (just like we do for successors)
16452
16453     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16454     {
16455         // If we haven't imported this block and we're not going to (because it isn't on
16456         // the pending list) then just ignore it for now.
16457
16458         // This block has either never been imported (EntryState == NULL) or it failed
16459         // verification. Neither state requires us to force it to be imported now.
16460         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16461         return;
16462     }
16463
16464     // For successors we have a valid verCurrentState, so just mark them for reimport
16465     // the 'normal' way
16466     // Unlike predecessors, we *DO* need to reimport the current block because the
16467     // initial import had the wrong entry state types.
16468     // Similarly, blocks that are currently on the pending list still need to call
16469     // impImportBlockPending to fix up their entry state.
16470     if (predOrSucc == SpillCliqueSucc)
16471     {
16472         m_pComp->impReimportMarkBlock(blk);
16473
16474         // Set the current stack state to that of the blk->bbEntryState
16475         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16476         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16477
16478         m_pComp->impImportBlockPending(blk);
16479     }
16480     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16481     {
16482         // As described above, we are only visiting predecessors so they can
16483         // add the appropriate casts; since we have already done that for the current
16484         // block, it does not need to be reimported.
16485         // Nor do we need to reimport blocks that are still pending, but not yet
16486         // imported.
16487         //
16488         // For predecessors, we have no state to seed the EntryState, so we just have
16489         // to assume the existing one is correct.
16490         // If the block is also a successor, it will get the EntryState properly
16491         // updated when it is visited as a successor in the above "if" block.
16492         assert(predOrSucc == SpillCliquePred);
16493         m_pComp->impReimportBlockPending(blk);
16494     }
16495 }
16496
16497 // Re-type the incoming lclVar nodes to match the varDsc.
16498 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16499 {
16500     if (blk->bbEntryState != nullptr)
16501     {
16502         EntryState* es = blk->bbEntryState;
16503         for (unsigned level = 0; level < es->esStackDepth; level++)
16504         {
16505             GenTreePtr tree = es->esStack[level].val;
16506             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16507             {
16508                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16509                 noway_assert(lclNum < lvaCount);
16510                 LclVarDsc* varDsc              = lvaTable + lclNum;
16511                 es->esStack[level].val->gtType = varDsc->TypeGet();
16512             }
16513         }
16514     }
16515 }
16516
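// Informal note: returns the base temp used to spill this block's outgoing stack.
// On first use it grabs verCurrentState.esStackDepth consecutive temps and propagates
// that base to every member of the block's spill clique, so that, e.g., with two live
// stack slots, slot i is always spilled to and reloaded from temp (baseTmp + i).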
16517 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16518 {
16519     if (block->bbStkTempsOut != NO_BASE_TMP)
16520     {
16521         return block->bbStkTempsOut;
16522     }
16523
16524 #ifdef DEBUG
16525     if (verbose)
16526     {
16527         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16528     }
16529 #endif // DEBUG
16530
16531     // Otherwise, choose one, and propagate to all members of the spill clique.
16532     // Grab enough temps for the whole stack.
16533     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16534     SetSpillTempsBase callback(baseTmp);
16535
16536     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16537     // to one spill clique, and similarly can only be the successor to one spill clique
16538     impWalkSpillCliqueFromPred(block, &callback);
16539
16540     return baseTmp;
16541 }
16542
16543 void Compiler::impReimportSpillClique(BasicBlock* block)
16544 {
16545 #ifdef DEBUG
16546     if (verbose)
16547     {
16548         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16549     }
16550 #endif // DEBUG
16551
16552     // If we get here, it is because this block is already part of a spill clique
16553     // and one predecessor had an outgoing live stack slot of type int, and this
16554     // block has an outgoing live stack slot of type native int.
16555     // We need to reset these before traversal because they have already been set
16556     // by the previous walk to determine all the members of the spill clique.
16557     impInlineRoot()->impSpillCliquePredMembers.Reset();
16558     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16559
16560     ReimportSpillClique callback(this);
16561
16562     impWalkSpillCliqueFromPred(block, &callback);
16563 }
16564
16565 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16566 // a copy of "srcState", cloning tree pointers as required.
16567 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16568 {
16569     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16570     {
16571         block->bbEntryState = nullptr;
16572         return;
16573     }
16574
16575     block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16576
16577     // block->bbEntryState.esRefcount = 1;
16578
16579     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
16580     block->bbEntryState->thisInitialized = TIS_Bottom;
16581
16582     if (srcState->esStackDepth > 0)
16583     {
16584         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16585         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16586
16587         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16588         for (unsigned level = 0; level < srcState->esStackDepth; level++)
16589         {
16590             GenTreePtr tree                         = srcState->esStack[level].val;
16591             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16592         }
16593     }
16594
16595     if (verTrackObjCtorInitState)
16596     {
16597         verSetThisInit(block, srcState->thisInitialized);
16598     }
16599
16600     return;
16601 }
16602
16603 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16604 {
16605     assert(tis != TIS_Bottom); // Precondition.
16606     if (block->bbEntryState == nullptr)
16607     {
16608         block->bbEntryState = new (this, CMK_Unknown) EntryState();
16609     }
16610
16611     block->bbEntryState->thisInitialized = tis;
16612 }
16613
16614 /*
16615  * Resets the current state to the state at the start of the basic block
16616  */
16617 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16618 {
16619
16620     if (block->bbEntryState == nullptr)
16621     {
16622         destState->esStackDepth    = 0;
16623         destState->thisInitialized = TIS_Bottom;
16624         return;
16625     }
16626
16627     destState->esStackDepth = block->bbEntryState->esStackDepth;
16628
16629     if (destState->esStackDepth > 0)
16630     {
16631         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16632
16633         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16634     }
16635
16636     destState->thisInitialized = block->bbThisOnEntry();
16637
16638     return;
16639 }
16640
16641 ThisInitState BasicBlock::bbThisOnEntry()
16642 {
16643     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16644 }
16645
16646 unsigned BasicBlock::bbStackDepthOnEntry()
16647 {
16648     return (bbEntryState ? bbEntryState->esStackDepth : 0);
16649 }
16650
16651 void BasicBlock::bbSetStack(void* stackBuffer)
16652 {
16653     assert(bbEntryState);
16654     assert(stackBuffer);
16655     bbEntryState->esStack = (StackEntry*)stackBuffer;
16656 }
16657
16658 StackEntry* BasicBlock::bbStackOnEntry()
16659 {
16660     assert(bbEntryState);
16661     return bbEntryState->esStack;
16662 }
16663
16664 void Compiler::verInitCurrentState()
16665 {
16666     verTrackObjCtorInitState        = FALSE;
16667     verCurrentState.thisInitialized = TIS_Bottom;
16668
16669     if (tiVerificationNeeded)
16670     {
16671         // Track this ptr initialization
16672         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16673         {
16674             verTrackObjCtorInitState        = TRUE;
16675             verCurrentState.thisInitialized = TIS_Uninit;
16676         }
16677     }
16678
16679     // initialize stack info
16680
16681     verCurrentState.esStackDepth = 0;
16682     assert(verCurrentState.esStack != nullptr);
16683
16684     // copy current state to entry state of first BB
16685     verInitBBEntryState(fgFirstBB, &verCurrentState);
16686 }
16687
16688 Compiler* Compiler::impInlineRoot()
16689 {
16690     if (impInlineInfo == nullptr)
16691     {
16692         return this;
16693     }
16694     else
16695     {
16696         return impInlineInfo->InlineRoot;
16697     }
16698 }
16699
16700 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16701 {
16702     if (predOrSucc == SpillCliquePred)
16703     {
16704         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16705     }
16706     else
16707     {
16708         assert(predOrSucc == SpillCliqueSucc);
16709         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16710     }
16711 }
16712
16713 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16714 {
16715     if (predOrSucc == SpillCliquePred)
16716     {
16717         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16718     }
16719     else
16720     {
16721         assert(predOrSucc == SpillCliqueSucc);
16722         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16723     }
16724 }
16725
16726 /*****************************************************************************
16727  *
16728  *  Convert the instrs ("import") into our internal format (trees). The
16729  *  basic flowgraph has already been constructed and is passed in.
16730  */
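//
// Informal sketch of the worker-list algorithm used below:
//
//     queue the first non-internal block;
//     while the pending list is non-empty:
//         pop a block and restore its saved entry stack state;
//         import its IL into GenTrees (impImportBlock), which in turn queues
//         any successors that are new or whose pre-state changed;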
16731
16732 void Compiler::impImport(BasicBlock* method)
16733 {
16734 #ifdef DEBUG
16735     if (verbose)
16736     {
16737         printf("*************** In impImport() for %s\n", info.compFullName);
16738     }
16739 #endif
16740
16741     /* Allocate the stack contents */
16742
16743     if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16744     {
16745         /* Use local variable, don't waste time allocating on the heap */
16746
16747         impStkSize              = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16748         verCurrentState.esStack = impSmallStack;
16749     }
16750     else
16751     {
16752         impStkSize              = info.compMaxStack;
16753         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16754     }
16755
16756     // initialize the entry state at start of method
16757     verInitCurrentState();
16758
16759     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16760     Compiler* inlineRoot = impInlineRoot();
16761     if (this == inlineRoot) // These are only used on the root of the inlining tree.
16762     {
16763         // We have initialized these previously, but to size 0.  Make them larger.
16764         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16765         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16766         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16767     }
16768     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16769     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16770     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16771     impBlockListNodeFreeList = nullptr;
16772
16773 #ifdef DEBUG
16774     impLastILoffsStmt   = nullptr;
16775     impNestedStackSpill = false;
16776 #endif
16777     impBoxTemp = BAD_VAR_NUM;
16778
16779     impPendingList = impPendingFree = nullptr;
16780
16781     /* Add the entry-point to the worker-list */
16782
16783     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16784     // from EH normalization.
16785     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
16786     // out.
16787     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16788     {
16789         // Treat these as imported.
16790         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16791         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16792         method->bbFlags |= BBF_IMPORTED;
16793     }
16794
16795     impImportBlockPending(method);
16796
16797     /* Import blocks in the worker-list until there are no more */
16798
16799     while (impPendingList)
16800     {
16801         /* Remove the entry at the front of the list */
16802
16803         PendingDsc* dsc = impPendingList;
16804         impPendingList  = impPendingList->pdNext;
16805         impSetPendingBlockMember(dsc->pdBB, 0);
16806
16807         /* Restore the stack state */
16808
16809         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16810         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
16811         if (verCurrentState.esStackDepth)
16812         {
16813             impRestoreStackState(&dsc->pdSavedStack);
16814         }
16815
16816         /* Add the entry to the free list for reuse */
16817
16818         dsc->pdNext    = impPendingFree;
16819         impPendingFree = dsc;
16820
16821         /* Now import the block */
16822
16823         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16824         {
16825
16826 #ifdef _TARGET_64BIT_
16827             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16828             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
16829             // method for further explanation on why we raise this exception instead of making the jitted
16830             // code throw the verification exception during execution.
16831             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16832             {
16833                 BADCODE("Basic block marked as not verifiable");
16834             }
16835             else
16836 #endif // _TARGET_64BIT_
16837             {
16838                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16839                 impEndTreeList(dsc->pdBB);
16840             }
16841         }
16842         else
16843         {
16844             impImportBlock(dsc->pdBB);
16845
16846             if (compDonotInline())
16847             {
16848                 return;
16849             }
16850             if (compIsForImportOnly() && !tiVerificationNeeded)
16851             {
16852                 return;
16853             }
16854         }
16855     }
16856
16857 #ifdef DEBUG
16858     if (verbose && info.compXcptnsCount)
16859     {
16860         printf("\nAfter impImport() added block for try,catch,finally");
16861         fgDispBasicBlocks();
16862         printf("\n");
16863     }
16864
16865     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16866     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16867     {
16868         block->bbFlags &= ~BBF_VISITED;
16869     }
16870 #endif
16871
16872     assert(!compIsForInlining() || !tiVerificationNeeded);
16873 }
16874
16875 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16876 // The invariant here is that if it's not a ref or a method and has a class handle,
16877 // it's a valuetype.
16878 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16879 {
16880     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16881     {
16882         return true;
16883     }
16884     else
16885     {
16886         return false;
16887     }
16888 }
16889
16890 /*****************************************************************************
16891  *  Check to see if the tree is the address of a local or
16892     the address of a field in a local.
16893
16894     *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16895
16896  */
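//  Informally, this matches tree shapes such as ADDR(LCL_VAR) for "&local" and
//  ADDR(FIELD(ADDR(LCL_VAR))) for "&local.field" (possibly through nested fields);
//  anything rooted elsewhere, e.g. a static field, returns FALSE.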
16897
16898 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16899 {
16900     if (tree->gtOper != GT_ADDR)
16901     {
16902         return FALSE;
16903     }
16904
16905     GenTreePtr op = tree->gtOp.gtOp1;
16906     while (op->gtOper == GT_FIELD)
16907     {
16908         op = op->gtField.gtFldObj;
16909         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16910         {
16911             op = op->gtOp.gtOp1;
16912         }
16913         else
16914         {
16915             return false;
16916         }
16917     }
16918
16919     if (op->gtOper == GT_LCL_VAR)
16920     {
16921         *lclVarTreeOut = op;
16922         return TRUE;
16923     }
16924     else
16925     {
16926         return FALSE;
16927     }
16928 }
16929
16930 //------------------------------------------------------------------------
16931 // impMakeDiscretionaryInlineObservations: make observations that help
16932 // determine the profitability of a discretionary inline
16933 //
16934 // Arguments:
16935 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16936 //    inlineResult -- InlineResult accumulating information about this inline
16937 //
16938 // Notes:
16939 //    If inlining or prejitting the root, this method also makes
16940 //    various observations about the method that factor into inline
16941 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
16942
16943 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16944 {
16945     assert((pInlineInfo != nullptr && compIsForInlining()) ||  // Perform the actual inlining.
16946            (pInlineInfo == nullptr && !compIsForInlining())     // Calculate the static inlining hint for ngen.
16947            );
16948
16949     // If we're really inlining, we should just have one result in play.
16950     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16951
16952     // If this is a "forceinline" method, the JIT probably shouldn't have gone
16953     // to the trouble of estimating the native code size. Even if it did, it
16954     // shouldn't be relying on the result of this method.
16955     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16956
16957     // Note if the caller contains NEWOBJ or NEWARR.
16958     Compiler* rootCompiler = impInlineRoot();
16959
16960     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16961     {
16962         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16963     }
16964
16965     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16966     {
16967         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16968     }
16969
16970     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16971     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16972
16973     if (isSpecialMethod)
16974     {
16975         if (calleeIsStatic)
16976         {
16977             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16978         }
16979         else
16980         {
16981             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16982         }
16983     }
16984     else if (!calleeIsStatic)
16985     {
16986         // Callee is an instance method.
16987         //
16988         // Check if the callee has the same 'this' as the root.
16989         if (pInlineInfo != nullptr)
16990         {
16991             GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16992             assert(thisArg);
16993             bool isSameThis = impIsThis(thisArg);
16994             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16995         }
16996     }
16997
16998     // Note if the callee's class is a promotable struct
16999     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
17000     {
17001         lvaStructPromotionInfo structPromotionInfo;
17002         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
17003         if (structPromotionInfo.canPromote)
17004         {
17005             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
17006         }
17007     }
17008
17009 #ifdef FEATURE_SIMD
17010
17011     // Note if this method has SIMD args or a SIMD return value
17012     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
17013     {
17014         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
17015     }
17016
17017 #endif // FEATURE_SIMD
17018
17019     // Roughly classify callsite frequency.
17020     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
17021
17022     // If this is a prejit root, or a maximally hot block...
17023     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
17024     {
17025         frequency = InlineCallsiteFrequency::HOT;
17026     }
17027     // No training data.  Look for loop-like things.
17028     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
17029     // However, give it to things nearby.
17030     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
17031              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
17032     {
17033         frequency = InlineCallsiteFrequency::LOOP;
17034     }
17035     else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
17036     {
17037         frequency = InlineCallsiteFrequency::WARM;
17038     }
17039     // Call sites in rarely run blocks, or a callee that is a class constructor, classify as rare.
17040     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
17041     {
17042         frequency = InlineCallsiteFrequency::RARE;
17043     }
17044     else
17045     {
17046         frequency = InlineCallsiteFrequency::BORING;
17047     }
17048
17049     // Also capture the block weight of the call site.  In the prejit
17050     // root case, assume there's some hot call site for this method.
17051     unsigned weight = 0;
17052
17053     if (pInlineInfo != nullptr)
17054     {
17055         weight = pInlineInfo->iciBlock->bbWeight;
17056     }
17057     else
17058     {
17059         weight = BB_MAX_WEIGHT;
17060     }
17061
17062     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
17063     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
17064 }
17065
17066 /*****************************************************************************
17067  This method makes the STATIC inlining decision based solely on the IL code.
17068  It should not make any inlining decision based on the call-site context.
17069  If forceInline is true, then the inlining decision should not depend on
17070  performance heuristics (code size, etc.).
17071  */
17072
17073 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
17074                               CORINFO_METHOD_INFO*  methInfo,
17075                               bool                  forceInline,
17076                               InlineResult*         inlineResult)
17077 {
17078     unsigned codeSize = methInfo->ILCodeSize;
17079
17080     // We shouldn't have made up our minds yet...
17081     assert(!inlineResult->IsDecided());
17082
17083     if (methInfo->EHcount)
17084     {
17085         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
17086         return;
17087     }
17088
17089     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
17090     {
17091         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17092         return;
17093     }
17094
17095     // For now we don't inline varargs (import code can't handle it)
17096
17097     if (methInfo->args.isVarArg())
17098     {
17099         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17100         return;
17101     }
17102
17103     // Reject if it has too many locals.
17104     // This is currently an implementation limit due to fixed-size arrays in the
17105     // inline info, rather than a performance heuristic.
17106
17107     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17108
17109     if (methInfo->locals.numArgs > MAX_INL_LCLS)
17110     {
17111         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17112         return;
17113     }
17114
17115     // Make sure there aren't too many arguments.
17116     // This is currently an implementation limit due to fixed-size arrays in the
17117     // inline info, rather than a performance heuristic.
17118
17119     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17120
17121     if (methInfo->args.numArgs > MAX_INL_ARGS)
17122     {
17123         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17124         return;
17125     }
17126
17127     // Note force inline state
17128
17129     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17130
17131     // Note IL code size
17132
17133     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17134
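    // Noting the IL code size here (and maxstack below) can itself fail the inline
    // if the policy in use judges the value too large, hence the IsFailure() checks
    // that follow each Note.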
17135     if (inlineResult->IsFailure())
17136     {
17137         return;
17138     }
17139
17140     // Make sure maxstack is not too big
17141
17142     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17143
17144     if (inlineResult->IsFailure())
17145     {
17146         return;
17147     }
17148 }
17149
17150 /*****************************************************************************
 Runs the preliminary inlining checks for the given call (method info, IL-based
 limits, class initialization, VM veto) and, on success, builds and returns the
 InlineCandidateInfo through ppInlineCandidateInfo.
17151  */
17152
17153 void Compiler::impCheckCanInline(GenTreePtr             call,
17154                                  CORINFO_METHOD_HANDLE  fncHandle,
17155                                  unsigned               methAttr,
17156                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17157                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17158                                  InlineResult*          inlineResult)
17159 {
17160     // Either the EE or the JIT might throw exceptions below.
17161     // If that happens, just don't inline the method.
17162
17163     struct Param
17164     {
17165         Compiler*              pThis;
17166         GenTreePtr             call;
17167         CORINFO_METHOD_HANDLE  fncHandle;
17168         unsigned               methAttr;
17169         CORINFO_CONTEXT_HANDLE exactContextHnd;
17170         InlineResult*          result;
17171         InlineCandidateInfo**  ppInlineCandidateInfo;
17172     } param;
17173     memset(&param, 0, sizeof(param));
17174
17175     param.pThis                 = this;
17176     param.call                  = call;
17177     param.fncHandle             = fncHandle;
17178     param.methAttr              = methAttr;
17179     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17180     param.result                = inlineResult;
17181     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17182
17183     bool success = eeRunWithErrorTrap<Param>(
17184         [](Param* pParam) {
17185             DWORD                  dwRestrictions = 0;
17186             CorInfoInitClassResult initClassResult;
17187
17188 #ifdef DEBUG
17189             const char* methodName;
17190             const char* className;
17191             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17192
17193             if (JitConfig.JitNoInline())
17194             {
17195                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17196                 goto _exit;
17197             }
17198 #endif
17199
17200             /* Try to get the code address/size for the method */
17201
17202             CORINFO_METHOD_INFO methInfo;
17203             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17204             {
17205                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17206                 goto _exit;
17207             }
17208
17209             bool forceInline;
17210             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17211
17212             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17213
17214             if (pParam->result->IsFailure())
17215             {
17216                 assert(pParam->result->IsNever());
17217                 goto _exit;
17218             }
17219
17220             // Speculatively check if initClass() can be done.
17221             // If it can be done, we will try to inline the method. If inlining
17222             // succeeds, then we will do the non-speculative initClass() and commit it.
17223             // If this speculative call to initClass() fails, there is no point
17224             // trying to inline this method.
17225             initClassResult =
17226                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17227                                                            pParam->exactContextHnd /* context */,
17228                                                            TRUE /* speculative */);
17229
17230             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17231             {
17232                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17233                 goto _exit;
17234             }
17235
17236             // Give the EE the final say in whether to inline or not.
17237             // This should be done last since, for verifiable code, it can be expensive.
17238
17239             /* VM Inline check also ensures that the method is verifiable if needed */
17240             CorInfoInline vmResult;
17241             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17242                                                                   &dwRestrictions);
17243
17244             if (vmResult == INLINE_FAIL)
17245             {
17246                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17247             }
17248             else if (vmResult == INLINE_NEVER)
17249             {
17250                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17251             }
17252
17253             if (pParam->result->IsFailure())
17254             {
17255                 // Make sure not to report this one.  It was already reported by the VM.
17256                 pParam->result->SetReported();
17257                 goto _exit;
17258             }
17259
17260             // check for unsupported inlining restrictions
17261             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17262
17263             if (dwRestrictions & INLINE_SAME_THIS)
17264             {
17265                 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17266                 assert(thisArg);
17267
17268                 if (!pParam->pThis->impIsThis(thisArg))
17269                 {
17270                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17271                     goto _exit;
17272                 }
17273             }
17274
17275             /* Get the method properties */
17276
17277             CORINFO_CLASS_HANDLE clsHandle;
17278             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17279             unsigned clsAttr;
17280             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17281
17282             /* Get the return type */
17283
17284             var_types fncRetType;
17285             fncRetType = pParam->call->TypeGet();
17286
17287 #ifdef DEBUG
17288             var_types fncRealRetType;
17289             fncRealRetType = JITtype2varType(methInfo.args.retType);
17290
17291             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17292                    // <BUGNUM> VSW 288602 </BUGNUM>
17293                    // In case of IJW, we allow to assign a native pointer to a BYREF.
17294                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17295                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17296 #endif
17297
17298             //
17299             // Allocate an InlineCandidateInfo structure
17300             //
17301             InlineCandidateInfo* pInfo;
17302             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17303
17304             pInfo->dwRestrictions  = dwRestrictions;
17305             pInfo->methInfo        = methInfo;
17306             pInfo->methAttr        = pParam->methAttr;
17307             pInfo->clsHandle       = clsHandle;
17308             pInfo->clsAttr         = clsAttr;
17309             pInfo->fncRetType      = fncRetType;
17310             pInfo->exactContextHnd = pParam->exactContextHnd;
17311             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17312             pInfo->initClassResult = initClassResult;
17313
17314             *(pParam->ppInlineCandidateInfo) = pInfo;
17315
17316         _exit:;
17317         },
17318         &param);
17319     if (!success)
17320     {
17321         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17322     }
17323 }
17324
17325 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
17326                                       GenTreePtr    curArgVal,
17327                                       unsigned      argNum,
17328                                       InlineResult* inlineResult)
17329 {
17330     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17331
17332     if (curArgVal->gtOper == GT_MKREFANY)
17333     {
17334         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17335         return;
17336     }
17337
17338     inlCurArgInfo->argNode = curArgVal;
17339
17340     GenTreePtr lclVarTree;
17341     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17342     {
17343         inlCurArgInfo->argIsByRefToStructLocal = true;
17344 #ifdef FEATURE_SIMD
17345         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17346         {
17347             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17348         }
17349 #endif // FEATURE_SIMD
17350     }
17351
17352     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17353     {
17354         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17355         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17356     }
17357
17358     if (curArgVal->gtOper == GT_LCL_VAR)
17359     {
17360         inlCurArgInfo->argIsLclVar = true;
17361
17362         /* Remember the "original" argument number */
17363         curArgVal->gtLclVar.gtLclILoffs = argNum;
17364     }
17365
17366     if ((curArgVal->OperKind() & GTK_CONST) ||
17367         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17368     {
17369         inlCurArgInfo->argIsInvariant = true;
17370         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17371         {
17372             /* Abort, but do not mark as not inlinable */
17373             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17374             return;
17375         }
17376     }
17377
17378     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17379     {
17380         inlCurArgInfo->argHasLdargaOp = true;
17381     }
17382
17383 #ifdef DEBUG
17384     if (verbose)
17385     {
17386         if (inlCurArgInfo->argIsThis)
17387         {
17388             printf("thisArg:");
17389         }
17390         else
17391         {
17392             printf("\nArgument #%u:", argNum);
17393         }
17394         if (inlCurArgInfo->argIsLclVar)
17395         {
17396             printf(" is a local var");
17397         }
17398         if (inlCurArgInfo->argIsInvariant)
17399         {
17400             printf(" is a constant");
17401         }
17402         if (inlCurArgInfo->argHasGlobRef)
17403         {
17404             printf(" has global refs");
17405         }
17406         if (inlCurArgInfo->argHasSideEff)
17407         {
17408             printf(" has side effects");
17409         }
17410         if (inlCurArgInfo->argHasLdargaOp)
17411         {
17412             printf(" has ldarga effect");
17413         }
17414         if (inlCurArgInfo->argHasStargOp)
17415         {
17416             printf(" has starg effect");
17417         }
17418         if (inlCurArgInfo->argIsByRefToStructLocal)
17419         {
17420             printf(" is byref to a struct local");
17421         }
17422
17423         printf("\n");
17424         gtDispTree(curArgVal);
17425         printf("\n");
17426     }
17427 #endif
17428 }
17429
17430 /*****************************************************************************
17431  *  impInlineInitVars: record type and usage info for the inlinee's arguments and locals.
17432  */
17433
17434 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17435 {
17436     assert(!compIsForInlining());
17437
17438     GenTreePtr           call         = pInlineInfo->iciCall;
17439     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
17440     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
17441     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
17442     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
17443     InlineResult*        inlineResult = pInlineInfo->inlineResult;
17444
17445     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17446
17447     /* Init the argument struct */
17448
17449     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17450
17451     /* Get hold of the 'this' pointer and the argument list proper */
17452
17453     GenTreePtr thisArg = call->gtCall.gtCallObjp;
17454     GenTreePtr argList = call->gtCall.gtCallArgs;
17455     unsigned   argCnt  = 0; // Count of the arguments
17456
17457     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17458
17459     if (thisArg)
17460     {
17461         inlArgInfo[0].argIsThis = true;
17462
17463         impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17464
17465         if (inlineResult->IsFailure())
17466         {
17467             return;
17468         }
17469
17470         /* Increment the argument count */
17471         argCnt++;
17472     }
17473
17474     /* Record some information about each of the arguments */
17475     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17476
17477 #if USER_ARGS_COME_LAST
17478     unsigned typeCtxtArg = thisArg ? 1 : 0;
17479 #else  // USER_ARGS_COME_LAST
17480     unsigned typeCtxtArg = methInfo->args.totalILArgs();
17481 #endif // USER_ARGS_COME_LAST
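    // The hidden generic context argument's position depends on the target's argument
    // ordering: when user args come last it sits right after 'this' (or first, if there
    // is no 'this'); otherwise it follows all of the IL arguments.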
17482
17483     for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17484     {
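        // When the call has a return buffer, it is the first entry in the arg list;
        // it is not an IL-level argument of the inlinee, so skip it here.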
17485         if (argTmp == argList && hasRetBuffArg)
17486         {
17487             continue;
17488         }
17489
17490         // Ignore the type context argument
17491         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17492         {
17493             pInlineInfo->typeContextArg = typeCtxtArg;
17494             typeCtxtArg                 = 0xFFFFFFFF;
17495             continue;
17496         }
17497
17498         assert(argTmp->gtOper == GT_LIST);
17499         GenTreePtr argVal = argTmp->gtOp.gtOp1;
17500
17501         impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17502
17503         if (inlineResult->IsFailure())
17504         {
17505             return;
17506         }
17507
17508         /* Increment the argument count */
17509         argCnt++;
17510     }
17511
17512     /* Make sure we got the arg number right */
17513     assert(argCnt == methInfo->args.totalILArgs());
17514
17515 #ifdef FEATURE_SIMD
17516     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17517 #endif // FEATURE_SIMD
17518
17519     /* We have typeless opcodes, get type information from the signature */
17520
17521     if (thisArg)
17522     {
17523         var_types sigType;
17524
17525         if (clsAttr & CORINFO_FLG_VALUECLASS)
17526         {
17527             sigType = TYP_BYREF;
17528         }
17529         else
17530         {
17531             sigType = TYP_REF;
17532         }
17533
17534         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17535         lclVarInfo[0].lclHasLdlocaOp = false;
17536
17537 #ifdef FEATURE_SIMD
17538         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17539         // the inlining multiplier) for anything in that assembly.
17540         // But we only need to normalize it if it is a TYP_STRUCT
17541         // (which we need to do even if we have already set foundSIMDType).
17542         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17543         {
17544             if (sigType == TYP_STRUCT)
17545             {
17546                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17547             }
17548             foundSIMDType = true;
17549         }
17550 #endif // FEATURE_SIMD
17551         lclVarInfo[0].lclTypeInfo = sigType;
17552
17553         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
17554                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17555                 (clsAttr & CORINFO_FLG_VALUECLASS)));
17556
17557         if (genActualType(thisArg->gtType) != genActualType(sigType))
17558         {
17559             if (sigType == TYP_REF)
17560             {
17561                 /* The argument cannot be bashed into a ref (see bug 750871) */
17562                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17563                 return;
17564             }
17565
17566             /* This can only happen with byrefs <-> ints/shorts */
17567
17568             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17569             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17570
17571             if (sigType == TYP_BYREF)
17572             {
17573                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17574             }
17575             else if (thisArg->gtType == TYP_BYREF)
17576             {
17577                 assert(sigType == TYP_I_IMPL);
17578
17579                 /* If possible change the BYREF to an int */
17580                 if (thisArg->IsVarAddr())
17581                 {
17582                     thisArg->gtType              = TYP_I_IMPL;
17583                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17584                 }
17585                 else
17586                 {
17587                     /* Arguments 'int <- byref' cannot be bashed */
17588                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17589                     return;
17590                 }
17591             }
17592         }
17593     }
17594
17595     /* Init the types of the arguments and make sure the types
17596      * from the trees match the types in the signature */
17597
17598     CORINFO_ARG_LIST_HANDLE argLst;
17599     argLst = methInfo->args.args;
17600
17601     unsigned i;
17602     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17603     {
17604         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17605
17606         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17607
17608 #ifdef FEATURE_SIMD
17609         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17610         {
17611             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17612             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17613             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17614             foundSIMDType = true;
17615             if (sigType == TYP_STRUCT)
17616             {
17617                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17618                 sigType              = structType;
17619             }
17620         }
17621 #endif // FEATURE_SIMD
17622
17623         lclVarInfo[i].lclTypeInfo    = sigType;
17624         lclVarInfo[i].lclHasLdlocaOp = false;
17625
17626         /* Does the tree type match the signature type? */
17627
17628         GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17629
17630         if (sigType != inlArgNode->gtType)
17631         {
17632             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17633                but in bad IL cases with caller-callee signature mismatches we can see other types.
17634                Intentionally reject cases with mismatches so the jit is more flexible when
17635                encountering bad IL. */
17636
17637             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17638                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17639                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17640
17641             if (!isPlausibleTypeMatch)
17642             {
17643                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17644                 return;
17645             }
17646
17647             /* Is it a narrowing or widening cast?
17648              * Widening casts are ok since the value computed is already
17649              * normalized to an int (on the IL stack) */
17650
17651             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17652             {
17653                 if (sigType == TYP_BYREF)
17654                 {
17655                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17656                 }
17657                 else if (inlArgNode->gtType == TYP_BYREF)
17658                 {
17659                     assert(varTypeIsIntOrI(sigType));
17660
17661                     /* If possible bash the BYREF to an int */
17662                     if (inlArgNode->IsVarAddr())
17663                     {
17664                         inlArgNode->gtType           = TYP_I_IMPL;
17665                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17666                     }
17667                     else
17668                     {
17669                         /* Arguments 'int <- byref' cannot be changed */
17670                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17671                         return;
17672                     }
17673                 }
17674                 else if (genTypeSize(sigType) < EA_PTRSIZE)
17675                 {
17676                     /* Narrowing cast */
17677
17678                     if (inlArgNode->gtOper == GT_LCL_VAR &&
17679                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17680                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17681                     {
17682                         /* We don't need to insert a cast here as the variable
17683                            was assigned a normalized value of the right type */
17684
17685                         continue;
17686                     }
17687
17688                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17689
17690                     inlArgInfo[i].argIsLclVar = false;
17691
17692                     /* Try to fold the node in case we have constant arguments */
17693
17694                     if (inlArgInfo[i].argIsInvariant)
17695                     {
17696                         inlArgNode            = gtFoldExprConst(inlArgNode);
17697                         inlArgInfo[i].argNode = inlArgNode;
17698                         assert(inlArgNode->OperIsConst());
17699                     }
17700                 }
17701 #ifdef _TARGET_64BIT_
17702                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17703                 {
17704                     // This should only happen for int -> native int widening
17705                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17706
17707                     inlArgInfo[i].argIsLclVar = false;
17708
17709                     /* Try to fold the node in case we have constant arguments */
17710
17711                     if (inlArgInfo[i].argIsInvariant)
17712                     {
17713                         inlArgNode            = gtFoldExprConst(inlArgNode);
17714                         inlArgInfo[i].argNode = inlArgNode;
17715                         assert(inlArgNode->OperIsConst());
17716                     }
17717                 }
17718 #endif // _TARGET_64BIT_
17719             }
17720         }
17721     }
17722
17723     /* Init the types of the local variables */
17724
17725     CORINFO_ARG_LIST_HANDLE localsSig;
17726     localsSig = methInfo->locals.args;
17727
17728     for (i = 0; i < methInfo->locals.numArgs; i++)
17729     {
17730         bool      isPinned;
17731         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17732
17733         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17734         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
17735         lclVarInfo[i + argCnt].lclTypeInfo    = type;
17736
17737         if (varTypeIsGC(type))
17738         {
17739             pInlineInfo->numberOfGcRefLocals++;
17740         }
17741
17742         if (isPinned)
17743         {
17744             // Pinned locals may cause inlines to fail.
17745             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17746             if (inlineResult->IsFailure())
17747             {
17748                 return;
17749             }
17750         }
17751
17752         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17753
17754         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17755         // out on the inline.
17756         if (type == TYP_STRUCT)
17757         {
17758             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17759             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17760             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17761             {
17762                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17763                 if (inlineResult->IsFailure())
17764                 {
17765                     return;
17766                 }
17767
17768                 // Do further notification in the case where the call site is rare; some policies do
17769                 // not track the relative hotness of call sites for "always" inline cases.
17770                 if (pInlineInfo->iciBlock->isRunRarely())
17771                 {
17772                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17773                     if (inlineResult->IsFailure())
17774                     {
17775
17776                         return;
17777                     }
17778                 }
17779             }
17780         }
17781
17782         localsSig = info.compCompHnd->getArgNext(localsSig);
17783
17784 #ifdef FEATURE_SIMD
17785         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17786         {
17787             foundSIMDType = true;
17788             if (featureSIMD && type == TYP_STRUCT)
17789             {
17790                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17791                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17792             }
17793         }
17794 #endif // FEATURE_SIMD
17795     }
17796
17797 #ifdef FEATURE_SIMD
17798     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17799     {
17800         foundSIMDType = true;
17801     }
17802     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17803 #endif // FEATURE_SIMD
17804 }
17805
17806 //------------------------------------------------------------------------
17807 // impInlineFetchLocal: get a local var that represents an inlinee local
17808 //
17809 // Arguments:
17810 //    lclNum -- number of the inlinee local
17811 //    reason -- debug string describing purpose of the local var
17812 //
17813 // Returns:
17814 //    Number of the local to use
17815 //
17816 // Notes:
17817 //    This method is invoked only for locals actually used in the
17818 //    inlinee body.
17819 //
17820 //    Allocates a new temp if necessary, and copies key properties
17821 //    over from the inlinee local var info.
17822
17823 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17824 {
17825     assert(compIsForInlining());
17826
17827     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17828
17829     if (tmpNum == BAD_VAR_NUM)
17830     {
17831         const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
17832         const var_types      lclTyp       = inlineeLocal.lclTypeInfo;
17833
17834         // The lifetime of this local might span multiple BBs.
17835         // So it is a long lifetime local.
17836         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17837
17838         // Copy over key info
17839         lvaTable[tmpNum].lvType                 = lclTyp;
17840         lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
17841         lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
17842         lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
17843         lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
17844
17845         // Copy over class handle for ref types. Note this may be a
17846         // shared type -- someday perhaps we can get the exact
17847         // signature and pass in a more precise type.
17848         if (lclTyp == TYP_REF)
17849         {
17850             lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
17851         }
17852
17853         if (inlineeLocal.lclVerTypeInfo.IsStruct())
17854         {
17855             if (varTypeIsStruct(lclTyp))
17856             {
17857                 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
17858             }
17859             else
17860             {
17861                 // This is a wrapped primitive.  Make sure the verstate knows that
17862                 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
17863             }
17864         }
17865
17866 #ifdef DEBUG
17867         // Sanity check that we're properly prepared for gc ref locals.
17868         if (varTypeIsGC(lclTyp))
17869         {
17870             // Since there are gc locals we should have seen them earlier
17871             // and if there was a return value, set up the spill temp.
17872             assert(impInlineInfo->HasGcRefLocals());
17873             assert((info.compRetNativeType == TYP_VOID) || (lvaInlineeReturnSpillTemp != BAD_VAR_NUM));
17874         }
17875         else
17876         {
17877             // Make sure all pinned locals count as gc refs.
17878             assert(!inlineeLocal.lclIsPinned);
17879         }
17880 #endif // DEBUG
17881     }
17882
17883     return tmpNum;
17884 }
17885
17886 //------------------------------------------------------------------------
17887 // impInlineFetchArg: return tree node for argument value in an inlinee
17888 //
17889 // Arguments:
17890 //    lclNum -- argument number in inlinee IL
17891 //    inlArgInfo -- argument info for inlinee
17892 //    lclVarInfo -- var info for inlinee
17893 //
17894 // Returns:
17895 //    Tree for the argument's value. Often an inlinee-scoped temp
17896 //    GT_LCL_VAR but can be other tree kinds, if the argument
17897 //    expression from the caller can be directly substituted into the
17898 //    inlinee body.
17899 //
17900 // Notes:
17901 //    Must be used only for arguments -- use impInlineFetchLocal for
17902 //    inlinee locals.
17903 //
17904 //    Direct substitution is performed when the formal argument cannot
17905 //    change value in the inlinee body (no starg or ldarga), and the
17906 //    actual argument expression's value cannot be changed if it is
17907 //    substituted it into the inlinee body.
17908 //
17909 //    Even if an inlinee-scoped temp is returned here, it may later be
17910 //    "bashed" to a caller-supplied tree when arguments are actually
17911 //    passed (see fgInlinePrependStatements). Bashing can happen if
17912 //    the argument ends up being single use and other conditions are
17913 //    met. So the contents of the tree returned here may not end up
17914 //    being the ones ultimately used for the argument.
17915 //
17916 //    This method will side effect inlArgInfo. It should only be called
17917 //    for actual uses of the argument in the inlinee.
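//
//    Illustrative examples (hypothetical callee Callee(int a), caller local x):
//      - Callee(42)         : the constant is cloned and substituted directly.
//      - Callee(x)          : the caller's local is used directly (a fresh copy
//                             of the node on second and later uses).
//      - Callee(SomeCall()) : the value is evaluated into an inlinee temp first.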
17918
17919 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17920 {
17921     // Cache the relevant arg and lcl info for this argument.
17922     // We will modify argInfo but not lclVarInfo.
17923     InlArgInfo&          argInfo          = inlArgInfo[lclNum];
17924     const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
17925     const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
17926     const var_types      lclTyp           = lclInfo.lclTypeInfo;
17927     GenTreePtr           op1              = nullptr;
17928
17929     if (argInfo.argIsInvariant && !argCanBeModified)
17930     {
17931         // Directly substitute constants or addresses of locals
17932         //
17933         // Clone the constant. Note that we cannot directly use
17934         // argNode in the trees even if !argInfo.argIsUsed as this
17935         // would introduce aliasing between inlArgInfo[].argNode and
17936         // impInlineExpr. Then gtFoldExpr() could change it, causing
17937         // further references to the argument working off of the
17938         // bashed copy.
17939         op1 = gtCloneExpr(argInfo.argNode);
17940         PREFIX_ASSUME(op1 != nullptr);
17941         argInfo.argTmpNum = BAD_VAR_NUM;
17942     }
17943     else if (argInfo.argIsLclVar && !argCanBeModified)
17944     {
17945         // Directly substitute caller locals
17946         //
17947         // Use the caller-supplied node if this is the first use.
17948         op1               = argInfo.argNode;
17949         argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
17950
17951         // Use an equivalent copy if this is the second or subsequent use.
17952         if (argInfo.argIsUsed)
17953         {
17954             assert(op1->gtOper == GT_LCL_VAR);
17955             assert(lclNum == op1->gtLclVar.gtLclILoffs);
17956
17957             var_types newTyp = lclTyp;
17958
17959             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17960             {
17961                 newTyp = genActualType(lclTyp);
17962             }
17963
17964             // Create a new lcl var node - remember the argument lclNum
17965             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
17966         }
17967     }
17968     else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
17969     {
17970         /* Argument is a by-ref address to a struct, a normed struct, or its field.
17971            In these cases, don't spill the byref to a local, simply clone the tree and use it.
17972            This way we will increase the chance for this byref to be optimized away by
17973            a subsequent "dereference" operation.
17974
17975            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17976            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17977            For example, if the caller is:
17978                 ldloca.s   V_1  // V_1 is a local struct
17979                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
17980            and the callee being inlined has:
17981                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17982                     ldarga.s   ptrToInts
17983                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17984            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17985            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17986         */
17987         assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
17988         op1 = gtCloneExpr(argInfo.argNode);
17989     }
17990     else
17991     {
17992         /* Argument is a complex expression - it must be evaluated into a temp */
17993
17994         if (argInfo.argHasTmp)
17995         {
17996             assert(argInfo.argIsUsed);
17997             assert(argInfo.argTmpNum < lvaCount);
17998
17999             /* Create a new lcl var node - remember the argument lclNum */
18000             op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
18001
18002             /* This is the second or later use of this argument,
18003             so we have to use the temp (instead of the actual arg) */
18004             argInfo.argBashTmpNode = nullptr;
18005         }
18006         else
18007         {
18008             /* First time use */
18009             assert(!argInfo.argIsUsed);
18010
18011             /* Reserve a temp for the expression.
18012             * Use a large size node as we may change it later */
18013
18014             const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
18015
18016             lvaTable[tmpNum].lvType = lclTyp;
18017
18018             // Copy over class handle for ref types. Note this may be
18019             // further improved if it is a shared type and we know the exact context.
18020             if (lclTyp == TYP_REF)
18021             {
18022                 lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18023             }
18024
18025             assert(lvaTable[tmpNum].lvAddrExposed == 0);
18026             if (argInfo.argHasLdargaOp)
18027             {
18028                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
18029             }
18030
18031             if (lclInfo.lclVerTypeInfo.IsStruct())
18032             {
18033                 if (varTypeIsStruct(lclTyp))
18034                 {
18035                     lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18036                 }
18037                 else
18038                 {
18039                     // This is a wrapped primitive.  Make sure the verstate knows that
18040                     lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
18041                 }
18042             }
18043
18044             argInfo.argHasTmp = true;
18045             argInfo.argTmpNum = tmpNum;
18046
18047             // If we require strict exception order, then arguments must
18048             // be evaluated in sequence before the body of the inlined method.
18049             // So we need to evaluate them to a temp.
18050             // Also, if arguments have global references, we need to
18051             // evaluate them to a temp before the inlined body as the
18052             // inlined body may be modifying the global ref.
18053             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
18054             // if it is a struct, because it requires some additional handling.
18055
18056             if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef)
18057             {
18058                 /* Get a *LARGE* LCL_VAR node */
18059                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
18060
18061                 /* Record op1 as the very first use of this argument.
18062                 If there are no further uses of the arg, we may be
18063                 able to use the actual arg node instead of the temp.
18064                 If we do see any further uses, we will clear this. */
18065                 argInfo.argBashTmpNode = op1;
18066             }
18067             else
18068             {
18069                 /* Get a small LCL_VAR node */
18070                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
18071                 /* No bashing of this argument */
18072                 argInfo.argBashTmpNode = nullptr;
18073             }
18074         }
18075     }
18076
18077     // Mark this argument as used.
18078     argInfo.argIsUsed = true;
18079
18080     return op1;
18081 }
18082
18083 /******************************************************************************
18084  Is this the original "this" argument to the call being inlined?
18085
18086  Note that we do not inline methods with "starg 0", and so we do not need to
18087  worry about it.
18088 */
18089
18090 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
18091 {
18092     assert(compIsForInlining());
18093     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
18094 }
18095
18096 //-----------------------------------------------------------------------------
18097 // This function checks if a dereference in the inlinee can guarantee that
18098 // the "this" is non-NULL.
18099 // If we haven't hit a branch or a side effect, and we are dereferencing
18100 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
18101 // then we can avoid a separate null pointer check.
18102 //
18103 // "additionalTreesToBeEvaluatedBefore"
18104 // is the set of pending trees that have not yet been added to the statement list,
18105 // and which have been removed from verCurrentState.esStack[]
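//
// For illustration (hypothetical inlinee): a body that begins with
//     ldarg.0
//     ldfld   int32 SomeClass::someField
// dereferences 'this' before any branch or visible side effect, so the ldfld itself
// will fault on a null 'this' and no separate up-front null check is needed.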
18106
18107 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
18108                                                                   GenTreePtr  variableBeingDereferenced,
18109                                                                   InlArgInfo* inlArgInfo)
18110 {
18111     assert(compIsForInlining());
18112     assert(opts.OptEnabled(CLFLG_INLINING));
18113
18114     BasicBlock* block = compCurBB;
18115
18116     GenTreePtr stmt;
18117     GenTreePtr expr;
18118
18119     if (block != fgFirstBB)
18120     {
18121         return FALSE;
18122     }
18123
18124     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
18125     {
18126         return FALSE;
18127     }
18128
18129     if (additionalTreesToBeEvaluatedBefore &&
18130         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
18131     {
18132         return FALSE;
18133     }
18134
18135     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
18136     {
18137         expr = stmt->gtStmt.gtStmtExpr;
18138
18139         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
18140         {
18141             return FALSE;
18142         }
18143     }
18144
18145     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
18146     {
18147         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
18148         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
18149         {
18150             return FALSE;
18151         }
18152     }
18153
18154     return TRUE;
18155 }
18156
18157 //------------------------------------------------------------------------
18158 // impMarkInlineCandidate: determine if this call can be subsequently inlined
18159 //
18160 // Arguments:
18161 //    callNode -- call under scrutiny
18162 //    exactContextHnd -- context handle for inlining
18163 //    exactContextNeedsRuntimeLookup -- true if context required runtime lookup
18164 //    callInfo -- call info from VM
18165 //
18166 // Notes:
18167 //    If callNode is an inline candidate, this method sets the flag
18168 //    GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
18169 //    filled in the associated InlineCandidateInfo.
18170 //
18171 //    If callNode is not an inline candidate, and the reason is
18172 //    something that is inherent to the method being called, the
18173 //    method may be marked as "noinline" to short-circuit any
18174 //    future assessments of calls to this method.
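//
//    Marking only records the candidate; the inline attempt itself happens later
//    in the compilation (see fgInline), using the InlineCandidateInfo stored on
//    the call node here.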
18175
18176 void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
18177                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
18178                                       bool                   exactContextNeedsRuntimeLookup,
18179                                       CORINFO_CALL_INFO*     callInfo)
18180 {
18181     // Let the strategy know there's another call
18182     impInlineRoot()->m_inlineStrategy->NoteCall();
18183
18184     if (!opts.OptEnabled(CLFLG_INLINING))
18185     {
18186         /* XXX Mon 8/18/2008
18187          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
18188          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
18189          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
18190          * figure out why we did not set MAXOPT for this compile.
18191          */
18192         assert(!compIsForInlining());
18193         return;
18194     }
18195
18196     if (compIsForImportOnly())
18197     {
18198         // Don't bother creating the inline candidate during verification.
18199         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18200         // that leads to the creation of multiple instances of Compiler.
18201         return;
18202     }
18203
18204     GenTreeCall* call = callNode->AsCall();
18205     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18206
18207     // Don't inline if not optimizing root method
18208     if (opts.compDbgCode)
18209     {
18210         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18211         return;
18212     }
18213
18214     // Don't inline if inlining into root method is disabled.
18215     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18216     {
18217         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18218         return;
18219     }
18220
18221     // Inlining candidate determination needs to honor only IL tail prefix.
18222     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18223     if (call->IsTailPrefixedCall())
18224     {
18225         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18226         return;
18227     }
18228
18229     // Tail recursion elimination takes precedence over inlining.
18230     // TODO: We may want to do some of the additional checks from fgMorphCall
18231     // here to reduce the chance we don't inline a call that won't be optimized
18232     // as a fast tail call or turned into a loop.
18233     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18234     {
18235         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18236         return;
18237     }
18238
18239     if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
18240     {
18241         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18242         return;
18243     }
18244
18245     /* Ignore helper calls */
18246
18247     if (call->gtCallType == CT_HELPER)
18248     {
18249         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18250         return;
18251     }
18252
18253     /* Ignore indirect calls */
18254     if (call->gtCallType == CT_INDIRECT)
18255     {
18256         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
18257         return;
18258     }
18259
18260     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
18261      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
18262      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
18263
18264     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
18265     unsigned              methAttr;
18266
18267     // Reuse method flags from the original callInfo if possible
18268     if (fncHandle == callInfo->hMethod)
18269     {
18270         methAttr = callInfo->methodFlags;
18271     }
18272     else
18273     {
18274         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
18275     }
18276
18277 #ifdef DEBUG
18278     if (compStressCompile(STRESS_FORCE_INLINE, 0))
18279     {
18280         methAttr |= CORINFO_FLG_FORCEINLINE;
18281     }
18282 #endif
18283
18284     // Check for COMPlus_AggressiveInlining
18285     if (compDoAggressiveInlining)
18286     {
18287         methAttr |= CORINFO_FLG_FORCEINLINE;
18288     }
18289
18290     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
18291     {
18292         /* Don't bother inlining at call sites that are in a catch handler or filter region */
18293         if (bbInCatchHandlerILRange(compCurBB))
18294         {
18295 #ifdef DEBUG
18296             if (verbose)
18297             {
18298                 printf("\nWill not inline blocks that are in the catch handler region\n");
18299             }
18300
18301 #endif
18302
18303             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18304             return;
18305         }
18306
18307         if (bbInFilterILRange(compCurBB))
18308         {
18309 #ifdef DEBUG
18310             if (verbose)
18311             {
18312                 printf("\nWill not inline blocks that are in the filter region\n");
18313             }
18314 #endif
18315
18316             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
18317             return;
18318         }
18319     }
18320
18321     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
18322
18323     if (opts.compNeedSecurityCheck)
18324     {
18325         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
18326         return;
18327     }
18328
18329     /* Check if we tried to inline this method before */
18330
18331     if (methAttr & CORINFO_FLG_DONT_INLINE)
18332     {
18333         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
18334         return;
18335     }
18336
18337     /* Cannot inline synchronized methods */
18338
18339     if (methAttr & CORINFO_FLG_SYNCH)
18340     {
18341         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
18342         return;
18343     }
18344
18345     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
18346
18347     if (methAttr & CORINFO_FLG_SECURITYCHECK)
18348     {
18349         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
18350         return;
18351     }
18352
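    // The cheap call-site screens above have passed; now run the detailed callee-side
    // checks. On success, impCheckCanInline fills in the candidate info.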
18353     InlineCandidateInfo* inlineCandidateInfo = nullptr;
18354     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
18355
18356     if (inlineResult.IsFailure())
18357     {
18358         return;
18359     }
18360
18361     // The old value should be NULL
18362     assert(call->gtInlineCandidateInfo == nullptr);
18363
18364     // The new value should not be NULL.
18365     assert(inlineCandidateInfo != nullptr);
18366     inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
18367
18368     call->gtInlineCandidateInfo = inlineCandidateInfo;
18369
18370     // Mark the call node as inline candidate.
18371     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
18372
18373     // Let the strategy know there's another candidate.
18374     impInlineRoot()->m_inlineStrategy->NoteCandidate();
18375
18376     // Since we're not actually inlining yet, and this call site is
18377     // still just an inline candidate, there's nothing to report.
18378     inlineResult.SetReported();
18379 }
18380
18381 /******************************************************************************/
18382 // Returns true if the given intrinsic will be implemented by target-specific
18383 // instructions
18384
18385 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
18386 {
18387 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
18388     switch (intrinsicId)
18389     {
18390         // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
18391         //
18392         // TODO: Because the x86 backend only targets SSE for floating-point code,
18393         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
18394         //       implemented those intrinsics as x87 instructions). If this poses
18395         //       a CQ problem, it may be necessary to change the implementation of
18396         //       the helper calls to decrease call overhead or switch back to the
18397         //       x87 instructions. This is tracked by #7097.
18398         case CORINFO_INTRINSIC_Sqrt:
18399         case CORINFO_INTRINSIC_Abs:
18400             return true;
18401
18402         default:
18403             return false;
18404     }
18405 #elif defined(_TARGET_ARM64_)
18406     switch (intrinsicId)
18407     {
18408         case CORINFO_INTRINSIC_Sqrt:
18409         case CORINFO_INTRINSIC_Abs:
18410         case CORINFO_INTRINSIC_Round:
18411             return true;
18412
18413         default:
18414             return false;
18415     }
18416 #elif defined(_TARGET_ARM_)
18417     switch (intrinsicId)
18418     {
18419         case CORINFO_INTRINSIC_Sqrt:
18420         case CORINFO_INTRINSIC_Abs:
18421         case CORINFO_INTRINSIC_Round:
18422             return true;
18423
18424         default:
18425             return false;
18426     }
18427 #elif defined(_TARGET_X86_)
18428     switch (intrinsicId)
18429     {
18430         case CORINFO_INTRINSIC_Sin:
18431         case CORINFO_INTRINSIC_Cos:
18432         case CORINFO_INTRINSIC_Sqrt:
18433         case CORINFO_INTRINSIC_Abs:
18434         case CORINFO_INTRINSIC_Round:
18435             return true;
18436
18437         default:
18438             return false;
18439     }
18440 #else
18441     // TODO: This portion of logic is not implemented for other architectures.
18442     // The reason for returning true is that, on all other architectures, the only
18443     // intrinsics enabled are target intrinsics.
18444     return true;
18445 #endif //_TARGET_AMD64_
18446 }
18447
18448 /******************************************************************************/
18449 // Returns true if the given intrinsic will be implemented by calling System.Math
18450 // methods.
18451
18452 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
18453 {
18454     // Currently, if a math intrinsic is not implemented by target-specific
18455     // instructions, it will be implemented by a System.Math call. In the
18456     // future, if we move to implementing some of them with helper calls,
18457     // this predicate needs to be revisited.
18458     return !IsTargetIntrinsic(intrinsicId);
18459 }
18460
18461 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
18462 {
18463     switch (intrinsicId)
18464     {
18465         case CORINFO_INTRINSIC_Sin:
18466         case CORINFO_INTRINSIC_Sqrt:
18467         case CORINFO_INTRINSIC_Abs:
18468         case CORINFO_INTRINSIC_Cos:
18469         case CORINFO_INTRINSIC_Round:
18470         case CORINFO_INTRINSIC_Cosh:
18471         case CORINFO_INTRINSIC_Sinh:
18472         case CORINFO_INTRINSIC_Tan:
18473         case CORINFO_INTRINSIC_Tanh:
18474         case CORINFO_INTRINSIC_Asin:
18475         case CORINFO_INTRINSIC_Acos:
18476         case CORINFO_INTRINSIC_Atan:
18477         case CORINFO_INTRINSIC_Atan2:
18478         case CORINFO_INTRINSIC_Log10:
18479         case CORINFO_INTRINSIC_Pow:
18480         case CORINFO_INTRINSIC_Exp:
18481         case CORINFO_INTRINSIC_Ceiling:
18482         case CORINFO_INTRINSIC_Floor:
18483             return true;
18484         default:
18485             return false;
18486     }
18487 }
18488
18489 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
18490 {
18491     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
18492 }
18493
18494 //------------------------------------------------------------------------
18495 // impDevirtualizeCall: Attempt to change a virtual vtable or virtual stub call
18496 //   into a normal (direct) call
18497 //
18498 // Arguments:
18499 //     call -- the call node to examine/modify
18500 //     thisObj  -- the value of 'this' for the call
18501 //     callInfo -- [IN/OUT] info about the call from the VM
18502 //     exactContextHandle -- [OUT] updated context handle iff the call is devirtualized
18503 //
18504 // Notes:
18505 //     Virtual calls in IL will always "invoke" the base class method.
18506 //
18507 //     This transformation looks for evidence that the type of 'this'
18508 //     in the call is exactly known, is a final class or would invoke
18509 //     a final method, and if that and other safety checks pan out,
18510 //     modifies the call and the call info to create a direct call.
18511 //
18512 //     This transformation is initially done in the importer and not
18513 //     in some subsequent optimization pass because we want it to be
18514 //     upstream of inline candidate identification.
18515 //
18516 //     However, later phases may supply improved type information that
18517 //     can enable further devirtualization. We currently reinvoke this
18518 //     code after inlining, if the return value of the inlined call is
18519 //     the 'this obj' of a subsequent virtual call.
18520 //
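//     For example (illustrative IL only):
//
//         ldloc.0                                    // 'this', known here to be of sealed type Derived
//         callvirt   instance int32 Base::M()
//
//     Because Derived is final, the callvirt above can safely be rewritten as a
//     direct call to Derived::M, which also lets the call be considered as an
//     inline candidate.
//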
18521 void Compiler::impDevirtualizeCall(GenTreeCall*            call,
18522                                    GenTreePtr              thisObj,
18523                                    CORINFO_CALL_INFO*      callInfo,
18524                                    CORINFO_CONTEXT_HANDLE* exactContextHandle)
18525 {
18526     // This should be a virtual vtable or virtual stub call.
18527     assert(call->IsVirtual());
18528
18529     // Bail if not optimizing
18530     if (opts.MinOpts())
18531     {
18532         return;
18533     }
18534
18535     // Bail if debuggable codegen
18536     if (opts.compDbgCode)
18537     {
18538         return;
18539     }
18540
18541 #if defined(DEBUG)
18542     // Bail if devirt is disabled.
18543     if (JitConfig.JitEnableDevirtualization() == 0)
18544     {
18545         return;
18546     }
18547
18548     const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
18549 #endif // DEBUG
18550
18551     // Fetch information about the virtual method we're calling.
18552     CORINFO_METHOD_HANDLE baseMethod        = callInfo->hMethod;
18553     unsigned              baseMethodAttribs = callInfo->methodFlags;
18554
18555     if (baseMethodAttribs == 0)
18556     {
18557         // For late devirt we may not have method attributes, so fetch them.
18558         baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
18559     }
18560     else
18561     {
18562 #if defined(DEBUG)
18563         // Validate that callInfo has up to date method flags
18564         const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
18565         assert(freshBaseMethodAttribs == baseMethodAttribs);
18566 #endif // DEBUG
18567     }
18568
18569     // In R2R mode, we might see virtual stub calls to
18570     // non-virtuals, for instance when the non-virtual method
18571     // is in a different assembly but is called via CALLVIRT. For
18572     // version resilience we must allow for the fact that the method
18573     // might become virtual in some update.
18574     //
18575     // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
18576     // regular call+nullcheck upstream, so we won't reach this
18577     // point.
18578     if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
18579     {
18580         assert(call->IsVirtualStub());
18581         assert(opts.IsReadyToRun());
18582         JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
18583         return;
18584     }
18585
18586     // See what we know about the type of 'this' in the call.
18587     bool                 isExact      = false;
18588     bool                 objIsNonNull = false;
18589     CORINFO_CLASS_HANDLE objClass     = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
18590
18591     // Bail if we know nothing.
18592     if (objClass == nullptr)
18593     {
18594         JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
18595         return;
18596     }
18597
18598     // Fetch information about the class that introduced the virtual method.
18599     CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
18600     const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
18601
18602 #if !defined(FEATURE_CORECLR)
18603     // If the base class is not beforefieldinit then devirtualizing may
18604     // cause us to miss a base class init trigger. The spec says we don't
18605     // need a trigger for ref class callvirts, but the desktop runtime seems
18606     // to have one anyway. So defer.
18607     if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
18608     {
18609         JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
18610         return;
18611     }
18612 #endif // !FEATURE_CORECLR
18613
18614     // Is the call an interface call?
18615     const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
18616
18617     // If the objClass is sealed (final), then we may be able to devirtualize.
18618     const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
18619     const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
18620
18621 #if defined(DEBUG)
18622     const char* callKind       = isInterface ? "interface" : "virtual";
18623     const char* objClassNote   = "[?]";
18624     const char* objClassName   = "?objClass";
18625     const char* baseClassName  = "?baseClass";
18626     const char* baseMethodName = "?baseMethod";
18627
18628     if (verbose || doPrint)
18629     {
18630         objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
18631         objClassName   = info.compCompHnd->getClassName(objClass);
18632         baseClassName  = info.compCompHnd->getClassName(baseClass);
18633         baseMethodName = eeGetMethodName(baseMethod, nullptr);
18634
18635         if (verbose)
18636         {
18637             printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
18638                    "    class for 'this' is %s%s (attrib %08x)\n"
18639                    "    base method is %s::%s\n",
18640                    callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
18641         }
18642     }
18643 #endif // defined(DEBUG)
18644
18645     // Bail if obj class is an interface.
18646     // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
18647     //   IL_021d:  ldloc.0
18648     //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
18649     if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
18650     {
18651         JITDUMP("--- obj class is interface, sorry\n");
18652         return;
18653     }
18654
18655     if (isInterface)
18656     {
18657         assert(call->IsVirtualStub());
18658         JITDUMP("--- base class is interface\n");
18659     }
18660
18661     // Fetch the method that would actually be invoked given the known type of 'this'
18662     CORINFO_CONTEXT_HANDLE ownerType     = callInfo->contextHandle;
18663     CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
18664
18665     // If we failed to get a handle, we can't devirtualize.  This can
18666     // happen when prejitting, if the devirtualization crosses
18667     // servicing bubble boundaries.
18668     if (derivedMethod == nullptr)
18669     {
18670         JITDUMP("--- no derived method, sorry\n");
18671         return;
18672     }
18673
18674     // Fetch method attributes to see if method is marked final.
18675     const DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
18676     const bool  derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
18677
18678 #if defined(DEBUG)
18679     const char* derivedClassName  = "?derivedClass";
18680     const char* derivedMethodName = "?derivedMethod";
18681
18682     const char* note = "speculative";
18683     if (isExact)
18684     {
18685         note = "exact";
18686     }
18687     else if (objClassIsFinal)
18688     {
18689         note = "final class";
18690     }
18691     else if (derivedMethodIsFinal)
18692     {
18693         note = "final method";
18694     }
18695
18696     if (verbose || doPrint)
18697     {
18698         derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
18699         if (verbose)
18700         {
18701             printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
18702             gtDispTree(call);
18703         }
18704     }
18705 #endif // defined(DEBUG)
18706
18707     if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
18708     {
18709         // Type is not exact, and neither the class nor the method is final.
18710         //
18711         // We could speculatively devirtualize, but there's no
18712         // reason to believe the derived method is the one that
18713         // is likely to be invoked.
18714         //
18715         // If there's currently no further overriding (that is, at
18716         // the time of jitting, objClass has no subclasses that
18717         // override this method), then perhaps we'd be willing to
18718         // make a bet...?
18719         JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
18720         return;
18721     }
18722
18723     // For interface calls we must have an exact type or final class.
18724     if (isInterface && !isExact && !objClassIsFinal)
18725     {
18726         JITDUMP("    Class not final or exact for interface, no devirtualization\n");
18727         return;
18728     }
18729
18730     JITDUMP("    %s; can devirtualize\n", note);
18731
18732     // Make the updates.
18733     call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
18734     call->gtFlags &= ~GTF_CALL_VIRT_STUB;
18735     call->gtCallMethHnd = derivedMethod;
18736     call->gtCallType    = CT_USER_FUNC;
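    // The call is now an ordinary direct call to derivedMethod.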
18737
18738     // Virtual calls include an implicit null check, which we may
18739     // now need to make explicit.
18740     if (!objIsNonNull)
18741     {
18742         call->gtFlags |= GTF_CALL_NULLCHECK;
18743     }
18744
18745     // Clear the inline candidate info (may be non-null since
18746     // it's a union field used for other things by virtual
18747     // stubs)
18748     call->gtInlineCandidateInfo = nullptr;
18749
18750     // Fetch the class that introduced the derived method.
18751     //
18752     // Note this may not equal objClass, if there is a
18753     // final method that objClass inherits.
18754     CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
18755
18756 #ifdef FEATURE_READYTORUN_COMPILER
18757     if (opts.IsReadyToRun())
18758     {
18759         // For R2R, getCallInfo triggers bookkeeping on the zap
18760         // side so we need to call it here.
18761         //
18762         // First, cons up a suitable resolved token.
18763         CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
18764
18765         derivedResolvedToken.tokenScope   = info.compScopeHnd;
18766         derivedResolvedToken.tokenContext = callInfo->contextHandle;
18767         derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
18768         derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
18769         derivedResolvedToken.hClass       = derivedClass;
18770         derivedResolvedToken.hMethod      = derivedMethod;
18771
18772         // Look up the new call info.
18773         CORINFO_CALL_INFO derivedCallInfo;
18774         eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
18775
18776         // Update the call.
18777         call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
18778         call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
18779         call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
18780     }
18781 #endif // FEATURE_READYTORUN_COMPILER
18782
18783     // Need to update the call info too. This is fragile,
18784     // but hopefully the derived method conforms to
18785     // the base in most other ways.
18786     callInfo->hMethod       = derivedMethod;
18787     callInfo->methodFlags   = derivedMethodAttribs;
18788     callInfo->contextHandle = MAKE_METHODCONTEXT(derivedMethod);
18789
18790     // Update context handle.
18791     if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
18792     {
18793         *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
18794     }
18795
18796 #if defined(DEBUG)
18797     if (verbose)
18798     {
18799         printf("... after devirt...\n");
18800         gtDispTree(call);
18801     }
18802
18803     if (doPrint)
18804     {
18805         printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
18806                baseMethodName, derivedClassName, derivedMethodName, note);
18807     }
18808 #endif // defined(DEBUG)
18809 }
18810
18811 //------------------------------------------------------------------------
18812 // impAllocateToken: create a CORINFO_RESOLVED_TOKEN in jit-allocated memory and initialize it.
18813 //
18814 // Arguments:
18815 //    token - init value for the allocated token.
18816 //
18817 // Return Value:
18818 //    pointer to the token in jit-allocated memory.
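//
// Notes:
//    The incoming token is typically a stack-allocated local in the caller;
//    copying it into jit-allocated memory yields a pointer that remains valid
//    after the caller returns.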
18819 CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
18820 {
18821     CORINFO_RESOLVED_TOKEN* memory = (CORINFO_RESOLVED_TOKEN*)compGetMem(sizeof(token));
18822     *memory                        = token;
18823     return memory;
18824 }