New jit intrinsic support (#13815)
[platform/upstream/coreclr.git] / src / jit / importer.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
7 XX                                                                           XX
8 XX                           Importer                                        XX
9 XX                                                                           XX
10 XX   Imports the given method and converts it to semantic trees              XX
11 XX                                                                           XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
14 */
15
16 #include "jitpch.h"
17 #ifdef _MSC_VER
18 #pragma hdrstop
19 #endif
20
21 #include "corexcep.h"
22
23 #define Verify(cond, msg)                                                                                              \
24     do                                                                                                                 \
25     {                                                                                                                  \
26         if (!(cond))                                                                                                   \
27         {                                                                                                              \
28             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
29         }                                                                                                              \
30     } while (0)
31
32 #define VerifyOrReturn(cond, msg)                                                                                      \
33     do                                                                                                                 \
34     {                                                                                                                  \
35         if (!(cond))                                                                                                   \
36         {                                                                                                              \
37             verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
38             return;                                                                                                    \
39         }                                                                                                              \
40     } while (0)
41
42 #define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
43     do                                                                                                                 \
44     {                                                                                                                  \
45         if (speculative)                                                                                               \
46         {                                                                                                              \
47             if (!(cond))                                                                                               \
48             {                                                                                                          \
49                 return false;                                                                                          \
50             }                                                                                                          \
51         }                                                                                                              \
52         else                                                                                                           \
53         {                                                                                                              \
54             if (!(cond))                                                                                               \
55             {                                                                                                          \
56                 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                   \
57                 return false;                                                                                          \
58             }                                                                                                          \
59         }                                                                                                              \
60     } while (0)
61
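// Illustrative note (not part of the original source): these macros are how
// importer code reports unverifiable IL. A minimal usage sketch, assuming a
// condition already computed by the caller:
//
//     // record/raise a verification failure but keep importing
//     Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
//
//     // same check, but bail out of the current (void) function on failure
//     VerifyOrReturn(condition, "descriptive failure message");
//
// The "speculative" variant simply returns false instead of raising, for
// callers that are only probing whether the code would verify.
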
62 /*****************************************************************************/
63
64 void Compiler::impInit()
65 {
66
67 #ifdef DEBUG
68     impTreeList        = nullptr;
69     impTreeLast        = nullptr;
70     impInlinedCodeSize = 0;
71 #endif
72 }
73
74 /*****************************************************************************
75  *
76  *  Pushes the given tree on the stack.
77  */
78
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
80 {
81     /* Check for overflow. If inlining, we may be using a bigger stack */
82
83     if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84         (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
85     {
86         BADCODE("stack overflow");
87     }
88
89 #ifdef DEBUG
90     // If we are pushing a struct, make certain we know the precise type!
91     if (tree->TypeGet() == TYP_STRUCT)
92     {
93         assert(ti.IsType(TI_STRUCT));
94         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95         assert(clsHnd != NO_CLASS_HANDLE);
96     }
97
98     if (tiVerificationNeeded && !ti.IsDead())
99     {
100         assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
101
102         // Check that the ti type is consistent with the tree type.
103         //
104
105         // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106         // In the verification type system, we always transform "native int" to "TI_INT".
107         // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108         // attempts to do that have proved too difficult.  Instead, we'll assume that in checks like this,
109         // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110         // method used in the last disjunct allows exactly this mismatch.
111         assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112                ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113                ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114                ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115                typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116                                                       NormaliseForStack(typeInfo(tree->TypeGet()))));
117
118         // If it is a struct type, make certain we normalized the primitive types
119         assert(!ti.IsType(TI_STRUCT) ||
120                info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
121     }
122
123 #if VERBOSE_VERIFY
124     if (VERBOSE && tiVerificationNeeded)
125     {
126         printf("\n");
127         printf(TI_DUMP_PADDING);
128         printf("About to push to stack: ");
129         ti.Dump();
130     }
131 #endif // VERBOSE_VERIFY
132
133 #endif // DEBUG
134
135     verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136     verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;
137
138     if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
139     {
140         compLongUsed = true;
141     }
142     else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
143     {
144         compFloatingPointUsed = true;
145     }
146 }
147
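// Illustrative sketch (not part of the original source): opcode handlers pair
// each evaluated tree with its verification type when pushing. For example, a
// ldc.i4-style handler might do something along these lines (names hedged):
//
//     impPushOnStack(gtNewIconNode(iconValue), typeInfo(TI_INT));
//
// The typeInfo argument only matters when verification is needed; the
// debug-only checks above assert that it is consistent with the tree's type.
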
148 inline void Compiler::impPushNullObjRefOnStack()
149 {
150     impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
151 }
152
153 // This method gets called when we run into unverifiable code
154 // (and we are verifying the method)
155
156 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
157                                                           DEBUGARG(unsigned line))
158 {
159     // Remember that the code is not verifiable
160     // Note that the method may yet pass canSkipMethodVerification(),
161     // and so the presence of unverifiable code may not be an issue.
162     tiIsVerifiableCode = FALSE;
163
164 #ifdef DEBUG
165     const char* tail = strrchr(file, '\\');
166     if (tail)
167     {
168         file = tail + 1;
169     }
170
171     if (JitConfig.JitBreakOnUnsafeCode())
172     {
173         assert(!"Unsafe code detected");
174     }
175 #endif
176
177     JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
178             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
179
180     if (verNeedsVerification() || compIsForImportOnly())
181     {
182         JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
183                 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
184         verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
185     }
186 }
187
188 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
189                                                                     DEBUGARG(unsigned line))
190 {
191     JITLOG((LL_ERROR, "Verification failure:  %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
192             msg, info.compFullName, impCurOpcName, impCurOpcOffs));
193
194 #ifdef DEBUG
195     //    BreakIfDebuggerPresent();
196     if (getBreakOnBadCode())
197     {
198         assert(!"Typechecking error");
199     }
200 #endif
201
202     RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
203     UNREACHABLE();
204 }
205
206 // Helper function that tells us whether the IL instruction at the given address
207 // consumes an address from the top of the stack. We use it to avoid unnecessarily
208 // marking locals as address-taken (lvAddrTaken).
209 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
210 {
211     assert(!compIsForInlining());
212
213     OPCODE opcode;
214
215     opcode = (OPCODE)getU1LittleEndian(codeAddr);
216
217     switch (opcode)
218     {
219         // case CEE_LDFLDA: We're taking this one out because if you have a sequence
220         // like
221         //
222         //          ldloca.0
223         //          ldflda whatever
224         //
225         // of a primitive-like struct, you end up after morphing with the address of a local
226         // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
227         // for structs that contain other structs, which isn't a case we handle very
228         // well right now for other reasons.
229
230         case CEE_LDFLD:
231         {
232             // We won't collapse small fields. This is probably not the right place to have this
233             // check, but we're only using the function for this purpose, and it is easy to factor
234             // out if we need to do so.
235
236             CORINFO_RESOLVED_TOKEN resolvedToken;
237             impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
238
239             CORINFO_CLASS_HANDLE clsHnd;
240             var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
241
242             // Preserve 'small' int types
243             if (lclTyp > TYP_INT)
244             {
245                 lclTyp = genActualType(lclTyp);
246             }
247
248             if (varTypeIsSmall(lclTyp))
249             {
250                 return false;
251             }
252
253             return true;
254         }
255         default:
256             break;
257     }
258
259     return false;
260 }
261
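// Illustrative example (not part of the original source): for an IL sequence
// such as
//
//          ldloca.0
//          ldfld    int32 SomeStruct::value     // hypothetical non-small field
//
// impILConsumesAddr reports true at the ldfld, so the importer can avoid
// marking local 0 as address-taken; for a small (byte/short) field it reports
// false and the address is treated as escaping.
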
262 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
263 {
264     pResolvedToken->tokenContext = impTokenLookupContextHandle;
265     pResolvedToken->tokenScope   = info.compScopeHnd;
266     pResolvedToken->token        = getU4LittleEndian(addr);
267     pResolvedToken->tokenType    = kind;
268
269     if (!tiVerificationNeeded)
270     {
271         info.compCompHnd->resolveToken(pResolvedToken);
272     }
273     else
274     {
275         Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
276     }
277 }
278
279 /*****************************************************************************
280  *
281  *  Pop one tree from the stack.
282  */
283
284 StackEntry Compiler::impPopStack()
285 {
286     if (verCurrentState.esStackDepth == 0)
287     {
288         BADCODE("stack underflow");
289     }
290
291 #ifdef DEBUG
292 #if VERBOSE_VERIFY
293     if (VERBOSE && tiVerificationNeeded)
294     {
295         JITDUMP("\n");
296         printf(TI_DUMP_PADDING);
297         printf("About to pop from the stack: ");
298         const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
299         ti.Dump();
300     }
301 #endif // VERBOSE_VERIFY
302 #endif // DEBUG
303
304     return verCurrentState.esStack[--verCurrentState.esStackDepth];
305 }
306
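// Illustrative sketch (not part of the original source): a typical binary
// opcode handler pops its operands in last-pushed-first order, e.g.
//
//     GenTreePtr op2 = impPopStack().val; // second operand (pushed last)
//     GenTreePtr op1 = impPopStack().val; // first operand
//
// and later pushes the resulting tree back with impPushOnStack.
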
307 /*****************************************************************************
308  *
309  *  Peek at the n'th (0-based) tree from the top of the stack.
310  */
311
312 StackEntry& Compiler::impStackTop(unsigned n)
313 {
314     if (verCurrentState.esStackDepth <= n)
315     {
316         BADCODE("stack underflow");
317     }
318
319     return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
320 }
321
322 unsigned Compiler::impStackHeight()
323 {
324     return verCurrentState.esStackDepth;
325 }
326
327 /*****************************************************************************
328  *  Some trees are spilled specially. When unspilling them or making a
329  *  copy, they need special handling. This function enumerates the
330  *  operators possible after spilling.
331  */
332
333 #ifdef DEBUG // only used in asserts
334 static bool impValidSpilledStackEntry(GenTreePtr tree)
335 {
336     if (tree->gtOper == GT_LCL_VAR)
337     {
338         return true;
339     }
340
341     if (tree->OperIsConst())
342     {
343         return true;
344     }
345
346     return false;
347 }
348 #endif
349
350 /*****************************************************************************
351  *
352  *  The following logic is used to save/restore stack contents.
353  *  If 'copy' is true, then we make a copy of the trees on the stack. These
354  *  have to all be cloneable/spilled values.
355  */
356
357 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
358 {
359     savePtr->ssDepth = verCurrentState.esStackDepth;
360
361     if (verCurrentState.esStackDepth)
362     {
363         savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
364         size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
365
366         if (copy)
367         {
368             StackEntry* table = savePtr->ssTrees;
369
370             /* Make a fresh copy of all the stack entries */
371
372             for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
373             {
374                 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
375                 GenTreePtr tree   = verCurrentState.esStack[level].val;
376
377                 assert(impValidSpilledStackEntry(tree));
378
379                 switch (tree->gtOper)
380                 {
381                     case GT_CNS_INT:
382                     case GT_CNS_LNG:
383                     case GT_CNS_DBL:
384                     case GT_CNS_STR:
385                     case GT_LCL_VAR:
386                         table->val = gtCloneExpr(tree);
387                         break;
388
389                     default:
390                         assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
391                         break;
392                 }
393             }
394         }
395         else
396         {
397             memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
398         }
399     }
400 }
401
402 void Compiler::impRestoreStackState(SavedStack* savePtr)
403 {
404     verCurrentState.esStackDepth = savePtr->ssDepth;
405
406     if (verCurrentState.esStackDepth)
407     {
408         memcpy(verCurrentState.esStack, savePtr->ssTrees,
409                verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
410     }
411 }
412
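// Illustrative sketch (not part of the original source): these two routines
// bracket importer code that temporarily disturbs the stack, roughly:
//
//     SavedStack blockState;
//     impSaveStackState(&blockState, false); // remember the current contents
//     ...                                    // work that may modify the stack
//     impRestoreStackState(&blockState);     // put the contents back
//
// With copy == true the entries must satisfy impValidSpilledStackEntry().
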
413 /*****************************************************************************
414  *
415  *  Get the tree list started for a new basic block.
416  */
417 inline void Compiler::impBeginTreeList()
418 {
419     assert(impTreeList == nullptr && impTreeLast == nullptr);
420
421     impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
422 }
423
424 /*****************************************************************************
425  *
426  *  Store the given start and end stmt in the given basic block. This is
427  *  mostly called by impEndTreeList(BasicBlock *block). It is called
428  *  directly only for handling CEE_LEAVEs out of finally-protected try's.
429  */
430
431 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
432 {
433     assert(firstStmt->gtOper == GT_STMT);
434     assert(lastStmt->gtOper == GT_STMT);
435
436     /* Make the list circular, so that we can easily walk it backwards */
437
438     firstStmt->gtPrev = lastStmt;
439
440     /* Store the tree list in the basic block */
441
442     block->bbTreeList = firstStmt;
443
444     /* The block should not already be marked as imported */
445     assert((block->bbFlags & BBF_IMPORTED) == 0);
446
447     block->bbFlags |= BBF_IMPORTED;
448 }
449
450 /*****************************************************************************
451  *
452  *  Store the current tree list in the given basic block.
453  */
454
455 inline void Compiler::impEndTreeList(BasicBlock* block)
456 {
457     assert(impTreeList->gtOper == GT_BEG_STMTS);
458
459     GenTreePtr firstTree = impTreeList->gtNext;
460
461     if (!firstTree)
462     {
463         /* The block should not already be marked as imported */
464         assert((block->bbFlags & BBF_IMPORTED) == 0);
465
466         // Empty block. Just mark it as imported
467         block->bbFlags |= BBF_IMPORTED;
468     }
469     else
470     {
471         // Ignore the GT_BEG_STMTS
472         assert(firstTree->gtPrev == impTreeList);
473
474         impEndTreeList(block, firstTree, impTreeLast);
475     }
476
477 #ifdef DEBUG
478     if (impLastILoffsStmt != nullptr)
479     {
480         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
481         impLastILoffsStmt                          = nullptr;
482     }
483
484     impTreeList = impTreeLast = nullptr;
485 #endif
486 }
487
488 /*****************************************************************************
489  *
490  *  Check that storing the given tree doesn't mess up the semantic order. Note
491  *  that this has only limited value as we can only check [0..chkLevel).
492  */
493
494 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
495 {
496 #ifndef DEBUG
497     return;
498 #else
499     assert(stmt->gtOper == GT_STMT);
500
501     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
502     {
503         chkLevel = verCurrentState.esStackDepth;
504     }
505
506     if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
507     {
508         return;
509     }
510
511     GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
512
513     // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack
514
515     if (tree->gtFlags & GTF_CALL)
516     {
517         for (unsigned level = 0; level < chkLevel; level++)
518         {
519             assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
520         }
521     }
522
523     if (tree->gtOper == GT_ASG)
524     {
525         // For an assignment to a local variable, all references of that
526         // variable have to be spilled. If it is aliased, all calls and
527         // indirect accesses have to be spilled
528
529         if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
530         {
531             unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
532             for (unsigned level = 0; level < chkLevel; level++)
533             {
534                 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
535                 assert(!lvaTable[lclNum].lvAddrExposed ||
536                        (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
537             }
538         }
539
540         // If the access may be to global memory, all side effects have to be spilled.
541
542         else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
543         {
544             for (unsigned level = 0; level < chkLevel; level++)
545             {
546                 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
547             }
548         }
549     }
550 #endif
551 }
552
553 /*****************************************************************************
554  *
555  *  Append the given GT_STMT node to the current block's tree list.
556  *  [0..chkLevel) is the portion of the stack which we will check for
557  *    interference with stmt and spill if needed.
558  */
559
560 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
561 {
562     assert(stmt->gtOper == GT_STMT);
563     noway_assert(impTreeLast != nullptr);
564
565     /* If the statement being appended has any side-effects, check the stack
566        to see if anything needs to be spilled to preserve correct ordering. */
567
568     GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
569     unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
570
571     // Assignments to (unaliased) locals don't count as side-effects, as
572     // we handle them specially using impSpillLclRefs(). Temp locals should
573     // be fine too.
574
575     if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
576         !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
577     {
578         unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
579         assert(flags == (op2Flags | GTF_ASG));
580         flags = op2Flags;
581     }
582
583     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
584     {
585         chkLevel = verCurrentState.esStackDepth;
586     }
587
588     if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
589     {
590         assert(chkLevel <= verCurrentState.esStackDepth);
591
592         if (flags)
593         {
594             // If there is a call, we have to spill global refs
595             bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
596
597             if (expr->gtOper == GT_ASG)
598             {
599                 GenTree* lhs = expr->gtGetOp1();
600                 // If we are assigning to a global ref, we have to spill global refs on stack.
601                 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
602                 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
603                 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
604                 if (!expr->OperIsBlkOp())
605                 {
606                     // If we are assigning to a global ref, we have to spill global refs on stack
607                     if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
608                     {
609                         spillGlobEffects = true;
610                     }
611                 }
612                 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
613                          ((lhs->OperGet() == GT_LCL_VAR) &&
614                           (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
615                 {
616                     spillGlobEffects = true;
617                 }
618             }
619
620             impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
621         }
622         else
623         {
624             impSpillSpecialSideEff();
625         }
626     }
627
628     impAppendStmtCheck(stmt, chkLevel);
629
630     /* Point 'prev' at the previous node, so that we can walk backwards */
631
632     stmt->gtPrev = impTreeLast;
633
634     /* Append the expression statement to the list */
635
636     impTreeLast->gtNext = stmt;
637     impTreeLast         = stmt;
638
639 #ifdef FEATURE_SIMD
640     impMarkContiguousSIMDFieldAssignments(stmt);
641 #endif
642
643     /* Once we set impCurStmtOffs in an appended tree, we are ready to
644        report the following offsets. So reset impCurStmtOffs */
645
646     if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
647     {
648         impCurStmtOffsSet(BAD_IL_OFFSET);
649     }
650
651 #ifdef DEBUG
652     if (impLastILoffsStmt == nullptr)
653     {
654         impLastILoffsStmt = stmt;
655     }
656
657     if (verbose)
658     {
659         printf("\n\n");
660         gtDispTree(stmt);
661     }
662 #endif
663 }
664
665 /*****************************************************************************
666  *
667  *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
668  */
669
670 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
671 {
672     assert(stmt->gtOper == GT_STMT);
673     assert(stmtBefore->gtOper == GT_STMT);
674
675     GenTreePtr stmtPrev = stmtBefore->gtPrev;
676     stmt->gtPrev        = stmtPrev;
677     stmt->gtNext        = stmtBefore;
678     stmtPrev->gtNext    = stmt;
679     stmtBefore->gtPrev  = stmt;
680 }
681
682 /*****************************************************************************
683  *
684  *  Append the given expression tree to the current block's tree list.
685  *  Return the newly created statement.
686  */
687
688 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
689 {
690     assert(tree);
691
692     /* Allocate an 'expression statement' node */
693
694     GenTreePtr expr = gtNewStmt(tree, offset);
695
696     /* Append the statement to the current block's stmt list */
697
698     impAppendStmt(expr, chkLevel);
699
700     return expr;
701 }
702
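// Illustrative sketch (not part of the original source): most importer code
// appends side-effecting trees through impAppendTree, letting impAppendStmt
// decide what (if anything) on the evaluation stack must be spilled first:
//
//     impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs); // 'call' stands for the tree being appended
//
// CHECK_SPILL_ALL checks the whole stack; CHECK_SPILL_NONE skips the check.
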
703 /*****************************************************************************
704  *
705  *  Insert the given expression tree before GT_STMT "stmtBefore"
706  */
707
708 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
709 {
710     assert(stmtBefore->gtOper == GT_STMT);
711
712     /* Allocate an 'expression statement' node */
713
714     GenTreePtr expr = gtNewStmt(tree, offset);
715
716     /* Append the statement to the current block's stmt list */
717
718     impInsertStmtBefore(expr, stmtBefore);
719 }
720
721 /*****************************************************************************
722  *
723  *  Append an assignment of the given value to a temp to the current tree list.
724  *  curLevel is the stack level for which the spill to the temp is being done.
725  */
726
727 void Compiler::impAssignTempGen(unsigned    tmp,
728                                 GenTreePtr  val,
729                                 unsigned    curLevel,
730                                 GenTreePtr* pAfterStmt, /* = NULL */
731                                 IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
732                                 BasicBlock* block       /* = NULL */
733                                 )
734 {
735     GenTreePtr asg = gtNewTempAssign(tmp, val);
736
737     if (!asg->IsNothingNode())
738     {
739         if (pAfterStmt)
740         {
741             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
742             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
743         }
744         else
745         {
746             impAppendTree(asg, curLevel, impCurStmtOffs);
747         }
748     }
749 }
750
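// Illustrative sketch (not part of the original source): spilling a stack
// value to a fresh temp typically pairs lvaGrabTemp with impAssignTempGen
// (names below are hypothetical):
//
//     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("spill example"));
//     impAssignTempGen(tmpNum, val, (unsigned)CHECK_SPILL_ALL);
//     GenTreePtr use = gtNewLclvNode(tmpNum, genActualType(val->TypeGet()));
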
751 /*****************************************************************************
752  * same as above, but handle the valueclass case too
753  */
754
755 void Compiler::impAssignTempGen(unsigned             tmpNum,
756                                 GenTreePtr           val,
757                                 CORINFO_CLASS_HANDLE structType,
758                                 unsigned             curLevel,
759                                 GenTreePtr*          pAfterStmt, /* = NULL */
760                                 IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
761                                 BasicBlock*          block       /* = NULL */
762                                 )
763 {
764     GenTreePtr asg;
765
766     if (varTypeIsStruct(val))
767     {
768         assert(tmpNum < lvaCount);
769         assert(structType != NO_CLASS_HANDLE);
770
771         // If the method is non-verifiable the assert does not hold, so at least
772         // ignore it when verification is turned on, since any block that tries
773         // to use the temp would have failed verification.
774         var_types varType = lvaTable[tmpNum].lvType;
775         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
776         lvaSetStruct(tmpNum, structType, false);
777
778         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
779         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
780         // that has been passed in for the value being assigned to the temp, in which case we
781         // need to set 'val' to that same type.
782         // Note also that if we always normalized the types of any node that might be a struct
783         // type, this would not be necessary - but that requires additional JIT/EE interface
784         // calls that may not actually be required - e.g. if we only access a field of a struct.
785
786         val->gtType = lvaTable[tmpNum].lvType;
787
788         GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
789         asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
790     }
791     else
792     {
793         asg = gtNewTempAssign(tmpNum, val);
794     }
795
796     if (!asg->IsNothingNode())
797     {
798         if (pAfterStmt)
799         {
800             GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
801             *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
802         }
803         else
804         {
805             impAppendTree(asg, curLevel, impCurStmtOffs);
806         }
807     }
808 }
809
810 /*****************************************************************************
811  *
812  *  Pop the given number of values from the stack and return a list node with
813  *  their values.
814  *  The 'prefixTree' argument may optionally contain an argument
815  *  list that is prepended to the list returned from this function.
816  *
817  *  The notion of prepended is a bit misleading in that the list is backwards
818  *  from the way I would expect: The first element popped is at the end of
819  *  the returned list, and prefixTree is 'before' that, meaning closer to
820  *  the end of the list.  To get to prefixTree, you have to walk to the
821  *  end of the list.
822  *
823  *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
824  *  such we reverse its meaning such that returnValue has a reversed
825  *  prefixTree at the head of the list.
826  */
827
828 GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
829 {
830     assert(sig == nullptr || count == sig->numArgs);
831
832     CORINFO_CLASS_HANDLE structType;
833     GenTreeArgList*      treeList;
834
835     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
836     {
837         treeList = nullptr;
838     }
839     else
840     { // ARG_ORDER_L2R
841         treeList = prefixTree;
842     }
843
844     while (count--)
845     {
846         StackEntry se   = impPopStack();
847         typeInfo   ti   = se.seTypeInfo;
848         GenTreePtr temp = se.val;
849
850         if (varTypeIsStruct(temp))
851         {
852             // Morph trees that aren't already OBJs or MKREFANY to be OBJs
853             assert(ti.IsType(TI_STRUCT));
854             structType = ti.GetClassHandleForValueClass();
855             temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
856         }
857
858         /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
859         treeList = gtNewListNode(temp, treeList);
860     }
861
862     if (sig != nullptr)
863     {
864         if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
865             sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
866         {
867             // Make sure that all valuetypes (including enums) that we push are loaded.
868             // This is to guarantee that if a GC is triggered from the prestub of this method,
869             // all valuetypes in the method signature are already loaded.
870             // We need to be able to find the size of the valuetypes, but we cannot
871             // do a class-load from within GC.
872             info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
873         }
874
875         CORINFO_ARG_LIST_HANDLE argLst = sig->args;
876         CORINFO_CLASS_HANDLE    argClass;
877         CORINFO_CLASS_HANDLE    argRealClass;
878         GenTreeArgList*         args;
879         unsigned                sigSize;
880
881         for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
882         {
883             PREFIX_ASSUME(args != nullptr);
884
885             CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
886
887             // insert implied casts (from float to double or double to float)
888
889             if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
890             {
891                 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
892             }
893             else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
894             {
895                 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
896             }
897
898             // insert any widening or narrowing casts for backwards compatibility
899
900             args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
901
902             if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
903                 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
904             {
905                 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
906                 // but it stopped working in Whidbey when we started passing simple valuetypes as their underlying
907                 // primitive types.
908                 // We will try to adjust for this case here to avoid breaking customers code (see VSW 485789 for
909                 // details).
910                 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
911                 {
912                     args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
913                 }
914
915                 // Make sure that all valuetypes (including enums) that we push are loaded.
916                 // This is to guarantee that if a GC is triggered from the prestub of this method,
917                 // all valuetypes in the method signature are already loaded.
918                 // We need to be able to find the size of the valuetypes, but we cannot
919                 // do a class-load from within GC.
920                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
921             }
922
923             argLst = info.compCompHnd->getArgNext(argLst);
924         }
925     }
926
927     if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
928     {
929         // Prepend the prefixTree
930
931         // Simple in-place reversal to place treeList
932         // at the end of a reversed prefixTree
933         while (prefixTree != nullptr)
934         {
935             GenTreeArgList* next = prefixTree->Rest();
936             prefixTree->Rest()   = treeList;
937             treeList             = prefixTree;
938             prefixTree           = next;
939         }
940     }
941     return treeList;
942 }
943
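// Illustrative sketch (not part of the original source): call importing pops
// the declared number of arguments against the callee signature, e.g.
//
//     GenTreeArgList* args = impPopList(sig->numArgs, sig);
//
// As the comment above notes, the first element popped ends up at the end of
// the returned list.
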
944 /*****************************************************************************
945  *
946  *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
947  *  The first "skipReverseCount" items are not reversed.
948  */
949
950 GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
951
952 {
953     assert(skipReverseCount <= count);
954
955     GenTreeArgList* list = impPopList(count, sig);
956
957     // reverse the list
958     if (list == nullptr || skipReverseCount == count)
959     {
960         return list;
961     }
962
963     GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
964     GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
965
966     if (skipReverseCount == 0)
967     {
968         ptr = list;
969     }
970     else
971     {
972         lastSkipNode = list;
973         // Get to the first node that needs to be reversed
974         for (unsigned i = 0; i < skipReverseCount - 1; i++)
975         {
976             lastSkipNode = lastSkipNode->Rest();
977         }
978
979         PREFIX_ASSUME(lastSkipNode != nullptr);
980         ptr = lastSkipNode->Rest();
981     }
982
983     GenTreeArgList* reversedList = nullptr;
984
985     do
986     {
987         GenTreeArgList* tmp = ptr->Rest();
988         ptr->Rest()         = reversedList;
989         reversedList        = ptr;
990         ptr                 = tmp;
991     } while (ptr != nullptr);
992
993     if (skipReverseCount)
994     {
995         lastSkipNode->Rest() = reversedList;
996         return list;
997     }
998     else
999     {
1000         return reversedList;
1001     }
1002 }
1003
1004 /*****************************************************************************
1005    Assign (copy) the structure from 'src' to 'dest'.  The structure is a value
1006    class of type 'structHnd'.  It returns the tree that should be appended to the
1007    statement list that represents the assignment.
1008    Temp assignments may be appended to impTreeList if spilling is necessary.
1009    curLevel is the stack level for which a spill may be being done.
1010  */
1011
1012 GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
1013                                      GenTreePtr           src,
1014                                      CORINFO_CLASS_HANDLE structHnd,
1015                                      unsigned             curLevel,
1016                                      GenTreePtr*          pAfterStmt, /* = NULL */
1017                                      BasicBlock*          block       /* = NULL */
1018                                      )
1019 {
1020     assert(varTypeIsStruct(dest));
1021
1022     while (dest->gtOper == GT_COMMA)
1023     {
1024         assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1025
1026         // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1027         if (pAfterStmt)
1028         {
1029             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1030         }
1031         else
1032         {
1033             impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1034         }
1035
1036         // set dest to the second thing
1037         dest = dest->gtOp.gtOp2;
1038     }
1039
1040     assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1041            dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1042
1043     if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1044         src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1045     {
1046         // Make this a NOP
1047         return gtNewNothingNode();
1048     }
1049
1050     // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1051     // or re-creating a Blk node if it is.
1052     GenTreePtr destAddr;
1053
1054     if (dest->gtOper == GT_IND || dest->OperIsBlk())
1055     {
1056         destAddr = dest->gtOp.gtOp1;
1057     }
1058     else
1059     {
1060         destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1061     }
1062
1063     return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1064 }
1065
1066 /*****************************************************************************/
1067
1068 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
1069                                         GenTreePtr           src,
1070                                         CORINFO_CLASS_HANDLE structHnd,
1071                                         unsigned             curLevel,
1072                                         GenTreePtr*          pAfterStmt, /* = NULL */
1073                                         BasicBlock*          block       /* = NULL */
1074                                         )
1075 {
1076     var_types  destType;
1077     GenTreePtr dest      = nullptr;
1078     unsigned   destFlags = 0;
1079
1080 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1081     assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1082     // TODO-ARM-BUG: Does ARM need this?
1083     // TODO-ARM64-BUG: Does ARM64 need this?
1084     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1085            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1086            src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1087            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1088 #else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1089     assert(varTypeIsStruct(src));
1090
1091     assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1092            src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1093            src->gtOper == GT_COMMA ||
1094            (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1095 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1096     if (destAddr->OperGet() == GT_ADDR)
1097     {
1098         GenTree* destNode = destAddr->gtGetOp1();
1099         // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1100         // will be morphed, don't insert an OBJ(ADDR).
1101         if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1102 #ifndef LEGACY_BACKEND
1103             || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1104 #endif // !LEGACY_BACKEND
1105                 )
1106         {
1107             dest = destNode;
1108         }
1109         destType = destNode->TypeGet();
1110     }
1111     else
1112     {
1113         destType = src->TypeGet();
1114     }
1115
1116     var_types asgType = src->TypeGet();
1117
1118     if (src->gtOper == GT_CALL)
1119     {
1120         if (src->AsCall()->TreatAsHasRetBufArg(this))
1121         {
1122             // Case of call returning a struct via hidden retbuf arg
1123
1124             // insert the return value buffer into the argument list as first byref parameter
1125             src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1126
1127             // now returns void, not a struct
1128             src->gtType = TYP_VOID;
1129
1130             // return the morphed call node
1131             return src;
1132         }
1133         else
1134         {
1135             // Case of call returning a struct in one or more registers.
1136
1137             var_types returnType = (var_types)src->gtCall.gtReturnType;
1138
1139             // We won't use a return buffer, so change src->gtType to 'returnType'
1140             src->gtType = genActualType(returnType);
1141
1142             // First we try to change this to "LclVar/LclFld = call"
1143             //
1144             if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1145             {
1146                 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1147                 // That is, the IR will be of the form lclVar = call for multi-reg return
1148                 //
1149                 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1150                 if (src->AsCall()->HasMultiRegRetVal())
1151                 {
1152                     // Mark the struct LclVar as used in a MultiReg return context
1153                     //  which currently makes it non promotable.
1154                     // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1155                     // handle multireg returns.
1156                     lcl->gtFlags |= GTF_DONT_CSE;
1157                     lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1158                 }
1159                 else // The call result is not a multireg return
1160                 {
1161                     // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1162                     lcl->ChangeOper(GT_LCL_FLD);
1163                     fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1164                 }
1165
1166                 lcl->gtType = src->gtType;
1167                 asgType     = src->gtType;
1168                 dest        = lcl;
1169
1170 #if defined(_TARGET_ARM_)
1171                 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1172                 // but that method has not been updated to include ARM.
1173                 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1174                 lcl->gtFlags |= GTF_DONT_CSE;
1175 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1176                 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1177                 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1178
1179                 // Make the struct non promotable. The eightbytes could contain multiple fields.
1180                 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1181                 // handle multireg returns.
1182                 // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
1183                 // non-multireg returns.
1184                 lcl->gtFlags |= GTF_DONT_CSE;
1185                 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1186 #endif
1187             }
1188             else // we don't have a GT_ADDR of a GT_LCL_VAR
1189             {
1190                 // !!! The destination could be on the stack. !!!
1191                 // This flag will let us choose the correct write barrier.
1192                 asgType   = returnType;
1193                 destFlags = GTF_IND_TGTANYWHERE;
1194             }
1195         }
1196     }
1197     else if (src->gtOper == GT_RET_EXPR)
1198     {
1199         GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1200         noway_assert(call->gtOper == GT_CALL);
1201
1202         if (call->HasRetBufArg())
1203         {
1204             // insert the return value buffer into the argument list as first byref parameter
1205             call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1206
1207             // now returns void, not a struct
1208             src->gtType  = TYP_VOID;
1209             call->gtType = TYP_VOID;
1210
1211             // We already have appended the write to 'dest' GT_CALL's args
1212             // So now we just return an empty node (pruning the GT_RET_EXPR)
1213             return src;
1214         }
1215         else
1216         {
1217             // Case of inline method returning a struct in one or more registers.
1218             //
1219             var_types returnType = (var_types)call->gtReturnType;
1220
1221             // We won't need a return buffer
1222             asgType      = returnType;
1223             src->gtType  = genActualType(returnType);
1224             call->gtType = src->gtType;
1225
1226             // If we've changed the type, and it no longer matches a local destination,
1227             // we must use an indirection.
1228             if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1229             {
1230                 dest = nullptr;
1231             }
1232
1233             // !!! The destination could be on the stack. !!!
1234             // This flag will let us choose the correct write barrier.
1235             destFlags = GTF_IND_TGTANYWHERE;
1236         }
1237     }
1238     else if (src->OperIsBlk())
1239     {
1240         asgType = impNormStructType(structHnd);
1241         if (src->gtOper == GT_OBJ)
1242         {
1243             assert(src->gtObj.gtClass == structHnd);
1244         }
1245     }
1246     else if (src->gtOper == GT_INDEX)
1247     {
1248         asgType = impNormStructType(structHnd);
1249         assert(src->gtIndex.gtStructElemClass == structHnd);
1250     }
1251     else if (src->gtOper == GT_MKREFANY)
1252     {
1253         // Since we are assigning the result of a GT_MKREFANY,
1254         // "destAddr" must point to a refany.
1255
1256         GenTreePtr destAddrClone;
1257         destAddr =
1258             impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1259
1260         assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1261         assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1262         GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1263         GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1264         GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1265         typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1266         GenTreePtr typeSlot =
1267             gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1268
1269         // append the assign of the pointer value
1270         GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1271         if (pAfterStmt)
1272         {
1273             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1274         }
1275         else
1276         {
1277             impAppendTree(asg, curLevel, impCurStmtOffs);
1278         }
1279
1280         // return the assign of the type value, to be appended
1281         return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1282     }
1283     else if (src->gtOper == GT_COMMA)
1284     {
1285         // The second thing is the struct or its address.
1286         assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1287         if (pAfterStmt)
1288         {
1289             *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1290         }
1291         else
1292         {
1293             impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1294         }
1295
1296         // Evaluate the second thing using recursion.
1297         return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1298     }
1299     else if (src->IsLocal())
1300     {
1301         asgType = src->TypeGet();
1302     }
1303     else if (asgType == TYP_STRUCT)
1304     {
1305         asgType     = impNormStructType(structHnd);
1306         src->gtType = asgType;
1307 #ifdef LEGACY_BACKEND
1308         if (asgType == TYP_STRUCT)
1309         {
1310             GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1311             src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1312         }
1313 #endif
1314     }
1315     if (dest == nullptr)
1316     {
1317         // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1318         // if this is a known struct type.
1319         if (asgType == TYP_STRUCT)
1320         {
1321             dest = gtNewObjNode(structHnd, destAddr);
1322             gtSetObjGcInfo(dest->AsObj());
1323             // Although an obj as a call argument was always assumed to be a globRef
1324             // (which is itself overly conservative), that is not true of the operands
1325             // of a block assignment.
1326             dest->gtFlags &= ~GTF_GLOB_REF;
1327             dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1328         }
1329         else if (varTypeIsStruct(asgType))
1330         {
1331             dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1332         }
1333         else
1334         {
1335             dest = gtNewOperNode(GT_IND, asgType, destAddr);
1336         }
1337     }
1338     else
1339     {
1340         dest->gtType = asgType;
1341     }
1342
1343     dest->gtFlags |= destFlags;
1344     destFlags = dest->gtFlags;
1345
1346     // return an assignment node, to be appended
1347     GenTree* asgNode = gtNewAssignNode(dest, src);
1348     gtBlockOpInit(asgNode, dest, src, false);
1349
1350     // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1351     // of assignments.
1352     if ((destFlags & GTF_DONT_CSE) == 0)
1353     {
1354         dest->gtFlags &= ~(GTF_DONT_CSE);
1355     }
1356     return asgNode;
1357 }
1358
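// Illustrative sketch (not part of the original source): impAssignStruct
// (above) funnels into impAssignStructPtr by materializing an address for the
// destination; a caller effectively does something like:
//
//     GenTreePtr destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
//     GenTreePtr asg      = impAssignStructPtr(destAddr, src, structHnd,
//                                              (unsigned)CHECK_SPILL_ALL);
//     impAppendTree(asg, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
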
1359 /*****************************************************************************
1360    Given a struct value, and the class handle for that structure, return
1361    the expression for the address for that structure value.
1362
1363    willDeref - does the caller guarantee to dereference the pointer.
1364 */
1365
1366 GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
1367                                       CORINFO_CLASS_HANDLE structHnd,
1368                                       unsigned             curLevel,
1369                                       bool                 willDeref)
1370 {
1371     assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1372
1373     var_types type = structVal->TypeGet();
1374
1375     genTreeOps oper = structVal->gtOper;
1376
1377     if (oper == GT_OBJ && willDeref)
1378     {
1379         assert(structVal->gtObj.gtClass == structHnd);
1380         return (structVal->gtObj.Addr());
1381     }
1382     else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1383     {
1384         unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1385
1386         impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1387
1388         // The 'return value' is now the temp itself
1389
1390         type            = genActualType(lvaTable[tmpNum].TypeGet());
1391         GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1392         temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1393         return temp;
1394     }
1395     else if (oper == GT_COMMA)
1396     {
1397         assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1398
1399         GenTreePtr oldTreeLast = impTreeLast;
1400         structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1401         structVal->gtType      = TYP_BYREF;
1402
1403         if (oldTreeLast != impTreeLast)
1404         {
1405             // Some temp assignment statement was placed on the statement list
1406             // for Op2, but that would be out of order with op1, so we need to
1407             // spill op1 onto the statement list after whatever was last
1408             // before we recursed on Op2 (i.e. before whatever Op2 appended).
1409             impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1410             structVal->gtOp.gtOp1 = gtNewNothingNode();
1411         }
1412
1413         return (structVal);
1414     }
1415
1416     return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1417 }
1418
1419 //------------------------------------------------------------------------
1420 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1421 //                    and optionally determine the GC layout of the struct.
1422 //
1423 // Arguments:
1424 //    structHnd       - The class handle for the struct type of interest.
1425 //    gcLayout        - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1426 //                      into which the gcLayout will be written.
1427 //    pNumGCVars      - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1428 //                      which will be set to the number of GC fields in the struct.
1429 //    pSimdBaseType   - (optional, default nullptr) - if non-null, and the struct is a SIMD
1430 //                      type, set to the SIMD base type
1431 //
1432 // Return Value:
1433 //    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1434 //    The gcLayout will be returned using the pointers provided by the caller, if non-null.
1435 //    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1436 //
1437 // Assumptions:
1438 //    The caller must set gcLayout to nullptr OR ensure that it is large enough
1439 //    (see ICorStaticInfo::getClassGClayout in corinfo.h).
1440 //
1441 // Notes:
1442 //    Normalizing the type involves examining the struct type to determine if it should
1443 //    be modified to one that is handled specially by the JIT, possibly being a candidate
1444 //    for full enregistration, e.g. TYP_SIMD16.
1445
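//
//    For example (illustrative; the exact mapping depends on the target and on
//    FEATURE_SIMD being enabled): a 16-byte struct with no GC pointers that the
//    VM reports as a recognized SIMD type, such as System.Numerics.Vector4, is
//    normalized from TYP_STRUCT to TYP_SIMD16 with a base type of TYP_FLOAT,
//    while a struct containing an object reference always remains TYP_STRUCT.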
1446 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1447                                       BYTE*                gcLayout,
1448                                       unsigned*            pNumGCVars,
1449                                       var_types*           pSimdBaseType)
1450 {
1451     assert(structHnd != NO_CLASS_HANDLE);
1452
1453     const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1454     var_types   structType  = TYP_STRUCT;
1455
1456     // On CoreCLR the check for GC pointers includes a "may" to account for the special
1457     // ByRef-like span structs; "CONTAINS_STACK_PTR" is the bit that marks them.
1458     // When it is set the struct will contain a ByRef that could be a GC pointer or a native
1459     // pointer.
1460     const bool mayContainGCPtrs =
1461         ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1462
1463 #ifdef FEATURE_SIMD
1464     // Check to see if this is a SIMD type.
1465     if (featureSIMD && !mayContainGCPtrs)
1466     {
1467         unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1468
1469         if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1470         {
1471             unsigned int sizeBytes;
1472             var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1473             if (simdBaseType != TYP_UNKNOWN)
1474             {
1475                 assert(sizeBytes == originalSize);
1476                 structType = getSIMDTypeForSize(sizeBytes);
1477                 if (pSimdBaseType != nullptr)
1478                 {
1479                     *pSimdBaseType = simdBaseType;
1480                 }
1481                 // Also indicate that we use floating point registers.
1482                 compFloatingPointUsed = true;
1483             }
1484         }
1485     }
1486 #endif // FEATURE_SIMD
1487
1488     // Fetch GC layout info if requested
1489     if (gcLayout != nullptr)
1490     {
1491         unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1492
1493         // Verify that the quick test up above via the class attributes gave a
1494         // safe view of the type's GCness.
1495         //
1496         // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1497         // does not report any gc fields.
1498
1499         assert(mayContainGCPtrs || (numGCVars == 0));
1500
1501         if (pNumGCVars != nullptr)
1502         {
1503             *pNumGCVars = numGCVars;
1504         }
1505     }
1506     else
1507     {
1508         // Can't safely ask for number of GC pointers without also
1509         // asking for layout.
1510         assert(pNumGCVars == nullptr);
1511     }
1512
1513     return structType;
1514 }
1515
1516 //****************************************************************************
1517 //  Given TYP_STRUCT value 'structVal', make sure it is 'canonical'; that is,
1518 //  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1519 //
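//  For example (an illustrative sketch of the shapes produced below): a
//  struct-typed GT_LCL_VAR, say V02, is wrapped as
//      OBJ(structHnd, ADDR(LCL_VAR V02))
//  and a struct-returning GT_CALL is first spilled to a new temp and then
//  wrapped the same way around that temp.
//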
1520 GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
1521                                       CORINFO_CLASS_HANDLE structHnd,
1522                                       unsigned             curLevel,
1523                                       bool                 forceNormalization /*=false*/)
1524 {
1525     assert(forceNormalization || varTypeIsStruct(structVal));
1526     assert(structHnd != NO_CLASS_HANDLE);
1527     var_types structType = structVal->TypeGet();
1528     bool      makeTemp   = false;
1529     if (structType == TYP_STRUCT)
1530     {
1531         structType = impNormStructType(structHnd);
1532     }
1533     bool                 alreadyNormalized = false;
1534     GenTreeLclVarCommon* structLcl         = nullptr;
1535
1536     genTreeOps oper = structVal->OperGet();
1537     switch (oper)
1538     {
1539         // GT_RETURN and GT_MKREFANY don't capture the handle.
1540         case GT_RETURN:
1541             break;
1542         case GT_MKREFANY:
1543             alreadyNormalized = true;
1544             break;
1545
1546         case GT_CALL:
1547             structVal->gtCall.gtRetClsHnd = structHnd;
1548             makeTemp                      = true;
1549             break;
1550
1551         case GT_RET_EXPR:
1552             structVal->gtRetExpr.gtRetClsHnd = structHnd;
1553             makeTemp                         = true;
1554             break;
1555
1556         case GT_ARGPLACE:
1557             structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1558             break;
1559
1560         case GT_INDEX:
1561             // This will be transformed to an OBJ later.
1562             alreadyNormalized                    = true;
1563             structVal->gtIndex.gtStructElemClass = structHnd;
1564             structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
1565             break;
1566
1567         case GT_FIELD:
1568             // Wrap it in a GT_OBJ.
1569             structVal->gtType = structType;
1570             structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1571             break;
1572
1573         case GT_LCL_VAR:
1574         case GT_LCL_FLD:
1575             structLcl = structVal->AsLclVarCommon();
1576             // Wrap it in a GT_OBJ.
1577             structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1578             __fallthrough;
1579
1580         case GT_OBJ:
1581         case GT_BLK:
1582         case GT_DYN_BLK:
1583         case GT_ASG:
1584             // These should already have the appropriate type.
1585             assert(structVal->gtType == structType);
1586             alreadyNormalized = true;
1587             break;
1588
1589         case GT_IND:
1590             assert(structVal->gtType == structType);
1591             structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
1592             alreadyNormalized = true;
1593             break;
1594
1595 #ifdef FEATURE_SIMD
1596         case GT_SIMD:
1597             assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1598             break;
1599 #endif // FEATURE_SIMD
1600
1601         case GT_COMMA:
1602         {
1603             // The second thing could either be a block node or a GT_SIMD or a GT_COMMA node.
1604             GenTree* blockNode = structVal->gtOp.gtOp2;
1605             assert(blockNode->gtType == structType);
1606
1607             // Is this GT_COMMA(op1, GT_COMMA())?
1608             GenTree* parent = structVal;
1609             if (blockNode->OperGet() == GT_COMMA)
1610             {
1611                 // Find the last node in the comma chain.
1612                 do
1613                 {
1614                     assert(blockNode->gtType == structType);
1615                     parent    = blockNode;
1616                     blockNode = blockNode->gtOp.gtOp2;
1617                 } while (blockNode->OperGet() == GT_COMMA);
1618             }
1619
1620 #ifdef FEATURE_SIMD
1621             if (blockNode->OperGet() == GT_SIMD)
1622             {
1623                 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1624                 alreadyNormalized  = true;
1625             }
1626             else
1627 #endif
1628             {
1629                 assert(blockNode->OperIsBlk());
1630
1631                 // Sink the GT_COMMA below the blockNode addr.
1632                 // That is GT_COMMA(op1, op2=blockNode) is transformed into
1633                 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1634                 //
1635                 // In case of a chained GT_COMMA case, we sink the last
1636                 // GT_COMMA below the blockNode addr.
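                //
                // A rough before/after sketch of the simple (non-chained) case:
                //
                //    before:   COMMA (TYP_STRUCT)            after:   OBJ (TYP_STRUCT)
                //              /       \                               |
                //            op1      OBJ (TYP_STRUCT)               COMMA (TYP_BYREF)
                //                       |                             /       \
                //                      addr (TYP_BYREF)             op1      addr (TYP_BYREF)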
1637                 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1638                 assert(blockNodeAddr->gtType == TYP_BYREF);
1639                 GenTree* commaNode    = parent;
1640                 commaNode->gtType     = TYP_BYREF;
1641                 commaNode->gtOp.gtOp2 = blockNodeAddr;
1642                 blockNode->gtOp.gtOp1 = commaNode;
1643                 if (parent == structVal)
1644                 {
1645                     structVal = blockNode;
1646                 }
1647                 alreadyNormalized = true;
1648             }
1649         }
1650         break;
1651
1652         default:
1653             assert(!"Unexpected node in impNormStructVal()");
1654             break;
1655     }
1656     structVal->gtType  = structType;
1657     GenTree* structObj = structVal;
1658
1659     if (!alreadyNormalized || forceNormalization)
1660     {
1661         if (makeTemp)
1662         {
1663             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1664
1665             impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1666
1667             // The structVal is now the temp itself
1668
1669             structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1670             // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1671             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1672         }
1673         else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1674         {
1675             // Wrap it in a GT_OBJ
1676             structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1677         }
1678     }
1679
1680     if (structLcl != nullptr)
1681     {
1682         // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1683         // so we don't set GTF_EXCEPT here.
1684         if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1685         {
1686             structObj->gtFlags &= ~GTF_GLOB_REF;
1687         }
1688     }
1689     else
1690     {
1691         // In general an OBJ is an indirection and could raise an exception.
1692         structObj->gtFlags |= GTF_EXCEPT;
1693     }
1694     return (structObj);
1695 }
1696
1697 /******************************************************************************/
1698 // Given a type token, generate code that will evaluate to the correct
1699 // handle representation of that token (type handle, field handle, or method handle)
1700 //
1701 // For most cases, the handle is determined at compile-time, and the code
1702 // generated is simply an embedded handle.
1703 //
1704 // Run-time lookup is required if the enclosing method is shared between instantiations
1705 // and the token refers to formal type parameters whose instantiation is not known
1706 // at compile-time.
1707 //
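// For example (illustrative): a ldtoken of a concrete type in a non-shared
// method produces just an embedded class-handle constant node, whereas the same
// token referring to a formal type parameter in shared generic code is routed
// through impRuntimeLookupToTree to build a dictionary lookup.
//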
1708 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1709                                       BOOL*                   pRuntimeLookup /* = NULL */,
1710                                       BOOL                    mustRestoreHandle /* = FALSE */,
1711                                       BOOL                    importParent /* = FALSE */)
1712 {
1713     assert(!fgGlobalMorph);
1714
1715     CORINFO_GENERICHANDLE_RESULT embedInfo;
1716     info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1717
1718     if (pRuntimeLookup)
1719     {
1720         *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1721     }
1722
1723     if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1724     {
1725         switch (embedInfo.handleType)
1726         {
1727             case CORINFO_HANDLETYPE_CLASS:
1728                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1729                 break;
1730
1731             case CORINFO_HANDLETYPE_METHOD:
1732                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1733                 break;
1734
1735             case CORINFO_HANDLETYPE_FIELD:
1736                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1737                     info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1738                 break;
1739
1740             default:
1741                 break;
1742         }
1743     }
1744
1745     return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1746                            embedInfo.compileTimeHandle);
1747 }
1748
1749 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1750                                      CORINFO_LOOKUP*         pLookup,
1751                                      unsigned                handleFlags,
1752                                      void*                   compileTimeHandle)
1753 {
1754     if (!pLookup->lookupKind.needsRuntimeLookup)
1755     {
1756         // No runtime lookup is required.
1757         // Access is direct or memory-indirect (of a fixed address) reference
1758
1759         CORINFO_GENERIC_HANDLE handle       = nullptr;
1760         void*                  pIndirection = nullptr;
1761         assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1762
1763         if (pLookup->constLookup.accessType == IAT_VALUE)
1764         {
1765             handle = pLookup->constLookup.handle;
1766         }
1767         else if (pLookup->constLookup.accessType == IAT_PVALUE)
1768         {
1769             pIndirection = pLookup->constLookup.addr;
1770         }
1771         return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1772     }
1773     else if (compIsForInlining())
1774     {
1775         // Don't import runtime lookups when inlining
1776         // Inlining has to be aborted in such a case
1777         compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1778         return nullptr;
1779     }
1780     else
1781     {
1782         // Need to use dictionary-based access which depends on the typeContext
1783         // which is only available at runtime, not at compile-time.
1784
1785         return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1786     }
1787 }
1788
1789 #ifdef FEATURE_READYTORUN_COMPILER
1790 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1791                                                unsigned              handleFlags,
1792                                                void*                 compileTimeHandle)
1793 {
1794     CORINFO_GENERIC_HANDLE handle       = nullptr;
1795     void*                  pIndirection = nullptr;
1796     assert(pLookup->accessType != IAT_PPVALUE);
1797
1798     if (pLookup->accessType == IAT_VALUE)
1799     {
1800         handle = pLookup->handle;
1801     }
1802     else if (pLookup->accessType == IAT_PVALUE)
1803     {
1804         pIndirection = pLookup->addr;
1805     }
1806     return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1807 }
1808
1809 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1810     CORINFO_RESOLVED_TOKEN* pResolvedToken,
1811     CorInfoHelpFunc         helper,
1812     var_types               type,
1813     GenTreeArgList*         args /* =NULL*/,
1814     CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
1815 {
1816     CORINFO_CONST_LOOKUP lookup;
1817     if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1818     {
1819         return nullptr;
1820     }
1821
1822     GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
1823
1824     op1->setEntryPoint(lookup);
1825
1826     return op1;
1827 }
1828 #endif
1829
1830 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1831 {
1832     GenTreePtr op1 = nullptr;
1833
1834     switch (pCallInfo->kind)
1835     {
1836         case CORINFO_CALL:
1837             op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1838
1839 #ifdef FEATURE_READYTORUN_COMPILER
1840             if (opts.IsReadyToRun())
1841             {
1842                 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1843             }
1844             else
1845             {
1846                 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1847             }
1848 #endif
1849             break;
1850
1851         case CORINFO_CALL_CODE_POINTER:
1852             if (compIsForInlining())
1853             {
1854                 // Don't import runtime lookups when inlining
1855                 // Inlining has to be aborted in such a case
1856                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1857                 return nullptr;
1858             }
1859
1860             op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1861             break;
1862
1863         default:
1864             noway_assert(!"unknown call kind");
1865             break;
1866     }
1867
1868     return op1;
1869 }
1870
1871 //------------------------------------------------------------------------
1872 // getRuntimeContextTree: find pointer to context for runtime lookup.
1873 //
1874 // Arguments:
1875 //    kind - lookup kind.
1876 //
1877 // Return Value:
1878 //    Return GenTree pointer to generic shared context.
1879 //
1880 // Notes:
1881 //    Reports that the generic context is used.
1882
1883 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1884 {
1885     GenTreePtr ctxTree = nullptr;
1886
1887     // Collectible types require that, for shared generic code, if we use the generic context parameter,
1888     // we report it. (This is a conservative approach; we could detect some cases, particularly when the
1889     // context parameter is 'this', where we don't need the eager reporting logic.)
1890     lvaGenericsContextUseCount++;
1891
1892     if (kind == CORINFO_LOOKUP_THISOBJ)
1893     {
1894         // this Object
1895         ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1896
1897         // Vtable pointer of this object
1898         ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1899         ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1900         ctxTree->gtFlags |= GTF_IND_INVARIANT;
1901     }
1902     else
1903     {
1904         assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1905
1906         ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1907     }
1908     return ctxTree;
1909 }
1910
1911 /*****************************************************************************/
1912 /* Import a dictionary lookup to access a handle in code shared between
1913    generic instantiations.
1914    The lookup depends on the typeContext which is only available at
1915    runtime, and not at compile-time.
1916    pLookup->token1 and pLookup->token2 specify the handle that is needed.
1917    The cases are:
1918
1919    1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1920       instantiation-specific handle, and the tokens to lookup the handle.
1921    2. pLookup->indirections != CORINFO_USEHELPER :
1922       2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1923           to get the handle.
1924       2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1925           If it is non-NULL, it is the handle required. Else, call a helper
1926           to lookup the handle.
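
   As a rough sketch (illustrative only), for case 2a with
   pRuntimeLookup->indirections == 2 and both offsets non-zero, the handle ends
   up being fetched as

       IND(ADD(IND(ADD(ctxTree, offsets[0])), offsets[1]))

   i.e. each indirection level beyond the first adds a GT_IND, and any non-zero
   offset for a level adds a GT_ADD of that constant.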
1927  */
1928
1929 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1930                                             CORINFO_LOOKUP*         pLookup,
1931                                             void*                   compileTimeHandle)
1932 {
1933
1934     // This method can only be called from the importer instance of the Compiler.
1935     // In other words, it cannot be called by the instance of the Compiler for the inlinee.
1936     assert(!compIsForInlining());
1937
1938     GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1939
1940     CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1941     // It's available only via the run-time helper function
1942     if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1943     {
1944 #ifdef FEATURE_READYTORUN_COMPILER
1945         if (opts.IsReadyToRun())
1946         {
1947             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1948                                              gtNewArgList(ctxTree), &pLookup->lookupKind);
1949         }
1950 #endif
1951
1952         GenTreeArgList* helperArgs =
1953             gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1954                                                       nullptr, compileTimeHandle));
1955
1956         return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
1957     }
1958
1959     // Slot pointer
1960     GenTreePtr slotPtrTree = ctxTree;
1961
1962     if (pRuntimeLookup->testForNull)
1963     {
1964         slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
1965                                    nullptr DEBUGARG("impRuntimeLookup slot"));
1966     }
1967
1968     GenTreePtr indOffTree = nullptr;
1969
1970     // Apply repeated indirections
1971     for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
1972     {
1973         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
1974         {
1975             indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
1976                                       nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
1977         }
1978
1979         if (i != 0)
1980         {
1981             slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
1982             slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
1983             slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
1984         }
1985
1986         if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
1987         {
1988             slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
1989         }
1990
1991         if (pRuntimeLookup->offsets[i] != 0)
1992         {
1993             slotPtrTree =
1994                 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
1995         }
1996     }
1997
1998     // No null test required
1999     if (!pRuntimeLookup->testForNull)
2000     {
2001         if (pRuntimeLookup->indirections == 0)
2002         {
2003             return slotPtrTree;
2004         }
2005
2006         slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2007         slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2008
2009         if (!pRuntimeLookup->testForFixup)
2010         {
2011             return slotPtrTree;
2012         }
2013
2014         impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2015
2016         unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2017         impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2018
2019         GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2020         // downcast the pointer to a TYP_INT on 64-bit targets
2021         slot = impImplicitIorI4Cast(slot, TYP_INT);
2022         // Use a GT_AND to check for the lowest bit and indirect if it is set
2023         GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2024         GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2025         relop->gtFlags |= GTF_RELOP_QMARK;
2026
2027         // slot = GT_IND(slot - 1)
2028         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2029         GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2030         GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2031         slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2032         GenTree* asg   = gtNewAssignNode(slot, indir);
2033
2034         GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2035         GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2036         impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2037
2038         return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2039     }
2040
2041     assert(pRuntimeLookup->indirections != 0);
2042
2043     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2044
2045     // Extract the handle
2046     GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2047     handle->gtFlags |= GTF_IND_NONFAULTING;
2048
2049     GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2050                                          nullptr DEBUGARG("impRuntimeLookup typehandle"));
2051
2052     // Call to helper
2053     GenTreeArgList* helperArgs =
2054         gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2055                                                   compileTimeHandle));
2056     GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2057
2058     // Check for null and possibly call helper
2059     GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2060     relop->gtFlags |= GTF_RELOP_QMARK;
2061
2062     GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2063                                                          gtNewNothingNode(), // do nothing if nonnull
2064                                                          helperCall);
2065
2066     GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2067
2068     unsigned tmp;
2069     if (handleCopy->IsLocal())
2070     {
2071         tmp = handleCopy->gtLclVarCommon.gtLclNum;
2072     }
2073     else
2074     {
2075         tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2076     }
2077
2078     impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2079     return gtNewLclvNode(tmp, TYP_I_IMPL);
2080 }
2081
2082 /******************************************************************************
2083  *  Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2084  *  If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2085  *     else, grab a new temp.
2086  *  For structs (which can be pushed on the stack using obj, etc),
2087  *  special handling is needed
2088  */
2089
2090 struct RecursiveGuard
2091 {
2092 public:
2093     RecursiveGuard()
2094     {
2095         m_pAddress = nullptr;
2096     }
2097
2098     ~RecursiveGuard()
2099     {
2100         if (m_pAddress)
2101         {
2102             *m_pAddress = false;
2103         }
2104     }
2105
2106     void Init(bool* pAddress, bool bInitialize)
2107     {
2108         assert(pAddress && *pAddress == false && "Recursive guard violation");
2109         m_pAddress = pAddress;
2110
2111         if (bInitialize)
2112         {
2113             *m_pAddress = true;
2114         }
2115     }
2116
2117 protected:
2118     bool* m_pAddress;
2119 };
2120
2121 bool Compiler::impSpillStackEntry(unsigned level,
2122                                   unsigned tnum
2123 #ifdef DEBUG
2124                                   ,
2125                                   bool        bAssertOnRecursion,
2126                                   const char* reason
2127 #endif
2128                                   )
2129 {
2130
2131 #ifdef DEBUG
2132     RecursiveGuard guard;
2133     guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2134 #endif
2135
2136     GenTreePtr tree = verCurrentState.esStack[level].val;
2137
2138     /* Allocate a temp if we haven't been asked to use a particular one */
2139
2140     if (tiVerificationNeeded)
2141     {
2142         // Ignore bad temp requests (they will happen with bad code and will be
2143         // caught when importing the destination block)
2144         if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2145         {
2146             return false;
2147         }
2148     }
2149     else
2150     {
2151         if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2152         {
2153             return false;
2154         }
2155     }
2156
2157     bool isNewTemp = false;
2158
2159     if (tnum == BAD_VAR_NUM)
2160     {
2161         tnum      = lvaGrabTemp(true DEBUGARG(reason));
2162         isNewTemp = true;
2163     }
2164     else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2165     {
2166         // If verification is needed and tnum's type is incompatible with
2167         // the type on the stack, we grab a new temp. This is safe since
2168         // we will throw a verification exception in the destination block.
2169
2170         var_types valTyp = tree->TypeGet();
2171         var_types dstTyp = lvaTable[tnum].TypeGet();
2172
2173         // If the two types are different, we return. This will only happen with bad code and will
2174         // be caught when importing the destination block. We still allow int/byref and float/double differences.
2175         if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2176             !(
2177 #ifndef _TARGET_64BIT_
2178                 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2179 #endif // !_TARGET_64BIT_
2180                 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2181         {
2182             if (verNeedsVerification())
2183             {
2184                 return false;
2185             }
2186         }
2187     }
2188
2189     /* Assign the spilled entry to the temp */
2190     impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2191
2192     // If temp is newly introduced and a ref type, grab what type info we can.
2193     if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2194     {
2195         CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2196         lvaSetClass(tnum, tree, stkHnd);
2197     }
2198
2199     // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2200     var_types  type                    = genActualType(lvaTable[tnum].TypeGet());
2201     GenTreePtr temp                    = gtNewLclvNode(tnum, type);
2202     verCurrentState.esStack[level].val = temp;
2203
2204     return true;
2205 }
2206
2207 /*****************************************************************************
2208  *
2209  *  Ensure that the stack has only spilled values
2210  */
2211
2212 void Compiler::impSpillStackEnsure(bool spillLeaves)
2213 {
2214     assert(!spillLeaves || opts.compDbgCode);
2215
2216     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2217     {
2218         GenTreePtr tree = verCurrentState.esStack[level].val;
2219
2220         if (!spillLeaves && tree->OperIsLeaf())
2221         {
2222             continue;
2223         }
2224
2225         // Temps introduced by the importer itself don't need to be spilled
2226
2227         bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2228
2229         if (isTempLcl)
2230         {
2231             continue;
2232         }
2233
2234         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2235     }
2236 }
2237
2238 void Compiler::impSpillEvalStack()
2239 {
2240     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2241     {
2242         impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2243     }
2244 }
2245
2246 /*****************************************************************************
2247  *
2248  *  If the stack contains any trees with side effects in them, assign those
2249  *  trees to temps and append the assignments to the statement list.
2250  *  On return the stack is guaranteed to be empty.
2251  */
2252
2253 inline void Compiler::impEvalSideEffects()
2254 {
2255     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2256     verCurrentState.esStackDepth = 0;
2257 }
2258
2259 /*****************************************************************************
2260  *
2261  *  If the stack contains any trees with side effects in them, assign those
2262  *  trees to temps and replace them on the stack with refs to their temps.
2263  *  [0..chkLevel) is the portion of the stack which will be checked and spilled.
2264  */
2265
2266 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2267 {
2268     assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2269
2270     /* Before we make any appends to the tree list we must spill the
2271      * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2272
2273     impSpillSpecialSideEff();
2274
2275     if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2276     {
2277         chkLevel = verCurrentState.esStackDepth;
2278     }
2279
2280     assert(chkLevel <= verCurrentState.esStackDepth);
2281
2282     unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2283
2284     for (unsigned i = 0; i < chkLevel; i++)
2285     {
2286         GenTreePtr tree = verCurrentState.esStack[i].val;
2287
2288         GenTreePtr lclVarTree;
2289
2290         if ((tree->gtFlags & spillFlags) != 0 ||
2291             (spillGlobEffects &&                        // Only consider the following when  spillGlobEffects == TRUE
2292              !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2293              gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2294                                            // lvAddrTaken flag.
2295         {
2296             impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2297         }
2298     }
2299 }
2300
2301 /*****************************************************************************
2302  *
2303  *  If the stack contains any trees with special side effects in them, assign
2304  *  those trees to temps and replace them on the stack with refs to their temps.
2305  */
2306
2307 inline void Compiler::impSpillSpecialSideEff()
2308 {
2309     // Only exception objects need to be carefully handled
2310
2311     if (!compCurBB->bbCatchTyp)
2312     {
2313         return;
2314     }
2315
2316     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2317     {
2318         GenTreePtr tree = verCurrentState.esStack[level].val;
2319         // Make sure if we have an exception object in the sub tree we spill ourselves.
2320         if (gtHasCatchArg(tree))
2321         {
2322             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2323         }
2324     }
2325 }
2326
2327 /*****************************************************************************
2328  *
2329  *  Spill all stack references to value classes (TYP_STRUCT nodes)
2330  */
2331
2332 void Compiler::impSpillValueClasses()
2333 {
2334     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2335     {
2336         GenTreePtr tree = verCurrentState.esStack[level].val;
2337
2338         if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2339         {
2340             // Tree walk was aborted, which means that we found a
2341             // value class on the stack.  Need to spill that
2342             // stack entry.
2343
2344             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2345         }
2346     }
2347 }
2348
2349 /*****************************************************************************
2350  *
2351  *  Callback that checks if a tree node is TYP_STRUCT
2352  */
2353
2354 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2355 {
2356     fgWalkResult walkResult = WALK_CONTINUE;
2357
2358     if ((*pTree)->gtType == TYP_STRUCT)
2359     {
2360         // Abort the walk and indicate that we found a value class
2361
2362         walkResult = WALK_ABORT;
2363     }
2364
2365     return walkResult;
2366 }
2367
2368 /*****************************************************************************
2369  *
2370  *  If the stack contains any trees with references to local #lclNum, assign
2371  *  those trees to temps and replace their place on the stack with refs to
2372  *  their temps.
2373  */
2374
2375 void Compiler::impSpillLclRefs(ssize_t lclNum)
2376 {
2377     /* Before we make any appends to the tree list we must spill the
2378      * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2379
2380     impSpillSpecialSideEff();
2381
2382     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2383     {
2384         GenTreePtr tree = verCurrentState.esStack[level].val;
2385
2386         /* If the tree may throw an exception, and the block has a handler,
2387            then we need to spill assignments to the local if the local is
2388            live on entry to the handler.
2389            Just spill 'em all without considering the liveness */
2390
2391         bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2392
2393         /* Skip the tree if it doesn't have an affected reference,
2394            unless xcptnCaught */
2395
2396         if (xcptnCaught || gtHasRef(tree, lclNum, false))
2397         {
2398             impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2399         }
2400     }
2401 }
2402
2403 /*****************************************************************************
2404  *
2405  *  Push catch arg onto the stack.
2406  *  If there are jumps to the beginning of the handler, insert basic block
2407  *  and spill catch arg to a temp. Update the handler block if necessary.
2408  *
2409  *  Returns the basic block of the actual handler.
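 *
 *  For example (an illustrative sketch): when the handler block has more than
 *  one incoming edge (or under catch-arg stress), a new BBJ_NONE spill block is
 *  inserted in front of the handler containing roughly
 *
 *      tempN = GT_CATCH_ARG        (TYP_REF, tied to the fixed exception register)
 *
 *  and a GT_LCL_VAR of tempN is pushed for the handler instead of the raw
 *  GT_CATCH_ARG node.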
2410  */
2411
2412 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2413 {
2414     // Do not inject the basic block twice on reimport. This should be
2415     // hit only under JIT stress. See if the block is the one we injected.
2416     // Note that EH canonicalization can inject internal blocks here. We might
2417     // be able to re-use such a block (but we don't, right now).
2418     if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2419         (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2420     {
2421         GenTreePtr tree = hndBlk->bbTreeList;
2422
2423         if (tree != nullptr && tree->gtOper == GT_STMT)
2424         {
2425             tree = tree->gtStmt.gtStmtExpr;
2426             assert(tree != nullptr);
2427
2428             if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2429                 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2430             {
2431                 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2432
2433                 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2434
2435                 return hndBlk->bbNext;
2436             }
2437         }
2438
2439         // If we get here, it must have been some other kind of internal block. It's possible that
2440         // someone prepended something to our injected block, but that's unlikely.
2441     }
2442
2443     /* Push the exception address value on the stack */
2444     GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2445
2446     /* Mark the node as having a side-effect - i.e. cannot be
2447      * moved around since it is tied to a fixed location (EAX) */
2448     arg->gtFlags |= GTF_ORDER_SIDEEFF;
2449
2450 #if defined(JIT32_GCENCODER)
2451     const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2452 #else
2453     const bool forceInsertNewBlock                                     = compStressCompile(STRESS_CATCH_ARG, 5);
2454 #endif // defined(JIT32_GCENCODER)
2455
2456     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2457     if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2458     {
2459         if (hndBlk->bbRefs == 1)
2460         {
2461             hndBlk->bbRefs++;
2462         }
2463
2464         /* Create extra basic block for the spill */
2465         BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2466         newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2467         newBlk->setBBWeight(hndBlk->bbWeight);
2468         newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2469
2470         /* Account for the new link we are about to create */
2471         hndBlk->bbRefs++;
2472
2473         /* Spill into a temp */
2474         unsigned tempNum         = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2475         lvaTable[tempNum].lvType = TYP_REF;
2476         arg                      = gtNewTempAssign(tempNum, arg);
2477
2478         hndBlk->bbStkTempsIn = tempNum;
2479
2480         /* Report the debug info. impImportBlockCode won't treat
2481          * the actual handler as exception block and thus won't do it for us. */
2482         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2483         {
2484             impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2485             arg            = gtNewStmt(arg, impCurStmtOffs);
2486         }
2487
2488         fgInsertStmtAtEnd(newBlk, arg);
2489
2490         arg = gtNewLclvNode(tempNum, TYP_REF);
2491     }
2492
2493     impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2494
2495     return hndBlk;
2496 }
2497
2498 /*****************************************************************************
2499  *
2500  *  Given a tree, clone it. *pClone is set to the cloned tree.
2501  *  Returns the original tree if the cloning was easy,
2502  *   else returns the temp to which the tree had to be spilled to.
2503  *  If the tree has side-effects, it will be spilled to a temp.
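 *
 *  For example (illustrative): cloning a side-effect-free GT_LCL_VAR returns
 *  the original node and sets *pClone to a fresh copy, whereas cloning a call
 *  appends "tempN = call" to the statement list and returns two GT_LCL_VAR
 *  nodes referring to tempN (one returned, one through *pClone).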
2504  */
2505
2506 GenTreePtr Compiler::impCloneExpr(GenTreePtr           tree,
2507                                   GenTreePtr*          pClone,
2508                                   CORINFO_CLASS_HANDLE structHnd,
2509                                   unsigned             curLevel,
2510                                   GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2511 {
2512     if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2513     {
2514         GenTreePtr clone = gtClone(tree, true);
2515
2516         if (clone)
2517         {
2518             *pClone = clone;
2519             return tree;
2520         }
2521     }
2522
2523     /* Store the operand in a temp and return the temp */
2524
2525     unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2526
2527     // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2528     // return a struct type. It also may modify the struct type to a more
2529     // specialized type (e.g. a SIMD type).  So we will get the type from
2530     // the lclVar AFTER calling impAssignTempGen().
2531
2532     impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2533     var_types type = genActualType(lvaTable[temp].TypeGet());
2534
2535     *pClone = gtNewLclvNode(temp, type);
2536     return gtNewLclvNode(temp, type);
2537 }
2538
2539 /*****************************************************************************
2540  * Remember the IL offset (including stack-empty info) for the trees we will
2541  * generate now.
2542  */
2543
2544 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2545 {
2546     if (compIsForInlining())
2547     {
2548         GenTreePtr callStmt = impInlineInfo->iciStmt;
2549         assert(callStmt->gtOper == GT_STMT);
2550         impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2551     }
2552     else
2553     {
2554         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2555         IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2556         impCurStmtOffs    = offs | stkBit;
2557     }
2558 }
2559
2560 /*****************************************************************************
2561  * Returns current IL offset with stack-empty and call-instruction info incorporated
2562  */
2563 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2564 {
2565     if (compIsForInlining())
2566     {
2567         return BAD_IL_OFFSET;
2568     }
2569     else
2570     {
2571         assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2572         IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2573         IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2574         return offs | stkBit | callInstructionBit;
2575     }
2576 }
2577
2578 //------------------------------------------------------------------------
2579 // impCanSpillNow: check whether it is possible to spill all values from the eval stack to local variables.
2580 //
2581 // Arguments:
2582 //    prevOpcode - last importer opcode
2583 //
2584 // Return Value:
2585 //    true if it is legal, false if it could be a sequence that we do not want to divide.
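//
// For example (illustrative IL for the pattern that must stay intact):
//
//     dup
//     ldtoken    field valuetype '<PrivateImplementationDetails>'::SomeDataField
//     call       void System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(
//                    class System.Array, valuetype System.RuntimeFieldHandle)
//
// Spilling the stack between the ldtoken and the call would prevent
// impInitializeArrayIntrinsic from recognizing the sequence.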
2586 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2587 {
2588     // Don't spill after ldtoken, because it could be a part of the InitializeArray sequence.
2589     // Avoid breaking up to guarantee that impInitializeArrayIntrinsic can succeed.
2590     return prevOpcode != CEE_LDTOKEN;
2591 }
2592
2593 /*****************************************************************************
2594  *
2595  *  Remember the instr offset for the statements
2596  *
2597  *  When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2598  *  impCurOpcOffs, if the append was done because of a partial stack spill,
2599  *  as some of the trees corresponding to code up to impCurOpcOffs might
2600  *  still be sitting on the stack.
2601  *  So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2602  *  This should be called when an opcode finally/explicitly causes
2603  *  impAppendTree(tree) to be called (as opposed to being called because of
2604  *  a spill caused by the opcode)
2605  */
2606
2607 #ifdef DEBUG
2608
2609 void Compiler::impNoteLastILoffs()
2610 {
2611     if (impLastILoffsStmt == nullptr)
2612     {
2613         // We should have added a statement for the current basic block
2614         // Is this assert correct ?
2615
2616         assert(impTreeLast);
2617         assert(impTreeLast->gtOper == GT_STMT);
2618
2619         impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2620     }
2621     else
2622     {
2623         impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2624         impLastILoffsStmt                          = nullptr;
2625     }
2626 }
2627
2628 #endif // DEBUG
2629
2630 /*****************************************************************************
2631  * We don't create any GenTree (excluding spills) for a branch.
2632  * For debugging info, we need a placeholder so that we can note
2633  * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2634  */
2635
2636 void Compiler::impNoteBranchOffs()
2637 {
2638     if (opts.compDbgCode)
2639     {
2640         impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2641     }
2642 }
2643
2644 /*****************************************************************************
2645  * Locate the next stmt boundary for which we need to record info.
2646  * We will have to spill the stack at such boundaries if it is not
2647  * already empty.
2648  * Returns the next stmt boundary (after the start of the block)
2649  */
2650
2651 unsigned Compiler::impInitBlockLineInfo()
2652 {
2653     /* Assume the block does not correspond with any IL offset. This prevents
2654        us from reporting extra offsets. Extra mappings can cause confusing
2655        stepping, especially if the extra mapping is a jump-target, and the
2656        debugger does not ignore extra mappings, but instead rewinds to the
2657        nearest known offset */
2658
2659     impCurStmtOffsSet(BAD_IL_OFFSET);
2660
2661     if (compIsForInlining())
2662     {
2663         return ~0;
2664     }
2665
2666     IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2667
2668     if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2669     {
2670         impCurStmtOffsSet(blockOffs);
2671     }
2672
2673     if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2674     {
2675         impCurStmtOffsSet(blockOffs);
2676     }
2677
2678     /* Always report IL offset 0 or some tests get confused.
2679        Probably a good idea anyway */
2680
2681     if (blockOffs == 0)
2682     {
2683         impCurStmtOffsSet(blockOffs);
2684     }
2685
2686     if (!info.compStmtOffsetsCount)
2687     {
2688         return ~0;
2689     }
2690
2691     /* Find the lowest explicit stmt boundary within the block */
2692
2693     /* Start looking at an entry that is based on our instr offset */
2694
2695     unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2696
2697     if (index >= info.compStmtOffsetsCount)
2698     {
2699         index = info.compStmtOffsetsCount - 1;
2700     }
2701
2702     /* If we've guessed too far, back up */
2703
2704     while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2705     {
2706         index--;
2707     }
2708
2709     /* If we guessed short, advance ahead */
2710
2711     while (info.compStmtOffsets[index] < blockOffs)
2712     {
2713         index++;
2714
2715         if (index == info.compStmtOffsetsCount)
2716         {
2717             return info.compStmtOffsetsCount;
2718         }
2719     }
2720
2721     assert(index < info.compStmtOffsetsCount);
2722
2723     if (info.compStmtOffsets[index] == blockOffs)
2724     {
2725         /* There is an explicit boundary for the start of this basic block.
2726            So we will start with bbCodeOffs. Else we will wait until we
2727            get to the next explicit boundary */
2728
2729         impCurStmtOffsSet(blockOffs);
2730
2731         index++;
2732     }
2733
2734     return index;
2735 }
2736
2737 /*****************************************************************************/
2738
2739 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2740 {
2741     switch (opcode)
2742     {
2743         case CEE_CALL:
2744         case CEE_CALLI:
2745         case CEE_CALLVIRT:
2746             return true;
2747
2748         default:
2749             return false;
2750     }
2751 }
2752
2753 /*****************************************************************************/
2754
2755 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2756 {
2757     switch (opcode)
2758     {
2759         case CEE_CALL:
2760         case CEE_CALLI:
2761         case CEE_CALLVIRT:
2762         case CEE_JMP:
2763         case CEE_NEWOBJ:
2764         case CEE_NEWARR:
2765             return true;
2766
2767         default:
2768             return false;
2769     }
2770 }
2771
2772 /*****************************************************************************/
2773
2774 // One might think it is worth caching these values, but results indicate
2775 // that it isn't.
2776 // In addition, caching them causes SuperPMI to be unable to completely
2777 // encapsulate an individual method context.
2778 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2779 {
2780     CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2781     assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2782     return refAnyClass;
2783 }
2784
2785 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2786 {
2787     CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2788     assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2789     return typeHandleClass;
2790 }
2791
2792 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2793 {
2794     CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2795     assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2796     return argIteratorClass;
2797 }
2798
2799 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2800 {
2801     CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2802     assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2803     return stringClass;
2804 }
2805
2806 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2807 {
2808     CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2809     assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2810     return objectClass;
2811 }
2812
2813 /*****************************************************************************
2814  *  "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2815  *  set its type to TYP_BYREF when we create it. We only know whether it can be
2816  *  changed to TYP_I_IMPL at the point where we use it.
2817  */
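     /* For illustration (not part of the original comment): a local-address tree such as the
      * one created when importing ldloca starts life as TYP_BYREF; when it reaches a use that
      * treats it as a native int (e.g. pointer arithmetic), this function retypes that operand
      * to TYP_I_IMPL.
      */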
2818
2819 /* static */
2820 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2821 {
2822     if (tree1->IsVarAddr())
2823     {
2824         tree1->gtType = TYP_I_IMPL;
2825     }
2826
2827     if (tree2 && tree2->IsVarAddr())
2828     {
2829         tree2->gtType = TYP_I_IMPL;
2830     }
2831 }
2832
2833 /*****************************************************************************
2834  *  TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2835  *  to make that an explicit cast in our trees, so any implicit casts that
2836  *  exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2837  *  turned into explicit casts here.
2838  *  We also allow an implicit conversion of an ldnull into a TYP_I_IMPL(0).
2839  */
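     /* For illustration (an assumed typical case, not from the original comment): on a 64-bit
      * target, adding a TYP_INT index to a TYP_BYREF or TYP_I_IMPL pointer would mix operand
      * sizes; this function wraps the TYP_INT operand in GT_CAST(TYP_I_IMPL, ...) so the
      * widening is explicit in the tree, while a GT_CNS_INT constant is simply retyped to
      * TYP_I_IMPL instead of being wrapped in a cast.
      */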
2840
2841 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2842 {
2843     var_types currType   = genActualType(tree->gtType);
2844     var_types wantedType = genActualType(dstTyp);
2845
2846     if (wantedType != currType)
2847     {
2848         // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2849         if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2850         {
2851             if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2852             {
2853                 tree->gtType = TYP_I_IMPL;
2854             }
2855         }
2856 #ifdef _TARGET_64BIT_
2857         else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2858         {
2859             // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2860             tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2861         }
2862         else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2863         {
2864             // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2865             tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2866         }
2867 #endif // _TARGET_64BIT_
2868     }
2869
2870     return tree;
2871 }
2872
2873 /*****************************************************************************
2874  *  TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2875  *  but we want to make that an explicit cast in our trees, so any implicit casts
2876  *  that exist in the IL are turned into explicit casts here.
2877  */
2878
2879 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2880 {
2881 #ifndef LEGACY_BACKEND
2882     if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2883     {
2884         tree = gtNewCastNode(dstTyp, tree, dstTyp);
2885     }
2886 #endif // !LEGACY_BACKEND
2887
2888     return tree;
2889 }
2890
2891 //------------------------------------------------------------------------
2892 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2893 //    with a GT_COPYBLK node.
2894 //
2895 // Arguments:
2896 //    sig - The InitializeArray signature.
2897 //
2898 // Return Value:
2899 //    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2900 //    nullptr otherwise.
2901 //
2902 // Notes:
2903 //    The function recognizes the following IL pattern:
2904 //      ldc <length> or a list of ldc <lower bound>/<length>
2905 //      newarr or newobj
2906 //      dup
2907 //      ldtoken <field handle>
2908 //      call InitializeArray
2909 //    The lower bounds need not be constant except when the array rank is 1.
2910 //    The function recognizes all kinds of arrays thus enabling a small runtime
2911 //    such as CoreRT to skip providing an implementation for InitializeArray.
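     //
     //    For illustration (a hypothetical example, not part of the original notes): a C# array
     //    initializer such as "static readonly int[] Data = { 1, 2, 3, 4 };" typically compiles to
     //      ldc.i4.4
     //      newarr    [mscorlib]System.Int32
     //      dup
     //      ldtoken   <PrivateImplementationDetails RVA field>
     //      call      System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray
     //    which is exactly the shape this function matches and replaces with a block copy from
     //    the RVA field's data into the freshly allocated array.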
2912
2913 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2914 {
2915     assert(sig->numArgs == 2);
2916
2917     GenTreePtr fieldTokenNode = impStackTop(0).val;
2918     GenTreePtr arrayLocalNode = impStackTop(1).val;
2919
2920     //
2921     // Verify that the field token is known and valid.  Note that it's also
2922     // possible for the token to come from reflection, in which case we cannot do
2923     // the optimization and must therefore revert to calling the helper.  You can
2924     // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2925     //
2926
2927     // Check to see if the ldtoken helper call is what we see here.
2928     if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2929         (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2930     {
2931         return nullptr;
2932     }
2933
2934     // Strip helper call away
2935     fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2936
2937     if (fieldTokenNode->gtOper == GT_IND)
2938     {
2939         fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2940     }
2941
2942     // Check for constant
2943     if (fieldTokenNode->gtOper != GT_CNS_INT)
2944     {
2945         return nullptr;
2946     }
2947
2948     CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2949     if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2950     {
2951         return nullptr;
2952     }
2953
2954     //
2955     // We need to get the number of elements in the array and the size of each element.
2956     // We verify that the newarr statement is exactly what we expect it to be.
2957     // If it's not, then we just return nullptr and don't optimize this call.
2958     //
2959
2960     //
2961     // It is possible that we don't have any statements in the block yet.
2962     //
2963     if (impTreeLast->gtOper != GT_STMT)
2964     {
2965         assert(impTreeLast->gtOper == GT_BEG_STMTS);
2966         return nullptr;
2967     }
2968
2969     //
2970     // We start by looking at the last statement, making sure it's an assignment, and
2971     // that the target of the assignment is the array passed to InitializeArray.
2972     //
2973     GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2974     if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2975         (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2976         (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2977     {
2978         return nullptr;
2979     }
2980
2981     //
2982     // Make sure that the object being assigned is a helper call.
2983     //
2984
2985     GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2986     if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2987     {
2988         return nullptr;
2989     }
2990
2991     //
2992     // Verify that it is one of the new array helpers.
2993     //
2994
2995     bool isMDArray = false;
2996
2997     if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2998         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2999         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3000         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3001 #ifdef FEATURE_READYTORUN_COMPILER
3002         && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3003         newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3004 #endif
3005             )
3006     {
3007         if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3008         {
3009             return nullptr;
3010         }
3011
3012         isMDArray = true;
3013     }
3014
3015     CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3016
3017     //
3018     // Make sure we found a compile time handle to the array
3019     //
3020
3021     if (!arrayClsHnd)
3022     {
3023         return nullptr;
3024     }
3025
3026     unsigned rank = 0;
3027     S_UINT32 numElements;
3028
3029     if (isMDArray)
3030     {
3031         rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3032
3033         if (rank == 0)
3034         {
3035             return nullptr;
3036         }
3037
3038         GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3039         assert(tokenArg != nullptr);
3040         GenTreeArgList* numArgsArg = tokenArg->Rest();
3041         assert(numArgsArg != nullptr);
3042         GenTreeArgList* argsArg = numArgsArg->Rest();
3043         assert(argsArg != nullptr);
3044
3045         //
3046         // The number of arguments should be a constant between 1 and 64. The rank can't be 0,
3047         // so at least one length must be present, and the rank can't exceed 32, so there can
3048         // be at most 64 arguments - 32 lengths and 32 lower bounds.
3049         //
3050
3051         if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3052             (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3053         {
3054             return nullptr;
3055         }
3056
3057         unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3058         bool     lowerBoundsSpecified;
3059
3060         if (numArgs == rank * 2)
3061         {
3062             lowerBoundsSpecified = true;
3063         }
3064         else if (numArgs == rank)
3065         {
3066             lowerBoundsSpecified = false;
3067
3068             //
3069             // If the rank is 1 and a lower bound isn't specified then the runtime creates
3070             // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3071             // we get a SDArray as well, see the for loop below.
3072             //
3073
3074             if (rank == 1)
3075             {
3076                 isMDArray = false;
3077             }
3078         }
3079         else
3080         {
3081             return nullptr;
3082         }
3083
3084         //
3085         // The rank is known to be at least 1 so we can start with numElements being 1
3086         // to avoid the need to special case the first dimension.
3087         //
3088
3089         numElements = S_UINT32(1);
3090
3091         struct Match
3092         {
3093             static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3094             {
3095                 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3096                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3097             }
3098
3099             static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3100             {
3101                 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3102                        (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3103                        IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3104             }
3105
3106             static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3107             {
3108                 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3109                        (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3110             }
3111
3112             static bool IsComma(GenTree* tree)
3113             {
3114                 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3115             }
3116         };
3117
3118         unsigned argIndex = 0;
3119         GenTree* comma;
3120
3121         for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3122         {
3123             if (lowerBoundsSpecified)
3124             {
3125                 //
3126                 // In general lower bounds can be ignored because they're not needed to
3127                 // calculate the total number of elements. But for single dimensional arrays
3128                 // we need to know if the lower bound is 0 because in this case the runtime
3129                 // creates a SDArray and this affects the way the array data offset is calculated.
3130                 //
3131
3132                 if (rank == 1)
3133                 {
3134                     GenTree* lowerBoundAssign = comma->gtGetOp1();
3135                     assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3136                     GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3137
3138                     if (lowerBoundNode->IsIntegralConst(0))
3139                     {
3140                         isMDArray = false;
3141                     }
3142                 }
3143
3144                 comma = comma->gtGetOp2();
3145                 argIndex++;
3146             }
3147
3148             GenTree* lengthNodeAssign = comma->gtGetOp1();
3149             assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3150             GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3151
3152             if (!lengthNode->IsCnsIntOrI())
3153             {
3154                 return nullptr;
3155             }
3156
3157             numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3158             argIndex++;
3159         }
3160
3161         assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3162
3163         if (argIndex != numArgs)
3164         {
3165             return nullptr;
3166         }
3167     }
3168     else
3169     {
3170         //
3171         // Make sure there are exactly two arguments:  the array class and
3172         // the number of elements.
3173         //
3174
3175         GenTreePtr arrayLengthNode;
3176
3177         GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3178 #ifdef FEATURE_READYTORUN_COMPILER
3179         if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3180         {
3181             // Array length is 1st argument for readytorun helper
3182             arrayLengthNode = args->Current();
3183         }
3184         else
3185 #endif
3186         {
3187             // Array length is 2nd argument for regular helper
3188             arrayLengthNode = args->Rest()->Current();
3189         }
3190
3191         //
3192     // Make sure that the number of elements looks valid.
3193         //
3194         if (arrayLengthNode->gtOper != GT_CNS_INT)
3195         {
3196             return nullptr;
3197         }
3198
3199         numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3200
3201         if (!info.compCompHnd->isSDArray(arrayClsHnd))
3202         {
3203             return nullptr;
3204         }
3205     }
3206
3207     CORINFO_CLASS_HANDLE elemClsHnd;
3208     var_types            elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3209
3210     //
3211     // Note that genTypeSize will return zero for non primitive types, which is exactly
3212     // what we want (size will then be 0, and we will catch this in the conditional below).
3213     // Note that we don't expect this to fail for valid binaries, so we assert in the
3214     // non-verification case (the verification case should not assert but rather correctly
3215     // handle bad binaries).  This assert is not guarding any specific invariant, but rather
3216     // saying that we don't expect this to happen, and if it is hit, we need to investigate
3217     // why.
3218     //
3219
3220     S_UINT32 elemSize(genTypeSize(elementType));
3221     S_UINT32 size = elemSize * S_UINT32(numElements);
3222
3223     if (size.IsOverflow())
3224     {
3225         return nullptr;
3226     }
3227
3228     if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3229     {
3230         assert(verNeedsVerification());
3231         return nullptr;
3232     }
3233
3234     void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3235     if (!initData)
3236     {
3237         return nullptr;
3238     }
3239
3240     //
3241     // At this point we are ready to commit to implementing the InitializeArray
3242     // intrinsic using a struct assignment.  Pop the arguments from the stack and
3243     // return the struct assignment node.
3244     //
3245
3246     impPopStack();
3247     impPopStack();
3248
3249     const unsigned blkSize = size.Value();
3250     unsigned       dataOffset;
3251
3252     if (isMDArray)
3253     {
3254         dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3255     }
3256     else
3257     {
3258         dataOffset = eeGetArrayDataOffset(elementType);
3259     }
3260
3261     GenTreePtr dst     = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3262     GenTreePtr blk     = gtNewBlockVal(dst, blkSize);
3263     GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3264     GenTreePtr src     = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3265
3266     return gtNewBlkOpNode(blk,     // dst
3267                           src,     // src
3268                           blkSize, // size
3269                           false,   // volatil
3270                           true);   // copyBlock
3271 }
3272
3273 /*****************************************************************************/
3274 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3275 // Returns NULL if an intrinsic cannot be used
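     //
     // A note on the two recognition paths (summarizing the code below, not an original comment):
     // classic intrinsics are identified by the VM via getIntrinsicID() and may be marked
     // mustExpand, whereas methods the VM flags as jit intrinsics arrive with isJitIntrinsic set,
     // carry no CorInfoIntrinsics ID, and are always optional to expand. The DEBUG-only sample
     // near the end of this function shows how such a method (e.g. System.Enum.HasFlag) can be
     // matched by its namespace, class and method name.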
3276
3277 GenTreePtr Compiler::impIntrinsic(GenTreePtr            newobjThis,
3278                                   CORINFO_CLASS_HANDLE  clsHnd,
3279                                   CORINFO_METHOD_HANDLE method,
3280                                   CORINFO_SIG_INFO*     sig,
3281                                   int                   memberRef,
3282                                   bool                  readonlyCall,
3283                                   bool                  tailCall,
3284                                   bool                  isJitIntrinsic,
3285                                   CorInfoIntrinsics*    pIntrinsicID)
3286 {
3287     bool              mustExpand  = false;
3288     CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3289     *pIntrinsicID                 = intrinsicID;
3290
3291     // Jit intrinsics are always optional to expand, and won't have an
3292     // Intrinsic ID.
3293     if (isJitIntrinsic)
3294     {
3295         assert(!mustExpand);
3296         assert(intrinsicID == CORINFO_INTRINSIC_Illegal);
3297     }
3298
3299 #ifndef _TARGET_ARM_
3300     genTreeOps interlockedOperator;
3301 #endif
3302
3303     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3304     {
3305         // must be done regardless of DbgCode and MinOpts
3306         return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3307     }
3308 #ifdef _TARGET_64BIT_
3309     if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3310     {
3311         // must be done regardless of DbgCode and MinOpts
3312         return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3313     }
3314 #else
3315     assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3316 #endif
3317
3318     GenTreePtr retNode = nullptr;
3319
3320     //
3321     // We disable the inlining of intrinsics for MinOpts.
3322     //
3323     if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3324     {
3325         *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3326         return retNode;
3327     }
3328
3329     // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3330     // seem to work properly for Infinity values, and we don't do
3331     // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have.
3332
3333     var_types callType = JITtype2varType(sig->retType);
3334
3335     /* First do the intrinsics which are always smaller than a call */
3336
3337     switch (intrinsicID)
3338     {
3339         GenTreePtr op1, op2;
3340
3341         case CORINFO_INTRINSIC_Sin:
3342         case CORINFO_INTRINSIC_Sqrt:
3343         case CORINFO_INTRINSIC_Abs:
3344         case CORINFO_INTRINSIC_Cos:
3345         case CORINFO_INTRINSIC_Round:
3346         case CORINFO_INTRINSIC_Cosh:
3347         case CORINFO_INTRINSIC_Sinh:
3348         case CORINFO_INTRINSIC_Tan:
3349         case CORINFO_INTRINSIC_Tanh:
3350         case CORINFO_INTRINSIC_Asin:
3351         case CORINFO_INTRINSIC_Acos:
3352         case CORINFO_INTRINSIC_Atan:
3353         case CORINFO_INTRINSIC_Atan2:
3354         case CORINFO_INTRINSIC_Log10:
3355         case CORINFO_INTRINSIC_Pow:
3356         case CORINFO_INTRINSIC_Exp:
3357         case CORINFO_INTRINSIC_Ceiling:
3358         case CORINFO_INTRINSIC_Floor:
3359
3360             // These are math intrinsics
3361
3362             assert(callType != TYP_STRUCT);
3363
3364             op1 = nullptr;
3365
3366 #if defined(LEGACY_BACKEND)
3367             if (IsTargetIntrinsic(intrinsicID))
3368 #elif !defined(_TARGET_X86_)
3369             // Intrinsics that are not implemented directly by target instructions will
3370             // be re-materialized as user calls in rationalizer. For prefixed tail calls,
3371             // don't do this optimization, because
3372             //  a) of back-compatibility reasons on desktop .NET 4.6 / 4.6.1, and
3373             //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
3374             //     tail-prefixed GT_INTRINSIC as a tail call in rationalizer.
3375             if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3376 #else
3377             // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3378             // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3379             // code generation for certain EH constructs.
3380             if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3381 #endif
3382             {
3383                 switch (sig->numArgs)
3384                 {
3385                     case 1:
3386                         op1 = impPopStack().val;
3387
3388 #if FEATURE_X87_DOUBLES
3389
3390                         // X87 stack doesn't differentiate between float/double
3391                         // so it doesn't need a cast, but everybody else does
3392                         // Just double check it is at least a FP type
3393                         noway_assert(varTypeIsFloating(op1));
3394
3395 #else // FEATURE_X87_DOUBLES
3396
3397                         if (op1->TypeGet() != callType)
3398                         {
3399                             op1 = gtNewCastNode(callType, op1, callType);
3400                         }
3401
3402 #endif // FEATURE_X87_DOUBLES
3403
3404                         op1 = new (this, GT_INTRINSIC)
3405                             GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3406                         break;
3407
3408                     case 2:
3409                         op2 = impPopStack().val;
3410                         op1 = impPopStack().val;
3411
3412 #if FEATURE_X87_DOUBLES
3413
3414                         // X87 stack doesn't differentiate between float/double
3415                         // so it doesn't need a cast, but everybody else does
3416                         // Just double check it is at least a FP type
3417                         noway_assert(varTypeIsFloating(op2));
3418                         noway_assert(varTypeIsFloating(op1));
3419
3420 #else // FEATURE_X87_DOUBLES
3421
3422                         if (op2->TypeGet() != callType)
3423                         {
3424                             op2 = gtNewCastNode(callType, op2, callType);
3425                         }
3426                         if (op1->TypeGet() != callType)
3427                         {
3428                             op1 = gtNewCastNode(callType, op1, callType);
3429                         }
3430
3431 #endif // FEATURE_X87_DOUBLES
3432
3433                         op1 = new (this, GT_INTRINSIC)
3434                             GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3435                         break;
3436
3437                     default:
3438                         NO_WAY("Unsupported number of args for Math Intrinsic");
3439                 }
3440
3441 #ifndef LEGACY_BACKEND
3442                 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3443                 {
3444                     op1->gtFlags |= GTF_CALL;
3445                 }
3446 #endif
3447             }
3448
3449             retNode = op1;
3450             break;
3451
3452 #ifdef _TARGET_XARCH_
3453         // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3454         case CORINFO_INTRINSIC_InterlockedAdd32:
3455             interlockedOperator = GT_LOCKADD;
3456             goto InterlockedBinOpCommon;
3457         case CORINFO_INTRINSIC_InterlockedXAdd32:
3458             interlockedOperator = GT_XADD;
3459             goto InterlockedBinOpCommon;
3460         case CORINFO_INTRINSIC_InterlockedXchg32:
3461             interlockedOperator = GT_XCHG;
3462             goto InterlockedBinOpCommon;
3463
3464 #ifdef _TARGET_AMD64_
3465         case CORINFO_INTRINSIC_InterlockedAdd64:
3466             interlockedOperator = GT_LOCKADD;
3467             goto InterlockedBinOpCommon;
3468         case CORINFO_INTRINSIC_InterlockedXAdd64:
3469             interlockedOperator = GT_XADD;
3470             goto InterlockedBinOpCommon;
3471         case CORINFO_INTRINSIC_InterlockedXchg64:
3472             interlockedOperator = GT_XCHG;
3473             goto InterlockedBinOpCommon;
3474 #endif // _TARGET_AMD64_
3475
3476         InterlockedBinOpCommon:
3477             assert(callType != TYP_STRUCT);
3478             assert(sig->numArgs == 2);
3479
3480             op2 = impPopStack().val;
3481             op1 = impPopStack().val;
3482
3483             // This creates:
3484             //   val
3485             // XAdd
3486             //   addr
3487             //     field (for example)
3488             //
3489             // In the case where the first argument is the address of a local, we might
3490             // want to make this *not* make the var address-taken -- but atomic instructions
3491             // on a local are probably pretty useless anyway, so we probably don't care.
3492
3493             op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3494             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3495             retNode = op1;
3496             break;
3497 #endif // _TARGET_XARCH_
3498
3499         case CORINFO_INTRINSIC_MemoryBarrier:
3500
3501             assert(sig->numArgs == 0);
3502
3503             op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3504             op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3505             retNode = op1;
3506             break;
3507
3508 #ifdef _TARGET_XARCH_
3509         // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3510         case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3511 #ifdef _TARGET_AMD64_
3512         case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3513 #endif
3514         {
3515             assert(callType != TYP_STRUCT);
3516             assert(sig->numArgs == 3);
3517             GenTreePtr op3;
3518
3519             op3 = impPopStack().val; // comparand
3520             op2 = impPopStack().val; // value
3521             op1 = impPopStack().val; // location
3522
3523             GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3524
3525             node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3526             retNode = node;
3527             break;
3528         }
3529 #endif
3530
3531         case CORINFO_INTRINSIC_StringLength:
3532             op1 = impPopStack().val;
3533             if (!opts.MinOpts() && !opts.compDbgCode)
3534             {
3535                 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3536                 op1                   = arrLen;
3537             }
3538             else
3539             {
3540                 /* Create the expression "*(str_addr + stringLengthOffset)" */
3541                 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3542                                     gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3543                 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3544             }
3545
3546             // Getting the length of a null string should throw
3547             op1->gtFlags |= GTF_EXCEPT;
3548
3549             retNode = op1;
3550             break;
3551
3552         case CORINFO_INTRINSIC_StringGetChar:
3553             op2 = impPopStack().val;
3554             op1 = impPopStack().val;
3555             op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3556             op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3557             retNode = op1;
3558             break;
3559
3560         case CORINFO_INTRINSIC_InitializeArray:
3561             retNode = impInitializeArrayIntrinsic(sig);
3562             break;
3563
3564         case CORINFO_INTRINSIC_Array_Address:
3565         case CORINFO_INTRINSIC_Array_Get:
3566         case CORINFO_INTRINSIC_Array_Set:
3567             retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3568             break;
3569
3570         case CORINFO_INTRINSIC_GetTypeFromHandle:
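                 // For illustration (hypothetical C# source, not an original comment):
                 // "typeof(Foo)" compiles to
                 //   ldtoken Foo
                 //   call    System.Type::GetTypeFromHandle
                 // so the tree on top of the stack here is often the type-handle-to-RuntimeType
                 // helper call; in that case the helper call is simply retyped to TYP_REF and
                 // used as the result, and the GetTypeFromHandle call is dropped.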
3571             op1 = impStackTop(0).val;
3572             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3573                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3574             {
3575                 op1 = impPopStack().val;
3576                 // Change call to return RuntimeType directly.
3577                 op1->gtType = TYP_REF;
3578                 retNode     = op1;
3579             }
3580             // Call the regular function.
3581             break;
3582
3583         case CORINFO_INTRINSIC_RTH_GetValueInternal:
3584             op1 = impStackTop(0).val;
3585             if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3586                 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3587             {
3588                 // Old tree
3589                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3590                 //
3591                 // New tree
3592                 // TreeToGetNativeTypeHandle
3593
3594                 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3595                 // to that helper.
3596
3597                 op1 = impPopStack().val;
3598
3599                 // Get native TypeHandle argument to old helper
3600                 op1 = op1->gtCall.gtCallArgs;
3601                 assert(op1->OperIsList());
3602                 assert(op1->gtOp.gtOp2 == nullptr);
3603                 op1     = op1->gtOp.gtOp1;
3604                 retNode = op1;
3605             }
3606             // Call the regular function.
3607             break;
3608
3609 #ifndef LEGACY_BACKEND
3610         case CORINFO_INTRINSIC_Object_GetType:
3611
3612             op1 = impPopStack().val;
3613             op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3614
3615             // Set the CALL flag to indicate that the operator is implemented by a call.
3616             // Set also the EXCEPTION flag because the native implementation of
3617             // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3618             op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3619             retNode = op1;
3620             break;
3621 #endif
3622         // Implement ByReference Ctor.  This wraps the assignment of the ref into a byref-like field
3623         // in a value type.  The canonical example of this is Span<T>. In effect this is just a
3624         // substitution.  The parameter byref will be assigned into the newly allocated object.
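             // For illustration (summarizing the code below, not an original comment):
             // "new ByReference<T>(ref value)" ends up with no constructor call at all; the
             // incoming byref is assigned directly into the struct's single field and the
             // struct value itself is pushed back onto the stack.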
3625         case CORINFO_INTRINSIC_ByReference_Ctor:
3626         {
3627             // Remove call to constructor and directly assign the byref passed
3628             // to the call to the first slot of the ByReference struct.
3629             op1                                    = impPopStack().val;
3630             GenTreePtr           thisptr           = newobjThis;
3631             CORINFO_FIELD_HANDLE fldHnd            = info.compCompHnd->getFieldInClass(clsHnd, 0);
3632             GenTreePtr           field             = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3633             GenTreePtr           assign            = gtNewAssignNode(field, op1);
3634             GenTreePtr           byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3635             assert(byReferenceStruct != nullptr);
3636             impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3637             retNode = assign;
3638             break;
3639         }
3640         // Implement ptr value getter for ByReference struct.
3641         case CORINFO_INTRINSIC_ByReference_Value:
3642         {
3643             op1                         = impPopStack().val;
3644             CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3645             GenTreePtr           field  = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3646             retNode                     = field;
3647             break;
3648         }
3649         case CORINFO_INTRINSIC_Span_GetItem:
3650         case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3651         {
3652             // Have index, stack pointer-to Span<T> s on the stack. Expand to:
3653             //
3654             // For Span<T>
3655             //   Comma
3656             //     BoundsCheck(index, s->_length)
3657             //     s->_pointer + index * sizeof(T)
3658             //
3659             // For ReadOnlySpan<T>
3660             //   Comma
3661             //     BoundsCheck(index, s->_length)
3662             //     *(s->_pointer + index * sizeof(T))
3663             //
3664             // Signature should show one class type parameter, which
3665             // we need to examine.
3666             assert(sig->sigInst.classInstCount == 1);
3667             CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3668             const unsigned       elemSize    = info.compCompHnd->getClassSize(spanElemHnd);
3669             assert(elemSize > 0);
3670
3671             const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3672
3673             JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3674                     info.compCompHnd->getClassName(spanElemHnd), elemSize);
3675
3676             GenTreePtr index          = impPopStack().val;
3677             GenTreePtr ptrToSpan      = impPopStack().val;
3678             GenTreePtr indexClone     = nullptr;
3679             GenTreePtr ptrToSpanClone = nullptr;
3680
3681 #if defined(DEBUG)
3682             if (verbose)
3683             {
3684                 printf("with ptr-to-span\n");
3685                 gtDispTree(ptrToSpan);
3686                 printf("and index\n");
3687                 gtDispTree(index);
3688             }
3689 #endif // defined(DEBUG)
3690
3691             // We need to use both index and ptr-to-span twice, so clone or spill.
3692             index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3693                                  nullptr DEBUGARG("Span.get_Item index"));
3694             ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3695                                      nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3696
3697             // Bounds check
3698             CORINFO_FIELD_HANDLE lengthHnd    = info.compCompHnd->getFieldInClass(clsHnd, 1);
3699             const unsigned       lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3700             GenTreePtr           length       = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
3701             GenTreePtr           boundsCheck  = new (this, GT_ARR_BOUNDS_CHECK)
3702                 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3703
3704             // Element access
3705             GenTreePtr           indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3706             GenTreePtr           sizeofNode  = gtNewIconNode(elemSize);
3707             GenTreePtr           mulNode     = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3708             CORINFO_FIELD_HANDLE ptrHnd      = info.compCompHnd->getFieldInClass(clsHnd, 0);
3709             const unsigned       ptrOffset   = info.compCompHnd->getFieldOffset(ptrHnd);
3710             GenTreePtr           data        = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
3711             GenTreePtr           result      = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3712
3713             // Prepare result
3714             var_types resultType = JITtype2varType(sig->retType);
3715
3716             if (isReadOnly)
3717             {
3718                 result = gtNewOperNode(GT_IND, resultType, result);
3719             }
3720             else
3721             {
3722                 assert(resultType == result->TypeGet());
3723             }
3724
3725             retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3726
3727             break;
3728         }
3729
3730         case CORINFO_INTRINSIC_GetRawHandle:
3731         {
3732             noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3733             CORINFO_RESOLVED_TOKEN resolvedToken;
3734             resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3735             resolvedToken.tokenScope   = info.compScopeHnd;
3736             resolvedToken.token        = memberRef;
3737             resolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
3738
3739             CORINFO_GENERICHANDLE_RESULT embedInfo;
3740             info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3741
3742             GenTreePtr rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3743                                                    embedInfo.compileTimeHandle);
3744             if (rawHandle == nullptr)
3745             {
3746                 return nullptr;
3747             }
3748
3749             noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3750
3751             unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3752             impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3753
3754             GenTreePtr lclVar     = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3755             GenTreePtr lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3756             var_types  resultType = JITtype2varType(sig->retType);
3757             retNode               = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3758
3759             break;
3760         }
3761
3762         default:
3763             /* Unknown intrinsic */
3764             break;
3765     }
3766
3767 #ifdef DEBUG
3768     // Sample code showing how to use the new intrinsic mechanism.
3769     if (isJitIntrinsic)
3770     {
3771         assert(retNode == nullptr);
3772         const char* className     = nullptr;
3773         const char* namespaceName = nullptr;
3774         const char* methodName    = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
3775
3776         if ((namespaceName != nullptr) && strcmp(namespaceName, "System") == 0)
3777         {
3778             if ((className != nullptr) && strcmp(className, "Enum") == 0)
3779             {
3780                 if ((methodName != nullptr) && strcmp(methodName, "HasFlag") == 0)
3781                 {
3782                     // Todo: plug in the intrinsic expansion
3783                     JITDUMP("Found Intrinsic call to Enum.HasFlag\n");
3784                 }
3785             }
3786         }
3787     }
3788 #endif
3789
3790     if (mustExpand)
3791     {
3792         if (retNode == nullptr)
3793         {
3794             NO_WAY("JIT must expand the intrinsic!");
3795         }
3796     }
3797
3798     return retNode;
3799 }
3800
3801 /*****************************************************************************/
3802
3803 GenTreePtr Compiler::impArrayAccessIntrinsic(
3804     CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3805 {
3806     /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3807        the following, as it generates fatter code.
3808     */
3809
3810     if (compCodeOpt() == SMALL_CODE)
3811     {
3812         return nullptr;
3813     }
3814
3815     /* These intrinsics generate fatter (but faster) code and are only
3816        done if we don't need SMALL_CODE */
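         /* For illustration (not part of the original comments): for a rank-2 access such as
            "a[i, j]" in C#, Array_Get is imported below as GT_IND(GT_ARR_ELEM(a, i, j)) instead
            of a call, Array_Address yields the GT_ARR_ELEM byref itself, and Array_Set becomes
            an assignment to the GT_IND of that element. */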
3817
3818     unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3819
3820     // The rank 1 case is special because it has to handle two array formats;
3821     // we will simply not do that case.
3822     if (rank > GT_ARR_MAX_RANK || rank <= 1)
3823     {
3824         return nullptr;
3825     }
3826
3827     CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3828     var_types            elemType      = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3829
3830     // For the ref case, we will only be able to inline if the types match
3831     // (the verifier checks for this; we don't care for the non-verified case)
3832     // and the type is final (so we don't need to do the cast).
3833     if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3834     {
3835         // Get the call site signature
3836         CORINFO_SIG_INFO LocalSig;
3837         eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3838         assert(LocalSig.hasThis());
3839
3840         CORINFO_CLASS_HANDLE actualElemClsHnd;
3841
3842         if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3843         {
3844             // Fetch the last argument, the one that indicates the type we are setting.
3845             CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3846             for (unsigned r = 0; r < rank; r++)
3847             {
3848                 argType = info.compCompHnd->getArgNext(argType);
3849             }
3850
3851             typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3852             actualElemClsHnd = argInfo.GetClassHandle();
3853         }
3854         else
3855         {
3856             assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3857
3858             // Fetch the return type
3859             typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3860             assert(retInfo.IsByRef());
3861             actualElemClsHnd = retInfo.GetClassHandle();
3862         }
3863
3864         // if it's not final, we can't do the optimization
3865         if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3866         {
3867             return nullptr;
3868         }
3869     }
3870
3871     unsigned arrayElemSize;
3872     if (elemType == TYP_STRUCT)
3873     {
3874         assert(arrElemClsHnd);
3875
3876         arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3877     }
3878     else
3879     {
3880         arrayElemSize = genTypeSize(elemType);
3881     }
3882
3883     if ((unsigned char)arrayElemSize != arrayElemSize)
3884     {
3885         // arrayElemSize would be truncated as an unsigned char.
3886         // This means the array element is too large. Don't do the optimization.
3887         return nullptr;
3888     }
3889
3890     GenTreePtr val = nullptr;
3891
3892     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3893     {
3894         // Assignment of a struct is more work, and there are more gets than sets.
3895         if (elemType == TYP_STRUCT)
3896         {
3897             return nullptr;
3898         }
3899
3900         val = impPopStack().val;
3901         assert(genActualType(elemType) == genActualType(val->gtType) ||
3902                (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3903                (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3904                (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3905     }
3906
3907     noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3908
3909     GenTreePtr inds[GT_ARR_MAX_RANK];
3910     for (unsigned k = rank; k > 0; k--)
3911     {
3912         inds[k - 1] = impPopStack().val;
3913     }
3914
3915     GenTreePtr arr = impPopStack().val;
3916     assert(arr->gtType == TYP_REF);
3917
3918     GenTreePtr arrElem =
3919         new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3920                                                static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3921
3922     if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3923     {
3924         arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3925     }
3926
3927     if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3928     {
3929         assert(val != nullptr);
3930         return gtNewAssignNode(arrElem, val);
3931     }
3932     else
3933     {
3934         return arrElem;
3935     }
3936 }
3937
3938 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3939 {
3940     unsigned i;
3941
3942     // do some basic checks first
3943     if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3944     {
3945         return FALSE;
3946     }
3947
3948     if (verCurrentState.esStackDepth > 0)
3949     {
3950         // merge stack types
3951         StackEntry* parentStack = block->bbStackOnEntry();
3952         StackEntry* childStack  = verCurrentState.esStack;
3953
3954         for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3955         {
3956             if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3957             {
3958                 return FALSE;
3959             }
3960         }
3961     }
3962
3963     // merge initialization status of this ptr
3964
3965     if (verTrackObjCtorInitState)
3966     {
3967         // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3968         assert(verCurrentState.thisInitialized != TIS_Bottom);
3969
3970         // If the successor block's thisInit state is unknown, copy it from the current state.
3971         if (block->bbThisOnEntry() == TIS_Bottom)
3972         {
3973             *changed = true;
3974             verSetThisInit(block, verCurrentState.thisInitialized);
3975         }
3976         else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3977         {
3978             if (block->bbThisOnEntry() != TIS_Top)
3979             {
3980                 *changed = true;
3981                 verSetThisInit(block, TIS_Top);
3982
3983                 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3984                 {
3985                     // The block is bad. Control can flow through the block to any handler that catches the
3986                     // verification exception, but the importer ignores bad blocks and therefore won't model
3987                     // this flow in the normal way. To complete the merge into the bad block, the new state
3988                     // needs to be manually pushed to the handlers that may be reached after the verification
3989                     // exception occurs.
3990                     //
3991                     // Usually, the new state was already propagated to the relevant handlers while processing
3992                     // the predecessors of the bad block. The exception is when the bad block is at the start
3993                     // of a try region, meaning it is protected by additional handlers that do not protect its
3994                     // predecessors.
3995                     //
3996                     if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3997                     {
3998                         // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3999                         // recursive calls back into this code path (if successors of the current bad block are
4000                         // also bad blocks).
4001                         //
4002                         ThisInitState origTIS           = verCurrentState.thisInitialized;
4003                         verCurrentState.thisInitialized = TIS_Top;
4004                         impVerifyEHBlock(block, true);
4005                         verCurrentState.thisInitialized = origTIS;
4006                     }
4007                 }
4008             }
4009         }
4010     }
4011     else
4012     {
4013         assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4014     }
4015
4016     return TRUE;
4017 }
4018
4019 /*****************************************************************************
4020  * 'logMsg' is true if a log message needs to be logged. false if the caller has
4021  *   already logged it (presumably in a more detailed fashion than done here)
4022  * 'bVerificationException' is true for a verification exception, false for a
4023  *   "call unauthorized by host" exception.
4024  */
4025
4026 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4027 {
4028     block->bbJumpKind = BBJ_THROW;
4029     block->bbFlags |= BBF_FAILED_VERIFICATION;
4030
4031     impCurStmtOffsSet(block->bbCodeOffs);
4032
4033 #ifdef DEBUG
4034     // we need this since BeginTreeList asserts otherwise
4035     impTreeList = impTreeLast = nullptr;
4036     block->bbFlags &= ~BBF_IMPORTED;
4037
4038     if (logMsg)
4039     {
4040         JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4041                 block->bbCodeOffs, block->bbCodeOffsEnd));
4042         if (verbose)
4043         {
4044             printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4045         }
4046     }
4047
4048     if (JitConfig.DebugBreakOnVerificationFailure())
4049     {
4050         DebugBreak();
4051     }
4052 #endif
4053
4054     impBeginTreeList();
4055
4056     // if the stack is non-empty evaluate all the side-effects
4057     if (verCurrentState.esStackDepth > 0)
4058     {
4059         impEvalSideEffects();
4060     }
4061     assert(verCurrentState.esStackDepth == 0);
4062
4063     GenTreePtr op1 =
4064         gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4065     // verCurrentState.esStackDepth = 0;
4066     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4067
4068     // The inliner is not able to handle methods that require a throw block, so
4069     // make sure this method never gets inlined.
4070     info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4071 }
4072
4073 /*****************************************************************************
4074  *
4075  */
4076 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4077
4078 {
4079     // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4080     // slightly different mechanism in which it calls the JIT to perform IL verification:
4081     // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4082     // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4083     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4084     // it bubble up until reported by the runtime.  Currently in RyuJIT, this method doesn't bubble
4085     // up the exception; instead it embeds a throw inside the offending basic block and lets it
4086     // fail at run time when the jitted method executes.
4087     //
4088     // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4089     // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
4090     // just try to find out whether to fail this method before even actually jitting it.  So, in case
4091     // we detect these two conditions, instead of generating a throw statement inside the offending
4092     // basic block, we immediately fail to JIT and notify the VM so that the IsVerifiable() predicate
4093     // returns false and RyuJIT behaves the same way JIT64 does.
4094     //
4095     // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4096     // RyuJIT for the time being until we completely replace JIT64.
4097     // TODO-ARM64-Cleanup:  We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4098
4099     // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4100     // exception if we are only importing and verifying.  The method verNeedsVerification() can also modify the
4101     // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4102     // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4103     // be turned off during importation).
4104     CLANG_FORMAT_COMMENT_ANCHOR;
4105
4106 #ifdef _TARGET_64BIT_
4107
4108 #ifdef DEBUG
4109     bool canSkipVerificationResult =
4110         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4111     assert(tiVerificationNeeded || canSkipVerificationResult);
4112 #endif // DEBUG
4113
4114     // Add the non verifiable flag to the compiler
4115     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4116     {
4117         tiIsVerifiableCode = FALSE;
4118     }
4119 #endif //_TARGET_64BIT_
4120     verResetCurrentState(block, &verCurrentState);
4121     verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4122
4123 #ifdef DEBUG
4124     impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4125 #endif                   // DEBUG
4126 }
4127
4128 /******************************************************************************/
4129 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4130 {
4131     assert(ciType < CORINFO_TYPE_COUNT);
4132
4133     typeInfo tiResult;
4134     switch (ciType)
4135     {
4136         case CORINFO_TYPE_STRING:
4137         case CORINFO_TYPE_CLASS:
4138             tiResult = verMakeTypeInfo(clsHnd);
4139             if (!tiResult.IsType(TI_REF))
4140             { // type must be consistent with element type
4141                 return typeInfo();
4142             }
4143             break;
4144
4145 #ifdef _TARGET_64BIT_
4146         case CORINFO_TYPE_NATIVEINT:
4147         case CORINFO_TYPE_NATIVEUINT:
4148             if (clsHnd)
4149             {
4150                 // If we have more precise information, use it
4151                 return verMakeTypeInfo(clsHnd);
4152             }
4153             else
4154             {
4155                 return typeInfo::nativeInt();
4156             }
4157             break;
4158 #endif // _TARGET_64BIT_
4159
4160         case CORINFO_TYPE_VALUECLASS:
4161         case CORINFO_TYPE_REFANY:
4162             tiResult = verMakeTypeInfo(clsHnd);
4163             // type must be a value class, consistent with ciType;
4164             if (!tiResult.IsValueClass())
4165             {
4166                 return typeInfo();
4167             }
4168             break;
4169         case CORINFO_TYPE_VAR:
4170             return verMakeTypeInfo(clsHnd);
4171
4172         case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4173         case CORINFO_TYPE_VOID:
4174             return typeInfo();
4175             break;
4176
4177         case CORINFO_TYPE_BYREF:
4178         {
4179             CORINFO_CLASS_HANDLE childClassHandle;
4180             CorInfoType          childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4181             return ByRef(verMakeTypeInfo(childType, childClassHandle));
4182         }
4183         break;
4184
4185         default:
4186             if (clsHnd)
4187             { // If we have more precise information, use it
4188                 return typeInfo(TI_STRUCT, clsHnd);
4189             }
4190             else
4191             {
4192                 return typeInfo(JITtype2tiType(ciType));
4193             }
4194     }
4195     return tiResult;
4196 }
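
// Illustrative examples of the mapping above (a sketch only; the exact result depends on
// JITtype2tiType and on the class handle the EE supplies):
//   verMakeTypeInfo(CORINFO_TYPE_INT, nullptr)        -> primitive typeInfo (TI_INT)
//   verMakeTypeInfo(CORINFO_TYPE_CLASS, clsHnd)       -> object reference typeInfo (TI_REF, clsHnd)
//   verMakeTypeInfo(CORINFO_TYPE_VALUECLASS, clsHnd)  -> value class typeInfo (TI_STRUCT, clsHnd),
//                                                        assuming clsHnd is not a primitive value class
//   verMakeTypeInfo(CORINFO_TYPE_PTR, clsHnd)         -> typeInfo()   // the error type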
4197
4198 /******************************************************************************/
4199
4200 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4201 {
4202     if (clsHnd == nullptr)
4203     {
4204         return typeInfo();
4205     }
4206
4207     // Byrefs should only occur in method and local signatures, which are accessed
4208     // using ICorClassInfo and ICorClassInfo.getChildType.
4209     // So findClass() and getClassAttribs() should not be called for byrefs
4210
4211     if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4212     {
4213         assert(!"Did findClass() return a Byref?");
4214         return typeInfo();
4215     }
4216
4217     unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4218
4219     if (attribs & CORINFO_FLG_VALUECLASS)
4220     {
4221         CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4222
4223         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4224         // not occur here, so we may want to change this to an assert instead.
4225         if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4226         {
4227             return typeInfo();
4228         }
4229
4230 #ifdef _TARGET_64BIT_
4231         if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4232         {
4233             return typeInfo::nativeInt();
4234         }
4235 #endif // _TARGET_64BIT_
4236
4237         if (t != CORINFO_TYPE_UNDEF)
4238         {
4239             return (typeInfo(JITtype2tiType(t)));
4240         }
4241         else if (bashStructToRef)
4242         {
4243             return (typeInfo(TI_REF, clsHnd));
4244         }
4245         else
4246         {
4247             return (typeInfo(TI_STRUCT, clsHnd));
4248         }
4249     }
4250     else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4251     {
4252         // See comment in _typeInfo.h for why we do it this way.
4253         return (typeInfo(TI_REF, clsHnd, true));
4254     }
4255     else
4256     {
4257         return (typeInfo(TI_REF, clsHnd));
4258     }
4259 }
4260
4261 /******************************************************************************/
4262 BOOL Compiler::verIsSDArray(typeInfo ti)
4263 {
4264     if (ti.IsNullObjRef())
4265     { // nulls are SD arrays
4266         return TRUE;
4267     }
4268
4269     if (!ti.IsType(TI_REF))
4270     {
4271         return FALSE;
4272     }
4273
4274     if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4275     {
4276         return FALSE;
4277     }
4278     return TRUE;
4279 }
4280
4281 /******************************************************************************/
4282 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4283 /* Returns an error type if anything goes wrong */
4284
4285 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4286 {
4287     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4288
4289     if (!verIsSDArray(arrayObjectType))
4290     {
4291         return typeInfo();
4292     }
4293
4294     CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4295     CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4296
4297     return verMakeTypeInfo(ciType, childClassHandle);
4298 }
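
// For example (a sketch, assuming the usual EE behavior): if 'arrayObjectType' is a TI_REF to an
// int32[] class, getChildType is expected to report CORINFO_TYPE_INT, so the result is the primitive
// typeInfo for int32.  Anything that is not a single-dimensional array yields the error typeInfo().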
4299
4300 /*****************************************************************************
4301  */
4302 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4303 {
4304     CORINFO_CLASS_HANDLE classHandle;
4305     CorInfoType          ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4306
4307     var_types type = JITtype2varType(ciType);
4308     if (varTypeIsGC(type))
4309     {
4310         // For efficiency, getArgType only returns something in classHandle for
4311         // value types.  For other types that have addition type info, you
4312         // have to call back explicitly
4313         classHandle = info.compCompHnd->getArgClass(sig, args);
4314         if (!classHandle)
4315         {
4316             NO_WAY("Could not figure out Class specified in argument or local signature");
4317         }
4318     }
4319
4320     return verMakeTypeInfo(ciType, classHandle);
4321 }
4322
4323 /*****************************************************************************/
4324
4325 // This does the expensive check to figure out whether the method
4326 // needs to be verified. It is called only when we fail verification,
4327 // just before throwing the verification exception.
4328
4329 BOOL Compiler::verNeedsVerification()
4330 {
4331     // If we have previously determined that verification is NOT needed
4332     // (for example in Compiler::compCompile), that means verification is really not needed.
4333     // Return the same decision we made before.
4334     // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4335
4336     if (!tiVerificationNeeded)
4337     {
4338         return tiVerificationNeeded;
4339     }
4340
4341     assert(tiVerificationNeeded);
4342
4343     // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4344     // obtain the answer.
4345     CorInfoCanSkipVerificationResult canSkipVerificationResult =
4346         info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4347
4348     // canSkipVerification will return one of the following three values:
4349     //    CORINFO_VERIFICATION_CANNOT_SKIP = 0,       // Cannot skip verification during jit time.
4350     //    CORINFO_VERIFICATION_CAN_SKIP = 1,          // Can skip verification during jit time.
4351     //    CORINFO_VERIFICATION_RUNTIME_CHECK = 2,     // Skip verification during jit time,
4352     //     but need to insert a callout to the VM to ask during runtime
4353     //     whether to skip verification or not.
4354
4355     // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4356     if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4357     {
4358         tiRuntimeCalloutNeeded = true;
4359     }
4360
4361     if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4362     {
4363         // Dev10 706080 - Testers don't like the assert, so just silence it
4364         // by not using the macros that invoke debugAssert.
4365         badCode();
4366     }
4367
4368     // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4369     // The following line means we will NOT do jit time verification if canSkipVerification
4370     // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4371     tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4372     return tiVerificationNeeded;
4373 }
4374
4375 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4376 {
4377     if (ti.IsByRef())
4378     {
4379         return TRUE;
4380     }
4381     if (!ti.IsType(TI_STRUCT))
4382     {
4383         return FALSE;
4384     }
4385     return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4386 }
4387
4388 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4389 {
4390     if (ti.IsPermanentHomeByRef())
4391     {
4392         return TRUE;
4393     }
4394     else
4395     {
4396         return FALSE;
4397     }
4398 }
4399
4400 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4401 {
4402     return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4403             || ti.IsUnboxedGenericTypeVar() ||
4404             (ti.IsType(TI_STRUCT) &&
4405              // exclude byreflike structs
4406              !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4407 }
4408
4409 // Is it a boxed value type?
4410 bool Compiler::verIsBoxedValueType(typeInfo ti)
4411 {
4412     if (ti.GetType() == TI_REF)
4413     {
4414         CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4415         return !!eeIsValueClass(clsHnd);
4416     }
4417     else
4418     {
4419         return false;
4420     }
4421 }
4422
4423 /*****************************************************************************
4424  *
4425  *  Check if a TailCall is legal.
4426  */
4427
4428 bool Compiler::verCheckTailCallConstraint(
4429     OPCODE                  opcode,
4430     CORINFO_RESOLVED_TOKEN* pResolvedToken,
4431     CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4432     bool                    speculative                // If true, won't throw if verification fails. Instead it will
4433                                                        // return false to the caller.
4434                                                        // If false, it will throw.
4435     )
4436 {
4437     DWORD            mflags;
4438     CORINFO_SIG_INFO sig;
4439     unsigned int     popCount = 0; // we can't pop the stack since impImportCall needs it, so
4440                                    // this counter is used to keep track of how many items have been
4441                                    // virtually popped
4442
4443     CORINFO_METHOD_HANDLE methodHnd       = nullptr;
4444     CORINFO_CLASS_HANDLE  methodClassHnd  = nullptr;
4445     unsigned              methodClassFlgs = 0;
4446
4447     assert(impOpcodeIsCallOpcode(opcode));
4448
4449     if (compIsForInlining())
4450     {
4451         return false;
4452     }
4453
4454     // for calli, there is no method handle; get the call-site signature directly
4455     if (opcode == CEE_CALLI)
4456     {
4457         /* Get the call sig */
4458         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4459
4460         // We don't know the target method, so we have to infer the flags, or
4461         // assume the worst-case.
4462         mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4463     }
4464     else
4465     {
4466         methodHnd = pResolvedToken->hMethod;
4467
4468         mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4469
4470         // When verifying generic code we pair the method handle with its
4471         // owning class to get the exact method signature.
4472         methodClassHnd = pResolvedToken->hClass;
4473         assert(methodClassHnd);
4474
4475         eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4476
4477         // opcode specific check
4478         methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4479     }
4480
4481     // We must have obtained the methodClassHnd if opcode is not CEE_CALLI
4482     assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4483
4484     if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4485     {
4486         eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4487     }
4488
4489     // check compatibility of the arguments
4490     unsigned int argCount;
4491     argCount = sig.numArgs;
4492     CORINFO_ARG_LIST_HANDLE args;
4493     args = sig.args;
4494     while (argCount--)
4495     {
4496         typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4497
4498         // check that the argument is not a byref for tailcalls
4499         VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4500
4501         // For unsafe code, we might have parameters containing pointer to the stack location.
4502         // Disallow the tailcall for this kind.
4503         CORINFO_CLASS_HANDLE classHandle;
4504         CorInfoType          ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4505         VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4506
4507         args = info.compCompHnd->getArgNext(args);
4508     }
4509
4510     // update popCount
4511     popCount += sig.numArgs;
4512
4513     // check for the 'this' pointer, which is present for non-static methods not called via NEWOBJ
4514     if (!(mflags & CORINFO_FLG_STATIC))
4515     {
4516         // Always update the popCount.
4517         // This is crucial for the stack calculation to be correct.
4518         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4519         popCount++;
4520
4521         if (opcode == CEE_CALLI)
4522         {
4523             // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4524             // on the stack.
4525             if (tiThis.IsValueClass())
4526             {
4527                 tiThis.MakeByRef();
4528             }
4529             VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4530         }
4531         else
4532         {
4533             // Check type compatibility of the this argument
4534             typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4535             if (tiDeclaredThis.IsValueClass())
4536             {
4537                 tiDeclaredThis.MakeByRef();
4538             }
4539
4540             VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4541         }
4542     }
4543
4544     // Tail calls on constrained calls should be illegal too:
4545     // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4546     VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4547
4548     // Get the exact view of the signature for an array method
4549     if (sig.retType != CORINFO_TYPE_VOID)
4550     {
4551         if (methodClassFlgs & CORINFO_FLG_ARRAY)
4552         {
4553             assert(opcode != CEE_CALLI);
4554             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4555         }
4556     }
4557
4558     typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4559     typeInfo tiCallerRetType =
4560         verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4561
4562     // void return type gets morphed into the error type, so we have to treat them specially here
4563     if (sig.retType == CORINFO_TYPE_VOID)
4564     {
4565         VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4566                                   speculative);
4567     }
4568     else
4569     {
4570         VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4571                                                    NormaliseForStack(tiCallerRetType), true),
4572                                   "tailcall return mismatch", speculative);
4573     }
4574
4575     // for tailcall, stack must be empty
4576     VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4577
4578     return true; // Yes, tailcall is legal
4579 }
4580
4581 /*****************************************************************************
4582  *
4583  *  Checks the IL verification rules for the call
4584  */
4585
4586 void Compiler::verVerifyCall(OPCODE                  opcode,
4587                              CORINFO_RESOLVED_TOKEN* pResolvedToken,
4588                              CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4589                              bool                    tailCall,
4590                              bool                    readonlyCall,
4591                              const BYTE*             delegateCreateStart,
4592                              const BYTE*             codeAddr,
4593                              CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4594 {
4595     DWORD             mflags;
4596     CORINFO_SIG_INFO* sig      = nullptr;
4597     unsigned int      popCount = 0; // we can't pop the stack since impImportCall needs it, so
4598                                     // this counter is used to keep track of how many items have been
4599                                     // virtually popped
4600
4601     // calli is never verifiable
4602     if (opcode == CEE_CALLI)
4603     {
4604         Verify(false, "Calli not verifiable");
4605         return;
4606     }
4607
4608     //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4609     mflags = callInfo->verMethodFlags;
4610
4611     sig = &callInfo->verSig;
4612
4613     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4614     {
4615         eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4616     }
4617
4618     // opcode specific check
4619     unsigned methodClassFlgs = callInfo->classFlags;
4620     switch (opcode)
4621     {
4622         case CEE_CALLVIRT:
4623             // cannot do callvirt on valuetypes
4624             VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4625             VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4626             break;
4627
4628         case CEE_NEWOBJ:
4629         {
4630             assert(!tailCall); // Importer should not allow this
4631             VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4632                            "newobj must be on instance");
4633
4634             if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4635             {
4636                 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4637                 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4638                 typeInfo tiDeclaredFtn =
4639                     verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4640                 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4641
4642                 assert(popCount == 0);
4643                 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4644                 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4645
4646                 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4647                 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4648                 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4649                                "delegate object type mismatch");
4650
4651                 CORINFO_CLASS_HANDLE objTypeHandle =
4652                     tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4653
4654                 // the method signature must be compatible with the delegate's invoke method
4655
4656                 // check that for virtual functions, the type of the object used to get the
4657                 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4658                 // since this is a bit of work to determine in general, we pattern match stylized
4659                 // code sequences
4660
4661                 // the delegate creation code check, which used to be done later, is now done here
4662                 // so we can read delegateMethodRef directly
4663                 // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
4664                 // we then use it in our call to isCompatibleDelegate().
4665
4666                 mdMemberRef delegateMethodRef = mdMemberRefNil;
4667                 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4668                                "must create delegates with certain IL");
4669
4670                 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4671                 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4672                 delegateResolvedToken.tokenScope   = info.compScopeHnd;
4673                 delegateResolvedToken.token        = delegateMethodRef;
4674                 delegateResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
4675                 info.compCompHnd->resolveToken(&delegateResolvedToken);
4676
4677                 CORINFO_CALL_INFO delegateCallInfo;
4678                 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4679                               addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4680
4681                 BOOL isOpenDelegate = FALSE;
4682                 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4683                                                                       tiActualFtn.GetMethod(), pResolvedToken->hClass,
4684                                                                       &isOpenDelegate),
4685                                "function incompatible with delegate");
4686
4687                 // check the constraints on the target method
4688                 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4689                                "delegate target has unsatisfied class constraints");
4690                 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4691                                                                             tiActualFtn.GetMethod()),
4692                                "delegate target has unsatisfied method constraints");
4693
4694                 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4695                 // for additional verification rules for delegates
4696                 CORINFO_METHOD_HANDLE actualMethodHandle  = tiActualFtn.GetMethod();
4697                 DWORD                 actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4698                 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4699                 {
4700
4701                     if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4702 #ifdef DEBUG
4703                         && StrictCheckForNonVirtualCallToVirtualMethod()
4704 #endif
4705                             )
4706                     {
4707                         if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4708                         {
4709                             VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4710                                                verIsBoxedValueType(tiActualObj),
4711                                            "The 'this' parameter to the call must be either the calling method's "
4712                                            "'this' parameter or "
4713                                            "a boxed value type.");
4714                         }
4715                     }
4716                 }
4717
4718                 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4719                 {
4720                     BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4721
4722                     Verify(targetIsStatic || !isOpenDelegate,
4723                            "Unverifiable creation of an open instance delegate for a protected member.");
4724
4725                     CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4726                                                                 ? info.compClassHnd
4727                                                                 : tiActualObj.GetClassHandleForObjRef();
4728
4729                     // In the case of protected methods, it is a requirement that the 'this'
4730                     // pointer be a subclass of the current context.  Perform this check.
4731                     Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4732                            "Accessing protected method through wrong type.");
4733                 }
4734                 goto DONE_ARGS;
4735             }
4736         }
4737         // fall thru to default checks
4738         default:
4739             VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4740     }
4741     VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4742                    "can only newobj a delegate constructor");
4743
4744     // check compatibility of the arguments
4745     unsigned int argCount;
4746     argCount = sig->numArgs;
4747     CORINFO_ARG_LIST_HANDLE args;
4748     args = sig->args;
4749     while (argCount--)
4750     {
4751         typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4752
4753         typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4754         VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4755
4756         args = info.compCompHnd->getArgNext(args);
4757     }
4758
4759 DONE_ARGS:
4760
4761     // update popCount
4762     popCount += sig->numArgs;
4763
4764     // check for the 'this' pointer, which is present for non-static methods not called via NEWOBJ
4765     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4766     if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4767     {
4768         typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4769         popCount++;
4770
4771         // If it is null, we assume we can access it (since it will AV shortly)
4772         // If it is anything but a reference class, there is no hierarchy, so
4773         // again, we don't need the precise instance class to compute 'protected' access
4774         if (tiThis.IsType(TI_REF))
4775         {
4776             instanceClassHnd = tiThis.GetClassHandleForObjRef();
4777         }
4778
4779         // Check type compatibility of the this argument
4780         typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4781         if (tiDeclaredThis.IsValueClass())
4782         {
4783             tiDeclaredThis.MakeByRef();
4784         }
4785
4786         // If this is a call to the base class .ctor, set thisPtr Init for
4787         // this block.
4788         if (mflags & CORINFO_FLG_CONSTRUCTOR)
4789         {
4790             if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4791                 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4792             {
4793                 assert(verCurrentState.thisInitialized !=
4794                        TIS_Bottom); // This should never be the case just from the logic of the verifier.
4795                 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4796                                "Call to base class constructor when 'this' is possibly initialized");
4797                 // Otherwise, 'this' is now initialized.
4798                 verCurrentState.thisInitialized = TIS_Init;
4799                 tiThis.SetInitialisedObjRef();
4800             }
4801             else
4802             {
4803                 // We allow direct calls to value type constructors
4804                 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4805                 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4806                 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4807                                "Bad call to a constructor");
4808             }
4809         }
4810
4811         if (pConstrainedResolvedToken != nullptr)
4812         {
4813             VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4814
4815             typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4816
4817             // We just dereference this and test for equality
4818             tiThis.DereferenceByRef();
4819             VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4820                            "this type mismatch with constrained type operand");
4821
4822             // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4823             tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4824         }
4825
4826         // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4827         if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4828         {
4829             tiDeclaredThis.SetIsReadonlyByRef();
4830         }
4831
4832         VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4833
4834         if (tiThis.IsByRef())
4835         {
4836             // Find the actual type where the method exists (as opposed to what is declared
4837             // in the metadata). This is to prevent passing a byref as the "this" argument
4838             // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4839
4840             CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4841             VerifyOrReturn(eeIsValueClass(actualClassHnd),
4842                            "Call to base type of valuetype (which is never a valuetype)");
4843         }
4844
4845         // Rules for non-virtual call to a non-final virtual method:
4846
4847         // Define:
4848         // The "this" pointer is considered to be "possibly written" if
4849         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
4850         //   (or)
4851         //   2. It has been stored to (STARG.0) anywhere in the method.
4852
4853         // A non-virtual call to a non-final virtual method is only allowed if
4854         //   1. The this pointer passed to the callee is an instance of a boxed value type.
4855         //   (or)
4856         //   2. The this pointer passed to the callee is the current method's this pointer.
4857         //      (and) The current method's this pointer is not "possibly written".
4858
4859         // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4860         // virtual methods.  (Luckily this does not affect .ctors, since they are not virtual.)
4861         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4862         // harder and more error prone.
4863
4864         if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4865 #ifdef DEBUG
4866             && StrictCheckForNonVirtualCallToVirtualMethod()
4867 #endif
4868                 )
4869         {
4870             if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4871             {
4872                 VerifyOrReturn(
4873                     tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4874                     "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4875                     "a boxed value type.");
4876             }
4877         }
4878     }
4879
4880     // check any constraints on the callee's class and type parameters
4881     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4882                    "method has unsatisfied class constraints");
4883     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4884                    "method has unsatisfied method constraints");
4885
4886     if (mflags & CORINFO_FLG_PROTECTED)
4887     {
4888         VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4889                        "Can't access protected method");
4890     }
4891
4892     // Get the exact view of the signature for an array method
4893     if (sig->retType != CORINFO_TYPE_VOID)
4894     {
4895         eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4896     }
4897
4898     // "readonly." prefixed calls only allowed for the Address operation on arrays.
4899     // The methods supported by array types are under the control of the EE
4900     // so we can trust that only the Address operation returns a byref.
4901     if (readonlyCall)
4902     {
4903         typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4904         VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4905                        "unexpected use of readonly prefix");
4906     }
4907
4908     // Verify the tailcall
4909     if (tailCall)
4910     {
4911         verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4912     }
4913 }
4914
4915 /*****************************************************************************
4916  *  Checks that a delegate creation is done using the following pattern:
4917  *     dup
4918  *     ldvirtftn targetMemberRef
4919  *  OR
4920  *     ldftn targetMemberRef
4921  *
4922  * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4923  *  not in this basic block)
4924  *
4925  *  targetMemberRef is read from the code sequence.
4926  *  targetMemberRef is validated iff verificationNeeded.
4927  */
4928
4929 BOOL Compiler::verCheckDelegateCreation(const BYTE*  delegateCreateStart,
4930                                         const BYTE*  codeAddr,
4931                                         mdMemberRef& targetMemberRef)
4932 {
4933     if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4934     {
4935         targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4936         return TRUE;
4937     }
4938     else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4939     {
4940         targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4941         return TRUE;
4942     }
4943
4944     return FALSE;
4945 }
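
// Illustrative IL for the two accepted shapes (an example only; actual compiler output may differ).
// The token offsets match the reads above: 'ldftn' is a 2-byte opcode followed by a 4-byte token,
// while 'dup' (1 byte) + 'ldvirtftn' (2 bytes) place the token at offset 3 from delegateCreateStart.
//
//   ldloc.0                                           // target object
//   dup
//   ldvirtftn  instance void C::M()                   // delegateCreateStart points at the dup
//   newobj     instance void D::.ctor(object, native int)
//
//   ldnull                                            // or a target object, for a non-virtual target
//   ldftn      void C::S()                            // delegateCreateStart points at the ldftn
//   newobj     instance void D::.ctor(object, native int)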
4946
4947 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4948 {
4949     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4950     typeInfo ptrVal     = verVerifyLDIND(tiTo, instrType);
4951     typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4952     if (!tiCompatibleWith(value, normPtrVal, true))
4953     {
4954         Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4955         compUnsafeCastUsed = true;
4956     }
4957     return ptrVal;
4958 }
4959
4960 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4961 {
4962     assert(!instrType.IsStruct());
4963
4964     typeInfo ptrVal;
4965     if (ptr.IsByRef())
4966     {
4967         ptrVal = DereferenceByRef(ptr);
4968         if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4969         {
4970             Verify(false, "bad pointer");
4971             compUnsafeCastUsed = true;
4972         }
4973         else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4974         {
4975             Verify(false, "pointer not consistent with instr");
4976             compUnsafeCastUsed = true;
4977         }
4978     }
4979     else
4980     {
4981         Verify(false, "pointer not byref");
4982         compUnsafeCastUsed = true;
4983     }
4984
4985     return ptrVal;
4986 }
4987
4988 // Verify that the field is used properly.  'tiThis' is NULL for statics,
4989 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4990 // ld*flda or a st*fld.
4991 // 'enclosingClass' is given if we are accessing a field in some specific type.
4992
4993 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN*   pResolvedToken,
4994                               const CORINFO_FIELD_INFO& fieldInfo,
4995                               const typeInfo*           tiThis,
4996                               BOOL                      mutator,
4997                               BOOL                      allowPlainStructAsThis)
4998 {
4999     CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5000     unsigned             fieldFlags     = fieldInfo.fieldFlags;
5001     CORINFO_CLASS_HANDLE instanceClass =
5002         info.compClassHnd; // for statics, we imagine the instance is the current class.
5003
5004     bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5005     if (mutator)
5006     {
5007         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
5008         if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5009         {
5010             Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5011                        info.compIsStatic == isStaticField,
5012                    "bad use of initonly field (set or address taken)");
5013         }
5014     }
5015
5016     if (tiThis == nullptr)
5017     {
5018         Verify(isStaticField, "used static opcode with non-static field");
5019     }
5020     else
5021     {
5022         typeInfo tThis = *tiThis;
5023
5024         if (allowPlainStructAsThis && tThis.IsValueClass())
5025         {
5026             tThis.MakeByRef();
5027         }
5028
5029         // If it is null, we assume we can access it (since it will AV shortly).
5030         // If it is anything but a reference class, there is no hierarchy, so
5031         // again, we don't need the precise instance class to compute 'protected' access
5032         if (tiThis->IsType(TI_REF))
5033         {
5034             instanceClass = tiThis->GetClassHandleForObjRef();
5035         }
5036
5037         // Note that even if the field is static, we require that the this pointer
5038         // satisfy the same constraints as a non-static field.  This happens to
5039         // be simpler and seems reasonable
5040         typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5041         if (tiDeclaredThis.IsValueClass())
5042         {
5043             tiDeclaredThis.MakeByRef();
5044
5045             // we allow read-only tThis, on any field access (even stores!), because if the
5046             // class implementor wants to prohibit stores he should make the field private.
5047             // we do this by setting the read-only bit on the type we compare tThis to.
5048             tiDeclaredThis.SetIsReadonlyByRef();
5049         }
5050         else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5051         {
5052             // Any field access is legal on "uninitialized" this pointers.
5053             // The easiest way to implement this is to simply set the
5054             // initialized bit for the duration of the type check on the
5055             // field access only.  It does not change the state of the "this"
5056             // for the function as a whole. Note that the "tThis" is a copy
5057             // of the original "this" type (*tiThis) passed in.
5058             tThis.SetInitialisedObjRef();
5059         }
5060
5061         Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5062     }
5063
5064     // Presently the JIT does not check that we don't store or take the address of init-only fields
5065     // since we cannot guarantee their immutability and it is not a security issue.
5066
5067     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5068     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5069                    "field has unsatisfied class constraints");
5070     if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5071     {
5072         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5073                "Accessing protected method through wrong type.");
5074     }
5075 }
5076
5077 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5078 {
5079     if (tiOp1.IsNumberType())
5080     {
5081 #ifdef _TARGET_64BIT_
5082         Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5083 #else  // _TARGET_64BIT
5084         // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5085         // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5086         // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5087         Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5088 #endif // !_TARGET_64BIT_
5089     }
5090     else if (tiOp1.IsObjRef())
5091     {
5092         switch (opcode)
5093         {
5094             case CEE_BEQ_S:
5095             case CEE_BEQ:
5096             case CEE_BNE_UN_S:
5097             case CEE_BNE_UN:
5098             case CEE_CEQ:
5099             case CEE_CGT_UN:
5100                 break;
5101             default:
5102                 Verify(FALSE, "Cond not allowed on object types");
5103         }
5104         Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5105     }
5106     else if (tiOp1.IsByRef())
5107     {
5108         Verify(tiOp2.IsByRef(), "Cond type mismatch");
5109     }
5110     else
5111     {
5112         Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5113     }
5114 }
5115
5116 void Compiler::verVerifyThisPtrInitialised()
5117 {
5118     if (verTrackObjCtorInitState)
5119     {
5120         Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5121     }
5122 }
5123
5124 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5125 {
5126     // Either target == context, in which case we are calling an alternate .ctor,
5127     // or target is the immediate parent of context.
5128
5129     return ((target == context) || (target == info.compCompHnd->getParentType(context)));
5130 }
5131
5132 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr              thisPtr,
5133                                         CORINFO_RESOLVED_TOKEN* pResolvedToken,
5134                                         CORINFO_CALL_INFO*      pCallInfo)
5135 {
5136     if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5137     {
5138         NO_WAY("Virtual call to a function added via EnC is not supported");
5139     }
5140
5141     // CoreRT generic virtual method
5142     if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5143     {
5144         GenTreePtr runtimeMethodHandle = nullptr;
5145         if (pCallInfo->exactContextNeedsRuntimeLookup)
5146         {
5147             runtimeMethodHandle =
5148                 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5149         }
5150         else
5151         {
5152             runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5153         }
5154         return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5155                                    gtNewArgList(thisPtr, runtimeMethodHandle));
5156     }
5157
5158 #ifdef FEATURE_READYTORUN_COMPILER
5159     if (opts.IsReadyToRun())
5160     {
5161         if (!pCallInfo->exactContextNeedsRuntimeLookup)
5162         {
5163             GenTreeCall* call =
5164                 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5165
5166             call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5167
5168             return call;
5169         }
5170
5171         // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5172         if (IsTargetAbi(CORINFO_CORERT_ABI))
5173         {
5174             GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5175
5176             return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5177                                              gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5178         }
5179     }
5180 #endif
5181
5182     // Get the exact descriptor for the static callsite
5183     GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5184     if (exactTypeDesc == nullptr)
5185     { // compDonotInline()
5186         return nullptr;
5187     }
5188
5189     GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5190     if (exactMethodDesc == nullptr)
5191     { // compDonotInline()
5192         return nullptr;
5193     }
5194
5195     GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5196
5197     helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5198
5199     helpArgs = gtNewListNode(thisPtr, helpArgs);
5200
5201     // Call helper function.  This gets the target address of the final destination callsite.
5202
5203     return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5204 }
5205
5206 //------------------------------------------------------------------------
5207 // impImportAndPushBox: build and import a value-type box
5208 //
5209 // Arguments:
5210 //   pResolvedToken - resolved token from the box operation
5211 //
5212 // Return Value:
5213 //   None.
5214 //
5215 // Side Effects:
5216 //   The value to be boxed is popped from the stack, and a tree for
5217 //   the boxed value is pushed. This method may create upstream
5218 //   statements, spill side effecting trees, and create new temps.
5219 //
5220 //   If importing an inlinee, we may also discover the inline must
5221 //   fail. If so there is no new value pushed on the stack. Callers
5222 //   should use CompDoNotInline after calling this method to see if
5223 //   ongoing importation should be aborted.
5224 //
5225 // Notes:
5226 //   Boxing of ref classes results in the same value as the value on
5227 //   the top of the stack, so is handled inline in impImportBlockCode
5228 //   for the CEE_BOX case. Only value or primitive type boxes make it
5229 //   here.
5230 //
5231 //   Boxing for nullable types is done via a helper call; boxing
5232 //   of other value types is expanded inline or handled via helper
5233 //   call, depending on the jit's codegen mode.
5234 //
5235 //   When the jit is operating in size and time constrained modes,
5236 //   using a helper call here can save jit time and code size. But it
5237 //   also may inhibit cleanup optimizations that could have also had a
5238 //   even greater benefit effect on code size and jit time. An optimal
5239 //   strategy may need to peek ahead and see if it is easy to tell how
5240 //   the box is being used. For now, we defer.
5241
5242 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5243 {
5244     // Spill any special side effects
5245     impSpillSpecialSideEff();
5246
5247     // Get the expression to box from the stack.
5248     GenTreePtr           op1       = nullptr;
5249     GenTreePtr           op2       = nullptr;
5250     StackEntry           se        = impPopStack();
5251     CORINFO_CLASS_HANDLE operCls   = se.seTypeInfo.GetClassHandle();
5252     GenTreePtr           exprToBox = se.val;
5253
5254     // Look at what helper we should use.
5255     CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5256
5257     // Determine what expansion to prefer.
5258     //
5259     // In size/time/debuggable constrained modes, the helper call
5260     // expansion for box is generally smaller and is preferred, unless
5261     // the value to box is a struct that comes from a call. In that
5262     // case the call can construct its return value directly into the
5263     // box payload, saving possibly some up-front zeroing.
5264     //
5265     // Currently primitive type boxes always get inline expanded. We may
5266     // want to do the same for small structs if they don't come from
5267     // calls and don't have GC pointers, since explicitly copying such
5268     // structs is cheap.
5269     JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
5270     bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
5271     bool optForSize      = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
5272     bool expandInline    = canExpandInline && !optForSize;
5273
5274     if (expandInline)
5275     {
5276         JITDUMP(" inline allocate/copy sequence\n");
5277
5278         // we are doing 'normal' boxing.  This means that we can inline the box operation
5279         // Box(expr) gets morphed into
5280         // temp = new(clsHnd)
5281         // cpobj(temp+4, expr, clsHnd)
5282         // push temp
5283         // The code paths differ slightly below for structs and primitives because
5284         // "cpobj" differs in these cases.  In one case you get
5285         //    impAssignStructPtr(temp+4, expr, clsHnd)
5286         // and the other you get
5287         //    *(temp+4) = expr
5288
5289         if (opts.MinOpts() || opts.compDbgCode)
5290         {
5291             // For minopts/debug code, try and minimize the total number
5292             // of box temps by reusing an existing temp when possible.
5293             if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5294             {
5295                 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
5296             }
5297         }
5298         else
5299         {
5300             // When optimizing, use a new temp for each box operation
5301             // since we then know the exact class of the box temp.
5302             impBoxTemp                  = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
5303             lvaTable[impBoxTemp].lvType = TYP_REF;
5304             const bool isExact          = true;
5305             lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
5306         }
5307
5308         // needs to stay in use until this box expression is appended to
5309         // some other node.  We approximate this by keeping it alive until
5310         // the opcode stack becomes empty
5311         impBoxTempInUse = true;
5312
5313 #ifdef FEATURE_READYTORUN_COMPILER
5314         bool usingReadyToRunHelper = false;
5315
5316         if (opts.IsReadyToRun())
5317         {
5318             op1                   = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5319             usingReadyToRunHelper = (op1 != nullptr);
5320         }
5321
5322         if (!usingReadyToRunHelper)
5323 #endif
5324         {
5325             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5326             // and the newfast call with a single call to a dynamic R2R cell that will:
5327             //      1) Load the context
5328             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5329             //      3) Allocate and return the new object for boxing
5330             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5331
5332             // Ensure that the value class is restored
5333             op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5334             if (op2 == nullptr)
5335             {
5336                 // We must be backing out of an inline.
5337                 assert(compDonotInline());
5338                 return;
5339             }
5340
5341             op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF,
5342                                       gtNewArgList(op2));
5343         }
5344
5345         /* Remember that this basic block contains 'new' of an object */
5346         compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5347
5348         GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5349
5350         GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5351
5352         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5353         op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5354         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5355
5356         if (varTypeIsStruct(exprToBox))
5357         {
5358             assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5359             op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5360         }
5361         else
5362         {
5363             var_types lclTyp = exprToBox->TypeGet();
5364             if (lclTyp == TYP_BYREF)
5365             {
5366                 lclTyp = TYP_I_IMPL;
5367             }
5368             CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5369             if (impIsPrimitive(jitType))
5370             {
5371                 lclTyp = JITtype2varType(jitType);
5372             }
5373             assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5374                    varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5375             var_types srcTyp = exprToBox->TypeGet();
5376             var_types dstTyp = lclTyp;
5377
5378             if (srcTyp != dstTyp)
5379             {
5380                 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5381                        (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5382                 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5383             }
5384             op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5385         }
5386
5387         // Spill eval stack to flush out any pending side effects.
5388         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
5389
5390         // Set up this copy as a second assignment.
5391         GenTreePtr copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5392
5393         op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5394
5395         // Record that this is a "box" node and keep track of the matching parts.
5396         op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
5397
5398         // If it is a value class, mark the "box" node.  We can use this information
5399         // to optimise several cases:
5400         //    "box(x) == null" --> false
5401         //    "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5402         //    "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5403
5404         op1->gtFlags |= GTF_BOX_VALUE;
5405         assert(op1->IsBoxedValue());
5406         assert(asg->gtOper == GT_ASG);
5407     }
5408     else
5409     {
5410         // Don't optimize, just call the helper and be done with it.
5411         JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
5412         assert(operCls != nullptr);
5413
5414         // Ensure that the value class is restored
5415         op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5416         if (op2 == nullptr)
5417         {
5418             // We must be backing out of an inline.
5419             assert(compDonotInline());
5420             return;
5421         }
5422
5423         GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5424         op1                  = gtNewHelperCallNode(boxHelper, TYP_REF, args);
5425     }
5426
5427     /* Push the result back on the stack, */
5428     /* even if clsHnd is a value class we want the TI_REF */
5429     typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5430     impPushOnStack(op1, tiRetVal);
5431 }
5432
5433 //------------------------------------------------------------------------
5434 // impImportNewObjArray: Build and import `new` of a multi-dimensional array
5435 //
5436 // Arguments:
5437 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5438 //                     by a call to CEEInfo::resolveToken().
5439 //    pCallInfo - The CORINFO_CALL_INFO that has been initialized
5440 //                by a call to CEEInfo::getCallInfo().
5441 //
5442 // Assumptions:
5443 //    The multi-dimensional array constructor arguments (array dimensions) are
5444 //    pushed on the IL stack on entry to this method.
5445 //
5446 // Notes:
5447 //    Multi-dimensional array constructors are imported as calls to a JIT
5448 //    helper, not as regular calls.
5449
5450 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5451 {
5452     GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5453     if (classHandle == nullptr)
5454     { // compDonotInline()
5455         return;
5456     }
5457
5458     assert(pCallInfo->sig.numArgs);
5459
5460     GenTreePtr      node;
5461     GenTreeArgList* args;
5462
5463     //
5464     // There are two different JIT helpers that can be used to allocate
5465     // multi-dimensional arrays:
5466     //
5467     // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5468     //      This variant is deprecated. It should be eventually removed.
5469     //
5470     // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5471     //      pointer to block of int32s. This variant is more portable.
5472     //
5473     // The non-varargs helper is enabled only for CoreRT for now. Enabling it
5474     //      unconditionally would require a ReadyToRun version bump.
5475     //
5476     CLANG_FORMAT_COMMENT_ANCHOR;
5477
5478     if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5479     {
5480         LclVarDsc* newObjArrayArgsVar;
5481
5482         // Reuse the temp used to pass the array dimensions to avoid bloating
5483         // the stack frame in case there are multiple calls to multi-dim array
5484         // constructors within a single method.
5485         if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5486         {
5487             lvaNewObjArrayArgs                       = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5488             lvaTable[lvaNewObjArrayArgs].lvType      = TYP_BLK;
5489             lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5490         }
5491
5492         // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5493         // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5494         lvaTable[lvaNewObjArrayArgs].lvExactSize =
5495             max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5496
5497         // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5498         // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5499         // to one allocation at a time.
5500         impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5501
5502         //
5503         // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5504         //  - Array class handle
5505         //  - Number of dimension arguments
5506         //  - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5507         //
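        // For example (hypothetical C#), "new int[2, 3]" is imported roughly as
        //     CORINFO_HELP_NEW_MDARR_NONVARARG(handle-of-int[,], 2, &lvaNewObjArrayArgs)
        // with the dimensions 2 and 3 stored into the temp before the call.
        //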
5508
5509         node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5510         node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5511
5512         // Pop the dimension arguments from the stack one at a time and store them
5513         // into the lvaNewObjArrayArgs temp.
5514         for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5515         {
5516             GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5517
5518             GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5519             dest            = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5520             dest            = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5521                                  new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5522             dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5523
5524             node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5525         }
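
        // For a (hypothetical) rank-2 array, 'node' now has roughly the shape
        //     COMMA(ASG(IND(args + 0), dim0), COMMA(ASG(IND(args + 4), dim1), ADDR(args)))
        // so the address of the temp is what ultimately flows into the helper call below.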
5526
5527         args = gtNewArgList(node);
5528
5529         // pass number of arguments to the helper
5530         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5531
5532         args = gtNewListNode(classHandle, args);
5533
5534         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
5535     }
5536     else
5537     {
5538         //
5539         // The varargs helper needs the type and method handles as the last
5540         // and second-to-last params (this is a cdecl call, so args will be
5541         // pushed in reverse order on the CPU stack)
5542         //
5543
5544         args = gtNewArgList(classHandle);
5545
5546         // pass number of arguments to the helper
5547         args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5548
5549         unsigned argFlags = 0;
5550         args              = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
5551
5552         node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
5553
5554         // varargs, so we pop the arguments
5555         node->gtFlags |= GTF_CALL_POP_ARGS;
5556
5557 #ifdef DEBUG
5558         // At the present time we don't track Caller pop arguments
5559         // that have GC references in them
5560         for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5561         {
5562             assert(temp->Current()->gtType != TYP_REF);
5563         }
5564 #endif
5565     }
5566
5567     node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5568     node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5569
5570     // Remember that this basic block contains 'new' of a md array
5571     compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5572
5573     impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5574 }
5575
5576 GenTreePtr Compiler::impTransformThis(GenTreePtr              thisPtr,
5577                                       CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5578                                       CORINFO_THIS_TRANSFORM  transform)
5579 {
5580     switch (transform)
5581     {
5582         case CORINFO_DEREF_THIS:
5583         {
5584             GenTreePtr obj = thisPtr;
5585
5586             // This does a LDIND on the obj, which should be a byref pointing to a ref
5587             impBashVarAddrsToI(obj);
5588             assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5589             CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5590
5591             obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5592             // The ldind could point anywhere, for example a boxed class static int
5593             obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5594
5595             return obj;
5596         }
5597
5598         case CORINFO_BOX_THIS:
5599         {
5600             // Constraint calls where there might be no
5601             // unboxed entry point require us to implement the call via a helper.
5602             // These only occur when a possible target of the call
5603             // may have inherited an implementation of an interface
5604             // method from System.Object or System.ValueType.  The EE does not provide us with
5605             // "unboxed" versions of these methods.
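            //
            // For example (a hypothetical C# fragment):
            //
            //     struct S { }                          // no ToString override
            //     static string F<T>(T t) => t.ToString();
            //
            // When T is S, the "constrained. S / callvirt Object::ToString" sequence has
            // no unboxed entry point, so the receiver must be boxed before the call.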
5606
5607             GenTreePtr obj = thisPtr;
5608
5609             assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5610             obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5611             obj->gtFlags |= GTF_EXCEPT;
5612
5613             CorInfoType jitTyp  = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5614             var_types   objType = JITtype2varType(jitTyp);
5615             if (impIsPrimitive(jitTyp))
5616             {
5617                 if (obj->OperIsBlk())
5618                 {
5619                     obj->ChangeOperUnchecked(GT_IND);
5620
5621                     // Obj could point anywhere, for example a boxed class static int
5622                     obj->gtFlags |= GTF_IND_TGTANYWHERE;
5623                     obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5624                 }
5625
5626                 obj->gtType = JITtype2varType(jitTyp);
5627                 assert(varTypeIsArithmetic(obj->gtType));
5628             }
5629
5630             // This pushes on the dereferenced byref
5631             // This is then used immediately to box.
5632             impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5633
5634             // This pops off the byref-to-a-value-type remaining on the stack and
5635             // replaces it with a boxed object.
5636             // This is then used as the object to the virtual call immediately below.
5637             impImportAndPushBox(pConstrainedResolvedToken);
5638             if (compDonotInline())
5639             {
5640                 return nullptr;
5641             }
5642
5643             obj = impPopStack().val;
5644             return obj;
5645         }
5646         case CORINFO_NO_THIS_TRANSFORM:
5647         default:
5648             return thisPtr;
5649     }
5650 }
5651
5652 //------------------------------------------------------------------------
5653 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5654 //
5655 // Return Value:
5656 //    true if PInvoke inlining should be enabled in the current method, false otherwise
5657 //
5658 // Notes:
5659 //    Checks a number of ambient conditions where we could pinvoke but choose not to
5660
5661 bool Compiler::impCanPInvokeInline()
5662 {
5663     return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5664            (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5665         ;
5666 }
5667
5668 //------------------------------------------------------------------------
5669 // impCanPInvokeInlineCallSite: basic legality checks using information
5670 // from a call to see if the call qualifies as an inline pinvoke.
5671 //
5672 // Arguments:
5673 //    block      - block containing the call, or for inlinees, block
5674 //                 containing the call being inlined
5675 //
5676 // Return Value:
5677 //    true if this call can legally qualify as an inline pinvoke, false otherwise
5678 //
5679 // Notes:
5680 //    For runtimes that support exception handling interop there are
5681 //    restrictions on using inline pinvoke in handler regions.
5682 //
5683 //    * We have to disable pinvoke inlining inside of filters because
5684 //    in case the main execution (i.e. in the try block) is inside
5685 //    unmanaged code, we cannot reuse the inlined stub (we still need
5686 //    the original state until we are in the catch handler)
5687 //
5688 //    * We disable pinvoke inlining inside handlers since the GSCookie
5689 //    is in the inlined Frame (see
5690 //    CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5691 //    this would not protect framelets/return-address of handlers.
5692 //
5693 //    These restrictions are currently also in place for CoreCLR but
5694 //    can be relaxed when coreclr/#8459 is addressed.
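//
//    For example (hypothetical): a [DllImport] call made from inside a catch or
//    finally block takes the non-inlined path, because blocks in handler regions
//    have block->hasHndIndex() set and are rejected below.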
5695
5696 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5697 {
5698     if (block->hasHndIndex())
5699     {
5700         return false;
5701     }
5702
5703     // The remaining limitations do not apply to CoreRT
5704     if (IsTargetAbi(CORINFO_CORERT_ABI))
5705     {
5706         return true;
5707     }
5708
5709 #ifdef _TARGET_AMD64_
5710     // On x64, we disable pinvoke inlining inside of try regions.
5711     // Here is the comment from JIT64 explaining why:
5712     //
5713     //   [VSWhidbey: 611015] - because the jitted code links in the
5714     //   Frame (instead of the stub) we rely on the Frame not being
5715     //   'active' until inside the stub.  This normally happens by the
5716     //   stub setting the return address pointer in the Frame object
5717     //   inside the stub.  On a normal return, the return address
5718     //   pointer is zeroed out so the Frame can be safely re-used, but
5719     //   if an exception occurs, nobody zeros out the return address
5720     //   pointer.  Thus if we re-used the Frame object, it would go
5721     //   'active' as soon as we link it into the Frame chain.
5722     //
5723     //   Technically we only need to disable PInvoke inlining if we're
5724     //   in a handler or if we're in a try body with a catch or
5725     //   filter/except where other non-handler code in this method
5726     //   might run and try to re-use the dirty Frame object.
5727     //
5728     //   A desktop test case where this seems to matter is
5729     //   jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5730     if (block->hasTryIndex())
5731     {
5732         return false;
5733     }
5734 #endif // _TARGET_AMD64_
5735
5736     return true;
5737 }
5738
5739 //------------------------------------------------------------------------
5740 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
5741 // whether it can be expressed as an inline pinvoke.
5742 //
5743 // Arguments:
5744 //    call       - tree for the call
5745 //    methHnd    - handle for the method being called (may be null)
5746 //    sig        - signature of the method being called
5747 //    mflags     - method flags for the method being called
5748 //    block      - block containing the call, or for inlinees, block
5749 //                 containing the call being inlined
5750 //
5751 // Notes:
5752 //   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5753 //
5754 //   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5755 //   call passes a combination of legality and profitability checks.
5756 //
5757 //   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5758
5759 void Compiler::impCheckForPInvokeCall(
5760     GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5761 {
5762     CorInfoUnmanagedCallConv unmanagedCallConv;
5763
5764     // If VM flagged it as Pinvoke, flag the call node accordingly
5765     if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5766     {
5767         call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5768     }
5769
5770     if (methHnd)
5771     {
5772         if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5773         {
5774             return;
5775         }
5776
5777         unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5778     }
5779     else
5780     {
5781         CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5782         if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5783         {
5784             // Used by the IL Stubs.
5785             callConv = CORINFO_CALLCONV_C;
5786         }
5787         static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5788         static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5789         static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5790         unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5791
5792         assert(!call->gtCallCookie);
5793     }
5794
5795     if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5796         unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5797     {
5798         return;
5799     }
5800     optNativeCallCount++;
5801
5802     if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5803     {
5804         // PInvoke CALLI in IL stubs must be inlined
5805     }
5806     else
5807     {
5808         // Check legality
5809         if (!impCanPInvokeInlineCallSite(block))
5810         {
5811             return;
5812         }
5813
5814         // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
5815         // profitability checks
5816         if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5817         {
5818             if (!impCanPInvokeInline())
5819             {
5820                 return;
5821             }
5822
5823             // Size-speed tradeoff: don't use inline pinvoke at rarely
5824             // executed call sites.  The non-inline version is more
5825             // compact.
5826             if (block->isRunRarely())
5827             {
5828                 return;
5829             }
5830         }
5831
5832         // The expensive check should be last
5833         if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5834         {
5835             return;
5836         }
5837     }
5838
5839     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5840
5841     call->gtFlags |= GTF_CALL_UNMANAGED;
5842     info.compCallUnmanaged++;
5843
5844     // AMD64 convention is same for native and managed
5845     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5846     {
5847         call->gtFlags |= GTF_CALL_POP_ARGS;
5848     }
5849
5850     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5851     {
5852         call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5853     }
5854 }
5855
5856 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5857 {
5858     var_types callRetTyp = JITtype2varType(sig->retType);
5859
5860     /* The function pointer is on top of the stack - It may be a
5861      * complex expression. As it is evaluated after the args,
5862      * it may cause registered args to be spilled. Simply spill it.
5863      */
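
    // For example (hypothetical IL at a calli site):
    //     ldc.i4.1
    //     ldc.i4.2
    //     ldftn  int32 C::Add(int32, int32)
    //     calli  int32(int32, int32)
    // Here the ldftn result sits above both arguments on the IL stack.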
5864
5865     // Ignore this trivial case.
5866     if (impStackTop().val->gtOper != GT_LCL_VAR)
5867     {
5868         impSpillStackEntry(verCurrentState.esStackDepth - 1,
5869                            BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5870     }
5871
5872     /* Get the function pointer */
5873
5874     GenTreePtr fptr = impPopStack().val;
5875
5876     // The function pointer is typically sized to match the target pointer size.
5877     // However, stubgen IL optimization can change LDC.I8 to LDC.I4
5878     // See ILCodeStream::LowerOpcode
5879     assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
5880
5881 #ifdef DEBUG
5882     // This temporary must never be converted to a double in stress mode,
5883     // because that can introduce a call to the cast helper after the
5884     // arguments have already been evaluated.
5885
5886     if (fptr->OperGet() == GT_LCL_VAR)
5887     {
5888         lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5889     }
5890 #endif
5891
5892     /* Create the call node */
5893
5894     GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5895
5896     call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5897
5898     return call;
5899 }
5900
5901 /*****************************************************************************/
5902
5903 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5904 {
5905     assert(call->gtFlags & GTF_CALL_UNMANAGED);
5906
5907     /* Since we push the arguments in reverse order (i.e. right -> left)
5908      * spill any side effects from the stack
5909      *
5910      * OBS: If there is only one side effect we do not need to spill it,
5911      *      thus we have to spill all side effects except the last one
5912      */
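
    // For example (hypothetical): for an unmanaged call F(f(), g()) where both f()
    // and g() have side effects, f() is spilled to a temp so its effects still occur
    // before g()'s; g(), being the last side effect on the stack, can stay in place.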
5913
5914     unsigned lastLevelWithSideEffects = UINT_MAX;
5915
5916     unsigned argsToReverse = sig->numArgs;
5917
5918     // For "thiscall", the first argument goes in a register. Since its
5919     // order does not need to be changed, we do not need to spill it
5920
5921     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5922     {
5923         assert(argsToReverse);
5924         argsToReverse--;
5925     }
5926
5927 #ifndef _TARGET_X86_
5928     // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5929     argsToReverse = 0;
5930 #endif
5931
5932     for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5933     {
5934         if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5935         {
5936             assert(lastLevelWithSideEffects == UINT_MAX);
5937
5938             impSpillStackEntry(level,
5939                                BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5940         }
5941         else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5942         {
5943             if (lastLevelWithSideEffects != UINT_MAX)
5944             {
5945                 /* We had a previous side effect - must spill it */
5946                 impSpillStackEntry(lastLevelWithSideEffects,
5947                                    BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5948
5949                 /* Record the level for the current side effect in case we will spill it */
5950                 lastLevelWithSideEffects = level;
5951             }
5952             else
5953             {
5954                 /* This is the first side effect encountered - record its level */
5955
5956                 lastLevelWithSideEffects = level;
5957             }
5958         }
5959     }
5960
5961     /* The argument list is now "clean" - no out-of-order side effects
5962      * Pop the argument list in reverse order */
5963
5964     GenTreePtr args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
5965
5966     if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5967     {
5968         GenTreePtr thisPtr = args->Current();
5969         impBashVarAddrsToI(thisPtr);
5970         assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5971     }
5972
5973     if (args)
5974     {
5975         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5976     }
5977 }
5978
5979 //------------------------------------------------------------------------
5980 // impInitClass: Build a node to initialize the class before accessing the
5981 //               field if necessary
5982 //
5983 // Arguments:
5984 //    pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5985 //                     by a call to CEEInfo::resolveToken().
5986 //
5987 // Return Value: If needed, a pointer to the node that will perform the class
5988 //               initializtion.  Otherwise, nullptr.
5989 //               initialization.  Otherwise, nullptr.
5990
5991 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5992 {
5993     CorInfoInitClassResult initClassResult =
5994         info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5995
5996     if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5997     {
5998         return nullptr;
5999     }
6000     BOOL runtimeLookup;
6001
6002     GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6003
6004     if (node == nullptr)
6005     {
6006         assert(compDonotInline());
6007         return nullptr;
6008     }
6009
6010     if (runtimeLookup)
6011     {
6012         node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6013     }
6014     else
6015     {
6016         // Call the shared non-gc static helper, as it's the fastest
6017         node = fgGetSharedCCtor(pResolvedToken->hClass);
6018     }
6019
6020     return node;
6021 }
6022
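//------------------------------------------------------------------------
// impImportStaticReadOnlyField: materialize the current value of a static
// read-only field as a constant node.
//
// For example (hypothetical): a "static readonly int s_answer = 42;" whose class
// initializer has already run can be imported as gtNewIconNode(42) rather than as
// a load from fldAddr.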
6023 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6024 {
6025     GenTreePtr op1 = nullptr;
6026
6027     switch (lclTyp)
6028     {
6029         int     ival;
6030         __int64 lval;
6031         double  dval;
6032
6033         case TYP_BOOL:
6034             ival = *((bool*)fldAddr);
6035             goto IVAL_COMMON;
6036
6037         case TYP_BYTE:
6038             ival = *((signed char*)fldAddr);
6039             goto IVAL_COMMON;
6040
6041         case TYP_UBYTE:
6042             ival = *((unsigned char*)fldAddr);
6043             goto IVAL_COMMON;
6044
6045         case TYP_SHORT:
6046             ival = *((short*)fldAddr);
6047             goto IVAL_COMMON;
6048
6049         case TYP_CHAR:
6050         case TYP_USHORT:
6051             ival = *((unsigned short*)fldAddr);
6052             goto IVAL_COMMON;
6053
6054         case TYP_UINT:
6055         case TYP_INT:
6056             ival = *((int*)fldAddr);
6057         IVAL_COMMON:
6058             op1 = gtNewIconNode(ival);
6059             break;
6060
6061         case TYP_LONG:
6062         case TYP_ULONG:
6063             lval = *((__int64*)fldAddr);
6064             op1  = gtNewLconNode(lval);
6065             break;
6066
6067         case TYP_FLOAT:
6068             dval = *((float*)fldAddr);
6069             op1  = gtNewDconNode(dval);
6070 #if !FEATURE_X87_DOUBLES
6071             // X87 stack doesn't differentiate between float/double
6072             // so R4 is treated as R8, but everybody else does
6073             op1->gtType = TYP_FLOAT;
6074 #endif // FEATURE_X87_DOUBLES
6075             break;
6076
6077         case TYP_DOUBLE:
6078             dval = *((double*)fldAddr);
6079             op1  = gtNewDconNode(dval);
6080             break;
6081
6082         default:
6083             assert(!"Unexpected lclTyp");
6084             break;
6085     }
6086
6087     return op1;
6088 }
6089
6090 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6091                                                 CORINFO_ACCESS_FLAGS    access,
6092                                                 CORINFO_FIELD_INFO*     pFieldInfo,
6093                                                 var_types               lclTyp)
6094 {
6095     GenTreePtr op1;
6096
6097     switch (pFieldInfo->fieldAccessor)
6098     {
6099         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6100         {
6101             assert(!compIsForInlining());
6102
6103             // We first call a special helper to get the statics base pointer
6104             op1 = impParentClassTokenToHandle(pResolvedToken);
6105
6106             // compIsForInlining() is false, so we should never get NULL here
6107             assert(op1 != nullptr);
6108
6109             var_types type = TYP_BYREF;
6110
6111             switch (pFieldInfo->helper)
6112             {
6113                 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6114                     type = TYP_I_IMPL;
6115                     break;
6116                 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6117                 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6118                 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6119                     break;
6120                 default:
6121                     assert(!"unknown generic statics helper");
6122                     break;
6123             }
6124
6125             op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6126
6127             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6128             op1              = gtNewOperNode(GT_ADD, type, op1,
6129                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6130         }
6131         break;
6132
6133         case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6134         {
6135 #ifdef FEATURE_READYTORUN_COMPILER
6136             if (opts.IsReadyToRun())
6137             {
6138                 unsigned callFlags = 0;
6139
6140                 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6141                 {
6142                     callFlags |= GTF_CALL_HOISTABLE;
6143                 }
6144
6145                 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6146                 op1->gtFlags |= callFlags;
6147
6148                 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6149             }
6150             else
6151 #endif
6152             {
6153                 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6154             }
6155
6156             {
6157                 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6158                 op1              = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6159                                     new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6160             }
6161             break;
6162         }
6163
6164         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6165         {
6166 #ifdef FEATURE_READYTORUN_COMPILER
6167             noway_assert(opts.IsReadyToRun());
6168             CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6169             assert(kind.needsRuntimeLookup);
6170
6171             GenTreePtr      ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6172             GenTreeArgList* args    = gtNewArgList(ctxTree);
6173
6174             unsigned callFlags = 0;
6175
6176             if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6177             {
6178                 callFlags |= GTF_CALL_HOISTABLE;
6179             }
6180             var_types type = TYP_BYREF;
6181             op1            = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6182             op1->gtFlags |= callFlags;
6183
6184             op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6185             FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6186             op1              = gtNewOperNode(GT_ADD, type, op1,
6187                                 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6188 #else
6189             unreached();
6190 #endif // FEATURE_READYTORUN_COMPILER
6191         }
6192         break;
6193
6194         default:
6195         {
6196             if (!(access & CORINFO_ACCESS_ADDRESS))
6197             {
6198                 // In future, it may be better to just create the right tree here instead of folding it later.
6199                 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6200
6201                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6202                 {
6203                     op1->gtFlags |= GTF_FLD_INITCLASS;
6204                 }
6205
6206                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6207                 {
6208                     op1->gtType = TYP_REF; // points at boxed object
6209                     FieldSeqNode* firstElemFldSeq =
6210                         GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6211                     op1 =
6212                         gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6213                                       new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
6214
6215                     if (varTypeIsStruct(lclTyp))
6216                     {
6217                         // Constructor adds GTF_GLOB_REF.  Note that this is *not* GTF_EXCEPT.
6218                         op1 = gtNewObjNode(pFieldInfo->structType, op1);
6219                     }
6220                     else
6221                     {
6222                         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6223                         op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6224                     }
6225                 }
6226
6227                 return op1;
6228             }
6229             else
6230             {
6231                 void** pFldAddr = nullptr;
6232                 void*  fldAddr  = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6233
6234                 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6235
6236                 /* Create the data member node */
6237                 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
6238                                           fldSeq);
6239
6240                 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6241                 {
6242                     op1->gtFlags |= GTF_ICON_INITCLASS;
6243                 }
6244
6245                 if (pFldAddr != nullptr)
6246                 {
6247                     // There are two cases here, either the static is RVA based,
6248                     // in which case the type of the FIELD node is not a GC type
6249                     // and the handle to the RVA is a TYP_I_IMPL.  Or the FIELD node is
6250                     // a GC type and the handle to it is a TYP_BYREF in the GC heap
6251                     // because handles to statics now go into the large object heap
6252
6253                     var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6254                     op1                 = gtNewOperNode(GT_IND, handleTyp, op1);
6255                     op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6256                 }
6257             }
6258             break;
6259         }
6260     }
6261
6262     if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6263     {
6264         op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6265
6266         FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6267
6268         op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6269                             new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6270     }
6271
6272     if (!(access & CORINFO_ACCESS_ADDRESS))
6273     {
6274         op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6275         op1->gtFlags |= GTF_GLOB_REF;
6276     }
6277
6278     return op1;
6279 }
6280
6281 // In general, try to call this before most of the verification work.  Most people expect the access
6282 // exceptions before the verification exceptions.  If you do this afterwards, that usually doesn't happen.  It turns
6283 // out that if you can't access something, we also think that you're unverifiable for other reasons.
6284 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6285 {
6286     if (result != CORINFO_ACCESS_ALLOWED)
6287     {
6288         impHandleAccessAllowedInternal(result, helperCall);
6289     }
6290 }
6291
6292 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6293 {
6294     switch (result)
6295     {
6296         case CORINFO_ACCESS_ALLOWED:
6297             break;
6298         case CORINFO_ACCESS_ILLEGAL:
6299             // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6300             // method is verifiable.  Otherwise, delay the exception to runtime.
6301             if (compIsForImportOnly())
6302             {
6303                 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6304             }
6305             else
6306             {
6307                 impInsertHelperCall(helperCall);
6308             }
6309             break;
6310         case CORINFO_ACCESS_RUNTIME_CHECK:
6311             impInsertHelperCall(helperCall);
6312             break;
6313     }
6314 }
6315
6316 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6317 {
6318     // Construct the argument list
6319     GenTreeArgList* args = nullptr;
6320     assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6321     for (unsigned i = helperInfo->numArgs; i > 0; --i)
6322     {
6323         const CORINFO_HELPER_ARG& helperArg  = helperInfo->args[i - 1];
6324         GenTreePtr                currentArg = nullptr;
6325         switch (helperArg.argType)
6326         {
6327             case CORINFO_HELPER_ARG_TYPE_Field:
6328                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6329                     info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6330                 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6331                 break;
6332             case CORINFO_HELPER_ARG_TYPE_Method:
6333                 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6334                 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6335                 break;
6336             case CORINFO_HELPER_ARG_TYPE_Class:
6337                 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6338                 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6339                 break;
6340             case CORINFO_HELPER_ARG_TYPE_Module:
6341                 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6342                 break;
6343             case CORINFO_HELPER_ARG_TYPE_Const:
6344                 currentArg = gtNewIconNode(helperArg.constant);
6345                 break;
6346             default:
6347                 NO_WAY("Illegal helper arg type");
6348         }
6349         args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6350     }
6351
6352     /* TODO-Review:
6353      * Mark as CSE'able, and hoistable.  Consider marking hoistable unless you're in the inlinee.
6354      * Also, consider sticking this in the first basic block.
6355      */
6356     GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
6357     impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6358 }
6359
6360 // Checks whether the return types of caller and callee are compatible
6361 // so that callee can be tail called. Note that here we don't check
6362 // compatibility in IL Verifier sense, but on the lines of return type
6363 // sizes are equal and get returned in the same return register.
6364 bool Compiler::impTailCallRetTypeCompatible(var_types            callerRetType,
6365                                             CORINFO_CLASS_HANDLE callerRetTypeClass,
6366                                             var_types            calleeRetType,
6367                                             CORINFO_CLASS_HANDLE calleeRetTypeClass)
6368 {
6369     // Note that we cannot relax this condition with genActualType() as the
6370     // calling convention dictates that the caller of a function with a small
6371     // typed return value is responsible for normalizing the return val.
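    //
    // For example (hypothetical): a caller returning TYP_INT cannot tail call a
    // callee returning TYP_SHORT, since the caller would be expected to widen the
    // result and a tail call would skip that normalization.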
6372     if (callerRetType == calleeRetType)
6373     {
6374         return true;
6375     }
6376
6377     // If the class handles are the same and not null, the return types are compatible.
6378     if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
6379     {
6380         return true;
6381     }
6382
6383 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6384     // Jit64 compat:
6385     if (callerRetType == TYP_VOID)
6386     {
6387         // This needs to be allowed to support the following IL pattern that Jit64 allows:
6388         //     tail.call
6389         //     pop
6390         //     ret
6391         //
6392         // Note that the above IL pattern is not valid as per IL verification rules.
6393         // Therefore, only full trust code can take advantage of this pattern.
6394         return true;
6395     }
6396
6397     // These checks return true if the return value type sizes are the same and
6398     // get returned in the same return register i.e. caller doesn't need to normalize
6399     // return value. Some of the tail calls permitted by below checks would have
6400     // been rejected by IL Verifier before we reached here.  Therefore, only full
6401     // trust code can make those tail calls.
6402     unsigned callerRetTypeSize = 0;
6403     unsigned calleeRetTypeSize = 0;
6404     bool     isCallerRetTypMBEnreg =
6405         VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6406     bool isCalleeRetTypMBEnreg =
6407         VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6408
6409     if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6410     {
6411         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6412     }
6413 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6414
6415     return false;
6416 }
6417
6418 // For prefixFlags
6419 enum
6420 {
6421     PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6422     PREFIX_TAILCALL_IMPLICIT =
6423         0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6424     PREFIX_TAILCALL    = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6425     PREFIX_VOLATILE    = 0x00000100,
6426     PREFIX_UNALIGNED   = 0x00001000,
6427     PREFIX_CONSTRAINED = 0x00010000,
6428     PREFIX_READONLY    = 0x00100000
6429 };
6430
6431 /********************************************************************************
6432  *
6433  * Returns true if the current opcode and the opcodes following it correspond
6434  * to a supported tail call IL pattern.
6435  *
6436  */
6437 bool Compiler::impIsTailCallILPattern(bool        tailPrefixed,
6438                                       OPCODE      curOpcode,
6439                                       const BYTE* codeAddrOfNextOpcode,
6440                                       const BYTE* codeEnd,
6441                                       bool        isRecursive,
6442                                       bool*       isCallPopAndRet /* = nullptr */)
6443 {
6444     // Bail out if the current opcode is not a call.
6445     if (!impOpcodeIsCallOpcode(curOpcode))
6446     {
6447         return false;
6448     }
6449
6450 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6451     // If shared ret tail opt is not enabled, we will enable
6452     // it for recursive methods.
6453     if (isRecursive)
6454 #endif
6455     {
6456         // we can actually handle the case where the ret is in a fallthrough block, as long as that is the only
6457         // part of the sequence. Make sure we don't go past the end of the IL, however.
6458         codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6459     }
6460
6461     // Bail out if there is no next opcode after call
6462     if (codeAddrOfNextOpcode >= codeEnd)
6463     {
6464         return false;
6465     }
6466
6467     // Scan the opcodes to look for the following IL patterns if either
6468     //   i) the call is not tail prefixed (i.e. implicit tail call) or
6469     //  ii) if tail prefixed, IL verification is not needed for the method.
6470     //
6471     // Only in the above two cases we can allow the below tail call patterns
6472     // violating ECMA spec.
6473     //
6474     // Pattern1:
6475     //       call
6476     //       nop*
6477     //       ret
6478     //
6479     // Pattern2:
6480     //       call
6481     //       nop*
6482     //       pop
6483     //       nop*
6484     //       ret
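    //
    // A concrete (hypothetical) instance of Pattern2 in a void-returning caller:
    //       call   int32 C::M()
    //       pop
    //       ret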
6485     int    cntPop = 0;
6486     OPCODE nextOpcode;
6487
6488 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6489     do
6490     {
6491         nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6492         codeAddrOfNextOpcode += sizeof(__int8);
6493     } while ((codeAddrOfNextOpcode < codeEnd) &&         // Haven't reached end of method
6494              (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6495              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6496                                                                                          // one pop seen so far.
6497 #else
6498     nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6499 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6500
6501     if (isCallPopAndRet)
6502     {
6503         // Allow call+pop+ret to be tail call optimized if caller ret type is void
6504         *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6505     }
6506
6507 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6508     // Jit64 Compat:
6509     // Tail call IL pattern could be either of the following
6510     // 1) call/callvirt/calli + ret
6511     // 2) call/callvirt/calli + pop + ret in a method returning void.
6512     return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6513 #else
6514     return (nextOpcode == CEE_RET) && (cntPop == 0);
6515 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6516 }
6517
6518 /*****************************************************************************
6519  *
6520  * Determine whether the call could be converted to an implicit tail call
6521  *
6522  */
6523 bool Compiler::impIsImplicitTailCallCandidate(
6524     OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6525 {
6526
6527 #if FEATURE_TAILCALL_OPT
6528     if (!opts.compTailCallOpt)
6529     {
6530         return false;
6531     }
6532
6533     if (opts.compDbgCode || opts.MinOpts())
6534     {
6535         return false;
6536     }
6537
6538     // must not be tail prefixed
6539     if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6540     {
6541         return false;
6542     }
6543
6544 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6545     // the block containing call is marked as BBJ_RETURN
6546     // We allow shared ret tail call optimization on recursive calls even under
6547     // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6548     if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6549         return false;
6550 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6551
6552     // must be call+ret or call+pop+ret
6553     if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6554     {
6555         return false;
6556     }
6557
6558     return true;
6559 #else
6560     return false;
6561 #endif // FEATURE_TAILCALL_OPT
6562 }
6563
6564 //------------------------------------------------------------------------
6565 // impImportCall: import a call-inspiring opcode
6566 //
6567 // Arguments:
6568 //    opcode                    - opcode that inspires the call
6569 //    pResolvedToken            - resolved token for the call target
6570 //    pConstrainedResolvedToken - resolved constraint token (or nullptr)
6571 //    newObjThis                - tree for this pointer or uninitialized newobj temp (or nullptr)
6572 //    prefixFlags               - IL prefix flags for the call
6573 //    callInfo                  - EE supplied info for the call
6574 //    rawILOffset               - IL offset of the opcode
6575 //
6576 // Returns:
6577 //    Type of the call's return value.
6578 //
6579 // Notes:
6580 //    opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6581 //
6582 //    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6583 //    uninitialized object.
6584
6585 #ifdef _PREFAST_
6586 #pragma warning(push)
6587 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6588 #endif
6589
6590 var_types Compiler::impImportCall(OPCODE                  opcode,
6591                                   CORINFO_RESOLVED_TOKEN* pResolvedToken,
6592                                   CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6593                                   GenTreePtr              newobjThis,
6594                                   int                     prefixFlags,
6595                                   CORINFO_CALL_INFO*      callInfo,
6596                                   IL_OFFSET               rawILOffset)
6597 {
6598     assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6599
6600     IL_OFFSETX             ilOffset                       = impCurILOffset(rawILOffset, true);
6601     var_types              callRetTyp                     = TYP_COUNT;
6602     CORINFO_SIG_INFO*      sig                            = nullptr;
6603     CORINFO_METHOD_HANDLE  methHnd                        = nullptr;
6604     CORINFO_CLASS_HANDLE   clsHnd                         = nullptr;
6605     unsigned               clsFlags                       = 0;
6606     unsigned               mflags                         = 0;
6607     unsigned               argFlags                       = 0;
6608     GenTreePtr             call                           = nullptr;
6609     GenTreeArgList*        args                           = nullptr;
6610     CORINFO_THIS_TRANSFORM constraintCallThisTransform    = CORINFO_NO_THIS_TRANSFORM;
6611     CORINFO_CONTEXT_HANDLE exactContextHnd                = nullptr;
6612     bool                   exactContextNeedsRuntimeLookup = false;
6613     bool                   canTailCall                    = true;
6614     const char*            szCanTailCallFailReason        = nullptr;
6615     int                    tailCall                       = prefixFlags & PREFIX_TAILCALL;
6616     bool                   readonlyCall                   = (prefixFlags & PREFIX_READONLY) != 0;
6617
6618     CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
6619
6620     // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6621     // do that before tailcalls, but that is probably not the intended
6622     // semantic. So just disallow tailcalls from synchronized methods.
6623     // Also, popping arguments in a varargs function is more work and NYI
6624     // If we have a security object, we have to keep our frame around for callers
6625     // to see any imperative security.
6626     if (info.compFlags & CORINFO_FLG_SYNCH)
6627     {
6628         canTailCall             = false;
6629         szCanTailCallFailReason = "Caller is synchronized";
6630     }
6631 #if !FEATURE_FIXED_OUT_ARGS
6632     else if (info.compIsVarArgs)
6633     {
6634         canTailCall             = false;
6635         szCanTailCallFailReason = "Caller is varargs";
6636     }
6637 #endif // FEATURE_FIXED_OUT_ARGS
6638     else if (opts.compNeedSecurityCheck)
6639     {
6640         canTailCall             = false;
6641         szCanTailCallFailReason = "Caller requires a security check.";
6642     }
6643
6644     // We only need to cast the return value of pinvoke inlined calls that return small types
6645
6646     // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6647     // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6648     // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6649     // the time being that the callee might be compiled by the other JIT and thus the return
6650     // value will need to be widened by us (or not widened at all...)
6651
6652     // ReadyToRun code sticks with default calling convention that does not widen small return types.
6653
6654     bool checkForSmallType  = opts.IsJit64Compat() || opts.IsReadyToRun();
6655     bool bIntrinsicImported = false;
6656
6657     CORINFO_SIG_INFO calliSig;
6658     GenTreeArgList*  extraArg = nullptr;
6659
6660     /*-------------------------------------------------------------------------
6661      * First create the call node
6662      */
6663
6664     if (opcode == CEE_CALLI)
6665     {
6666         /* Get the call site sig */
6667         eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6668
6669         callRetTyp = JITtype2varType(calliSig.retType);
6670
6671         call = impImportIndirectCall(&calliSig, ilOffset);
6672
6673         // We don't know the target method, so we have to infer the flags, or
6674         // assume the worst-case.
6675         mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6676
6677 #ifdef DEBUG
6678         if (verbose)
6679         {
6680             unsigned structSize =
6681                 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6682             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6683                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6684         }
6685 #endif
6686         // This should be checked in impImportBlockCode.
6687         assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6688
6689         sig = &calliSig;
6690
6691 #ifdef DEBUG
6692         // We cannot lazily obtain the signature of a CALLI call because it has no method
6693         // handle that we can use, so we need to save its full call signature here.
6694         assert(call->gtCall.callSig == nullptr);
6695         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6696         *call->gtCall.callSig = calliSig;
6697 #endif // DEBUG
6698
6699         if (IsTargetAbi(CORINFO_CORERT_ABI))
6700         {
6701             bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
6702                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
6703                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
6704                                 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
6705             if (managedCall)
6706             {
6707                 addFatPointerCandidate(call->AsCall());
6708             }
6709         }
6710     }
6711     else // (opcode != CEE_CALLI)
6712     {
6713         CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6714
6715         // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6716         // supply the instantiation parameters necessary to make direct calls to underlying
6717         // shared generic code, rather than calling through instantiating stubs.  If the
6718         // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6719         // must indeed pass an instantiation parameter.
6720
6721         methHnd = callInfo->hMethod;
6722
6723         sig        = &(callInfo->sig);
6724         callRetTyp = JITtype2varType(sig->retType);
6725
6726         mflags = callInfo->methodFlags;
6727
6728 #ifdef DEBUG
6729         if (verbose)
6730         {
6731             unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6732             printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6733                    opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6734         }
6735 #endif
6736         if (compIsForInlining())
6737         {
6738             /* Does this call site have security boundary restrictions? */
6739
6740             if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6741             {
6742                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6743                 return callRetTyp;
6744             }
6745
6746             /* Does the inlinee need a security check token on the frame */
6747
6748             if (mflags & CORINFO_FLG_SECURITYCHECK)
6749             {
6750                 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6751                 return callRetTyp;
6752             }
6753
6754             /* Does the inlinee use StackCrawlMark */
6755
6756             if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6757             {
6758                 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6759                 return callRetTyp;
6760             }
6761
6762             /* For now ignore delegate invoke */
6763
6764             if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6765             {
6766                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6767                 return callRetTyp;
6768             }
6769
6770             /* For now ignore varargs */
6771             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6772             {
6773                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6774                 return callRetTyp;
6775             }
6776
6777             if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6778             {
6779                 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6780                 return callRetTyp;
6781             }
6782
6783             if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6784             {
6785                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6786                 return callRetTyp;
6787             }
6788         }
6789
6790         clsHnd = pResolvedToken->hClass;
6791
6792         clsFlags = callInfo->classFlags;
6793
6794 #ifdef DEBUG
6795         // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6796
6797         // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6798         // These should be in mscorlib.h, and available through a JIT/EE interface call.
6799         const char* modName;
6800         const char* className;
6801         const char* methodName;
6802         if ((className = eeGetClassName(clsHnd)) != nullptr &&
6803             strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6804             (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6805         {
6806             return impImportJitTestLabelMark(sig->numArgs);
6807         }
6808 #endif // DEBUG
6809
6810         // <NICE> Factor this into getCallInfo </NICE>
6811         const bool isIntrinsic    = (mflags & CORINFO_FLG_INTRINSIC) != 0;
6812         const bool isJitIntrinsic = (mflags & CORINFO_FLG_JIT_INTRINSIC) != 0;
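        // CORINFO_FLG_INTRINSIC marks methods with a classic CORINFO_INTRINSIC_* id, while
        // CORINFO_FLG_JIT_INTRINSIC marks methods the JIT may recognize by name and expand
        // itself. impIntrinsic handles both cases; if it returns nullptr we simply fall back
        // to importing an ordinary call below.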
6813         if ((isIntrinsic || isJitIntrinsic) && !pConstrainedResolvedToken)
6814         {
6815             call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6816                                 (canTailCall && (tailCall != 0)), isJitIntrinsic, &intrinsicID);
6817
6818             if (compIsForInlining() && compInlineResult->IsFailure())
6819             {
6820                 return callRetTyp;
6821             }
6822
6823             if (call != nullptr)
6824             {
6825                 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6826                        (clsFlags & CORINFO_FLG_FINAL));
6827
6828 #ifdef FEATURE_READYTORUN_COMPILER
6829                 if (call->OperGet() == GT_INTRINSIC)
6830                 {
6831                     if (opts.IsReadyToRun())
6832                     {
6833                         noway_assert(callInfo->kind == CORINFO_CALL);
6834                         call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6835                     }
6836                     else
6837                     {
6838                         call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6839                     }
6840                 }
6841 #endif
6842
6843                 bIntrinsicImported = true;
6844                 goto DONE_CALL;
6845             }
6846         }
6847
6848 #ifdef FEATURE_SIMD
6849         if (featureSIMD)
6850         {
6851             call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6852             if (call != nullptr)
6853             {
6854                 bIntrinsicImported = true;
6855                 goto DONE_CALL;
6856             }
6857         }
6858 #endif // FEATURE_SIMD
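        // (If this was a recognized System.Numerics vector method, impSIMDIntrinsic produced a
        // SIMD tree above and we never build a call node for it.)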
6859
6860         if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6861         {
6862             NO_WAY("Virtual call to a function added via EnC is not supported");
6863         }
6864
6865         if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6866             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6867             (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6868         {
6869             BADCODE("Bad calling convention");
6870         }
6871
6872         //-------------------------------------------------------------------------
6873         //  Construct the call node
6874         //
6875         // Work out what sort of call we're making.
6876         // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6877
6878         constraintCallThisTransform    = callInfo->thisTransform;
6879         exactContextHnd                = callInfo->contextHandle;
6880         exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
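        // constraintCallThisTransform records how a 'constrained.' prefix requires the 'this'
        // pointer to be adjusted before the call; it is applied by impTransformThis when the
        // 'this' argument is popped further below.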
6881
6882         // Recursive call is treated as a loop to the beginning of the method.
6883         if (methHnd == info.compMethodHnd)
6884         {
6885 #ifdef DEBUG
6886             if (verbose)
6887             {
6888                 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6889                         fgFirstBB->bbNum, compCurBB->bbNum);
6890             }
6891 #endif
6892             fgMarkBackwardJump(fgFirstBB, compCurBB);
6893         }
6894
6895         switch (callInfo->kind)
6896         {
6897
6898             case CORINFO_VIRTUALCALL_STUB:
6899             {
6900                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6901                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6902                 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6903                 {
6904
6905                     if (compIsForInlining())
6906                     {
6907                         // Don't import runtime lookups when inlining
6908                         // Inlining has to be aborted in such a case
6909                         /* XXX Fri 3/20/2009
6910                          * By the way, this would never succeed.  If the handle lookup is into the generic
6911                          * dictionary for a candidate, you'll generate different dictionary offsets and the
6912                          * inlined code will crash.
6913                          *
6914                          * To anyone reviewing this code: when could this ever succeed in the future?  It'll
6915                          * always have a handle lookup.  These lookups are safe intra-module, but we're just
6916                          * failing here.
6917                          */
6918                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6919                         return callRetTyp;
6920                     }
6921
6922                     GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6923                     assert(!compDonotInline());
6924
6925                     // This is the rough code to set up an indirect stub call
6926                     assert(stubAddr != nullptr);
6927
6928                     // The stubAddr may be a
6929                     // complex expression. As it is evaluated after the args,
6930                     // it may cause registered args to be spilled. Simply spill it.
6931
6932                     unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6933                     impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6934                     stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6935
6936                     // Create the actual call node
6937
6938                     assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6939                            (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6940
6941                     call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6942
6943                     call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6944                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6945
6946 #ifdef _TARGET_X86_
6947                     // No tailcalls allowed for these yet...
6948                     canTailCall             = false;
6949                     szCanTailCallFailReason = "VirtualCall with runtime lookup";
6950 #endif
6951                 }
6952                 else
6953                 {
6954                     // OK, the stub is available at compile time.
6955
6956                     call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6957                     call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6958                     call->gtFlags |= GTF_CALL_VIRT_STUB;
6959                     assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
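                    // constLookup.accessType tells us how the stub address was handed to us:
                    // IAT_VALUE is the address itself, IAT_PVALUE is a pointer to a cell that
                    // holds the address (hence the REL_INDIRECT flag below), and IAT_PPVALUE
                    // (double indirection) is not expected for stub calls.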
6960                     if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6961                     {
6962                         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6963                     }
6964                 }
6965
6966 #ifdef FEATURE_READYTORUN_COMPILER
6967                 if (opts.IsReadyToRun())
6968                 {
6969                     // Null check is sometimes needed for ready to run to handle
6970                     // non-virtual <-> virtual changes between versions
6971                     if (callInfo->nullInstanceCheck)
6972                     {
6973                         call->gtFlags |= GTF_CALL_NULLCHECK;
6974                     }
6975                 }
6976 #endif
6977
6978                 break;
6979             }
6980
6981             case CORINFO_VIRTUALCALL_VTABLE:
6982             {
6983                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6984                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6985                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6986                 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6987                 break;
6988             }
6989
6990             case CORINFO_VIRTUALCALL_LDVIRTFTN:
6991             {
6992                 if (compIsForInlining())
6993                 {
6994                     compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6995                     return callRetTyp;
6996                 }
6997
6998                 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6999                 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7000                 // OK, We've been told to call via LDVIRTFTN, so just
7001                 // take the call now....
7002
7003                 args = impPopList(sig->numArgs, sig);
7004
7005                 GenTreePtr thisPtr = impPopStack().val;
7006                 thisPtr            = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7007                 if (compDonotInline())
7008                 {
7009                     return callRetTyp;
7010                 }
7011
7012                 // Clone the (possibly transformed) "this" pointer
7013                 GenTreePtr thisPtrCopy;
7014                 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7015                                        nullptr DEBUGARG("LDVIRTFTN this pointer"));
7016
7017                 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7018
7019                 if (compDonotInline())
7020                 {
7021                     return callRetTyp;
7022                 }
7023
7024                 thisPtr = nullptr; // can't reuse it
7025
7026                 // Now make an indirect call through the function pointer
7027
7028                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7029                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7030                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7031
7032                 // Create the actual call node
7033
7034                 call                    = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7035                 call->gtCall.gtCallObjp = thisPtrCopy;
7036                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7037
7038                 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7039                 {
7040                     // CoreRT generic virtual method: need to handle potential fat function pointers
7041                     addFatPointerCandidate(call->AsCall());
7042                 }
7043 #ifdef FEATURE_READYTORUN_COMPILER
7044                 if (opts.IsReadyToRun())
7045                 {
7046                     // Null check is needed for ready to run to handle
7047                     // non-virtual <-> virtual changes between versions
7048                     call->gtFlags |= GTF_CALL_NULLCHECK;
7049                 }
7050 #endif
7051
7052                 // Since we are jumping over some code, check that it's OK to skip that code
7053                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7054                        (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7055                 goto DONE;
7056             }
7057
7058             case CORINFO_CALL:
7059             {
7060                 // This is for a non-virtual, non-interface etc. call
7061                 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7062
7063                 // We remove the null check for the GetType call intrinsic.
7064                 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7065                 // and intrinsics.
7066                 if (callInfo->nullInstanceCheck &&
7067                     !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7068                 {
7069                     call->gtFlags |= GTF_CALL_NULLCHECK;
7070                 }
7071
7072 #ifdef FEATURE_READYTORUN_COMPILER
7073                 if (opts.IsReadyToRun())
7074                 {
7075                     call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7076                 }
7077 #endif
7078                 break;
7079             }
7080
7081             case CORINFO_CALL_CODE_POINTER:
7082             {
7083                 // The EE has asked us to call by computing a code pointer and then doing an
7084                 // indirect call.  This is because a runtime lookup is required to get the code entry point.
7085
7086                 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7087                 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7088
7089                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7090                 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7091
7092                 GenTreePtr fptr =
7093                     impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7094
7095                 if (compDonotInline())
7096                 {
7097                     return callRetTyp;
7098                 }
7099
7100                 // Now make an indirect call through the function pointer
7101
7102                 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7103                 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7104                 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7105
7106                 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7107                 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7108                 if (callInfo->nullInstanceCheck)
7109                 {
7110                     call->gtFlags |= GTF_CALL_NULLCHECK;
7111                 }
7112
7113                 break;
7114             }
7115
7116             default:
7117                 assert(!"unknown call kind");
7118                 break;
7119         }
7120
7121         //-------------------------------------------------------------------------
7122         // Set more flags
7123
7124         PREFIX_ASSUME(call != nullptr);
7125
7126         if (mflags & CORINFO_FLG_NOGCCHECK)
7127         {
7128             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7129         }
7130
7131         // Mark call if it's one of the ones we will maybe treat as an intrinsic
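        // (The call stays a real call here; later phases can look for the
        // GTF_CALL_M_SPECIAL_INTRINSIC flag and, where possible, fold patterns such as
        // Object.GetType()/type-equality checks.)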
7132         if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
7133             intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
7134             intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
7135         {
7136             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7137         }
7138     }
7139     assert(sig);
7140     assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7141
7142     /* Some sanity checks */
7143
7144     // CALL_VIRT and NEWOBJ must have a THIS pointer
7145     assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7146     // static bit and hasThis are negations of one another
7147     assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7148     assert(call != nullptr);
7149
7150     /*-------------------------------------------------------------------------
7151      * Check special-cases etc
7152      */
7153
7154     /* Special case - Check if it is a call to Delegate.Invoke(). */
7155
7156     if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7157     {
7158         assert(!compIsForInlining());
7159         assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7160         assert(mflags & CORINFO_FLG_FINAL);
7161
7162         /* Set the delegate flag */
7163         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7164
7165         if (callInfo->secureDelegateInvoke)
7166         {
7167             call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7168         }
7169
7170         if (opcode == CEE_CALLVIRT)
7171         {
7172             assert(mflags & CORINFO_FLG_FINAL);
7173
7174             /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7175             assert(call->gtFlags & GTF_CALL_NULLCHECK);
7176             call->gtFlags &= ~GTF_CALL_NULLCHECK;
7177         }
7178     }
7179
7180     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7181     actualMethodRetTypeSigClass = sig->retTypeSigClass;
7182     if (varTypeIsStruct(callRetTyp))
7183     {
7184         callRetTyp   = impNormStructType(actualMethodRetTypeSigClass);
7185         call->gtType = callRetTyp;
7186     }
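    // (impNormStructType may normalize the struct return type, e.g. to a SIMD type, so
    // callRetTyp and the call node's type are kept in sync here.)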
7187
7188 #if !FEATURE_VARARG
7189     /* Check for varargs */
7190     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7191         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7192     {
7193         BADCODE("Varargs not supported.");
7194     }
7195 #endif // !FEATURE_VARARG
7196
7197 #ifdef UNIX_X86_ABI
7198     if (call->gtCall.callSig == nullptr)
7199     {
7200         call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7201         *call->gtCall.callSig = *sig;
7202     }
7203 #endif // UNIX_X86_ABI
7204
7205     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7206         (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7207     {
7208         assert(!compIsForInlining());
7209
7210         /* Set the right flags */
7211
7212         call->gtFlags |= GTF_CALL_POP_ARGS;
7213         call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7214
7215         /* Can't allow tailcall for varargs as it is caller-pop. The caller
7216            will be expecting to pop a certain number of arguments, but if we
7217            tailcall to a function with a different number of arguments, we
7218            are hosed. There are ways around this (caller remembers esp value,
7219            varargs is not caller-pop, etc), but not worth it. */
7220         CLANG_FORMAT_COMMENT_ANCHOR;
7221
7222 #ifdef _TARGET_X86_
7223         if (canTailCall)
7224         {
7225             canTailCall             = false;
7226             szCanTailCallFailReason = "Callee is varargs";
7227         }
7228 #endif
7229
7230         /* Get the total number of arguments - this is already correct
7231          * for CALLI - for methods we have to get it from the call site */
7232
7233         if (opcode != CEE_CALLI)
7234         {
7235 #ifdef DEBUG
7236             unsigned numArgsDef = sig->numArgs;
7237 #endif
7238             eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7239
7240 #ifdef DEBUG
7241             // We cannot lazily obtain the signature of a vararg call because using its method
7242             // handle will give us only the declared argument list, not the full argument list.
7243             assert(call->gtCall.callSig == nullptr);
7244             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7245             *call->gtCall.callSig = *sig;
7246 #endif
7247
7248             // For vararg calls we must be sure to load the return type of the
7249             // method actually being called, as well as the return types
7250             // specified in the vararg signature. With type equivalency, these types
7251             // may not be the same.
7252             if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7253             {
7254                 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7255                     sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7256                     sig->retType != CORINFO_TYPE_VAR)
7257                 {
7258                     // Make sure that all valuetypes (including enums) that we push are loaded.
7259             // This is to guarantee that if a GC is triggered from the prestub of this method,
7260                     // all valuetypes in the method signature are already loaded.
7261                     // We need to be able to find the size of the valuetypes, but we cannot
7262                     // do a class-load from within GC.
7263                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7264                 }
7265             }
7266
7267             assert(numArgsDef <= sig->numArgs);
7268         }
7269
7270         /* We will have "cookie" as the last argument but we cannot push
7271          * it on the operand stack because we may overflow, so we append it
7272          * to the arg list after we pop the other arguments */
7273     }
7274
7275     if (mflags & CORINFO_FLG_SECURITYCHECK)
7276     {
7277         assert(!compIsForInlining());
7278
7279         // Need security prolog/epilog callouts when there is
7280         // imperative security in the method. This is to give security a
7281         // chance to do any setup in the prolog and cleanup in the epilog if needed.
7282
7283         if (compIsForInlining())
7284         {
7285             // Cannot handle this if the method being imported is itself an inlinee,
7286             // because an inlinee method does not have its own frame.
7287
7288             compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7289             return callRetTyp;
7290         }
7291         else
7292         {
7293             tiSecurityCalloutNeeded = true;
7294
7295             // If the current method calls a method which needs a security check,
7296             // (i.e. the method being compiled has imperative security)
7297             // we need to reserve a slot for the security object in
7298             // the current method's stack frame
7299             opts.compNeedSecurityCheck = true;
7300         }
7301     }
7302
7303     //--------------------------- Inline NDirect ------------------------------
7304
7305     // For inline cases we technically should look at both the current
7306     // block and the call site block (or just the latter if we've
7307     // fused the EH trees). However the block-related checks pertain to
7308     // EH and we currently won't inline a method with EH. So for
7309     // inlinees, just checking the call site block is sufficient.
7310     {
7311         // New lexical block here to avoid compilation errors because of GOTOs.
7312         BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7313         impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
7314     }
7315
7316     if (call->gtFlags & GTF_CALL_UNMANAGED)
7317     {
7318         // We set up the unmanaged call by linking the frame, disabling GC, etc
7319         // This needs to be cleaned up on return
7320         if (canTailCall)
7321         {
7322             canTailCall             = false;
7323             szCanTailCallFailReason = "Callee is native";
7324         }
7325
7326         checkForSmallType = true;
7327
7328         impPopArgsForUnmanagedCall(call, sig);
7329
7330         goto DONE;
7331     }
7332     else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7333                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7334                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7335                                        ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7336     {
7337         if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7338         {
7339             // Normally this only happens with inlining.
7340             // However, a generic method (or type) being NGENd into another module
7341             // can run into this issue as well.  There's no easy fall-back for NGEN,
7342             // so instead we fall back to JIT.
7343             if (compIsForInlining())
7344             {
7345                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7346             }
7347             else
7348             {
7349                 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7350             }
7351
7352             return callRetTyp;
7353         }
7354
7355         GenTreePtr cookie = eeGetPInvokeCookie(sig);
7356
7357         // This cookie is required to be either a simple GT_CNS_INT or
7358         // an indirection of a GT_CNS_INT
7359         //
7360         GenTreePtr cookieConst = cookie;
7361         if (cookie->gtOper == GT_IND)
7362         {
7363             cookieConst = cookie->gtOp.gtOp1;
7364         }
7365         assert(cookieConst->gtOper == GT_CNS_INT);
7366
7367         // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7368         // we won't allow this tree to participate in any CSE logic
7369         //
7370         cookie->gtFlags |= GTF_DONT_CSE;
7371         cookieConst->gtFlags |= GTF_DONT_CSE;
7372
7373         call->gtCall.gtCallCookie = cookie;
7374
7375         if (canTailCall)
7376         {
7377             canTailCall             = false;
7378             szCanTailCallFailReason = "PInvoke calli";
7379         }
7380     }
7381
7382     /*-------------------------------------------------------------------------
7383      * Create the argument list
7384      */
7385
7386     //-------------------------------------------------------------------------
7387     // Special case - for varargs we have an implicit last argument
7388
7389     if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7390     {
7391         assert(!compIsForInlining());
7392
7393         void *varCookie, *pVarCookie;
7394         if (!info.compCompHnd->canGetVarArgsHandle(sig))
7395         {
7396             compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7397             return callRetTyp;
7398         }
7399
7400         varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
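        // Exactly one of varCookie / pVarCookie is non-null: either the cookie handle can be
        // embedded directly, or we embed a pointer to the cell that will hold it and the node
        // becomes an indirection.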
7401         assert((!varCookie) != (!pVarCookie));
7402         GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7403
7404         assert(extraArg == nullptr);
7405         extraArg = gtNewArgList(cookie);
7406     }
7407
7408     //-------------------------------------------------------------------------
7409     // Extra arg for shared generic code and array methods
7410     //
7411     // Extra argument containing instantiation information is passed in the
7412     // following circumstances:
7413     // (a) To the "Address" method on array classes; the extra parameter is
7414     //     the array's type handle (a TypeDesc)
7415     // (b) To shared-code instance methods in generic structs; the extra parameter
7416     //     is the struct's type handle (a vtable ptr)
7417     // (c) To shared-code per-instantiation non-generic static methods in generic
7418     //     classes and structs; the extra parameter is the type handle
7419     // (d) To shared-code generic methods; the extra parameter is an
7420     //     exact-instantiation MethodDesc
7421     //
7422     // We also set the exact type context associated with the call so we can
7423     // inline the call correctly later on.
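    // For example (illustrative): a shared instance method on a generic struct gets the struct's
    // exact type handle as in (b), a shared generic method gets an exact-instantiation MethodDesc
    // as in (d), and the readonly array Address case under (a) gets a null instParam instead
    // (see the readonlyCall handling below).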
7424
7425     if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7426     {
7427         assert(call->gtCall.gtCallType == CT_USER_FUNC);
7428         if (clsHnd == nullptr)
7429         {
7430             NO_WAY("CALLI on parameterized type");
7431         }
7432
7433         assert(opcode != CEE_CALLI);
7434
7435         GenTreePtr instParam;
7436         BOOL       runtimeLookup;
7437
7438         // Instantiated generic method
7439         if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7440         {
7441             CORINFO_METHOD_HANDLE exactMethodHandle =
7442                 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7443
7444             if (!exactContextNeedsRuntimeLookup)
7445             {
7446 #ifdef FEATURE_READYTORUN_COMPILER
7447                 if (opts.IsReadyToRun())
7448                 {
7449                     instParam =
7450                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7451                     if (instParam == nullptr)
7452                     {
7453                         return callRetTyp;
7454                     }
7455                 }
7456                 else
7457 #endif
7458                 {
7459                     instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7460                     info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7461                 }
7462             }
7463             else
7464             {
7465                 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7466                 if (instParam == nullptr)
7467                 {
7468                     return callRetTyp;
7469                 }
7470             }
7471         }
7472
7473         // otherwise must be an instance method in a generic struct,
7474         // a static method in a generic type, or a runtime-generated array method
7475         else
7476         {
7477             assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7478             CORINFO_CLASS_HANDLE exactClassHandle =
7479                 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7480
7481             if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7482             {
7483                 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7484                 return callRetTyp;
7485             }
7486
7487             if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7488             {
7489                 // We indicate "readonly" to the Address operation by using a null
7490                 // instParam.
7491                 instParam = gtNewIconNode(0, TYP_REF);
7492             }
7493             else if (!exactContextNeedsRuntimeLookup)
7494             {
7495 #ifdef FEATURE_READYTORUN_COMPILER
7496                 if (opts.IsReadyToRun())
7497                 {
7498                     instParam =
7499                         impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7500                     if (instParam == nullptr)
7501                     {
7502                         return callRetTyp;
7503                     }
7504                 }
7505                 else
7506 #endif
7507                 {
7508                     instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7509                     info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7510                 }
7511             }
7512             else
7513             {
7514                 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
7515                 // with which the call was constrained. We embed pConstrainedResolvedToken as the extra argument
7516                 // because pResolvedToken is an interface method and interface types make a poor generic context.
7517                 if (pConstrainedResolvedToken)
7518                 {
7519                     instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
7520                                                  FALSE /* importParent */);
7521                 }
7522                 else
7523                 {
7524                     instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7525                 }
7526
7527                 if (instParam == nullptr)
7528                 {
7529                     return callRetTyp;
7530                 }
7531             }
7532         }
7533
7534         assert(extraArg == nullptr);
7535         extraArg = gtNewArgList(instParam);
7536     }
7537
7538     // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7539     // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7540     // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7541     // exactContextHnd is not currently required when inlining shared generic code into shared
7542     // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7543     // (e.g. anything marked needsRuntimeLookup)
7544     if (exactContextNeedsRuntimeLookup)
7545     {
7546         exactContextHnd = nullptr;
7547     }
7548
7549     if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
7550     {
7551         // Only verifiable cases are supported.
7552         // dup; ldvirtftn; newobj; or ldftn; newobj.
7553         // IL could contain an unverifiable sequence; in that case the optimization should not be done.
7554         if (impStackHeight() > 0)
7555         {
7556             typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
7557             if (delegateTypeInfo.IsToken())
7558             {
7559                 ldftnToken = delegateTypeInfo.GetToken();
7560             }
7561         }
7562     }
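    // (If an ldftn/ldvirtftn token was found above, it is passed to fgOptimizeDelegateConstructor
    // in the newobj handling below so the delegate construction can be optimized.)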
7563
7564     //-------------------------------------------------------------------------
7565     // The main group of arguments
7566
7567     args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
7568
7569     if (args)
7570     {
7571         call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7572     }
7573
7574     //-------------------------------------------------------------------------
7575     // The "this" pointer
7576
7577     if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7578     {
7579         GenTreePtr obj;
7580
7581         if (opcode == CEE_NEWOBJ)
7582         {
7583             obj = newobjThis;
7584         }
7585         else
7586         {
7587             obj = impPopStack().val;
7588             obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7589             if (compDonotInline())
7590             {
7591                 return callRetTyp;
7592             }
7593         }
7594
7595         /* Is this a virtual or interface call? */
7596
7597         if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7598         {
7599             /* only true object pointers can be virtual */
7600             assert(obj->gtType == TYP_REF);
7601
7602             // See if we can devirtualize.
7603             impDevirtualizeCall(call->AsCall(), obj, &callInfo->hMethod, &callInfo->methodFlags,
7604                                 &callInfo->contextHandle, &exactContextHnd);
7605         }
7606         else
7607         {
7608             if (impIsThis(obj))
7609             {
7610                 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7611             }
7612         }
7613
7614         /* Store the "this" value in the call */
7615
7616         call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7617         call->gtCall.gtCallObjp = obj;
7618     }
7619
7620     //-------------------------------------------------------------------------
7621     // The "this" pointer for "newobj"
7622
7623     if (opcode == CEE_NEWOBJ)
7624     {
7625         if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7626         {
7627             assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7628             // This is a 'new' of a variable sized object, where
7629             // the constructor is to return the object.  In this case
7630             // the constructor claims to return VOID but we know it
7631             // actually returns the new object
7632             assert(callRetTyp == TYP_VOID);
7633             callRetTyp   = TYP_REF;
7634             call->gtType = TYP_REF;
7635             impSpillSpecialSideEff();
7636
7637             impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7638         }
7639         else
7640         {
7641             if (clsFlags & CORINFO_FLG_DELEGATE)
7642             {
7643                 // The new inliner morphs it here in impImportCall.
7644                 // This will allow us to inline the call to the delegate constructor.
7645                 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
7646             }
7647
7648             if (!bIntrinsicImported)
7649             {
7650
7651 #if defined(DEBUG) || defined(INLINE_DATA)
7652
7653                 // Keep track of the raw IL offset of the call
7654                 call->gtCall.gtRawILOffset = rawILOffset;
7655
7656 #endif // defined(DEBUG) || defined(INLINE_DATA)
7657
7658                 // Is it an inline candidate?
7659                 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7660             }
7661
7662             // append the call node.
7663             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7664
7665             // Now push the value of the 'new' onto the stack
7666
7667             // This is a 'new' of a non-variable sized object.
7668             // Append the new node (op1) to the statement list,
7669             // and then push the local holding the value of this
7670             // new instruction on the stack.
7671
7672             if (clsFlags & CORINFO_FLG_VALUECLASS)
7673             {
7674                 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7675
7676                 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7677                 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7678             }
7679             else
7680             {
7681                 if (newobjThis->gtOper == GT_COMMA)
7682                 {
7683                     // In coreclr the callout can be inserted even if verification is disabled
7684                     // so we cannot rely on tiVerificationNeeded alone
7685
7686                     // We must have inserted the callout. Get the real newobj.
7687                     newobjThis = newobjThis->gtOp.gtOp2;
7688                 }
7689
7690                 assert(newobjThis->gtOper == GT_LCL_VAR);
7691                 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7692             }
7693         }
7694         return callRetTyp;
7695     }
7696
7697 DONE:
7698
7699     if (tailCall)
7700     {
7701         // This check cannot be performed for implicit tail calls for the reason
7702         // that impIsImplicitTailCallCandidate() is not checking whether return
7703         // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7704         // As a result it is possible that in the following case, we find that
7705         // the type stack is non-empty if Callee() is considered for implicit
7706         // tail calling.
7707         //      int Caller(..) { .... void Callee(); ret val; ... }
7708         //
7709         // Note that we cannot check return type compatibility before ImpImportCall()
7710         // as we don't have required info or need to duplicate some of the logic of
7711         // ImpImportCall().
7712         //
7713         // For implicit tail calls, we perform this check after return types are
7714         // known to be compatible.
7715         if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7716         {
7717             BADCODE("Stack should be empty after tailcall");
7718         }
7719
7720         // Note that we cannot relax this condition with genActualType() as
7721         // the calling convention dictates that the caller of a function with
7722         // a small-typed return value is responsible for normalizing the return value
7723
7724         if (canTailCall &&
7725             !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7726                                           callInfo->sig.retTypeClass))
7727         {
7728             canTailCall             = false;
7729             szCanTailCallFailReason = "Return types are not tail call compatible";
7730         }
7731
7732         // Stack empty check for implicit tail calls.
7733         if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7734         {
7735 #ifdef _TARGET_AMD64_
7736             // JIT64 Compatibility:  Opportunistic tail call stack mismatch throws a VerificationException
7737             // in JIT64, not an InvalidProgramException.
7738             Verify(false, "Stack should be empty after tailcall");
7739 #else  // _TARGET_64BIT_
7740             BADCODE("Stack should be empty after tailcall");
7741 #endif //!_TARGET_64BIT_
7742         }
7743
7744         // assert(compCurBB is not a catch, finally or filter block);
7745         // assert(compCurBB is not a try block protected by a finally block);
7746
7747         // Check for permission to tailcall
7748         bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7749
7750         assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7751
7752         if (canTailCall)
7753         {
7754             // True virtual or indirect calls shouldn't pass in a callee handle.
7755             CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7756                                                     ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7757                                                        ? nullptr
7758                                                        : methHnd;
7759             GenTreePtr thisArg = call->gtCall.gtCallObjp;
7760
7761             if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7762             {
7763                 canTailCall = true;
7764                 if (explicitTailCall)
7765                 {
7766                     // In case of explicit tail calls, mark it so that it is not considered
7767                     // for in-lining.
7768                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7769 #ifdef DEBUG
7770                     if (verbose)
7771                     {
7772                         printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7773                         printTreeID(call);
7774                         printf("\n");
7775                     }
7776 #endif
7777                 }
7778                 else
7779                 {
7780 #if FEATURE_TAILCALL_OPT
7781                     // Must be an implicit tail call.
7782                     assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7783
7784                     // It is possible that a call node is both an inline candidate and marked
7785                     // for opportunistic tail calling.  In-lining happens before morphing of
7786                     // trees.  If in-lining of an in-line candidate gets aborted for whatever
7787                     // reason, it will survive to the morphing stage at which point it will be
7788                     // transformed into a tail call after performing additional checks.
7789
7790                     call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7791 #ifdef DEBUG
7792                     if (verbose)
7793                     {
7794                         printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7795                         printTreeID(call);
7796                         printf("\n");
7797                     }
7798 #endif
7799
7800 #else //! FEATURE_TAILCALL_OPT
7801                     NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7802
7803 #endif // FEATURE_TAILCALL_OPT
7804                 }
7805
7806                 // we can't report success just yet...
7807             }
7808             else
7809             {
7810                 canTailCall = false;
7811 // canTailCall reported its reasons already
7812 #ifdef DEBUG
7813                 if (verbose)
7814                 {
7815                     printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7816                     printTreeID(call);
7817                     printf("\n");
7818                 }
7819 #endif
7820             }
7821         }
7822         else
7823         {
7824             // If this assert fires it means that canTailCall was set to false without setting a reason!
7825             assert(szCanTailCallFailReason != nullptr);
7826
7827 #ifdef DEBUG
7828             if (verbose)
7829             {
7830                 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7831                 printTreeID(call);
7832                 printf(": %s\n", szCanTailCallFailReason);
7833             }
7834 #endif
7835             info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7836                                                      szCanTailCallFailReason);
7837         }
7838     }
7839
7840     // Note: we assume that small return types are already normalized by the managed callee
7841     // or by the pinvoke stub for calls to unmanaged code.
7842
7843     if (!bIntrinsicImported)
7844     {
7845         //
7846         // Things needed to be checked when bIntrinsicImported is false.
7847         //
7848
7849         assert(call->gtOper == GT_CALL);
7850         assert(sig != nullptr);
7851
7852         // Tail calls require us to save the call site's sig info so we can obtain an argument
7853         // copying thunk from the EE later on.
7854         if (call->gtCall.callSig == nullptr)
7855         {
7856             call->gtCall.callSig  = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7857             *call->gtCall.callSig = *sig;
7858         }
7859
7860         if (compIsForInlining() && opcode == CEE_CALLVIRT)
7861         {
7862             GenTreePtr callObj = call->gtCall.gtCallObjp;
7863             assert(callObj != nullptr);
7864
7865             unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7866
7867             if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7868                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7869                                                                    impInlineInfo->inlArgInfo))
7870             {
7871                 impInlineInfo->thisDereferencedFirst = true;
7872             }
7873         }
7874
7875 #if defined(DEBUG) || defined(INLINE_DATA)
7876
7877         // Keep track of the raw IL offset of the call
7878         call->gtCall.gtRawILOffset = rawILOffset;
7879
7880 #endif // defined(DEBUG) || defined(INLINE_DATA)
7881
7882         // Is it an inline candidate?
7883         impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7884     }
7885
7886 DONE_CALL:
7887     // Push or append the result of the call
7888     if (callRetTyp == TYP_VOID)
7889     {
7890         if (opcode == CEE_NEWOBJ)
7891         {
7892             // we actually did push something, so don't spill the thing we just pushed.
7893             assert(verCurrentState.esStackDepth > 0);
7894             impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7895         }
7896         else
7897         {
7898             impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7899         }
7900     }
7901     else
7902     {
7903         impSpillSpecialSideEff();
7904
7905         if (clsFlags & CORINFO_FLG_ARRAY)
7906         {
7907             eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7908         }
7909
7910         // Find the return type used for verification by interpreting the method signature.
7911         // NB: we are clobbering the already established sig.
7912         if (tiVerificationNeeded)
7913         {
7914             // Actually, we never get the sig for the original method.
7915             sig = &(callInfo->verSig);
7916         }
7917
7918         typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7919         tiRetVal.NormaliseForStack();
7920
7921         // The CEE_READONLY prefix modifies the verification semantics of an Address
7922         // operation on an array type.
7923         if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7924         {
7925             tiRetVal.SetIsReadonlyByRef();
7926         }
7927
7928         if (tiVerificationNeeded)
7929         {
7930             // We assume all calls return permanent home byrefs. If they
7931             // didn't they wouldn't be verifiable. This is also covering
7932             // the Address() helper for multidimensional arrays.
7933             if (tiRetVal.IsByRef())
7934             {
7935                 tiRetVal.SetIsPermanentHomeByRef();
7936             }
7937         }
7938
7939         if (call->IsCall())
7940         {
7941             // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7942
7943             bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
7944             if (varTypeIsStruct(callRetTyp))
7945             {
7946                 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
7947             }
7948
7949             if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7950             {
7951                 assert(opts.OptEnabled(CLFLG_INLINING));
7952                 assert(!fatPointerCandidate); // We should not try to inline calli.
7953
7954                 // Make the call its own tree (spill the stack if needed).
7955                 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7956
7957                 // TODO: Still using the widened type.
7958                 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
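                // The tree pushed for an inline candidate is a placeholder (return expression)
                // standing in for the call's value; if the inline attempt later fails, it is
                // replaced by the actual call again. (Descriptive note.)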
7959             }
7960             else
7961             {
7962                 if (fatPointerCandidate)
7963                 {
7964                     // fatPointer candidates should be in statements of the form call() or var = call().
7965                     // Such a form makes it possible to find statements with fat calls without walking whole trees
7966                     // and avoids problems with cutting trees.
7967                     assert(!bIntrinsicImported);
7968                     assert(IsTargetAbi(CORINFO_CORERT_ABI));
7969                     if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
7970                     {
7971                         unsigned   calliSlot  = lvaGrabTemp(true DEBUGARG("calli"));
7972                         LclVarDsc* varDsc     = &lvaTable[calliSlot];
7973                         varDsc->lvVerTypeInfo = tiRetVal;
7974                         impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
7975                         // impAssignTempGen can change src arg list and return type for call that returns struct.
7976                         var_types type = genActualType(lvaTable[calliSlot].TypeGet());
7977                         call           = gtNewLclvNode(calliSlot, type);
7978                     }
7979                 }
7980
7981                 // For non-candidates we must also spill, since we
7982                 // might have locals live on the eval stack that this
7983                 // call can modify.
7984                 //
7985                 // Suppress this for certain well-known call targets
7986                 // that we know won't modify locals, eg calls that are
7987                 // recognized in gtCanOptimizeTypeEquality. Otherwise
7988                 // we may break key fragile pattern matches later on.
7989                 bool spillStack = true;
7990                 if (call->IsCall())
7991                 {
7992                     GenTreeCall* callNode = call->AsCall();
7993                     if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
7994                     {
7995                         spillStack = false;
7996                     }
7997                     else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
7998                     {
7999                         spillStack = false;
8000                     }
8001                 }
8002
8003                 if (spillStack)
8004                 {
8005                     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8006                 }
8007             }
8008         }
8009
8010         if (!bIntrinsicImported)
8011         {
8012             //-------------------------------------------------------------------------
8013             //
8014             /* If the call is of a small type and the callee is managed, the callee will normalize the result
8015                 before returning.
8016                 However, we need to normalize small type values returned by unmanaged
8017                 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8018                 if we use the shorter inlined pinvoke stub. */
8019
8020             if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8021             {
8022                 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
8023             }
8024         }
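        // Illustrative sketch (not from the original source): for an inlined pinvoke stub whose native
        // callee is declared to return a small type, e.g. 'unsigned char', the cast above normalizes the
        // widened return-register value roughly like:
        //
        //     call                          (TYP_INT)   // raw value from the return register
        //     cast <int <- ubyte> (call)                // re-truncate/normalize to the declared small type
        //
        // Managed callees normalize the result themselves before returning, so no cast is needed for them.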
8025
8026         impPushOnStack(call, tiRetVal);
8027     }
8028
8029     // VSD functions get a new call target each time we getCallInfo, so clear the cache.
8030     // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8031     // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8032     //  callInfoCache.uncacheCallInfo();
8033
8034     return callRetTyp;
8035 }
8036 #ifdef _PREFAST_
8037 #pragma warning(pop)
8038 #endif
8039
8040 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8041 {
8042     CorInfoType corType = methInfo->args.retType;
8043
8044     if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8045     {
8046         // We have some kind of STRUCT being returned
8047
8048         structPassingKind howToReturnStruct = SPK_Unknown;
8049
8050         var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8051
8052         if (howToReturnStruct == SPK_ByReference)
8053         {
8054             return true;
8055         }
8056     }
8057
8058     return false;
8059 }
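// Illustrative sketch (not part of the original source): whether a struct return needs the hidden
// retbuf argument is ABI- and size-dependent. For example, on a typical 64-bit target a small struct
// such as
//
//     struct TwoInts  { int a; int b; };            // may come back in a register (SPK_PrimitiveType/SPK_ByValue)
//
// can be returned in registers, while a larger struct such as
//
//     struct FourLongs { __int64 a, b, c, d; };     // typically classified SPK_ByReference
//
// is returned through a hidden retbuf argument, which is what this method reports.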
8060
8061 #ifdef DEBUG
8062 //
8063 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8064 {
8065     TestLabelAndNum tlAndN;
8066     if (numArgs == 2)
8067     {
8068         tlAndN.m_num  = 0;
8069         StackEntry se = impPopStack();
8070         assert(se.seTypeInfo.GetType() == TI_INT);
8071         GenTreePtr val = se.val;
8072         assert(val->IsCnsIntOrI());
8073         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8074     }
8075     else if (numArgs == 3)
8076     {
8077         StackEntry se = impPopStack();
8078         assert(se.seTypeInfo.GetType() == TI_INT);
8079         GenTreePtr val = se.val;
8080         assert(val->IsCnsIntOrI());
8081         tlAndN.m_num = val->AsIntConCommon()->IconValue();
8082         se           = impPopStack();
8083         assert(se.seTypeInfo.GetType() == TI_INT);
8084         val = se.val;
8085         assert(val->IsCnsIntOrI());
8086         tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8087     }
8088     else
8089     {
8090         assert(false);
8091     }
8092
8093     StackEntry expSe = impPopStack();
8094     GenTreePtr node  = expSe.val;
8095
8096     // There are a small number of special cases, where we actually put the annotation on a subnode.
8097     if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8098     {
8099         // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8100         // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8101         // offset within the static field block whose address is returned by the helper call.
8102         // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
8103         GenTreePtr helperCall = nullptr;
8104         assert(node->OperGet() == GT_IND);
8105         tlAndN.m_num -= 100;
8106         GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8107         GetNodeTestData()->Remove(node);
8108     }
8109     else
8110     {
8111         GetNodeTestData()->Set(node, tlAndN);
8112     }
8113
8114     impPushOnStack(node, expSe.seTypeInfo);
8115     return node->TypeGet();
8116 }
8117 #endif // DEBUG
8118
8119 //-----------------------------------------------------------------------------------
8120 //  impFixupCallStructReturn: For a call node that returns a struct type either
8121 //  adjust the return type to an enregisterable type, or set the flag to indicate
8122 //  struct return via retbuf arg.
8123 //
8124 //  Arguments:
8125 //    call       -  GT_CALL GenTree node
8126 //    retClsHnd  -  Class handle of return type of the call
8127 //
8128 //  Return Value:
8129 //    Returns new GenTree node after fixing struct return of call node
8130 //
8131 GenTreePtr Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8132 {
8133     if (!varTypeIsStruct(call))
8134     {
8135         return call;
8136     }
8137
8138     call->gtRetClsHnd = retClsHnd;
8139
8140 #if FEATURE_MULTIREG_RET
8141     // Initialize Return type descriptor of call node
8142     ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8143     retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8144 #endif // FEATURE_MULTIREG_RET
8145
8146 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8147
8148     // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
8149     assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8150
8151     // The return type will remain as the incoming struct type unless normalized to a
8152     // single eightbyte return type below.
8153     call->gtReturnType = call->gtType;
8154
8155     unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8156     if (retRegCount != 0)
8157     {
8158         if (retRegCount == 1)
8159         {
8160             // struct returned in a single register
8161             call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8162         }
8163         else
8164         {
8165             // must be a struct returned in two registers
8166             assert(retRegCount == 2);
8167
8168             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8169             {
8170                 // Force a call returning multi-reg struct to be always of the IR form
8171                 //   tmp = call
8172                 //
8173                 // No need to assign a multi-reg struct to a local var if:
8174                 //  - It is a tail call or
8175                 //  - The call is marked for in-lining later
8176                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8177             }
8178         }
8179     }
8180     else
8181     {
8182         // struct not returned in registers, i.e. returned via hidden retbuf arg.
8183         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8184     }
8185
8186 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8187
8188 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8189     // There is no fixup necessary if the return type is a HFA struct.
8190     // HFA structs are returned in registers for ARM32 and ARM64
8191     //
8192     if (!call->IsVarargs() && IsHfa(retClsHnd))
8193     {
8194         if (call->CanTailCall())
8195         {
8196             if (info.compIsVarArgs)
8197             {
8198                 // We cannot tail call because control needs to return to fixup the calling
8199                 // convention for result return.
8200                 call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8201             }
8202             else
8203             {
8204                 // If we can tail call returning HFA, then don't assign it to
8205                 // a variable back and forth.
8206                 return call;
8207             }
8208         }
8209
8210         if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
8211         {
8212             return call;
8213         }
8214
8215         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8216         if (retRegCount >= 2)
8217         {
8218             return impAssignMultiRegTypeToVar(call, retClsHnd);
8219         }
8220     }
8221 #endif // _TARGET_ARM_
8222
8223     // Check for TYP_STRUCT type that wraps a primitive type
8224     // Such structs are returned using a single register
8225     // and we change the return type on those calls here.
8226     //
8227     structPassingKind howToReturnStruct;
8228     var_types         returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8229
8230     if (howToReturnStruct == SPK_ByReference)
8231     {
8232         assert(returnType == TYP_UNKNOWN);
8233         call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8234     }
8235     else
8236     {
8237         assert(returnType != TYP_UNKNOWN);
8238         call->gtReturnType = returnType;
8239
8240         // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
8241         if ((returnType == TYP_LONG) && (compLongUsed == false))
8242         {
8243             compLongUsed = true;
8244         }
8245         else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
8246         {
8247             compFloatingPointUsed = true;
8248         }
8249
8250 #if FEATURE_MULTIREG_RET
8251         unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8252         assert(retRegCount != 0);
8253
8254         if (retRegCount >= 2)
8255         {
8256             if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8257             {
8258                 // Force a call returning multi-reg struct to be always of the IR form
8259                 //   tmp = call
8260                 //
8261                 // No need to assign a multi-reg struct to a local var if:
8262                 //  - It is a tail call or
8263                 //  - The call is marked for in-lining later
8264                 return impAssignMultiRegTypeToVar(call, retClsHnd);
8265             }
8266         }
8267 #endif // FEATURE_MULTIREG_RET
8268     }
8269
8270 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8271
8272     return call;
8273 }
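// Illustrative sketch (not part of the original source): for a call that returns a struct in two
// registers and is neither a tail call nor an inline candidate, impAssignMultiRegTypeToVar forces
// the "tmp = call" IR shape described above, conceptually:
//
//     before:   ... use( CALL struct SomeMethod ) ...
//     after:    ASG( LCL_VAR tmp, CALL struct SomeMethod )   // call spilled to a new temp
//               ... use( LCL_VAR tmp ) ...
//
// so later phases always see the multi-reg return value materialized in a local.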
8274
8275 /*****************************************************************************
8276    For struct return values, re-type the operand in the case where the ABI
8277    does not use a struct return buffer
8278    Note that this method is only called for !_TARGET_X86_
8279  */
8280
8281 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
8282 {
8283     assert(varTypeIsStruct(info.compRetType));
8284     assert(info.compRetBuffArg == BAD_VAR_NUM);
8285
8286 #if defined(_TARGET_XARCH_)
8287
8288 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8289     // No VarArgs for CoreCLR on x64 Unix
8290     assert(!info.compIsVarArgs);
8291
8292     // Is method returning a multi-reg struct?
8293     if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8294     {
8295         // In case of multi-reg struct return, we force IR to be one of the following:
8296         // GT_RETURN(lclvar) or GT_RETURN(call).  If op is anything other than a
8297         // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
8298
8299         if (op->gtOper == GT_LCL_VAR)
8300         {
8301             // Make sure that this struct stays in memory and doesn't get promoted.
8302             unsigned lclNum                  = op->gtLclVarCommon.gtLclNum;
8303             lvaTable[lclNum].lvIsMultiRegRet = true;
8304
8305             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8306             op->gtFlags |= GTF_DONT_CSE;
8307
8308             return op;
8309         }
8310
8311         if (op->gtOper == GT_CALL)
8312         {
8313             return op;
8314         }
8315
8316         return impAssignMultiRegTypeToVar(op, retClsHnd);
8317     }
8318 #else  // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8319     assert(info.compRetNativeType != TYP_STRUCT);
8320 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8321
8322 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8323
8324     if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8325     {
8326         if (op->gtOper == GT_LCL_VAR)
8327         {
8328             // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8329             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8330             // Make sure this struct type stays as struct so that we can return it as an HFA
8331             lvaTable[lclNum].lvIsMultiRegRet = true;
8332
8333             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8334             op->gtFlags |= GTF_DONT_CSE;
8335
8336             return op;
8337         }
8338
8339         if (op->gtOper == GT_CALL)
8340         {
8341             if (op->gtCall.IsVarargs())
8342             {
8343                 // We cannot tail call because control needs to return to fixup the calling
8344                 // convention for result return.
8345                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8346                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8347             }
8348             else
8349             {
8350                 return op;
8351             }
8352         }
8353         return impAssignMultiRegTypeToVar(op, retClsHnd);
8354     }
8355
8356 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8357
8358     // Is method returning a multi-reg struct?
8359     if (IsMultiRegReturnedType(retClsHnd))
8360     {
8361         if (op->gtOper == GT_LCL_VAR)
8362         {
8363             // This LCL_VAR stays as a TYP_STRUCT
8364             unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8365
8366             // Make sure this struct type is not struct promoted
8367             lvaTable[lclNum].lvIsMultiRegRet = true;
8368
8369             // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8370             op->gtFlags |= GTF_DONT_CSE;
8371
8372             return op;
8373         }
8374
8375         if (op->gtOper == GT_CALL)
8376         {
8377             if (op->gtCall.IsVarargs())
8378             {
8379                 // We cannot tail call because control needs to return to fixup the calling
8380                 // convention for result return.
8381                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8382                 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8383             }
8384             else
8385             {
8386                 return op;
8387             }
8388         }
8389         return impAssignMultiRegTypeToVar(op, retClsHnd);
8390     }
8391
8392 #endif // target-specific multi-reg struct return handling (_TARGET_XARCH_ / _TARGET_ARM_ / _TARGET_ARM64_)
8393
8394 REDO_RETURN_NODE:
8395     // adjust the type away from struct to integral
8396     // and no normalizing
8397     if (op->gtOper == GT_LCL_VAR)
8398     {
8399         op->ChangeOper(GT_LCL_FLD);
8400     }
8401     else if (op->gtOper == GT_OBJ)
8402     {
8403         GenTreePtr op1 = op->AsObj()->Addr();
8404
8405         // We will fold away OBJ/ADDR
8406         // except for OBJ/ADDR/INDEX
8407         //     as the array type influences the array element's offset
8408         //     Later in this method we change op->gtType to info.compRetNativeType
8409         //     This is not correct when op is a GT_INDEX as the starting offset
8410         //     for the array elements 'elemOffs' is different for an array of
8411         //     TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8412         //     Also refer to the GTF_INX_REFARR_LAYOUT flag
8413         //
8414         if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8415         {
8416             // Change '*(&X)' to 'X' and see if we can do better
8417             op = op1->gtOp.gtOp1;
8418             goto REDO_RETURN_NODE;
8419         }
8420         op->gtObj.gtClass = NO_CLASS_HANDLE;
8421         op->ChangeOperUnchecked(GT_IND);
8422         op->gtFlags |= GTF_IND_TGTANYWHERE;
8423     }
8424     else if (op->gtOper == GT_CALL)
8425     {
8426         if (op->AsCall()->TreatAsHasRetBufArg(this))
8427         {
8428             // This must be one of those 'special' helpers that don't
8429             // really have a return buffer, but instead use it as a way
8430             // to keep the trees cleaner with fewer address-taken temps.
8431             //
8432             // Well now we have to materialize the return buffer as
8433             // an address-taken temp. Then we can return the temp.
8434             //
8435             // NOTE: this code assumes that since the call directly
8436             // feeds the return, then the call must be returning the
8437             // same structure/class/type.
8438             //
8439             unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8440
8441             // No need to spill anything as we're about to return.
8442             impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8443
8444             // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8445             // jump directly to a GT_LCL_FLD.
8446             op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8447             op->ChangeOper(GT_LCL_FLD);
8448         }
8449         else
8450         {
8451             assert(info.compRetNativeType == op->gtCall.gtReturnType);
8452
8453             // Don't change the gtType of the node just yet, it will get changed later.
8454             return op;
8455         }
8456     }
8457     else if (op->gtOper == GT_COMMA)
8458     {
8459         op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8460     }
8461
8462     op->gtType = info.compRetNativeType;
8463
8464     return op;
8465 }
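// Illustrative sketch (not part of the original source): when the ABI returns the struct in a single
// register and the operand is a struct-typed local, the code above re-types it in place rather than
// copying it, roughly:
//
//     before:   GT_RETURN ( GT_LCL_VAR struct V01 )
//     after:    GT_RETURN ( GT_LCL_FLD int    V01 )   // oper changed; gtType set to info.compRetNativeType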
8466
8467 /*****************************************************************************
8468    CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8469    finally-protected try. We find the finally blocks protecting the current
8470    offset (in order) by walking over the complete exception table and
8471    finding enclosing clauses. This assumes that the table is sorted.
8472    This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8473
8474    If we are leaving a catch handler, we need to attach the
8475    CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8476
8477    After this function, the BBJ_LEAVE block has been converted to a different type.
8478  */
8479
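/* Illustrative sketch (not part of the original source): for IL of the shape

       .try {
           .try {
               leave TARGET
           } finally { ... endfinally }
       } finally { ... endfinally }
       TARGET:

   the BBJ_LEAVE block is rewritten into a chain that invokes each enclosing finally in order:

       BBJ_CALLFINALLY (inner finally) -> BBJ_CALLFINALLY (outer finally) -> BBJ_ALWAYS -> TARGET

   with step blocks inserted between the calls, as described above. */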
8480 #if !FEATURE_EH_FUNCLETS
8481
8482 void Compiler::impImportLeave(BasicBlock* block)
8483 {
8484 #ifdef DEBUG
8485     if (verbose)
8486     {
8487         printf("\nBefore import CEE_LEAVE:\n");
8488         fgDispBasicBlocks();
8489         fgDispHandlerTab();
8490     }
8491 #endif // DEBUG
8492
8493     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8494     unsigned    blkAddr         = block->bbCodeOffs;
8495     BasicBlock* leaveTarget     = block->bbJumpDest;
8496     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8497
8498     // LEAVE clears the stack, so spill side effects and set the stack depth to 0
8499
8500     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8501     verCurrentState.esStackDepth = 0;
8502
8503     assert(block->bbJumpKind == BBJ_LEAVE);
8504     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8505
8506     BasicBlock* step         = DUMMY_INIT(NULL);
8507     unsigned    encFinallies = 0; // Number of enclosing finallies.
8508     GenTreePtr  endCatches   = NULL;
8509     GenTreePtr  endLFin      = NULL; // The statement tree to indicate the end of locally-invoked finally.
8510
8511     unsigned  XTnum;
8512     EHblkDsc* HBtab;
8513
8514     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8515     {
8516         // Grab the handler offsets
8517
8518         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8519         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8520         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8521         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8522
8523         /* Is this a catch-handler we are CEE_LEAVEing out of?
8524          * If so, we need to call CORINFO_HELP_ENDCATCH.
8525          */
8526
8527         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8528         {
8529             // Can't CEE_LEAVE out of a finally/fault handler
8530             if (HBtab->HasFinallyOrFaultHandler())
8531                 BADCODE("leave out of fault/finally block");
8532
8533             // Create the call to CORINFO_HELP_ENDCATCH
8534             GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8535
8536             // Make a list of all the currently pending endCatches
8537             if (endCatches)
8538                 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8539             else
8540                 endCatches = endCatch;
8541
8542 #ifdef DEBUG
8543             if (verbose)
8544             {
8545                 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8546                        "CORINFO_HELP_ENDCATCH\n",
8547                        block->bbNum, XTnum);
8548             }
8549 #endif
8550         }
8551         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8552                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8553         {
8554             /* This is a finally-protected try we are jumping out of */
8555
8556             /* If there are any pending endCatches, and we have already
8557                jumped out of a finally-protected try, then the endCatches
8558                have to be put in a block in an outer try for async
8559                exceptions to work correctly.
8560                Else, just append to the original block. */
8561
8562             BasicBlock* callBlock;
8563
8564             assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8565
8566             if (encFinallies == 0)
8567             {
8568                 assert(step == DUMMY_INIT(NULL));
8569                 callBlock             = block;
8570                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8571
8572                 if (endCatches)
8573                     impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8574
8575 #ifdef DEBUG
8576                 if (verbose)
8577                 {
8578                     printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8579                            "block %s\n",
8580                            callBlock->dspToString());
8581                 }
8582 #endif
8583             }
8584             else
8585             {
8586                 assert(step != DUMMY_INIT(NULL));
8587
8588                 /* Calling the finally block */
8589                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8590                 assert(step->bbJumpKind == BBJ_ALWAYS);
8591                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8592                                               // finally in the chain)
8593                 step->bbJumpDest->bbRefs++;
8594
8595                 /* The new block will inherit this block's weight */
8596                 callBlock->setBBWeight(block->bbWeight);
8597                 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8598
8599 #ifdef DEBUG
8600                 if (verbose)
8601                 {
8602                     printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
8603                            callBlock->dspToString());
8604                 }
8605 #endif
8606
8607                 GenTreePtr lastStmt;
8608
8609                 if (endCatches)
8610                 {
8611                     lastStmt         = gtNewStmt(endCatches);
8612                     endLFin->gtNext  = lastStmt;
8613                     lastStmt->gtPrev = endLFin;
8614                 }
8615                 else
8616                 {
8617                     lastStmt = endLFin;
8618                 }
8619
8620                 // note that this sets BBF_IMPORTED on the block
8621                 impEndTreeList(callBlock, endLFin, lastStmt);
8622             }
8623
8624             step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8625             /* The new block will inherit this block's weight */
8626             step->setBBWeight(block->bbWeight);
8627             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8628
8629 #ifdef DEBUG
8630             if (verbose)
8631             {
8632                 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
8633                        step->dspToString());
8634             }
8635 #endif
8636
8637             unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8638             assert(finallyNesting <= compHndBBtabCount);
8639
8640             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8641             endLFin               = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8642             endLFin               = gtNewStmt(endLFin);
8643             endCatches            = NULL;
8644
8645             encFinallies++;
8646
8647             invalidatePreds = true;
8648         }
8649     }
8650
8651     /* Append any remaining endCatches, if any */
8652
8653     assert(!encFinallies == !endLFin);
8654
8655     if (encFinallies == 0)
8656     {
8657         assert(step == DUMMY_INIT(NULL));
8658         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8659
8660         if (endCatches)
8661             impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8662
8663 #ifdef DEBUG
8664         if (verbose)
8665         {
8666             printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8667                    "block %s\n",
8668                    block->dspToString());
8669         }
8670 #endif
8671     }
8672     else
8673     {
8674         // If leaveTarget is the start of another try block, we want to make sure that
8675         // we do not insert finalStep into that try block. Hence, we find the enclosing
8676         // try block.
8677         unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8678
8679         // Insert a new BB either in the try region indicated by tryIndex or
8680         // the handler region indicated by leaveTarget->bbHndIndex,
8681         // depending on which is the inner region.
8682         BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8683         finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8684         step->bbJumpDest = finalStep;
8685
8686         /* The new block will inherit this block's weight */
8687         finalStep->setBBWeight(block->bbWeight);
8688         finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8689
8690 #ifdef DEBUG
8691         if (verbose)
8692         {
8693             printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
8694                    finalStep->dspToString());
8695         }
8696 #endif
8697
8698         GenTreePtr lastStmt;
8699
8700         if (endCatches)
8701         {
8702             lastStmt         = gtNewStmt(endCatches);
8703             endLFin->gtNext  = lastStmt;
8704             lastStmt->gtPrev = endLFin;
8705         }
8706         else
8707         {
8708             lastStmt = endLFin;
8709         }
8710
8711         impEndTreeList(finalStep, endLFin, lastStmt);
8712
8713         finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8714
8715         // Queue up the jump target for importing
8716
8717         impImportBlockPending(leaveTarget);
8718
8719         invalidatePreds = true;
8720     }
8721
8722     if (invalidatePreds && fgComputePredsDone)
8723     {
8724         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8725         fgRemovePreds();
8726     }
8727
8728 #ifdef DEBUG
8729     fgVerifyHandlerTab();
8730
8731     if (verbose)
8732     {
8733         printf("\nAfter import CEE_LEAVE:\n");
8734         fgDispBasicBlocks();
8735         fgDispHandlerTab();
8736     }
8737 #endif // DEBUG
8738 }
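// Illustrative sketch (not part of the original source): when the leave exits several catch handlers
// before reaching a finally-protected try, the pending CORINFO_HELP_ENDCATCH calls accumulate into a
// single comma tree, e.g. for three catches:
//
//     GT_COMMA( GT_COMMA( endcatch1, endcatch2 ), endcatch3 )
//
// That tree is either appended to the converted BBJ_CALLFINALLY block (for the first enclosing
// finally) or threaded in after the pending GT_END_LFIN statement once a previous call-to-finally
// has already been set up.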
8739
8740 #else // FEATURE_EH_FUNCLETS
8741
8742 void Compiler::impImportLeave(BasicBlock* block)
8743 {
8744 #ifdef DEBUG
8745     if (verbose)
8746     {
8747         printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8748         fgDispBasicBlocks();
8749         fgDispHandlerTab();
8750     }
8751 #endif // DEBUG
8752
8753     bool        invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8754     unsigned    blkAddr         = block->bbCodeOffs;
8755     BasicBlock* leaveTarget     = block->bbJumpDest;
8756     unsigned    jmpAddr         = leaveTarget->bbCodeOffs;
8757
8758     // LEAVE clears the stack, so spill side effects and set the stack depth to 0
8759
8760     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8761     verCurrentState.esStackDepth = 0;
8762
8763     assert(block->bbJumpKind == BBJ_LEAVE);
8764     assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8765
8766     BasicBlock* step = nullptr;
8767
8768     enum StepType
8769     {
8770         // No step type; step == NULL.
8771         ST_None,
8772
8773         // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8774         // That is, is step->bbJumpDest where a finally will return to?
8775         ST_FinallyReturn,
8776
8777         // The step block is a catch return.
8778         ST_Catch,
8779
8780         // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8781         ST_Try
8782     };
8783     StepType stepType = ST_None;
8784
8785     unsigned  XTnum;
8786     EHblkDsc* HBtab;
8787
8788     for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8789     {
8790         // Grab the handler offsets
8791
8792         IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8793         IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8794         IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8795         IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8796
8797         /* Is this a catch-handler we are CEE_LEAVEing out of?
8798          */
8799
8800         if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8801         {
8802             // Can't CEE_LEAVE out of a finally/fault handler
8803             if (HBtab->HasFinallyOrFaultHandler())
8804             {
8805                 BADCODE("leave out of fault/finally block");
8806             }
8807
8808             /* We are jumping out of a catch */
8809
8810             if (step == nullptr)
8811             {
8812                 step             = block;
8813                 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8814                 stepType         = ST_Catch;
8815
8816 #ifdef DEBUG
8817                 if (verbose)
8818                 {
8819                     printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8820                            "block\n",
8821                            XTnum, step->bbNum);
8822                 }
8823 #endif
8824             }
8825             else
8826             {
8827                 BasicBlock* exitBlock;
8828
8829                 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8830                  * scope */
8831                 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8832
8833                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8834                 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8835                                               // exit) returns to this block
8836                 step->bbJumpDest->bbRefs++;
8837
8838 #if defined(_TARGET_ARM_)
8839                 if (stepType == ST_FinallyReturn)
8840                 {
8841                     assert(step->bbJumpKind == BBJ_ALWAYS);
8842                     // Mark the target of a finally return
8843                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8844                 }
8845 #endif // defined(_TARGET_ARM_)
8846
8847                 /* The new block will inherit this block's weight */
8848                 exitBlock->setBBWeight(block->bbWeight);
8849                 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8850
8851                 /* This exit block is the new step */
8852                 step     = exitBlock;
8853                 stepType = ST_Catch;
8854
8855                 invalidatePreds = true;
8856
8857 #ifdef DEBUG
8858                 if (verbose)
8859                 {
8860                     printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8861                            exitBlock->bbNum);
8862                 }
8863 #endif
8864             }
8865         }
8866         else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8867                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8868         {
8869             /* We are jumping out of a finally-protected try */
8870
8871             BasicBlock* callBlock;
8872
8873             if (step == nullptr)
8874             {
8875 #if FEATURE_EH_CALLFINALLY_THUNKS
8876
8877                 // Put the call to the finally in the enclosing region.
8878                 unsigned callFinallyTryIndex =
8879                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8880                 unsigned callFinallyHndIndex =
8881                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8882                 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8883
8884                 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8885                 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8886                 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8887                 // next block, and flow optimizations will remove it.
8888                 block->bbJumpKind = BBJ_ALWAYS;
8889                 block->bbJumpDest = callBlock;
8890                 block->bbJumpDest->bbRefs++;
8891
8892                 /* The new block will inherit this block's weight */
8893                 callBlock->setBBWeight(block->bbWeight);
8894                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8895
8896 #ifdef DEBUG
8897                 if (verbose)
8898                 {
8899                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8900                            "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8901                            XTnum, block->bbNum, callBlock->bbNum);
8902                 }
8903 #endif
8904
8905 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8906
8907                 callBlock             = block;
8908                 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8909
8910 #ifdef DEBUG
8911                 if (verbose)
8912                 {
8913                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8914                            "BBJ_CALLFINALLY block\n",
8915                            XTnum, callBlock->bbNum);
8916                 }
8917 #endif
8918
8919 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8920             }
8921             else
8922             {
8923                 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8924                 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8925                 // a 'finally'), or the step block is the return from a catch.
8926                 //
8927                 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8928                 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8929                 // automatically re-raise the exception, using the return address of the catch (that is, the target
8930                 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8931                 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8932                 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8933                 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8934                 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8935                 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8936                 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8937                 // stack walks.)
8938
8939                 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8940
8941 #if FEATURE_EH_CALLFINALLY_THUNKS
8942                 if (step->bbJumpKind == BBJ_EHCATCHRET)
8943                 {
8944                     // Need to create another step block in the 'try' region that will actually branch to the
8945                     // call-to-finally thunk.
8946                     BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8947                     step->bbJumpDest  = step2;
8948                     step->bbJumpDest->bbRefs++;
8949                     step2->setBBWeight(block->bbWeight);
8950                     step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8951
8952 #ifdef DEBUG
8953                     if (verbose)
8954                     {
8955                         printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8956                                "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8957                                XTnum, step->bbNum, step2->bbNum);
8958                     }
8959 #endif
8960
8961                     step = step2;
8962                     assert(stepType == ST_Catch); // Leave it as catch type for now.
8963                 }
8964 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8965
8966 #if FEATURE_EH_CALLFINALLY_THUNKS
8967                 unsigned callFinallyTryIndex =
8968                     (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8969                 unsigned callFinallyHndIndex =
8970                     (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8971 #else  // !FEATURE_EH_CALLFINALLY_THUNKS
8972                 unsigned callFinallyTryIndex = XTnum + 1;
8973                 unsigned callFinallyHndIndex = 0; // don't care
8974 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8975
8976                 callBlock        = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8977                 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8978                                               // finally in the chain)
8979                 step->bbJumpDest->bbRefs++;
8980
8981 #if defined(_TARGET_ARM_)
8982                 if (stepType == ST_FinallyReturn)
8983                 {
8984                     assert(step->bbJumpKind == BBJ_ALWAYS);
8985                     // Mark the target of a finally return
8986                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8987                 }
8988 #endif // defined(_TARGET_ARM_)
8989
8990                 /* The new block will inherit this block's weight */
8991                 callBlock->setBBWeight(block->bbWeight);
8992                 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8993
8994 #ifdef DEBUG
8995                 if (verbose)
8996                 {
8997                     printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8998                            "BB%02u\n",
8999                            XTnum, callBlock->bbNum);
9000                 }
9001 #endif
9002             }
9003
9004             step     = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9005             stepType = ST_FinallyReturn;
9006
9007             /* The new block will inherit this block's weight */
9008             step->setBBWeight(block->bbWeight);
9009             step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9010
9011 #ifdef DEBUG
9012             if (verbose)
9013             {
9014                 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9015                        "block BB%02u\n",
9016                        XTnum, step->bbNum);
9017             }
9018 #endif
9019
9020             callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9021
9022             invalidatePreds = true;
9023         }
9024         else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9025                  !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9026         {
9027             // We are jumping out of a catch-protected try.
9028             //
9029             // If we are returning from a call to a finally, then we must have a step block within a try
9030             // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
9031             // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9032             // and invoke the appropriate catch.
9033             //
9034             // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9035             // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9036             // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9037             // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9038             // address of the catch return as the new exception address. That is, the re-raised exception appears to
9039             // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9040             // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
9041             // For example:
9042             //
9043             // try {
9044             //    try {
9045             //       // something here raises ThreadAbortException
9046             //       LEAVE LABEL_1; // no need to stop at LABEL_2
9047             //    } catch (Exception) {
9048             //       // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9049             //       // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9050             //       // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9051             //       // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9052             //       // need to do this transformation if the current EH block is a try/catch that catches
9053             //       // ThreadAbortException (or one of its parents), however we might not be able to find that
9054             //       // information, so currently we do it for all catch types.
9055             //       LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
9056             //    }
9057             //    LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9058             // } catch (ThreadAbortException) {
9059             // }
9060             // LABEL_1:
9061             //
9062             // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
9063             // compiler.
9064
9065             if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9066             {
9067                 BasicBlock* catchStep;
9068
9069                 assert(step);
9070
9071                 if (stepType == ST_FinallyReturn)
9072                 {
9073                     assert(step->bbJumpKind == BBJ_ALWAYS);
9074                 }
9075                 else
9076                 {
9077                     assert(stepType == ST_Catch);
9078                     assert(step->bbJumpKind == BBJ_EHCATCHRET);
9079                 }
9080
9081                 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9082                 catchStep        = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9083                 step->bbJumpDest = catchStep;
9084                 step->bbJumpDest->bbRefs++;
9085
9086 #if defined(_TARGET_ARM_)
9087                 if (stepType == ST_FinallyReturn)
9088                 {
9089                     // Mark the target of a finally return
9090                     step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9091                 }
9092 #endif // defined(_TARGET_ARM_)
9093
9094                 /* The new block will inherit this block's weight */
9095                 catchStep->setBBWeight(block->bbWeight);
9096                 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9097
9098 #ifdef DEBUG
9099                 if (verbose)
9100                 {
9101                     if (stepType == ST_FinallyReturn)
9102                     {
9103                         printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9104                                "BBJ_ALWAYS block BB%02u\n",
9105                                XTnum, catchStep->bbNum);
9106                     }
9107                     else
9108                     {
9109                         assert(stepType == ST_Catch);
9110                         printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9111                                "BBJ_ALWAYS block BB%02u\n",
9112                                XTnum, catchStep->bbNum);
9113                     }
9114                 }
9115 #endif // DEBUG
9116
9117                 /* This block is the new step */
9118                 step     = catchStep;
9119                 stepType = ST_Try;
9120
9121                 invalidatePreds = true;
9122             }
9123         }
9124     }
9125
9126     if (step == nullptr)
9127     {
9128         block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9129
9130 #ifdef DEBUG
9131         if (verbose)
9132         {
9133             printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9134                    "block BB%02u to BBJ_ALWAYS\n",
9135                    block->bbNum);
9136         }
9137 #endif
9138     }
9139     else
9140     {
9141         step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9142
9143 #if defined(_TARGET_ARM_)
9144         if (stepType == ST_FinallyReturn)
9145         {
9146             assert(step->bbJumpKind == BBJ_ALWAYS);
9147             // Mark the target of a finally return
9148             step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9149         }
9150 #endif // defined(_TARGET_ARM_)
9151
9152 #ifdef DEBUG
9153         if (verbose)
9154         {
9155             printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
9156         }
9157 #endif
9158
9159         // Queue up the jump target for importing
9160
9161         impImportBlockPending(leaveTarget);
9162     }
9163
9164     if (invalidatePreds && fgComputePredsDone)
9165     {
9166         JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9167         fgRemovePreds();
9168     }
9169
9170 #ifdef DEBUG
9171     fgVerifyHandlerTab();
9172
9173     if (verbose)
9174     {
9175         printf("\nAfter import CEE_LEAVE:\n");
9176         fgDispBasicBlocks();
9177         fgDispHandlerTab();
9178     }
9179 #endif // DEBUG
9180 }
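// Illustrative sketch (not part of the original source): under the funclet model, leaving a catch
// that is itself inside a finally-protected try produces roughly the following chain:
//
//     BBJ_EHCATCHRET (exit the catch)
//        -> BBJ_ALWAYS step block (inside the 'try')
//        -> BBJ_CALLFINALLY / BBJ_ALWAYS pair (invoke the finally)
//        -> leave target
//
// with an additional step block created for each further region the leave crosses, as handled above.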
9181
9182 #endif // FEATURE_EH_FUNCLETS
9183
9184 /*****************************************************************************/
9185 // This is called when reimporting a leave block. It resets the JumpKind,
9186 // JumpDest, and bbNext to the original values
9187
9188 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9189 {
9190 #if FEATURE_EH_FUNCLETS
9191     // With EH funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it
9192     // B1), and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY.  If for some reason we reimport
9193     // B0, it is reset (in this routine) by marking it as ending with BBJ_LEAVE, and further down, when B0 is
9194     // reimported, we create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks to which
9195     // B1 is the only predecessor are also considered orphans and attempted to be deleted.
9196     //
9197     //  try  {
9198     //     ....
9199     //     try
9200     //     {
9201     //         ....
9202     //         leave OUTSIDE;  // B0 is the block containing this leave, following this would be B1
9203     //     } finally { }
9204     //  } finally { }
9205     //  OUTSIDE:
9206     //
9207     // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
9208     // that a finally would branch to (such a block is marked as a finally target).  Block B1 branches to the step block.
9209     // Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target, it cannot be removed.  To
9210     // work around this we will duplicate B0 (call it B0Dup) before resetting it. B0Dup is marked as BBJ_CALLFINALLY and
9211     // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
9212     // will be treated as pair and handled correctly.
9213     if (block->bbJumpKind == BBJ_CALLFINALLY)
9214     {
9215         BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
9216         dupBlock->bbFlags    = block->bbFlags;
9217         dupBlock->bbJumpDest = block->bbJumpDest;
9218         dupBlock->copyEHRegion(block);
9219         dupBlock->bbCatchTyp = block->bbCatchTyp;
9220
9221         // Mark this block as
9222         //  a) not referenced by any other block to make sure that it gets deleted
9223         //  b) weight zero
9224         //  c) prevent from being imported
9225         //  d) as internal
9226         //  e) as rarely run
9227         dupBlock->bbRefs   = 0;
9228         dupBlock->bbWeight = 0;
9229         dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
9230
9231         // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
9232         // will be next to each other.
9233         fgInsertBBafter(block, dupBlock);
9234
9235 #ifdef DEBUG
9236         if (verbose)
9237         {
9238             printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
9239         }
9240 #endif
9241     }
9242 #endif // FEATURE_EH_FUNCLETS
9243
9244     block->bbJumpKind = BBJ_LEAVE;
9245     fgInitBBLookup();
9246     block->bbJumpDest = fgLookupBB(jmpAddr);
9247
9248     // We will leave the BBJ_ALWAYS block we introduced. When it's reimported,
9249     // the BBJ_ALWAYS block will be unreachable and will be removed afterwards. The
9250     // reason we don't want to remove the block at this point is that if we call
9251     // fgInitBBLookup() again we will do it wrong, as the BBJ_ALWAYS block won't be
9252     // added and the linked list length will be different than fgBBcount.
9253 }
9254
9255 /*****************************************************************************/
9256 // Get the first non-prefix opcode. Used for verification of valid combinations
9257 // of prefixes and actual opcodes.
9258
9259 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9260 {
9261     while (codeAddr < codeEndp)
9262     {
9263         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9264         codeAddr += sizeof(__int8);
9265
9266         if (opcode == CEE_PREFIX1)
9267         {
9268             if (codeAddr >= codeEndp)
9269             {
9270                 break;
9271             }
9272             opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9273             codeAddr += sizeof(__int8);
9274         }
9275
9276         switch (opcode)
9277         {
9278             case CEE_UNALIGNED:
9279             case CEE_VOLATILE:
9280             case CEE_TAILCALL:
9281             case CEE_CONSTRAINED:
9282             case CEE_READONLY:
9283                 break;
9284             default:
9285                 return opcode;
9286         }
9287
9288         codeAddr += opcodeSizes[opcode];
9289     }
9290
9291     return CEE_ILLEGAL;
9292 }
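// Illustrative sketch (not part of the original source): for an IL byte sequence such as
//
//     volatile. unaligned. 1 ldind.i4
//
// this helper skips the volatile. and unaligned. prefixes (and their operands, using opcodeSizes[])
// and returns CEE_LDIND_I4; if the stream ends while only prefixes have been seen, it returns
// CEE_ILLEGAL.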
9293
9294 /*****************************************************************************/
9295 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
9296
9297 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9298 {
9299     OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9300
9301     if (!(
9302             // Opcodes of all ldind and stind happen to be contiguous, except stind.i.
9303             ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9304             (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9305             (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9306             // volatile. prefix is allowed with the ldsfld and stsfld
9307             (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9308     {
9309         BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9310     }
9311 }
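// Illustrative sketch (not part of the original source): the check above accepts sequences such as
//
//     volatile. ldsfld int32 C::s_value     // ok: volatile. is allowed on ldsfld/stsfld
//     unaligned. 1 ldind.i4                 // ok: ldind/stind group
//
// but rejects something like "volatile. add" with BADCODE, since 'add' is not a memory-access opcode.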
9312
9313 /*****************************************************************************/
9314
9315 #ifdef DEBUG
9316
9317 #undef RETURN // undef contracts RETURN macro
9318
9319 enum controlFlow_t
9320 {
9321     NEXT,
9322     CALL,
9323     RETURN,
9324     THROW,
9325     BRANCH,
9326     COND_BRANCH,
9327     BREAK,
9328     PHI,
9329     META,
9330 };
9331
9332 const static controlFlow_t controlFlow[] = {
9333 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9334 #include "opcode.def"
9335 #undef OPDEF
9336 };
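// Each OPDEF entry in opcode.def supplies its control-flow kind as its last argument; the table
// above keeps only that column. For example (illustrative), the "ret" row maps to RETURN,
// "br" to BRANCH, and "call" to CALL.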
9337
9338 #endif // DEBUG
9339
9340 /*****************************************************************************
9341  *  Determine the result type of an arithmetic operation.
9342  *  On 64-bit targets, inserts upcasts when native int is mixed with int32.
9343  */
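// Summary of the cases handled below (illustrative):
//   byref - byref                => native int
//   [native] int - byref         => native int (managed C++ quirk, see VSW 318822 below)
//   byref - [native] int         => byref
//   byref + [native] int         => byref (either operand order)
//   int mixed with native int    => native int (64-bit targets; upcasts are inserted)
//   int mixed with long          => long (32-bit targets)
//   int op int                   => int (floats may be widened to double per the rules below)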
9344 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9345 {
9346     var_types  type = TYP_UNDEF;
9347     GenTreePtr op1 = *pOp1, op2 = *pOp2;
9348
9349     // Arithmetic operations are generally only allowed with
9350     // primitive types, but certain operations are allowed
9351     // with byrefs
9352
9353     if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9354     {
9355         if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9356         {
9357             // byref1-byref2 => gives a native int
9358             type = TYP_I_IMPL;
9359         }
9360         else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9361         {
9362             // [native] int - byref => gives a native int
9363
9364             //
9365             // The reason is that it is possible, in managed C++,
9366             // to have a tree like this:
9367             //
9368             //              -
9369             //             / \
9370             //            /   \
9371             //           /     \
9372             //          /       \
9373             // const(h) int     addr byref
9374             //
9375             // <BUGNUM> VSW 318822 </BUGNUM>
9376             //
9377             // So here we decide to make the resulting type a native int.
9378             CLANG_FORMAT_COMMENT_ANCHOR;
9379
9380 #ifdef _TARGET_64BIT_
9381             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9382             {
9383                 // insert an explicit upcast
9384                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9385             }
9386 #endif // _TARGET_64BIT_
9387
9388             type = TYP_I_IMPL;
9389         }
9390         else
9391         {
9392             // byref - [native] int => gives a byref
9393             assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9394
9395 #ifdef _TARGET_64BIT_
9396             if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9397             {
9398                 // insert an explicit upcast
9399                 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9400             }
9401 #endif // _TARGET_64BIT_
9402
9403             type = TYP_BYREF;
9404         }
9405     }
9406     else if ((oper == GT_ADD) &&
9407              (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9408     {
9409         // byref + [native] int => gives a byref
9410         // (or)
9411         // [native] int + byref => gives a byref
9412
9413         // only one can be a byref : byref op byref not allowed
9414         assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9415         assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9416
9417 #ifdef _TARGET_64BIT_
9418         if (genActualType(op2->TypeGet()) == TYP_BYREF)
9419         {
9420             if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9421             {
9422                 // insert an explicit upcast
9423                 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9424             }
9425         }
9426         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9427         {
9428             // insert an explicit upcast
9429             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9430         }
9431 #endif // _TARGET_64BIT_
9432
9433         type = TYP_BYREF;
9434     }
9435 #ifdef _TARGET_64BIT_
9436     else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9437     {
9438         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9439
9440         // int + native int => gives native int
9441         // native int + int => gives native int
9442         // We get here because the operand that looks like a long is really a native int (IntPtr) in the IL, not an Int64.
9443
9444         if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9445         {
9446             // insert an explicit upcast
9447             op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9448         }
9449         else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9450         {
9451             // insert an explicit upcast
9452             op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9453         }
9454
9455         type = TYP_I_IMPL;
9456     }
9457 #else  // 32-bit TARGET
9458     else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9459     {
9460         assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9461
9462         // int + long => gives long
9463         // long + int => gives long
9464
9465         type = TYP_LONG;
9466     }
9467 #endif // _TARGET_64BIT_
9468     else
9469     {
9470         // int + int => gives an int
9471         assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9472
9473         assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9474                varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9475
9476         type = genActualType(op1->gtType);
9477
9478 #if FEATURE_X87_DOUBLES
9479
9480         // For x87, since we only have 1 size of registers, prefer double
9481         // For everybody else, be more precise
9482         if (type == TYP_FLOAT)
9483             type = TYP_DOUBLE;
9484
9485 #else // !FEATURE_X87_DOUBLES
9486
9487         // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9488         // Otherwise, turn floats into doubles
9489         if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9490         {
9491             assert(genActualType(op2->gtType) == TYP_DOUBLE);
9492             type = TYP_DOUBLE;
9493         }
9494
9495 #endif // FEATURE_X87_DOUBLES
9496     }
9497
9498 #if FEATURE_X87_DOUBLES
9499     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9500 #else  // FEATURE_X87_DOUBLES
9501     assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9502 #endif // FEATURE_X87_DOUBLES
9503
9504     return type;
9505 }
9506
9507 //------------------------------------------------------------------------
9508 // impCastClassOrIsInstToTree: build and import castclass/isinst
9509 //
9510 // Arguments:
9511 //   op1 - value to cast
9512 //   op2 - type handle for type to cast to
9513 //   pResolvedToken - resolved token from the cast operation
9514 //   isCastClass - true for castclass, false for isinst
9515 //
9516 // Return Value:
9517 //   Tree representing the cast
9518 //
9519 // Notes:
9520 //   May expand into a series of runtime checks or a helper call.
9521
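//
//   Sketch (illustrative only): when the inline expansion below is used, the resulting
//   tree behaves roughly like:
//
//     tmp = op1;
//     result = (tmp == null) ? tmp
//            : (methodTableOf(tmp) == op2) ? tmp
//            : isCastClass ? CORINFO_HELP_CHKCASTCLASS_SPECIAL(op2, tmp)
//                          : null;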
9522 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr              op1,
9523                                                 GenTreePtr              op2,
9524                                                 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9525                                                 bool                    isCastClass)
9526 {
9527     assert(op1->TypeGet() == TYP_REF);
9528
9529     // Optimistically assume the jit should expand this as an inline test
9530     bool shouldExpandInline = true;
9531
9532     // Profitability check.
9533     //
9534     // Don't bother with inline expansion when jit is trying to
9535     // generate code quickly, or the cast is in code that won't run very
9536     // often, or the method already is pretty big.
9537     if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
9538     {
9539         // not worth the code expansion if jitting fast or in a rarely run block
9540         shouldExpandInline = false;
9541     }
9542     else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9543     {
9544         // not worth creating an untracked local variable
9545         shouldExpandInline = false;
9546     }
9547
9548     // Pessimistically assume the jit cannot expand this as an inline test
9549     bool                  canExpandInline = false;
9550     const CorInfoHelpFunc helper          = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9551
9552     // Legality check.
9553     //
9554     // Not all castclass/isinst operations can be inline expanded.
9555     // Check legality only if an inline expansion is desirable.
9556     if (shouldExpandInline)
9557     {
9558         if (isCastClass)
9559         {
9560             // Jit can only inline expand the normal CHKCASTCLASS helper.
9561             canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9562         }
9563         else
9564         {
9565             if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9566             {
9567                 // Check the class attributes.
9568                 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9569
9570                 // If the class is final and is not marshal byref or
9571                 // contextful, the jit can expand the IsInst check inline.
9572                 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
9573                 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9574             }
9575         }
9576     }
9577
9578     const bool expandInline = canExpandInline && shouldExpandInline;
9579
9580     if (!expandInline)
9581     {
9582         JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
9583                 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
9584
9585         // If we CSE this class handle we prevent assertionProp from making SubType assertions
9586         // so instead we force the CSE logic to not consider CSE-ing this class handle.
9587         //
9588         op2->gtFlags |= GTF_DONT_CSE;
9589
9590         return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
9591     }
9592
9593     JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
9594
9595     impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9596
9597     GenTreePtr temp;
9598     GenTreePtr condMT;
9599     //
9600     // expand the methodtable match:
9601     //
9602     //  condMT ==>   GT_NE
9603     //               /    \
9604     //           GT_IND   op2 (typically CNS_INT)
9605     //              |
9606     //           op1Copy
9607     //
9608
9609     // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9610     //
9611     op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9612     //
9613     // op1 is now known to be a non-complex tree
9614     // thus we can use gtClone(op1) from now on
9615     //
9616
9617     GenTreePtr op2Var = op2;
9618     if (isCastClass)
9619     {
9620         op2Var                                                  = fgInsertCommaFormTemp(&op2);
9621         lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9622     }
9623     temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9624     temp->gtFlags |= GTF_EXCEPT;
9625     condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9626
9627     GenTreePtr condNull;
9628     //
9629     // expand the null check:
9630     //
9631     //  condNull ==>   GT_EQ
9632     //                 /    \
9633     //             op1Copy CNS_INT
9634     //                      null
9635     //
9636     condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9637
9638     //
9639     // expand the true and false trees for the condMT
9640     //
9641     GenTreePtr condFalse = gtClone(op1);
9642     GenTreePtr condTrue;
9643     if (isCastClass)
9644     {
9645         //
9646         // use the special helper that skips the cases checked by our inlined cast
9647         //
9648         const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9649
9650         condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
9651     }
9652     else
9653     {
9654         condTrue = gtNewIconNode(0, TYP_REF);
9655     }
9656
9657 #define USE_QMARK_TREES
9658
9659 #ifdef USE_QMARK_TREES
9660     GenTreePtr qmarkMT;
9661     //
9662     // Generate first QMARK - COLON tree
9663     //
9664     //  qmarkMT ==>   GT_QMARK
9665     //                 /     \
9666     //            condMT   GT_COLON
9667     //                      /     \
9668     //                condFalse  condTrue
9669     //
9670     temp    = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9671     qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9672     condMT->gtFlags |= GTF_RELOP_QMARK;
9673
9674     GenTreePtr qmarkNull;
9675     //
9676     // Generate second QMARK - COLON tree
9677     //
9678     //  qmarkNull ==>  GT_QMARK
9679     //                 /     \
9680     //           condNull  GT_COLON
9681     //                      /     \
9682     //                qmarkMT   op1Copy
9683     //
9684     temp      = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9685     qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9686     qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9687     condNull->gtFlags |= GTF_RELOP_QMARK;
9688
9689     // Make QMark node a top level node by spilling it.
9690     unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9691     impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9692
9693     // TODO: Is it possible op1 has a better type?
9694     lvaSetClass(tmp, pResolvedToken->hClass);
9695     return gtNewLclvNode(tmp, TYP_REF);
9696 #endif
9697 }
9698
9699 #ifndef DEBUG
9700 #define assertImp(cond) ((void)0)
9701 #else
9702 #define assertImp(cond)                                                                                                \
9703     do                                                                                                                 \
9704     {                                                                                                                  \
9705         if (!(cond))                                                                                                   \
9706         {                                                                                                              \
9707             const int cchAssertImpBuf = 600;                                                                           \
9708             char*     assertImpBuf    = (char*)alloca(cchAssertImpBuf);                                                \
9709             _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1,                                            \
9710                         "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond,         \
9711                         impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL",                      \
9712                         op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth);                     \
9713             assertAbort(assertImpBuf, __FILE__, __LINE__);                                                             \
9714         }                                                                                                              \
9715     } while (0)
9716 #endif // DEBUG
9717
9718 #ifdef _PREFAST_
9719 #pragma warning(push)
9720 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9721 #endif
9722 /*****************************************************************************
9723  *  Import the instr for the given basic block
9724  */
9725 void Compiler::impImportBlockCode(BasicBlock* block)
9726 {
9727 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9728
9729 #ifdef DEBUG
9730
9731     if (verbose)
9732     {
9733         printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9734     }
9735 #endif
9736
9737     unsigned  nxtStmtIndex = impInitBlockLineInfo();
9738     IL_OFFSET nxtStmtOffs;
9739
9740     GenTreePtr                   arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9741     CorInfoHelpFunc              helper;
9742     CorInfoIsAccessAllowedResult accessAllowedResult;
9743     CORINFO_HELPER_DESC          calloutHelper;
9744     const BYTE*                  lastLoadToken = nullptr;
9745
9746     // reject cyclic constraints
9747     if (tiVerificationNeeded)
9748     {
9749         Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9750         Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9751     }
9752
9753     /* Get the tree list started */
9754
9755     impBeginTreeList();
9756
9757     /* Walk the opcodes that comprise the basic block */
9758
9759     const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9760     const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9761
9762     IL_OFFSET opcodeOffs    = block->bbCodeOffs;
9763     IL_OFFSET lastSpillOffs = opcodeOffs;
9764
9765     signed jmpDist;
9766
9767     /* remember the start of the delegate creation sequence (used for verification) */
9768     const BYTE* delegateCreateStart = nullptr;
9769
9770     int  prefixFlags = 0;
9771     bool explicitTailCall, constraintCall, readonlyCall;
9772
9773     typeInfo tiRetVal;
9774
9775     unsigned numArgs = info.compArgsCount;
9776
9777     /* Now process all the opcodes in the block */
9778
9779     var_types callTyp    = TYP_COUNT;
9780     OPCODE    prevOpcode = CEE_ILLEGAL;
9781
9782     if (block->bbCatchTyp)
9783     {
9784         if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9785         {
9786             impCurStmtOffsSet(block->bbCodeOffs);
9787         }
9788
9789         // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9790         // to a temp. This is a trade-off for code simplicity
9791         impSpillSpecialSideEff();
9792     }
9793
9794     while (codeAddr < codeEndp)
9795     {
9796         bool                   usingReadyToRunHelper = false;
9797         CORINFO_RESOLVED_TOKEN resolvedToken;
9798         CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9799         CORINFO_CALL_INFO      callInfo;
9800         CORINFO_FIELD_INFO     fieldInfo;
9801
9802         tiRetVal = typeInfo(); // Default type info
9803
9804         //---------------------------------------------------------------------
9805
9806         /* We need to restrict the max tree depth as many of the Compiler
9807            functions are recursive. We do this by spilling the stack */
9808
9809         if (verCurrentState.esStackDepth)
9810         {
9811             /* Has it been a while since the stack was last empty or spilled? (An empty
9812                stack guarantees that the tree depth isn't accumulating.) */
9813
9814             if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
9815             {
9816                 impSpillStackEnsure();
9817                 lastSpillOffs = opcodeOffs;
9818             }
9819         }
9820         else
9821         {
9822             lastSpillOffs   = opcodeOffs;
9823             impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9824         }
9825
9826         /* Compute the current instr offset */
9827
9828         opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9829
9830 #ifndef DEBUG
9831         if (opts.compDbgInfo)
9832 #endif
9833         {
9834             if (!compIsForInlining())
9835             {
9836                 nxtStmtOffs =
9837                     (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9838
9839                 /* Have we reached the next stmt boundary ? */
9840
9841                 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9842                 {
9843                     assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9844
9845                     if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9846                     {
9847                         /* We need to provide accurate IP-mapping at this point.
9848                            So spill anything on the stack so that it will form
9849                            gtStmts with the correct stmt offset noted */
9850
9851                         impSpillStackEnsure(true);
9852                     }
9853
9854                     // Has impCurStmtOffs been reported in any tree?
9855
9856                     if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9857                     {
9858                         GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9859                         impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9860
9861                         assert(impCurStmtOffs == BAD_IL_OFFSET);
9862                     }
9863
9864                     if (impCurStmtOffs == BAD_IL_OFFSET)
9865                     {
9866                         /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9867                            If opcodeOffs has gone past nxtStmtIndex, catch up */
9868
9869                         while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9870                                info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9871                         {
9872                             nxtStmtIndex++;
9873                         }
9874
9875                         /* Go to the new stmt */
9876
9877                         impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9878
9879                         /* Update the stmt boundary index */
9880
9881                         nxtStmtIndex++;
9882                         assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9883
9884                         /* Are there any more line# entries after this one? */
9885
9886                         if (nxtStmtIndex < info.compStmtOffsetsCount)
9887                         {
9888                             /* Remember where the next line# starts */
9889
9890                             nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9891                         }
9892                         else
9893                         {
9894                             /* No more line# entries */
9895
9896                             nxtStmtOffs = BAD_IL_OFFSET;
9897                         }
9898                     }
9899                 }
9900                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9901                          (verCurrentState.esStackDepth == 0))
9902                 {
9903                     /* At stack-empty locations, we have already added the tree to
9904                        the stmt list with the last offset. We just need to update
9905                        impCurStmtOffs
9906                      */
9907
9908                     impCurStmtOffsSet(opcodeOffs);
9909                 }
9910                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9911                          impOpcodeIsCallSiteBoundary(prevOpcode))
9912                 {
9913                     /* Make sure we have a type cached */
9914                     assert(callTyp != TYP_COUNT);
9915
9916                     if (callTyp == TYP_VOID)
9917                     {
9918                         impCurStmtOffsSet(opcodeOffs);
9919                     }
9920                     else if (opts.compDbgCode)
9921                     {
9922                         impSpillStackEnsure(true);
9923                         impCurStmtOffsSet(opcodeOffs);
9924                     }
9925                 }
9926                 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9927                 {
9928                     if (opts.compDbgCode)
9929                     {
9930                         impSpillStackEnsure(true);
9931                     }
9932
9933                     impCurStmtOffsSet(opcodeOffs);
9934                 }
9935
9936                 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9937                        jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9938             }
9939         }
9940
9941         CORINFO_CLASS_HANDLE clsHnd       = DUMMY_INIT(NULL);
9942         CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9943         CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9944
9945         var_types       lclTyp, ovflType = TYP_UNKNOWN;
9946         GenTreePtr      op1           = DUMMY_INIT(NULL);
9947         GenTreePtr      op2           = DUMMY_INIT(NULL);
9948         GenTreeArgList* args          = nullptr; // What good do these "DUMMY_INIT"s do?
9949         GenTreePtr      newObjThisPtr = DUMMY_INIT(NULL);
9950         bool            uns           = DUMMY_INIT(false);
9951         bool            isLocal       = false;
9952
9953         /* Get the next opcode and the size of its parameters */
9954
9955         OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9956         codeAddr += sizeof(__int8);
9957
9958 #ifdef DEBUG
9959         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9960         JITDUMP("\n    [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9961 #endif
9962
9963     DECODE_OPCODE:
9964
9965         // Return if any previous code has caused inline to fail.
9966         if (compDonotInline())
9967         {
9968             return;
9969         }
9970
9971         /* Get the size of additional parameters */
9972
9973         signed int sz = opcodeSizes[opcode];
9974
9975 #ifdef DEBUG
9976         clsHnd  = NO_CLASS_HANDLE;
9977         lclTyp  = TYP_COUNT;
9978         callTyp = TYP_COUNT;
9979
9980         impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9981         impCurOpcName = opcodeNames[opcode];
9982
9983         if (verbose && (opcode != CEE_PREFIX1))
9984         {
9985             printf("%s", impCurOpcName);
9986         }
9987
9988         /* Use assertImp() to display the opcode */
9989
9990         op1 = op2 = nullptr;
9991 #endif
9992
9993         /* See what kind of an opcode we have, then */
9994
9995         unsigned mflags   = 0;
9996         unsigned clsFlags = 0;
9997
9998         switch (opcode)
9999         {
10000             unsigned  lclNum;
10001             var_types type;
10002
10003             GenTreePtr op3;
10004             genTreeOps oper;
10005             unsigned   size;
10006
10007             int val;
10008
10009             CORINFO_SIG_INFO     sig;
10010             unsigned             flags;
10011             IL_OFFSET            jmpAddr;
10012             bool                 ovfl, unordered, callNode;
10013             bool                 ldstruct;
10014             CORINFO_CLASS_HANDLE tokenType;
10015
10016             union {
10017                 int     intVal;
10018                 float   fltVal;
10019                 __int64 lngVal;
10020                 double  dblVal;
10021             } cval;
10022
10023             case CEE_PREFIX1:
10024                 opcode     = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10025                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10026                 codeAddr += sizeof(__int8);
10027                 goto DECODE_OPCODE;
10028
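            // Shared tails used by many of the cases below: SPILL_APPEND appends 'op1' with
            // CHECK_SPILL_ALL (spilling stack entries that could interfere with it), APPEND
            // appends with CHECK_SPILL_NONE, and both jump to DONE_APPEND, which records the
            // IL offset of the finished tree in DEBUG builds.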
10029             SPILL_APPEND:
10030
10031                 // We need to call impSpillLclRefs() for a struct type lclVar.
10032                 // This is done for non-block assignments in the handling of stloc.
10033                 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10034                     (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10035                 {
10036                     impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10037                 }
10038
10039                 /* Append 'op1' to the list of statements */
10040                 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10041                 goto DONE_APPEND;
10042
10043             APPEND:
10044
10045                 /* Append 'op1' to the list of statements */
10046
10047                 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10048                 goto DONE_APPEND;
10049
10050             DONE_APPEND:
10051
10052 #ifdef DEBUG
10053                 // Remember at which BC offset the tree was finished
10054                 impNoteLastILoffs();
10055 #endif
10056                 break;
10057
10058             case CEE_LDNULL:
10059                 impPushNullObjRefOnStack();
10060                 break;
10061
10062             case CEE_LDC_I4_M1:
10063             case CEE_LDC_I4_0:
10064             case CEE_LDC_I4_1:
10065             case CEE_LDC_I4_2:
10066             case CEE_LDC_I4_3:
10067             case CEE_LDC_I4_4:
10068             case CEE_LDC_I4_5:
10069             case CEE_LDC_I4_6:
10070             case CEE_LDC_I4_7:
10071             case CEE_LDC_I4_8:
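                // ldc.i4.m1 through ldc.i4.8 have contiguous opcode values, with CEE_LDC_I4_M1
                // sitting one below CEE_LDC_I4_0, so the subtraction below yields -1..8.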
10072                 cval.intVal = (opcode - CEE_LDC_I4_0);
10073                 assert(-1 <= cval.intVal && cval.intVal <= 8);
10074                 goto PUSH_I4CON;
10075
10076             case CEE_LDC_I4_S:
10077                 cval.intVal = getI1LittleEndian(codeAddr);
10078                 goto PUSH_I4CON;
10079             case CEE_LDC_I4:
10080                 cval.intVal = getI4LittleEndian(codeAddr);
10081                 goto PUSH_I4CON;
10082             PUSH_I4CON:
10083                 JITDUMP(" %d", cval.intVal);
10084                 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10085                 break;
10086
10087             case CEE_LDC_I8:
10088                 cval.lngVal = getI8LittleEndian(codeAddr);
10089                 JITDUMP(" 0x%016llx", cval.lngVal);
10090                 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10091                 break;
10092
10093             case CEE_LDC_R8:
10094                 cval.dblVal = getR8LittleEndian(codeAddr);
10095                 JITDUMP(" %#.17g", cval.dblVal);
10096                 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10097                 break;
10098
10099             case CEE_LDC_R4:
10100                 cval.dblVal = getR4LittleEndian(codeAddr);
10101                 JITDUMP(" %#.17g", cval.dblVal);
10102                 {
10103                     GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
10104 #if !FEATURE_X87_DOUBLES
10105                     // The x87 FP stack doesn't differentiate between float and double,
10106                     // so on x87 an R4 constant is treated as R8; every other target keeps it as float.
10107                     cnsOp->gtType = TYP_FLOAT;
10108 #endif // FEATURE_X87_DOUBLES
10109                     impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10110                 }
10111                 break;
10112
10113             case CEE_LDSTR:
10114
10115                 if (compIsForInlining())
10116                 {
10117                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10118                     {
10119                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
10120                         return;
10121                     }
10122                 }
10123
10124                 val = getU4LittleEndian(codeAddr);
10125                 JITDUMP(" %08X", val);
10126                 if (tiVerificationNeeded)
10127                 {
10128                     Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
10129                     tiRetVal = typeInfo(TI_REF, impGetStringClass());
10130                 }
10131                 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
10132
10133                 break;
10134
10135             case CEE_LDARG:
10136                 lclNum = getU2LittleEndian(codeAddr);
10137                 JITDUMP(" %u", lclNum);
10138                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10139                 break;
10140
10141             case CEE_LDARG_S:
10142                 lclNum = getU1LittleEndian(codeAddr);
10143                 JITDUMP(" %u", lclNum);
10144                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10145                 break;
10146
10147             case CEE_LDARG_0:
10148             case CEE_LDARG_1:
10149             case CEE_LDARG_2:
10150             case CEE_LDARG_3:
10151                 lclNum = (opcode - CEE_LDARG_0);
10152                 assert(lclNum >= 0 && lclNum < 4);
10153                 impLoadArg(lclNum, opcodeOffs + sz + 1);
10154                 break;
10155
10156             case CEE_LDLOC:
10157                 lclNum = getU2LittleEndian(codeAddr);
10158                 JITDUMP(" %u", lclNum);
10159                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10160                 break;
10161
10162             case CEE_LDLOC_S:
10163                 lclNum = getU1LittleEndian(codeAddr);
10164                 JITDUMP(" %u", lclNum);
10165                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10166                 break;
10167
10168             case CEE_LDLOC_0:
10169             case CEE_LDLOC_1:
10170             case CEE_LDLOC_2:
10171             case CEE_LDLOC_3:
10172                 lclNum = (opcode - CEE_LDLOC_0);
10173                 assert(lclNum >= 0 && lclNum < 4);
10174                 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10175                 break;
10176
10177             case CEE_STARG:
10178                 lclNum = getU2LittleEndian(codeAddr);
10179                 goto STARG;
10180
10181             case CEE_STARG_S:
10182                 lclNum = getU1LittleEndian(codeAddr);
10183             STARG:
10184                 JITDUMP(" %u", lclNum);
10185
10186                 if (tiVerificationNeeded)
10187                 {
10188                     Verify(lclNum < info.compILargsCount, "bad arg num");
10189                 }
10190
10191                 if (compIsForInlining())
10192                 {
10193                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10194                     noway_assert(op1->gtOper == GT_LCL_VAR);
10195                     lclNum = op1->AsLclVar()->gtLclNum;
10196
10197                     goto VAR_ST_VALID;
10198                 }
10199
10200                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10201                 assertImp(lclNum < numArgs);
10202
10203                 if (lclNum == info.compThisArg)
10204                 {
10205                     lclNum = lvaArg0Var;
10206                 }
10207
10208                 // We should have seen this arg write in the prescan
10209                 assert(lvaTable[lclNum].lvHasILStoreOp);
10210
10211                 if (tiVerificationNeeded)
10212                 {
10213                     typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
10214                     Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
10215                            "type mismatch");
10216
10217                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10218                     {
10219                         Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
10220                     }
10221                 }
10222
10223                 goto VAR_ST;
10224
10225             case CEE_STLOC:
10226                 lclNum  = getU2LittleEndian(codeAddr);
10227                 isLocal = true;
10228                 JITDUMP(" %u", lclNum);
10229                 goto LOC_ST;
10230
10231             case CEE_STLOC_S:
10232                 lclNum  = getU1LittleEndian(codeAddr);
10233                 isLocal = true;
10234                 JITDUMP(" %u", lclNum);
10235                 goto LOC_ST;
10236
10237             case CEE_STLOC_0:
10238             case CEE_STLOC_1:
10239             case CEE_STLOC_2:
10240             case CEE_STLOC_3:
10241                 isLocal = true;
10242                 lclNum  = (opcode - CEE_STLOC_0);
10243                 assert(lclNum >= 0 && lclNum < 4);
10244
10245             LOC_ST:
10246                 if (tiVerificationNeeded)
10247                 {
10248                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10249                     Verify(tiCompatibleWith(impStackTop().seTypeInfo,
10250                                             NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
10251                            "type mismatch");
10252                 }
10253
10254                 if (compIsForInlining())
10255                 {
10256                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10257
10258                     /* Have we allocated a temp for this local? */
10259
10260                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
10261
10262                     goto _PopValue;
10263                 }
10264
10265                 lclNum += numArgs;
10266
10267             VAR_ST:
10268
10269                 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10270                 {
10271                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10272                     BADCODE("Bad IL");
10273                 }
10274
10275             VAR_ST_VALID:
10276
10277                 /* if it is a struct assignment, make certain we don't overflow the buffer */
10278                 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10279
10280                 if (lvaTable[lclNum].lvNormalizeOnLoad())
10281                 {
10282                     lclTyp = lvaGetRealType(lclNum);
10283                 }
10284                 else
10285                 {
10286                     lclTyp = lvaGetActualType(lclNum);
10287                 }
10288
10289             _PopValue:
10290                 /* Pop the value being assigned */
10291
10292                 {
10293                     StackEntry se = impPopStack();
10294                     clsHnd        = se.seTypeInfo.GetClassHandle();
10295                     op1           = se.val;
10296                     tiRetVal      = se.seTypeInfo;
10297                 }
10298
10299 #ifdef FEATURE_SIMD
10300                 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10301                 {
10302                     assert(op1->TypeGet() == TYP_STRUCT);
10303                     op1->gtType = lclTyp;
10304                 }
10305 #endif // FEATURE_SIMD
10306
10307                 op1 = impImplicitIorI4Cast(op1, lclTyp);
10308
10309 #ifdef _TARGET_64BIT_
10310                 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10311                 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10312                 {
10313                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10314                     op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
10315                 }
10316 #endif // _TARGET_64BIT_
10317
10318                 // We had better assign it a value of the correct type
10319                 assertImp(
10320                     genActualType(lclTyp) == genActualType(op1->gtType) ||
10321                     genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
10322                     (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10323                     (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10324                     (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10325                     ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10326
10327                 /* If op1 is "&var" then its type is the transient "*" and it can
10328                    be used either as TYP_BYREF or TYP_I_IMPL */
10329
10330                 if (op1->IsVarAddr())
10331                 {
10332                     assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10333
10334                     /* When "&var" is created, we assume it is a byref. If it is
10335                        being assigned to a TYP_I_IMPL var, change the type to
10336                        prevent unnecessary GC info */
10337
10338                     if (genActualType(lclTyp) == TYP_I_IMPL)
10339                     {
10340                         op1->gtType = TYP_I_IMPL;
10341                     }
10342                 }
10343
10344                 // If this is a local and the local is a ref type, see
10345                 // if we can improve type information based on the
10346                 // value being assigned.
10347                 if (isLocal && (lclTyp == TYP_REF))
10348                 {
10349                     // We should have seen a stloc in our IL prescan.
10350                     assert(lvaTable[lclNum].lvHasILStoreOp);
10351
10352                     const bool isSingleILStoreLocal =
10353                         !lvaTable[lclNum].lvHasMultipleILStoreOp && !lvaTable[lclNum].lvHasLdAddrOp;
10354
10355                     // Conservative check that there is just one
10356                     // definition that reaches this store.
10357                     const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
10358
10359                     if (isSingleILStoreLocal && hasSingleReachingDef)
10360                     {
10361                         lvaUpdateClass(lclNum, op1, clsHnd);
10362                     }
10363                 }
10364
10365                 /* Filter out simple assignments to itself */
10366
10367                 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10368                 {
10369                     if (opts.compDbgCode)
10370                     {
10371                         op1 = gtNewNothingNode();
10372                         goto SPILL_APPEND;
10373                     }
10374                     else
10375                     {
10376                         break;
10377                     }
10378                 }
10379
10380                 /* Create the assignment node */
10381
10382                 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10383
10384                 /* If the local is aliased, we need to spill calls and
10385                    indirections from the stack. */
10386
10387                 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10388                     verCurrentState.esStackDepth > 0)
10389                 {
10390                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10391                 }
10392
10393                 /* Spill any refs to the local from the stack */
10394
10395                 impSpillLclRefs(lclNum);
10396
10397 #if !FEATURE_X87_DOUBLES
10398                 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10399                 // We insert a cast to the dest 'op2' type
10400                 //
10401                 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10402                     varTypeIsFloating(op2->gtType))
10403                 {
10404                     op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10405                 }
10406 #endif // !FEATURE_X87_DOUBLES
10407
10408                 if (varTypeIsStruct(lclTyp))
10409                 {
10410                     op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10411                 }
10412                 else
10413                 {
10414                     // The code generator generates GC tracking information
10415                     // based on the RHS of the assignment.  Later the LHS (which is
10416                     // a BYREF) gets used and the emitter checks that that variable
10417                     // is being tracked.  It is not (since the RHS was an int and did
10418                     // not need tracking).  To keep this assert happy, we change the RHS
10419                     if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10420                     {
10421                         op1->gtType = TYP_BYREF;
10422                     }
10423                     op1 = gtNewAssignNode(op2, op1);
10424                 }
10425
10426                 goto SPILL_APPEND;
10427
10428             case CEE_LDLOCA:
10429                 lclNum = getU2LittleEndian(codeAddr);
10430                 goto LDLOCA;
10431
10432             case CEE_LDLOCA_S:
10433                 lclNum = getU1LittleEndian(codeAddr);
10434             LDLOCA:
10435                 JITDUMP(" %u", lclNum);
10436                 if (tiVerificationNeeded)
10437                 {
10438                     Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10439                     Verify(info.compInitMem, "initLocals not set");
10440                 }
10441
10442                 if (compIsForInlining())
10443                 {
10444                     // Get the local type
10445                     lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10446
10447                     /* Have we allocated a temp for this local? */
10448
10449                     lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10450
10451                     op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10452
10453                     goto _PUSH_ADRVAR;
10454                 }
10455
10456                 lclNum += numArgs;
10457                 assertImp(lclNum < info.compLocalsCount);
10458                 goto ADRVAR;
10459
10460             case CEE_LDARGA:
10461                 lclNum = getU2LittleEndian(codeAddr);
10462                 goto LDARGA;
10463
10464             case CEE_LDARGA_S:
10465                 lclNum = getU1LittleEndian(codeAddr);
10466             LDARGA:
10467                 JITDUMP(" %u", lclNum);
10468                 Verify(lclNum < info.compILargsCount, "bad arg num");
10469
10470                 if (compIsForInlining())
10471                 {
10472                     // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10473                     // followed by a ldfld to load the field.
10474
10475                     op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10476                     if (op1->gtOper != GT_LCL_VAR)
10477                     {
10478                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10479                         return;
10480                     }
10481
10482                     assert(op1->gtOper == GT_LCL_VAR);
10483
10484                     goto _PUSH_ADRVAR;
10485                 }
10486
10487                 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10488                 assertImp(lclNum < numArgs);
10489
10490                 if (lclNum == info.compThisArg)
10491                 {
10492                     lclNum = lvaArg0Var;
10493                 }
10494
10495                 goto ADRVAR;
10496
10497             ADRVAR:
10498
10499                 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10500
10501             _PUSH_ADRVAR:
10502                 assert(op1->gtOper == GT_LCL_VAR);
10503
10504                 /* Note that this is supposed to create the transient type "*"
10505                    which may be used as a TYP_I_IMPL. However we catch places
10506                    where it is used as a TYP_I_IMPL and change the node if needed.
10507                    Thus we are pessimistic and may report byrefs in the GC info
10508                    where it was not absolutely needed, but it is safer this way.
10509                  */
10510                 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10511
10512                 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10513                 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10514
10515                 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10516                 if (tiVerificationNeeded)
10517                 {
10518                     // Don't allow taking address of uninit this ptr.
10519                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10520                     {
10521                         Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10522                     }
10523
10524                     if (!tiRetVal.IsByRef())
10525                     {
10526                         tiRetVal.MakeByRef();
10527                     }
10528                     else
10529                     {
10530                         Verify(false, "byref to byref");
10531                     }
10532                 }
10533
10534                 impPushOnStack(op1, tiRetVal);
10535                 break;
10536
10537             case CEE_ARGLIST:
10538
10539                 if (!info.compIsVarArgs)
10540                 {
10541                     BADCODE("arglist in non-vararg method");
10542                 }
10543
10544                 if (tiVerificationNeeded)
10545                 {
10546                     tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10547                 }
10548                 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10549
10550                 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10551                    adjusted the arg count because this is like fetching the last param */
10552                 assertImp(0 < numArgs);
10553                 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10554                 lclNum = lvaVarargsHandleArg;
10555                 op1    = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10556                 op1    = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10557                 impPushOnStack(op1, tiRetVal);
10558                 break;
10559
10560             case CEE_ENDFINALLY:
10561
10562                 if (compIsForInlining())
10563                 {
10564                     assert(!"Shouldn't have exception handlers in the inliner!");
10565                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10566                     return;
10567                 }
10568
10569                 if (verCurrentState.esStackDepth > 0)
10570                 {
10571                     impEvalSideEffects();
10572                 }
10573
10574                 if (info.compXcptnsCount == 0)
10575                 {
10576                     BADCODE("endfinally outside finally");
10577                 }
10578
10579                 assert(verCurrentState.esStackDepth == 0);
10580
10581                 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10582                 goto APPEND;
10583
10584             case CEE_ENDFILTER:
10585
10586                 if (compIsForInlining())
10587                 {
10588                     assert(!"Shouldn't have exception handlers in the inliner!");
10589                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10590                     return;
10591                 }
10592
10593                 block->bbSetRunRarely(); // filters are rare
10594
10595                 if (info.compXcptnsCount == 0)
10596                 {
10597                     BADCODE("endfilter outside filter");
10598                 }
10599
10600                 if (tiVerificationNeeded)
10601                 {
10602                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10603                 }
10604
10605                 op1 = impPopStack().val;
10606                 assertImp(op1->gtType == TYP_INT);
10607                 if (!bbInFilterILRange(block))
10608                 {
10609                     BADCODE("EndFilter outside a filter handler");
10610                 }
10611
10612                 /* Mark current bb as end of filter */
10613
10614                 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10615                 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10616
10617                 /* Mark catch handler as successor */
10618
10619                 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10620                 if (verCurrentState.esStackDepth != 0)
10621                 {
10622                     verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10623                                                 DEBUGARG(__LINE__));
10624                 }
10625                 goto APPEND;
10626
10627             case CEE_RET:
10628                 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10629             RET:
10630                 if (!impReturnInstruction(block, prefixFlags, opcode))
10631                 {
10632                     return; // abort
10633                 }
10634                 else
10635                 {
10636                     break;
10637                 }
10638
10639             case CEE_JMP:
10640
10641                 assert(!compIsForInlining());
10642
10643                 if (tiVerificationNeeded)
10644                 {
10645                     Verify(false, "Invalid opcode: CEE_JMP");
10646                 }
10647
10648                 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10649                 {
10650                     /* CEE_JMP does not make sense in some "protected" regions. */
10651
10652                     BADCODE("Jmp not allowed in protected region");
10653                 }
10654
10655                 if (verCurrentState.esStackDepth != 0)
10656                 {
10657                     BADCODE("Stack must be empty after CEE_JMPs");
10658                 }
10659
10660                 _impResolveToken(CORINFO_TOKENKIND_Method);
10661
10662                 JITDUMP(" %08X", resolvedToken.token);
10663
10664                 /* The signature of the target has to be identical to ours.
10665                    At least check that argCnt and returnType match */
10666
10667                 eeGetMethodSig(resolvedToken.hMethod, &sig);
10668                 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10669                     sig.retType != info.compMethodInfo->args.retType ||
10670                     sig.callConv != info.compMethodInfo->args.callConv)
10671                 {
10672                     BADCODE("Incompatible target for CEE_JMPs");
10673                 }
10674
10675 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10676
10677                 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10678
10679                 /* Mark the basic block as being a JUMP instead of RETURN */
10680
10681                 block->bbFlags |= BBF_HAS_JMP;
10682
10683                 /* Set this flag to make sure register arguments have a location assigned
10684                  * even if we don't use them inside the method */
10685
10686                 compJmpOpUsed = true;
10687
10688                 fgNoStructPromotion = true;
10689
10690                 goto APPEND;
10691
10692 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10693
10694                 // Import this just like a series of LDARGs + tail. + call + ret
10695
10696                 if (info.compIsVarArgs)
10697                 {
10698                     // For now we don't implement true tail calls, so this breaks varargs.
10699                     // So warn the user instead of generating bad code.
10700                     // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10701                     // implement true tail calls.
10702                     IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10703                 }
10704
10705                 // First load up the arguments (0 - N)
10706                 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10707                 {
10708                     impLoadArg(argNum, opcodeOffs + sz + 1);
10709                 }
10710
10711                 // Now generate the tail call
10712                 noway_assert(prefixFlags == 0);
10713                 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10714                 opcode      = CEE_CALL;
10715
10716                 eeGetCallInfo(&resolvedToken, NULL,
10717                               combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10718
10719                 // All calls and delegates need a security callout.
10720                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10721
10722                 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10723                                         opcodeOffs);
10724
10725                 // And finish with the ret
10726                 goto RET;
10727
10728 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10729
10730             case CEE_LDELEMA:
10731                 assertImp(sz == sizeof(unsigned));
10732
10733                 _impResolveToken(CORINFO_TOKENKIND_Class);
10734
10735                 JITDUMP(" %08X", resolvedToken.token);
10736
10737                 ldelemClsHnd = resolvedToken.hClass;
10738
10739                 if (tiVerificationNeeded)
10740                 {
10741                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10742                     typeInfo tiIndex = impStackTop().seTypeInfo;
10743
10744                     // As per ECMA, the 'index' specified can be either int32 or native int.
10745                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10746
10747                     typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10748                     Verify(tiArray.IsNullObjRef() ||
10749                                typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10750                            "bad array");
10751
10752                     tiRetVal = arrayElemType;
10753                     tiRetVal.MakeByRef();
10754                     if (prefixFlags & PREFIX_READONLY)
10755                     {
10756                         tiRetVal.SetIsReadonlyByRef();
10757                     }
10758
10759                     // an array interior pointer is always in the heap
10760                     tiRetVal.SetIsPermanentHomeByRef();
10761                 }
10762
10763                 // If it's a value class array we just do a simple address-of
10764                 if (eeIsValueClass(ldelemClsHnd))
10765                 {
10766                     CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10767                     if (cit == CORINFO_TYPE_UNDEF)
10768                     {
10769                         lclTyp = TYP_STRUCT;
10770                     }
10771                     else
10772                     {
10773                         lclTyp = JITtype2varType(cit);
10774                     }
10775                     goto ARR_LD_POST_VERIFY;
10776                 }
10777
10778                 // Similarly, if it's a readonly access, we can do a simple address-of
10779                 // without doing a runtime type-check
10780                 if (prefixFlags & PREFIX_READONLY)
10781                 {
10782                     lclTyp = TYP_REF;
10783                     goto ARR_LD_POST_VERIFY;
10784                 }
10785
10786                 // Otherwise we need the full helper function with run-time type check
10787                 op1 = impTokenToHandle(&resolvedToken);
10788                 if (op1 == nullptr)
10789                 { // compDonotInline()
10790                     return;
10791                 }
10792
10793                 args = gtNewArgList(op1);                      // Type
10794                 args = gtNewListNode(impPopStack().val, args); // index
10795                 args = gtNewListNode(impPopStack().val, args); // array
10796                 op1  = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
10797
10798                 impPushOnStack(op1, tiRetVal);
10799                 break;
10800
10801             // ldelem for reference and value types
10802             case CEE_LDELEM:
10803                 assertImp(sz == sizeof(unsigned));
10804
10805                 _impResolveToken(CORINFO_TOKENKIND_Class);
10806
10807                 JITDUMP(" %08X", resolvedToken.token);
10808
10809                 ldelemClsHnd = resolvedToken.hClass;
10810
10811                 if (tiVerificationNeeded)
10812                 {
10813                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10814                     typeInfo tiIndex = impStackTop().seTypeInfo;
10815
10816                     // As per ECMA, the 'index' specified can be either int32 or native int.
10817                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10818                     tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10819
10820                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10821                            "type of array incompatible with type operand");
10822                     tiRetVal.NormaliseForStack();
10823                 }
10824
10825                 // If it's a reference type or generic variable type
10826                 // then just generate code as though it's a ldelem.ref instruction
10827                 if (!eeIsValueClass(ldelemClsHnd))
10828                 {
10829                     lclTyp = TYP_REF;
10830                     opcode = CEE_LDELEM_REF;
10831                 }
10832                 else
10833                 {
10834                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10835                     lclTyp             = JITtype2varType(jitTyp);
10836                     tiRetVal           = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10837                     tiRetVal.NormaliseForStack();
10838                 }
10839                 goto ARR_LD_POST_VERIFY;
10840
10841             case CEE_LDELEM_I1:
10842                 lclTyp = TYP_BYTE;
10843                 goto ARR_LD;
10844             case CEE_LDELEM_I2:
10845                 lclTyp = TYP_SHORT;
10846                 goto ARR_LD;
10847             case CEE_LDELEM_I:
10848                 lclTyp = TYP_I_IMPL;
10849                 goto ARR_LD;
10850
10851             // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10852             // and treating it as TYP_INT avoids other asserts.
10853             case CEE_LDELEM_U4:
10854                 lclTyp = TYP_INT;
10855                 goto ARR_LD;
10856
10857             case CEE_LDELEM_I4:
10858                 lclTyp = TYP_INT;
10859                 goto ARR_LD;
10860             case CEE_LDELEM_I8:
10861                 lclTyp = TYP_LONG;
10862                 goto ARR_LD;
10863             case CEE_LDELEM_REF:
10864                 lclTyp = TYP_REF;
10865                 goto ARR_LD;
10866             case CEE_LDELEM_R4:
10867                 lclTyp = TYP_FLOAT;
10868                 goto ARR_LD;
10869             case CEE_LDELEM_R8:
10870                 lclTyp = TYP_DOUBLE;
10871                 goto ARR_LD;
10872             case CEE_LDELEM_U1:
10873                 lclTyp = TYP_UBYTE;
10874                 goto ARR_LD;
10875             case CEE_LDELEM_U2:
10876                 lclTyp = TYP_CHAR;
10877                 goto ARR_LD;
10878
10879             ARR_LD:
10880
10881                 if (tiVerificationNeeded)
10882                 {
10883                     typeInfo tiArray = impStackTop(1).seTypeInfo;
10884                     typeInfo tiIndex = impStackTop().seTypeInfo;
10885
10886                     // As per ECMA, the 'index' specified can be either int32 or native int.
10887                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10888                     if (tiArray.IsNullObjRef())
10889                     {
10890                         if (lclTyp == TYP_REF)
10891                         { // we will say a deref of a null array yields a null ref
10892                             tiRetVal = typeInfo(TI_NULL);
10893                         }
10894                         else
10895                         {
10896                             tiRetVal = typeInfo(lclTyp);
10897                         }
10898                     }
10899                     else
10900                     {
10901                         tiRetVal             = verGetArrayElemType(tiArray);
10902                         typeInfo arrayElemTi = typeInfo(lclTyp);
10903 #ifdef _TARGET_64BIT_
10904                         if (opcode == CEE_LDELEM_I)
10905                         {
10906                             arrayElemTi = typeInfo::nativeInt();
10907                         }
10908
10909                         if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10910                         {
10911                             Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10912                         }
10913                         else
10914 #endif // _TARGET_64BIT_
10915                         {
10916                             Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10917                         }
10918                     }
10919                     tiRetVal.NormaliseForStack();
10920                 }
10921             ARR_LD_POST_VERIFY:
10922
10923                 /* Pull the index value and array address */
10924                 op2 = impPopStack().val;
10925                 op1 = impPopStack().val;
10926                 assertImp(op1->gtType == TYP_REF);
10927
10928                 /* Check for null pointer - in the inliner case we simply abort */
10929
10930                 if (compIsForInlining())
10931                 {
10932                     if (op1->gtOper == GT_CNS_INT)
10933                     {
10934                         compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10935                         return;
10936                     }
10937                 }
10938
10939                 op1 = impCheckForNullPointer(op1);
10940
10941                 /* Mark the block as containing an index expression */
10942
10943                 if (op1->gtOper == GT_LCL_VAR)
10944                 {
10945                     if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10946                     {
10947                         block->bbFlags |= BBF_HAS_IDX_LEN;
10948                         optMethodFlags |= OMF_HAS_ARRAYREF;
10949                     }
10950                 }
10951
10952                 /* Create the index node and push it on the stack */
10953
10954                 op1 = gtNewIndexRef(lclTyp, op1, op2);
10955
10956                 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10957
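                      // For ldelema, struct-typed loads, and other value-class elements, record the
                      // element size (and, for structs, the element class handle) on the index node;
                      // for ldelema and struct loads the element's address is then taken below.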
10958                 if ((opcode == CEE_LDELEMA) || ldstruct ||
10959                     (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10960                 {
10961                     assert(ldelemClsHnd != DUMMY_INIT(NULL));
10962
10963                     // remember the element size
10964                     if (lclTyp == TYP_REF)
10965                     {
10966                         op1->gtIndex.gtIndElemSize = sizeof(void*);
10967                     }
10968                     else
10969                     {
10970                         // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
10971                         if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10972                         {
10973                             op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10974                         }
10975                         assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10976                         if (lclTyp == TYP_STRUCT)
10977                         {
10978                             size                       = info.compCompHnd->getClassSize(ldelemClsHnd);
10979                             op1->gtIndex.gtIndElemSize = size;
10980                             op1->gtType                = lclTyp;
10981                         }
10982                     }
10983
10984                     if ((opcode == CEE_LDELEMA) || ldstruct)
10985                     {
10986                         // wrap it in a &
10987                         lclTyp = TYP_BYREF;
10988
10989                         op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10990                     }
10991                     else
10992                     {
10993                         assert(lclTyp != TYP_STRUCT);
10994                     }
10995                 }
10996
10997                 if (ldstruct)
10998                 {
10999                     // Create an OBJ for the result
11000                     op1 = gtNewObjNode(ldelemClsHnd, op1);
11001                     op1->gtFlags |= GTF_EXCEPT;
11002                 }
11003                 impPushOnStack(op1, tiRetVal);
11004                 break;
11005
11006             // stelem for reference and value types
11007             case CEE_STELEM:
11008
11009                 assertImp(sz == sizeof(unsigned));
11010
11011                 _impResolveToken(CORINFO_TOKENKIND_Class);
11012
11013                 JITDUMP(" %08X", resolvedToken.token);
11014
11015                 stelemClsHnd = resolvedToken.hClass;
11016
11017                 if (tiVerificationNeeded)
11018                 {
11019                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11020                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11021                     typeInfo tiValue = impStackTop().seTypeInfo;
11022
11023                     // As per ECMA, the 'index' specified can be either int32 or native int.
11024                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11025                     typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11026
11027                     Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11028                            "type operand incompatible with array element type");
11029                     arrayElem.NormaliseForStack();
11030                     Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11031                 }
11032
11033                 // If it's a reference type just behave as though it's a stelem.ref instruction
11034                 if (!eeIsValueClass(stelemClsHnd))
11035                 {
11036                     goto STELEM_REF_POST_VERIFY;
11037                 }
11038
11039                 // Otherwise extract the type
11040                 {
11041                     CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11042                     lclTyp             = JITtype2varType(jitTyp);
11043                     goto ARR_ST_POST_VERIFY;
11044                 }
11045
11046             case CEE_STELEM_REF:
11047
11048                 if (tiVerificationNeeded)
11049                 {
11050                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11051                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11052                     typeInfo tiValue = impStackTop().seTypeInfo;
11053
11054                     // As per ECMA, the 'index' specified can be either int32 or native int.
11055                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11056                     Verify(tiValue.IsObjRef(), "bad value");
11057
11058                     // We only check that it is an object reference; the helper does additional checks.
11059                     Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11060                 }
11061
11062                 arrayNodeTo      = impStackTop(2).val;
11063                 arrayNodeToIndex = impStackTop(1).val;
11064                 arrayNodeFrom    = impStackTop().val;
11065
11066                 //
11067                 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11068                 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
11069                 //
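                      // For example, a String[] can be referenced through an Object[] variable, so the
                      // actual element type must be checked against the stored value at run time.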
11070
11071                 // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j]
11072                 // This does not need CORINFO_HELP_ARRADDR_ST
11073
11074                 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11075                     arrayNodeTo->gtOper == GT_LCL_VAR &&
11076                     arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11077                     !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11078                 {
11079                     lclTyp = TYP_REF;
11080                     goto ARR_ST_POST_VERIFY;
11081                 }
11082
11083                 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11084
11085                 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11086                 {
11087                     assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11088
11089                     lclTyp = TYP_REF;
11090                     goto ARR_ST_POST_VERIFY;
11091                 }
11092
11093             STELEM_REF_POST_VERIFY:
11094
11095                 /* Call a helper function to do the assignment */
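                      // CORINFO_HELP_ARRADDR_ST performs the run-time covariance (element-type) check
                      // before storing the reference.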
11096                 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11097
11098                 goto SPILL_APPEND;
11099
11100             case CEE_STELEM_I1:
11101                 lclTyp = TYP_BYTE;
11102                 goto ARR_ST;
11103             case CEE_STELEM_I2:
11104                 lclTyp = TYP_SHORT;
11105                 goto ARR_ST;
11106             case CEE_STELEM_I:
11107                 lclTyp = TYP_I_IMPL;
11108                 goto ARR_ST;
11109             case CEE_STELEM_I4:
11110                 lclTyp = TYP_INT;
11111                 goto ARR_ST;
11112             case CEE_STELEM_I8:
11113                 lclTyp = TYP_LONG;
11114                 goto ARR_ST;
11115             case CEE_STELEM_R4:
11116                 lclTyp = TYP_FLOAT;
11117                 goto ARR_ST;
11118             case CEE_STELEM_R8:
11119                 lclTyp = TYP_DOUBLE;
11120                 goto ARR_ST;
11121
11122             ARR_ST:
11123
11124                 if (tiVerificationNeeded)
11125                 {
11126                     typeInfo tiArray = impStackTop(2).seTypeInfo;
11127                     typeInfo tiIndex = impStackTop(1).seTypeInfo;
11128                     typeInfo tiValue = impStackTop().seTypeInfo;
11129
11130                     // As per ECMA, the 'index' specified can be either int32 or native int.
11131                     Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11132                     typeInfo arrayElem = typeInfo(lclTyp);
11133 #ifdef _TARGET_64BIT_
11134                     if (opcode == CEE_STELEM_I)
11135                     {
11136                         arrayElem = typeInfo::nativeInt();
11137                     }
11138 #endif // _TARGET_64BIT_
11139                     Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11140                            "bad array");
11141
11142                     Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11143                            "bad value");
11144                 }
11145
11146             ARR_ST_POST_VERIFY:
11147                 /* The strict order of evaluation is LHS-operands, RHS-operands,
11148                    range-check, and then assignment. However, codegen currently
11149                    does the range-check before evaluating the RHS-operands. So to
11150                    maintain strict ordering, we spill the stack. */
11151
11152                 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11153                 {
11154                     impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11155                                                    "Strict ordering of exceptions for Array store"));
11156                 }
11157
11158                 /* Pull the new value from the stack */
11159                 op2 = impPopStack().val;
11160
11161                 /* Pull the index value */
11162                 op1 = impPopStack().val;
11163
11164                 /* Pull the array address */
11165                 op3 = impPopStack().val;
11166
11167                 assertImp(op3->gtType == TYP_REF);
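                      // If the value being stored is the address of a local (a byref), retype it to
                      // native int so the assignment types agree.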
11168                 if (op2->IsVarAddr())
11169                 {
11170                     op2->gtType = TYP_I_IMPL;
11171                 }
11172
11173                 op3 = impCheckForNullPointer(op3);
11174
11175                 // Mark the block as containing an index expression
11176
11177                 if (op3->gtOper == GT_LCL_VAR)
11178                 {
11179                     if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
11180                     {
11181                         block->bbFlags |= BBF_HAS_IDX_LEN;
11182                         optMethodFlags |= OMF_HAS_ARRAYREF;
11183                     }
11184                 }
11185
11186                 /* Create the index node */
11187
11188                 op1 = gtNewIndexRef(lclTyp, op3, op1);
11189
11190                 /* Create the assignment node and append it */
11191
11192                 if (lclTyp == TYP_STRUCT)
11193                 {
11194                     assert(stelemClsHnd != DUMMY_INIT(NULL));
11195
11196                     op1->gtIndex.gtStructElemClass = stelemClsHnd;
11197                     op1->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(stelemClsHnd);
11198                 }
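                      // Struct elements are assigned via impAssignStruct; scalar elements get an ordinary
                      // assignment, with an implicit float/double cast inserted first if needed.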
11199                 if (varTypeIsStruct(op1))
11200                 {
11201                     op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
11202                 }
11203                 else
11204                 {
11205                     op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
11206                     op1 = gtNewAssignNode(op1, op2);
11207                 }
11208
11209                 /* Mark the expression as containing an assignment */
11210
11211                 op1->gtFlags |= GTF_ASG;
11212
11213                 goto SPILL_APPEND;
11214
11215             case CEE_ADD:
11216                 oper = GT_ADD;
11217                 goto MATH_OP2;
11218
11219             case CEE_ADD_OVF:
11220                 uns = false;
11221                 goto ADD_OVF;
11222             case CEE_ADD_OVF_UN:
11223                 uns = true;
11224                 goto ADD_OVF;
11225
11226             ADD_OVF:
11227                 ovfl     = true;
11228                 callNode = false;
11229                 oper     = GT_ADD;
11230                 goto MATH_OP2_FLAGS;
11231
11232             case CEE_SUB:
11233                 oper = GT_SUB;
11234                 goto MATH_OP2;
11235
11236             case CEE_SUB_OVF:
11237                 uns = false;
11238                 goto SUB_OVF;
11239             case CEE_SUB_OVF_UN:
11240                 uns = true;
11241                 goto SUB_OVF;
11242
11243             SUB_OVF:
11244                 ovfl     = true;
11245                 callNode = false;
11246                 oper     = GT_SUB;
11247                 goto MATH_OP2_FLAGS;
11248
11249             case CEE_MUL:
11250                 oper = GT_MUL;
11251                 goto MATH_MAYBE_CALL_NO_OVF;
11252
11253             case CEE_MUL_OVF:
11254                 uns = false;
11255                 goto MUL_OVF;
11256             case CEE_MUL_OVF_UN:
11257                 uns = true;
11258                 goto MUL_OVF;
11259
11260             MUL_OVF:
11261                 ovfl = true;
11262                 oper = GT_MUL;
11263                 goto MATH_MAYBE_CALL_OVF;
11264
11265             // Other binary math operations
11266
11267             case CEE_DIV:
11268                 oper = GT_DIV;
11269                 goto MATH_MAYBE_CALL_NO_OVF;
11270
11271             case CEE_DIV_UN:
11272                 oper = GT_UDIV;
11273                 goto MATH_MAYBE_CALL_NO_OVF;
11274
11275             case CEE_REM:
11276                 oper = GT_MOD;
11277                 goto MATH_MAYBE_CALL_NO_OVF;
11278
11279             case CEE_REM_UN:
11280                 oper = GT_UMOD;
11281                 goto MATH_MAYBE_CALL_NO_OVF;
11282
11283             MATH_MAYBE_CALL_NO_OVF:
11284                 ovfl = false;
11285             MATH_MAYBE_CALL_OVF:
11286                 // The morpher has some complex logic about when to turn nodes of
11287                 // different types on different platforms into helper calls. We
11288                 // need to either duplicate that logic here, or just
11289                 // pessimistically make all the nodes large enough to become
11290                 // call nodes.  Since call nodes aren't that much larger and
11291                 // these opcodes are infrequent enough I chose the latter.
11292                 callNode = true;
11293                 goto MATH_OP2_FLAGS;
11294
11295             case CEE_AND:
11296                 oper = GT_AND;
11297                 goto MATH_OP2;
11298             case CEE_OR:
11299                 oper = GT_OR;
11300                 goto MATH_OP2;
11301             case CEE_XOR:
11302                 oper = GT_XOR;
11303                 goto MATH_OP2;
11304
11305             MATH_OP2: // For default values of 'ovfl' and 'callNode'
11306
11307                 ovfl     = false;
11308                 callNode = false;
11309
11310             MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11311
11312                 /* Pull two values and push back the result */
11313
11314                 if (tiVerificationNeeded)
11315                 {
11316                     const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11317                     const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11318
11319                     Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11320                     if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11321                     {
11322                         Verify(tiOp1.IsNumberType(), "not number");
11323                     }
11324                     else
11325                     {
11326                         Verify(tiOp1.IsIntegerType(), "not integer");
11327                     }
11328
11329                     Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11330
11331                     tiRetVal = tiOp1;
11332
11333 #ifdef _TARGET_64BIT_
11334                     if (tiOp2.IsNativeIntType())
11335                     {
11336                         tiRetVal = tiOp2;
11337                     }
11338 #endif // _TARGET_64BIT_
11339                 }
11340
11341                 op2 = impPopStack().val;
11342                 op1 = impPopStack().val;
11343
11344 #if !CPU_HAS_FP_SUPPORT
11345                 if (varTypeIsFloating(op1->gtType))
11346                 {
11347                     callNode = true;
11348                 }
11349 #endif
11350                 /* Can't do arithmetic with references */
11351                 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11352
11353                 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11354                 // if it is in the stack)
11355                 impBashVarAddrsToI(op1, op2);
11356
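                      // Determine the result type, taking byref arithmetic into account
                      // (e.g. byref + int yields a byref, while byref - byref yields a native int).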
11357                 type = impGetByRefResultType(oper, uns, &op1, &op2);
11358
11359                 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11360
11361                 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11362
11363                 if (op2->gtOper == GT_CNS_INT)
11364                 {
11365                     if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11366                         (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11367
11368                     {
11369                         impPushOnStack(op1, tiRetVal);
11370                         break;
11371                     }
11372                 }
11373
11374 #if !FEATURE_X87_DOUBLES
11375                 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11376                 //
11377                 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11378                 {
11379                     if (op1->TypeGet() != type)
11380                     {
11381                         // We insert a cast of op1 to 'type'
11382                         op1 = gtNewCastNode(type, op1, type);
11383                     }
11384                     if (op2->TypeGet() != type)
11385                     {
11386                         // We insert a cast of op2 to 'type'
11387                         op2 = gtNewCastNode(type, op2, type);
11388                     }
11389                 }
11390 #endif // !FEATURE_X87_DOUBLES
11391
11392 #if SMALL_TREE_NODES
11393                 if (callNode)
11394                 {
11395                     /* These operators can later be transformed into 'GT_CALL' */
11396
11397                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11398 #ifndef _TARGET_ARM_
11399                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11400                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11401                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11402                     assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11403 #endif
11404                     // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11405                     // that we'll need to transform into a general large node, but rather specifically
11406                     // to a call: by doing it this way, things keep working if there are multiple sizes,
11407                     // and a CALL is no longer the largest.
11408                     // That said, as of now it *is* a large node, so we'll do this with an assert rather
11409                     // than an "if".
11410                     assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11411                     op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11412                 }
11413                 else
11414 #endif // SMALL_TREE_NODES
11415                 {
11416                     op1 = gtNewOperNode(oper, type, op1, op2);
11417                 }
11418
11419                 /* Special case: integer/long division may throw an exception */
11420
11421                 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
11422                 {
11423                     op1->gtFlags |= GTF_EXCEPT;
11424                 }
11425
11426                 if (ovfl)
11427                 {
11428                     assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11429                     if (ovflType != TYP_UNKNOWN)
11430                     {
11431                         op1->gtType = ovflType;
11432                     }
11433                     op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11434                     if (uns)
11435                     {
11436                         op1->gtFlags |= GTF_UNSIGNED;
11437                     }
11438                 }
11439
11440                 impPushOnStack(op1, tiRetVal);
11441                 break;
11442
11443             case CEE_SHL:
11444                 oper = GT_LSH;
11445                 goto CEE_SH_OP2;
11446
11447             case CEE_SHR:
11448                 oper = GT_RSH;
11449                 goto CEE_SH_OP2;
11450             case CEE_SHR_UN:
11451                 oper = GT_RSZ;
11452                 goto CEE_SH_OP2;
11453
11454             CEE_SH_OP2:
11455                 if (tiVerificationNeeded)
11456                 {
11457                     const typeInfo& tiVal   = impStackTop(1).seTypeInfo;
11458                     const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11459                     Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11460                     tiRetVal = tiVal;
11461                 }
11462                 op2 = impPopStack().val;
11463                 op1 = impPopStack().val; // operand to be shifted
11464                 impBashVarAddrsToI(op1, op2);
11465
11466                 type = genActualType(op1->TypeGet());
11467                 op1  = gtNewOperNode(oper, type, op1, op2);
11468
11469                 impPushOnStack(op1, tiRetVal);
11470                 break;
11471
11472             case CEE_NOT:
11473                 if (tiVerificationNeeded)
11474                 {
11475                     tiRetVal = impStackTop().seTypeInfo;
11476                     Verify(tiRetVal.IsIntegerType(), "bad int value");
11477                 }
11478
11479                 op1 = impPopStack().val;
11480                 impBashVarAddrsToI(op1, nullptr);
11481                 type = genActualType(op1->TypeGet());
11482                 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11483                 break;
11484
11485             case CEE_CKFINITE:
11486                 if (tiVerificationNeeded)
11487                 {
11488                     tiRetVal = impStackTop().seTypeInfo;
11489                     Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11490                 }
11491                 op1  = impPopStack().val;
11492                 type = op1->TypeGet();
11493                 op1  = gtNewOperNode(GT_CKFINITE, type, op1);
11494                 op1->gtFlags |= GTF_EXCEPT;
11495
11496                 impPushOnStack(op1, tiRetVal);
11497                 break;
11498
11499             case CEE_LEAVE:
11500
11501                 val     = getI4LittleEndian(codeAddr); // jump distance
11502                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11503                 goto LEAVE;
11504
11505             case CEE_LEAVE_S:
11506                 val     = getI1LittleEndian(codeAddr); // jump distance
11507                 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11508
11509             LEAVE:
11510
11511                 if (compIsForInlining())
11512                 {
11513                     compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11514                     return;
11515                 }
11516
11517                 JITDUMP(" %04X", jmpAddr);
11518                 if (block->bbJumpKind != BBJ_LEAVE)
11519                 {
11520                     impResetLeaveBlock(block, jmpAddr);
11521                 }
11522
11523                 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11524                 impImportLeave(block);
11525                 impNoteBranchOffs();
11526
11527                 break;
11528
11529             case CEE_BR:
11530             case CEE_BR_S:
11531                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11532
11533                 if (compIsForInlining() && jmpDist == 0)
11534                 {
11535                     break; /* NOP */
11536                 }
11537
11538                 impNoteBranchOffs();
11539                 break;
11540
11541             case CEE_BRTRUE:
11542             case CEE_BRTRUE_S:
11543             case CEE_BRFALSE:
11544             case CEE_BRFALSE_S:
11545
11546                 /* Pop the comparand (now there's a neat term) from the stack */
11547                 if (tiVerificationNeeded)
11548                 {
11549                     typeInfo& tiVal = impStackTop().seTypeInfo;
11550                     Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11551                            "bad value");
11552                 }
11553
11554                 op1  = impPopStack().val;
11555                 type = op1->TypeGet();
11556
11557                 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
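                      // If this conditional branch just falls through to its own jump target, drop the
                      // branch; the condition value is kept only if it has side effects.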
11558                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11559                 {
11560                     block->bbJumpKind = BBJ_NONE;
11561
11562                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11563                     {
11564                         op1 = gtUnusedValNode(op1);
11565                         goto SPILL_APPEND;
11566                     }
11567                     else
11568                     {
11569                         break;
11570                     }
11571                 }
11572
11573                 if (op1->OperIsCompare())
11574                 {
11575                     if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11576                     {
11577                         // Flip the sense of the compare
11578
11579                         op1 = gtReverseCond(op1);
11580                     }
11581                 }
11582                 else
11583                 {
11584                     /* We'll compare against an equally-sized integer 0 */
11585                     /* For small types, we always compare against int   */
11586                     op2 = gtNewZeroConNode(genActualType(op1->gtType));
11587
11588                     /* Create the comparison operator and try to fold it */
11589
11590                     oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11591                     op1  = gtNewOperNode(oper, TYP_INT, op1, op2);
11592                 }
11593
11594             // fall through
11595
11596             COND_JUMP:
11597
11598                 /* Fold comparison if we can */
11599
11600                 op1 = gtFoldExpr(op1);
11601
11602                 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11603                 /* Don't make any blocks unreachable in import only mode */
11604
11605                 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11606                 {
11607                     /* gtFoldExpr() should prevent this as we don't want to make any blocks
11608                        unreachable under compDbgCode */
11609                     assert(!opts.compDbgCode);
11610
11611                     BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11612                     assertImp((block->bbJumpKind == BBJ_COND)            // normal case
11613                               || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11614                                                                          // block for the second time
11615
11616                     block->bbJumpKind = foldedJumpKind;
11617 #ifdef DEBUG
11618                     if (verbose)
11619                     {
11620                         if (op1->gtIntCon.gtIconVal)
11621                         {
11622                             printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11623                                    block->bbJumpDest->bbNum);
11624                         }
11625                         else
11626                         {
11627                             printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11628                         }
11629                     }
11630 #endif
11631                     break;
11632                 }
11633
11634                 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11635
11636                 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11637                    in impImportBlock(block). For correct line numbers, spill stack. */
11638
11639                 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11640                 {
11641                     impSpillStackEnsure(true);
11642                 }
11643
11644                 goto SPILL_APPEND;
11645
11646             case CEE_CEQ:
11647                 oper = GT_EQ;
11648                 uns  = false;
11649                 goto CMP_2_OPs;
11650             case CEE_CGT_UN:
11651                 oper = GT_GT;
11652                 uns  = true;
11653                 goto CMP_2_OPs;
11654             case CEE_CGT:
11655                 oper = GT_GT;
11656                 uns  = false;
11657                 goto CMP_2_OPs;
11658             case CEE_CLT_UN:
11659                 oper = GT_LT;
11660                 uns  = true;
11661                 goto CMP_2_OPs;
11662             case CEE_CLT:
11663                 oper = GT_LT;
11664                 uns  = false;
11665                 goto CMP_2_OPs;
11666
11667             CMP_2_OPs:
11668                 if (tiVerificationNeeded)
11669                 {
11670                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11671                     tiRetVal = typeInfo(TI_INT);
11672                 }
11673
11674                 op2 = impPopStack().val;
11675                 op1 = impPopStack().val;
11676
11677 #ifdef _TARGET_64BIT_
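                      // On 64-bit targets, if one operand is native-int-sized and the other is int32,
                      // widen the int32 operand so both sides of the compare are the same width.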
11678                 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11679                 {
11680                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11681                 }
11682                 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11683                 {
11684                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11685                 }
11686 #endif // _TARGET_64BIT_
11687
11688                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11689                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11690                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11691
11692                 /* Create the comparison node */
11693
11694                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11695
11696                 /* TODO: setting both flags when only one is appropriate */
11697                 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11698                 {
11699                     op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11700                 }
11701
11702                 impPushOnStack(op1, tiRetVal);
11703                 break;
11704
11705             case CEE_BEQ_S:
11706             case CEE_BEQ:
11707                 oper = GT_EQ;
11708                 goto CMP_2_OPs_AND_BR;
11709
11710             case CEE_BGE_S:
11711             case CEE_BGE:
11712                 oper = GT_GE;
11713                 goto CMP_2_OPs_AND_BR;
11714
11715             case CEE_BGE_UN_S:
11716             case CEE_BGE_UN:
11717                 oper = GT_GE;
11718                 goto CMP_2_OPs_AND_BR_UN;
11719
11720             case CEE_BGT_S:
11721             case CEE_BGT:
11722                 oper = GT_GT;
11723                 goto CMP_2_OPs_AND_BR;
11724
11725             case CEE_BGT_UN_S:
11726             case CEE_BGT_UN:
11727                 oper = GT_GT;
11728                 goto CMP_2_OPs_AND_BR_UN;
11729
11730             case CEE_BLE_S:
11731             case CEE_BLE:
11732                 oper = GT_LE;
11733                 goto CMP_2_OPs_AND_BR;
11734
11735             case CEE_BLE_UN_S:
11736             case CEE_BLE_UN:
11737                 oper = GT_LE;
11738                 goto CMP_2_OPs_AND_BR_UN;
11739
11740             case CEE_BLT_S:
11741             case CEE_BLT:
11742                 oper = GT_LT;
11743                 goto CMP_2_OPs_AND_BR;
11744
11745             case CEE_BLT_UN_S:
11746             case CEE_BLT_UN:
11747                 oper = GT_LT;
11748                 goto CMP_2_OPs_AND_BR_UN;
11749
11750             case CEE_BNE_UN_S:
11751             case CEE_BNE_UN:
11752                 oper = GT_NE;
11753                 goto CMP_2_OPs_AND_BR_UN;
11754
11755             CMP_2_OPs_AND_BR_UN:
11756                 uns       = true;
11757                 unordered = true;
11758                 goto CMP_2_OPs_AND_BR_ALL;
11759             CMP_2_OPs_AND_BR:
11760                 uns       = false;
11761                 unordered = false;
11762                 goto CMP_2_OPs_AND_BR_ALL;
11763             CMP_2_OPs_AND_BR_ALL:
11764
11765                 if (tiVerificationNeeded)
11766                 {
11767                     verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11768                 }
11769
11770                 /* Pull two values */
11771                 op2 = impPopStack().val;
11772                 op1 = impPopStack().val;
11773
11774 #ifdef _TARGET_64BIT_
11775                 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11776                 {
11777                     op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11778                 }
11779                 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11780                 {
11781                     op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11782                 }
11783 #endif // _TARGET_64BIT_
11784
11785                 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11786                           varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11787                           varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11788
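                      // As with brtrue/brfalse above, a conditional branch whose target is the
                      // fall-through block is removed; the operands are appended only for their side effects.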
11789                 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11790                 {
11791                     block->bbJumpKind = BBJ_NONE;
11792
11793                     if (op1->gtFlags & GTF_GLOB_EFFECT)
11794                     {
11795                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11796                                                        "Branch to next Optimization, op1 side effect"));
11797                         impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11798                     }
11799                     if (op2->gtFlags & GTF_GLOB_EFFECT)
11800                     {
11801                         impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11802                                                        "Branch to next Optimization, op2 side effect"));
11803                         impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11804                     }
11805
11806 #ifdef DEBUG
11807                     if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11808                     {
11809                         impNoteLastILoffs();
11810                     }
11811 #endif
11812                     break;
11813                 }
11814 #if !FEATURE_X87_DOUBLES
11815                 // We can generate a compare of different-sized floating point op1 and op2
11816                 // We insert a cast
11817                 //
11818                 if (varTypeIsFloating(op1->TypeGet()))
11819                 {
11820                     if (op1->TypeGet() != op2->TypeGet())
11821                     {
11822                         assert(varTypeIsFloating(op2->TypeGet()));
11823
11824                         // say op1=double, op2=float. To avoid loss of precision
11825                         // while comparing, op2 is converted to double and double
11826                         // comparison is done.
11827                         if (op1->TypeGet() == TYP_DOUBLE)
11828                         {
11829                             // We insert a cast of op2 to TYP_DOUBLE
11830                             op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11831                         }
11832                         else if (op2->TypeGet() == TYP_DOUBLE)
11833                         {
11834                             // We insert a cast of op1 to TYP_DOUBLE
11835                             op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11836                         }
11837                     }
11838                 }
11839 #endif // !FEATURE_X87_DOUBLES
11840
11841                 /* Create and append the operator */
11842
11843                 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11844
11845                 if (uns)
11846                 {
11847                     op1->gtFlags |= GTF_UNSIGNED;
11848                 }
11849
11850                 if (unordered)
11851                 {
11852                     op1->gtFlags |= GTF_RELOP_NAN_UN;
11853                 }
11854
11855                 goto COND_JUMP;
11856
11857             case CEE_SWITCH:
11858                 assert(!compIsForInlining());
11859
11860                 if (tiVerificationNeeded)
11861                 {
11862                     Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11863                 }
11864                 /* Pop the switch value off the stack */
11865                 op1 = impPopStack().val;
11866                 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11867
11868                 /* We can create a switch node */
11869
11870                 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11871
11872                 val = (int)getU4LittleEndian(codeAddr);
11873                 codeAddr += 4 + val * 4; // skip over the switch-table
11874
11875                 goto SPILL_APPEND;
11876
11877             /************************** Casting OPCODES ***************************/
11878
11879             case CEE_CONV_OVF_I1:
11880                 lclTyp = TYP_BYTE;
11881                 goto CONV_OVF;
11882             case CEE_CONV_OVF_I2:
11883                 lclTyp = TYP_SHORT;
11884                 goto CONV_OVF;
11885             case CEE_CONV_OVF_I:
11886                 lclTyp = TYP_I_IMPL;
11887                 goto CONV_OVF;
11888             case CEE_CONV_OVF_I4:
11889                 lclTyp = TYP_INT;
11890                 goto CONV_OVF;
11891             case CEE_CONV_OVF_I8:
11892                 lclTyp = TYP_LONG;
11893                 goto CONV_OVF;
11894
11895             case CEE_CONV_OVF_U1:
11896                 lclTyp = TYP_UBYTE;
11897                 goto CONV_OVF;
11898             case CEE_CONV_OVF_U2:
11899                 lclTyp = TYP_CHAR;
11900                 goto CONV_OVF;
11901             case CEE_CONV_OVF_U:
11902                 lclTyp = TYP_U_IMPL;
11903                 goto CONV_OVF;
11904             case CEE_CONV_OVF_U4:
11905                 lclTyp = TYP_UINT;
11906                 goto CONV_OVF;
11907             case CEE_CONV_OVF_U8:
11908                 lclTyp = TYP_ULONG;
11909                 goto CONV_OVF;
11910
11911             case CEE_CONV_OVF_I1_UN:
11912                 lclTyp = TYP_BYTE;
11913                 goto CONV_OVF_UN;
11914             case CEE_CONV_OVF_I2_UN:
11915                 lclTyp = TYP_SHORT;
11916                 goto CONV_OVF_UN;
11917             case CEE_CONV_OVF_I_UN:
11918                 lclTyp = TYP_I_IMPL;
11919                 goto CONV_OVF_UN;
11920             case CEE_CONV_OVF_I4_UN:
11921                 lclTyp = TYP_INT;
11922                 goto CONV_OVF_UN;
11923             case CEE_CONV_OVF_I8_UN:
11924                 lclTyp = TYP_LONG;
11925                 goto CONV_OVF_UN;
11926
11927             case CEE_CONV_OVF_U1_UN:
11928                 lclTyp = TYP_UBYTE;
11929                 goto CONV_OVF_UN;
11930             case CEE_CONV_OVF_U2_UN:
11931                 lclTyp = TYP_CHAR;
11932                 goto CONV_OVF_UN;
11933             case CEE_CONV_OVF_U_UN:
11934                 lclTyp = TYP_U_IMPL;
11935                 goto CONV_OVF_UN;
11936             case CEE_CONV_OVF_U4_UN:
11937                 lclTyp = TYP_UINT;
11938                 goto CONV_OVF_UN;
11939             case CEE_CONV_OVF_U8_UN:
11940                 lclTyp = TYP_ULONG;
11941                 goto CONV_OVF_UN;
11942
11943             CONV_OVF_UN:
11944                 uns = true;
11945                 goto CONV_OVF_COMMON;
11946             CONV_OVF:
11947                 uns = false;
11948                 goto CONV_OVF_COMMON;
11949
11950             CONV_OVF_COMMON:
11951                 ovfl = true;
11952                 goto _CONV;
11953
11954             case CEE_CONV_I1:
11955                 lclTyp = TYP_BYTE;
11956                 goto CONV;
11957             case CEE_CONV_I2:
11958                 lclTyp = TYP_SHORT;
11959                 goto CONV;
11960             case CEE_CONV_I:
11961                 lclTyp = TYP_I_IMPL;
11962                 goto CONV;
11963             case CEE_CONV_I4:
11964                 lclTyp = TYP_INT;
11965                 goto CONV;
11966             case CEE_CONV_I8:
11967                 lclTyp = TYP_LONG;
11968                 goto CONV;
11969
11970             case CEE_CONV_U1:
11971                 lclTyp = TYP_UBYTE;
11972                 goto CONV;
11973             case CEE_CONV_U2:
11974                 lclTyp = TYP_CHAR;
11975                 goto CONV;
11976 #if (REGSIZE_BYTES == 8)
11977             case CEE_CONV_U:
11978                 lclTyp = TYP_U_IMPL;
11979                 goto CONV_UN;
11980 #else
11981             case CEE_CONV_U:
11982                 lclTyp = TYP_U_IMPL;
11983                 goto CONV;
11984 #endif
11985             case CEE_CONV_U4:
11986                 lclTyp = TYP_UINT;
11987                 goto CONV;
11988             case CEE_CONV_U8:
11989                 lclTyp = TYP_ULONG;
11990                 goto CONV_UN;
11991
11992             case CEE_CONV_R4:
11993                 lclTyp = TYP_FLOAT;
11994                 goto CONV;
11995             case CEE_CONV_R8:
11996                 lclTyp = TYP_DOUBLE;
11997                 goto CONV;
11998
11999             case CEE_CONV_R_UN:
12000                 lclTyp = TYP_DOUBLE;
12001                 goto CONV_UN;
12002
12003             CONV_UN:
12004                 uns  = true;
12005                 ovfl = false;
12006                 goto _CONV;
12007
12008             CONV:
12009                 uns  = false;
12010                 ovfl = false;
12011                 goto _CONV;
12012
12013             _CONV:
12014                 // just check that we have a number on the stack
12015                 if (tiVerificationNeeded)
12016                 {
12017                     const typeInfo& tiVal = impStackTop().seTypeInfo;
12018                     Verify(tiVal.IsNumberType(), "bad arg");
12019
12020 #ifdef _TARGET_64BIT_
12021                     bool isNative = false;
12022
12023                     switch (opcode)
12024                     {
12025                         case CEE_CONV_OVF_I:
12026                         case CEE_CONV_OVF_I_UN:
12027                         case CEE_CONV_I:
12028                         case CEE_CONV_OVF_U:
12029                         case CEE_CONV_OVF_U_UN:
12030                         case CEE_CONV_U:
12031                             isNative = true;
12032                         default:
12033                             // leave 'isNative' = false;
12034                             break;
12035                     }
12036                     if (isNative)
12037                     {
12038                         tiRetVal = typeInfo::nativeInt();
12039                     }
12040                     else
12041 #endif // _TARGET_64BIT_
12042                     {
12043                         tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12044                     }
12045                 }
12046
12047                 // Only conversions from FLOAT or DOUBLE to an integer type,
12048                 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed into helper calls.
12049
12050                 if (varTypeIsFloating(lclTyp))
12051                 {
12052                     callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12053 #ifdef _TARGET_64BIT_
12054                                // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12055                                // TYP_BYREF could be used as TYP_I_IMPL which is long.
12056                                // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12057                                // and generate SSE2 code instead of going through helper calls.
12058                                || (impStackTop().val->TypeGet() == TYP_BYREF)
12059 #endif
12060                         ;
12061                 }
12062                 else
12063                 {
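                          // The target type is integral here, so a helper call is only needed when the
                          // source operand on the stack is a floating-point value.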
12064                     callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12065                 }
12066
12067                 // At this point uns, ovfl and callNode are all set
12068
12069                 op1 = impPopStack().val;
12070                 impBashVarAddrsToI(op1);
12071
12072                 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12073                 {
12074                     op2 = op1->gtOp.gtOp2;
12075
12076                     if (op2->gtOper == GT_CNS_INT)
12077                     {
12078                         ssize_t ival = op2->gtIntCon.gtIconVal;
12079                         ssize_t mask, umask;
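                              // 'mask' has every bit of the small target type set, while 'umask' excludes
                              // the sign bit.  If the AND constant already guarantees the value fits,
                              // the cast (or even the masking itself) is redundant.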
12080
12081                         switch (lclTyp)
12082                         {
12083                             case TYP_BYTE:
12084                             case TYP_UBYTE:
12085                                 mask  = 0x00FF;
12086                                 umask = 0x007F;
12087                                 break;
12088                             case TYP_CHAR:
12089                             case TYP_SHORT:
12090                                 mask  = 0xFFFF;
12091                                 umask = 0x7FFF;
12092                                 break;
12093
12094                             default:
12095                                 assert(!"unexpected type");
12096                                 return;
12097                         }
12098
12099                         if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12100                         {
12101                             /* Toss the cast, it's a waste of time */
12102
12103                             impPushOnStack(op1, tiRetVal);
12104                             break;
12105                         }
12106                         else if (ival == mask)
12107                         {
12108                             /* Toss the masking, it's a waste of time, since
12109                                we sign-extend from the small value anyway */
12110
12111                             op1 = op1->gtOp.gtOp1;
12112                         }
12113                     }
12114                 }
12115
12116                 /*  The 'op2' sub-operand of a cast is the 'real' type number,
12117                     since the result of a cast to one of the 'small' integer
12118                     types is an integer.
12119                  */
12120
12121                 type = genActualType(lclTyp);
12122
12123 #if SMALL_TREE_NODES
12124                 if (callNode)
12125                 {
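                          // This cast is expected to be morphed into a helper call later, so use the
                          // large-node form of the cast constructor.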
12126                     op1 = gtNewCastNodeL(type, op1, lclTyp);
12127                 }
12128                 else
12129 #endif // SMALL_TREE_NODES
12130                 {
12131                     op1 = gtNewCastNode(type, op1, lclTyp);
12132                 }
12133
12134                 if (ovfl)
12135                 {
12136                     op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12137                 }
12138                 if (uns)
12139                 {
12140                     op1->gtFlags |= GTF_UNSIGNED;
12141                 }
12142                 impPushOnStack(op1, tiRetVal);
12143                 break;
12144
12145             case CEE_NEG:
12146                 if (tiVerificationNeeded)
12147                 {
12148                     tiRetVal = impStackTop().seTypeInfo;
12149                     Verify(tiRetVal.IsNumberType(), "Bad arg");
12150                 }
12151
12152                 op1 = impPopStack().val;
12153                 impBashVarAddrsToI(op1, nullptr);
12154                 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12155                 break;
12156
12157             case CEE_POP:
12158             {
12159                 /* Pull the top value from the stack */
12160
12161                 StackEntry se = impPopStack();
12162                 clsHnd        = se.seTypeInfo.GetClassHandle();
12163                 op1           = se.val;
12164
12165                 /* Get hold of the type of the value being duplicated */
12166
12167                 lclTyp = genActualType(op1->gtType);
12168
12169                 /* Does the value have any side effects? */
12170
12171                 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
12172                 {
12173                     // Since we are throwing away the value, just normalize
12174                     // it to its address.  This is more efficient.
12175
12176                     if (varTypeIsStruct(op1))
12177                     {
12178 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
12179                         // Non-calls, such as obj or ret_expr, have to go through this.
12180                         // Calls with large struct return value have to go through this.
12181                         // Helper calls with small struct return value also have to go
12182                         // through this since they do not follow Unix calling convention.
12183                         if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
12184                             op1->AsCall()->gtCallType == CT_HELPER)
12185 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
12186                         {
12187                             op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
12188                         }
12189                     }
12190
12191                     // If op1 is non-overflow cast, throw it away since it is useless.
12192                     // Another reason for throwing away the useless cast is in the context of
12193                     // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
12194                     // The cast gets added as part of importing GT_CALL, which gets in the way
12195                     // of fgMorphCall() on the forms of tail call nodes that we assert.
12196                     if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
12197                     {
12198                         op1 = op1->gtOp.gtOp1;
12199                     }
12200
12201                     // If 'op1' is an expression, create an assignment node.
12202                     // This helps analyses such as CSE work correctly.
12203
12204                     if (op1->gtOper != GT_CALL)
12205                     {
12206                         op1 = gtUnusedValNode(op1);
12207                     }
12208
12209                     /* Append the value to the tree list */
12210                     goto SPILL_APPEND;
12211                 }
12212
12213                 /* No side effects - just throw the <BEEP> thing away */
12214             }
12215             break;
12216
12217             case CEE_DUP:
12218             {
12219                 if (tiVerificationNeeded)
12220                 {
12221                     // Dup could mark the beginning of a delegate creation sequence, so remember that
12222                     delegateCreateStart = codeAddr - 1;
12223                     impStackTop(0);
12224                 }
12225
12226                 // If the expression to dup is simple, just clone it.
12227                 // Otherwise spill it to a temp, and reload the temp
12228                 // twice.
12229                 StackEntry se = impPopStack();
12230                 tiRetVal      = se.seTypeInfo;
12231                 op1           = se.val;
12232
12233                 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
12234                 {
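                          // Constants (integer zero / FP zero) and locals are cheap to clone directly;
                          // anything else is spilled to a temp so its side effects execute exactly once.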
12235                     const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
12236                     impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
12237                     var_types type = genActualType(lvaTable[tmpNum].TypeGet());
12238                     op1            = gtNewLclvNode(tmpNum, type);
12239
12240                     // Propagate type info to the temp
12241                     if (type == TYP_REF)
12242                     {
12243                         lvaSetClass(tmpNum, op1, tiRetVal.GetClassHandle());
12244                     }
12245                 }
12246
12247                 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12248                                    nullptr DEBUGARG("DUP instruction"));
12249
12250                 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12251                 impPushOnStack(op1, tiRetVal);
12252                 impPushOnStack(op2, tiRetVal);
12253             }
12254             break;
12255
12256             case CEE_STIND_I1:
12257                 lclTyp = TYP_BYTE;
12258                 goto STIND;
12259             case CEE_STIND_I2:
12260                 lclTyp = TYP_SHORT;
12261                 goto STIND;
12262             case CEE_STIND_I4:
12263                 lclTyp = TYP_INT;
12264                 goto STIND;
12265             case CEE_STIND_I8:
12266                 lclTyp = TYP_LONG;
12267                 goto STIND;
12268             case CEE_STIND_I:
12269                 lclTyp = TYP_I_IMPL;
12270                 goto STIND;
12271             case CEE_STIND_REF:
12272                 lclTyp = TYP_REF;
12273                 goto STIND;
12274             case CEE_STIND_R4:
12275                 lclTyp = TYP_FLOAT;
12276                 goto STIND;
12277             case CEE_STIND_R8:
12278                 lclTyp = TYP_DOUBLE;
12279                 goto STIND;
12280             STIND:
12281
12282                 if (tiVerificationNeeded)
12283                 {
12284                     typeInfo instrType(lclTyp);
12285 #ifdef _TARGET_64BIT_
12286                     if (opcode == CEE_STIND_I)
12287                     {
12288                         instrType = typeInfo::nativeInt();
12289                     }
12290 #endif // _TARGET_64BIT_
12291                     verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12292                 }
12293                 else
12294                 {
12295                     compUnsafeCastUsed = true; // Have to go conservative
12296                 }
12297
12298             STIND_POST_VERIFY:
12299
12300                 op2 = impPopStack().val; // value to store
12301                 op1 = impPopStack().val; // address to store to
12302
12303                 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12304                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12305
12306                 impBashVarAddrsToI(op1, op2);
12307
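                      // Reconcile float vs. double: if the value's type and the target type disagree,
                      // impImplicitR4orR8Cast inserts the implicit cast so the store sees a matching type.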
12308                 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12309
12310 #ifdef _TARGET_64BIT_
12311                 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12312                 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12313                 {
12314                     op2->gtType = TYP_I_IMPL;
12315                 }
12316                 else
12317                 {
12318                     // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12319                     //
12320                     if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12321                     {
12322                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12323                         op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12324                     }
12325                     // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12326                     //
12327                     if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12328                     {
12329                         assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12330                         op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12331                     }
12332                 }
12333 #endif // _TARGET_64BIT_
12334
12335                 if (opcode == CEE_STIND_REF)
12336                 {
12337                     // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12338                     assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12339                     lclTyp = genActualType(op2->TypeGet());
12340                 }
12341
12342 // Check target type.
12343 #ifdef DEBUG
12344                 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12345                 {
12346                     if (op2->gtType == TYP_BYREF)
12347                     {
12348                         assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12349                     }
12350                     else if (lclTyp == TYP_BYREF)
12351                     {
12352                         assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12353                     }
12354                 }
12355                 else
12356                 {
12357                     assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12358                               ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12359                               (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12360                 }
12361 #endif
12362
12363                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12364
12365                 // stind could point anywhere, for example a boxed class static int
12366                 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12367
12368                 if (prefixFlags & PREFIX_VOLATILE)
12369                 {
12370                     assert(op1->OperGet() == GT_IND);
12371                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12372                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12373                     op1->gtFlags |= GTF_IND_VOLATILE;
12374                 }
12375
12376                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12377                 {
12378                     assert(op1->OperGet() == GT_IND);
12379                     op1->gtFlags |= GTF_IND_UNALIGNED;
12380                 }
12381
12382                 op1 = gtNewAssignNode(op1, op2);
12383                 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12384
12385                 // Spill side-effects AND global-data-accesses
12386                 if (verCurrentState.esStackDepth > 0)
12387                 {
12388                     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12389                 }
12390
12391                 goto APPEND;
12392
12393             case CEE_LDIND_I1:
12394                 lclTyp = TYP_BYTE;
12395                 goto LDIND;
12396             case CEE_LDIND_I2:
12397                 lclTyp = TYP_SHORT;
12398                 goto LDIND;
12399             case CEE_LDIND_U4:
12400             case CEE_LDIND_I4:
12401                 lclTyp = TYP_INT;
12402                 goto LDIND;
12403             case CEE_LDIND_I8:
12404                 lclTyp = TYP_LONG;
12405                 goto LDIND;
12406             case CEE_LDIND_REF:
12407                 lclTyp = TYP_REF;
12408                 goto LDIND;
12409             case CEE_LDIND_I:
12410                 lclTyp = TYP_I_IMPL;
12411                 goto LDIND;
12412             case CEE_LDIND_R4:
12413                 lclTyp = TYP_FLOAT;
12414                 goto LDIND;
12415             case CEE_LDIND_R8:
12416                 lclTyp = TYP_DOUBLE;
12417                 goto LDIND;
12418             case CEE_LDIND_U1:
12419                 lclTyp = TYP_UBYTE;
12420                 goto LDIND;
12421             case CEE_LDIND_U2:
12422                 lclTyp = TYP_CHAR;
12423                 goto LDIND;
12424             LDIND:
12425
12426                 if (tiVerificationNeeded)
12427                 {
12428                     typeInfo lclTiType(lclTyp);
12429 #ifdef _TARGET_64BIT_
12430                     if (opcode == CEE_LDIND_I)
12431                     {
12432                         lclTiType = typeInfo::nativeInt();
12433                     }
12434 #endif // _TARGET_64BIT_
12435                     tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12436                     tiRetVal.NormaliseForStack();
12437                 }
12438                 else
12439                 {
12440                     compUnsafeCastUsed = true; // Have to go conservative
12441                 }
12442
12443             LDIND_POST_VERIFY:
12444
12445                 op1 = impPopStack().val; // address to load from
12446                 impBashVarAddrsToI(op1);
12447
12448 #ifdef _TARGET_64BIT_
12449                 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12450                 //
12451                 if (genActualType(op1->gtType) == TYP_INT)
12452                 {
12453                     assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12454                     op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12455                 }
12456 #endif
12457
12458                 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12459
12460                 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12461
12462                 // ldind could point anywhere, for example a boxed class static int
12463                 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12464
12465                 if (prefixFlags & PREFIX_VOLATILE)
12466                 {
12467                     assert(op1->OperGet() == GT_IND);
12468                     op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
12469                     op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12470                     op1->gtFlags |= GTF_IND_VOLATILE;
12471                 }
12472
12473                 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12474                 {
12475                     assert(op1->OperGet() == GT_IND);
12476                     op1->gtFlags |= GTF_IND_UNALIGNED;
12477                 }
12478
12479                 impPushOnStack(op1, tiRetVal);
12480
12481                 break;
12482
12483             case CEE_UNALIGNED:
12484
12485                 assert(sz == 1);
12486                 val = getU1LittleEndian(codeAddr);
12487                 ++codeAddr;
12488                 JITDUMP(" %u", val);
12489                 if ((val != 1) && (val != 2) && (val != 4))
12490                 {
12491                     BADCODE("Alignment unaligned. must be 1, 2, or 4");
12492                 }
12493
12494                 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12495                 prefixFlags |= PREFIX_UNALIGNED;
12496
12497                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12498
12499             PREFIX:
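                      // Read the opcode that follows the prefix and loop back to decode it; the prefix
                      // flags gathered above remain in effect for that instruction.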
12500                 opcode     = (OPCODE)getU1LittleEndian(codeAddr);
12501                 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12502                 codeAddr += sizeof(__int8);
12503                 goto DECODE_OPCODE;
12504
12505             case CEE_VOLATILE:
12506
12507                 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12508                 prefixFlags |= PREFIX_VOLATILE;
12509
12510                 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12511
12512                 assert(sz == 0);
12513                 goto PREFIX;
12514
12515             case CEE_LDFTN:
12516             {
12517                 // Need to do a lookup here so that we perform an access check
12518                 // and do a NOWAY if protections are violated
12519                 _impResolveToken(CORINFO_TOKENKIND_Method);
12520
12521                 JITDUMP(" %08X", resolvedToken.token);
12522
12523                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12524                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12525                               &callInfo);
12526
12527                 // This check really only applies to intrinsic Array.Address methods
12528                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12529                 {
12530                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12531                 }
12532
12533                 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12534                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12535
12536                 if (tiVerificationNeeded)
12537                 {
12538                     // LDFTN could mark the beginning of a delegate creation sequence, so remember that
12539                     delegateCreateStart = codeAddr - 2;
12540
12541                     // check any constraints on the callee's class and type parameters
12542                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12543                                    "method has unsatisfied class constraints");
12544                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12545                                                                                 resolvedToken.hMethod),
12546                                    "method has unsatisfied method constraints");
12547
12548                     mflags = callInfo.verMethodFlags;
12549                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12550                 }
12551
12552             DO_LDFTN:
12553                 op1 = impMethodPointer(&resolvedToken, &callInfo);
12554                 if (compDonotInline())
12555                 {
12556                     return;
12557                 }
12558
12559                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12560                 impPushOnStack(op1, typeInfo(heapToken));
12561
12562                 break;
12563             }
12564
12565             case CEE_LDVIRTFTN:
12566             {
12567                 /* Get the method token */
12568
12569                 _impResolveToken(CORINFO_TOKENKIND_Method);
12570
12571                 JITDUMP(" %08X", resolvedToken.token);
12572
12573                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12574                               addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12575                                                     CORINFO_CALLINFO_CALLVIRT)),
12576                               &callInfo);
12577
12578                 // This check really only applies to intrinsic Array.Address methods
12579                 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12580                 {
12581                     NO_WAY("Currently do not support LDFTN of Parameterized functions");
12582                 }
12583
12584                 mflags = callInfo.methodFlags;
12585
12586                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12587
12588                 if (compIsForInlining())
12589                 {
12590                     if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12591                     {
12592                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12593                         return;
12594                     }
12595                 }
12596
12597                 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12598
12599                 if (tiVerificationNeeded)
12600                 {
12601
12602                     Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12603                     Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12604
12605                     // JIT32 verifier rejects verifiable ldvirtftn pattern
12606                     typeInfo declType =
12607                         verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12608
12609                     typeInfo arg = impStackTop().seTypeInfo;
12610                     Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12611                            "bad ldvirtftn");
12612
12613                     CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12614                     if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12615                     {
12616                         instanceClassHnd = arg.GetClassHandleForObjRef();
12617                     }
12618
12619                     // check any constraints on the method's class and type parameters
12620                     VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12621                                    "method has unsatisfied class constraints");
12622                     VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12623                                                                                 resolvedToken.hMethod),
12624                                    "method has unsatisfied method constraints");
12625
12626                     if (mflags & CORINFO_FLG_PROTECTED)
12627                     {
12628                         Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12629                                "Accessing protected method through wrong type.");
12630                     }
12631                 }
12632
12633                 /* Get the object-ref */
12634                 op1 = impPopStack().val;
12635                 assertImp(op1->gtType == TYP_REF);
12636
12637                 if (opts.IsReadyToRun())
12638                 {
12639                     if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12640                     {
12641                         if (op1->gtFlags & GTF_SIDE_EFFECT)
12642                         {
12643                             op1 = gtUnusedValNode(op1);
12644                             impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12645                         }
12646                         goto DO_LDFTN;
12647                     }
12648                 }
12649                 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12650                 {
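                          // The target is statically resolvable (final, static, or non-virtual), so the
                          // object reference is only evaluated for its side effects and the plain LDFTN
                          // path is used instead of a runtime ldvirtftn lookup.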
12651                     if (op1->gtFlags & GTF_SIDE_EFFECT)
12652                     {
12653                         op1 = gtUnusedValNode(op1);
12654                         impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12655                     }
12656                     goto DO_LDFTN;
12657                 }
12658
12659                 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12660                 if (compDonotInline())
12661                 {
12662                     return;
12663                 }
12664
12665                 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12666                 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
12667                 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
12668                 impPushOnStack(fptr, typeInfo(heapToken));
12669
12670                 break;
12671             }
12672
12673             case CEE_CONSTRAINED:
12674
12675                 assertImp(sz == sizeof(unsigned));
12676                 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12677                 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12678                 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12679
12680                 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12681                 prefixFlags |= PREFIX_CONSTRAINED;
12682
12683                 {
12684                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12685                     if (actualOpcode != CEE_CALLVIRT)
12686                     {
12687                         BADCODE("constrained. has to be followed by callvirt");
12688                     }
12689                 }
12690
12691                 goto PREFIX;
12692
12693             case CEE_READONLY:
12694                 JITDUMP(" readonly.");
12695
12696                 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12697                 prefixFlags |= PREFIX_READONLY;
12698
12699                 {
12700                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12701                     if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12702                     {
12703                         BADCODE("readonly. has to be followed by ldelema or call");
12704                     }
12705                 }
12706
12707                 assert(sz == 0);
12708                 goto PREFIX;
12709
12710             case CEE_TAILCALL:
12711                 JITDUMP(" tail.");
12712
12713                 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12714                 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12715
12716                 {
12717                     OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12718                     if (!impOpcodeIsCallOpcode(actualOpcode))
12719                     {
12720                         BADCODE("tailcall. has to be followed by call, callvirt or calli");
12721                     }
12722                 }
12723                 assert(sz == 0);
12724                 goto PREFIX;
12725
12726             case CEE_NEWOBJ:
12727
12728                 /* Since we will implicitly insert newObjThisPtr at the start of the
12729                    argument list, spill any GTF_ORDER_SIDEEFF */
12730                 impSpillSpecialSideEff();
12731
12732                 /* NEWOBJ does not respond to TAIL */
12733                 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12734
12735                 /* NEWOBJ does not respond to CONSTRAINED */
12736                 prefixFlags &= ~PREFIX_CONSTRAINED;
12737
12738                 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12739
12740                 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12741                               addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12742                               &callInfo);
12743
12744                 if (compIsForInlining())
12745                 {
12746                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12747                     {
12748                         // Check to see if this call violates the boundary.
12749                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12750                         return;
12751                     }
12752                 }
12753
12754                 mflags = callInfo.methodFlags;
12755
12756                 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12757                 {
12758                     BADCODE("newobj on static or abstract method");
12759                 }
12760
12761                 // Insert the security callout before any actual code is generated
12762                 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12763
12764                 // There are three different cases for new:
12765                 //      1) Object is an array (arrays are treated specially by the EE)
12766                 //      2) Object is some other variable-sized object (e.g. String)
12767                 //      3) Class size can be determined beforehand (the normal case)
12768                 // In the first two cases the object size is variable (it depends on the arguments).
12769                 // In the first case we need to call a NEWOBJ helper (multinewarray),
12770                 // in the second case we call the constructor with a '0' this pointer,
12771                 // and in the third case we allocate the memory and then call the constructor.
12772
12773                 clsFlags = callInfo.classFlags;
12774                 if (clsFlags & CORINFO_FLG_ARRAY)
12775                 {
12776                     if (tiVerificationNeeded)
12777                     {
12778                         CORINFO_CLASS_HANDLE elemTypeHnd;
12779                         INDEBUG(CorInfoType corType =)
12780                         info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12781                         assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12782                         Verify(elemTypeHnd == nullptr ||
12783                                    !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12784                                "newarr of byref-like objects");
12785                         verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12786                                       ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12787                                       &callInfo DEBUGARG(info.compFullName));
12788                     }
12789                     // Arrays need to call the NEWOBJ helper.
12790                     assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12791
12792                     impImportNewObjArray(&resolvedToken, &callInfo);
12793                     if (compDonotInline())
12794                     {
12795                         return;
12796                     }
12797
12798                     callTyp = TYP_REF;
12799                     break;
12800                 }
12801                 // At present this can only be String
12802                 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12803                 {
12804                     if (IsTargetAbi(CORINFO_CORERT_ABI))
12805                     {
12806                         // The dummy argument does not exist in CoreRT
12807                         newObjThisPtr = nullptr;
12808                     }
12809                     else
12810                     {
12811                         // This is the case for variable-sized objects that are not
12812                         // arrays.  In this case, call the constructor with a null 'this'
12813                         // pointer
12814                         newObjThisPtr = gtNewIconNode(0, TYP_REF);
12815                     }
12816
12817                     /* Remember that this basic block contains 'new' of an object */
12818                     block->bbFlags |= BBF_HAS_NEWOBJ;
12819                     optMethodFlags |= OMF_HAS_NEWOBJ;
12820                 }
12821                 else
12822                 {
12823                     // This is the normal case where the size of the object is
12824                     // fixed.  Allocate the memory and call the constructor.
12825
12826                     // Note: We cannot add a peep to avoid use of the temp here
12827                     // because we don't have enough interference info to detect when
12828                     // the sources and the destination interfere, for example: s = new S(ref);
12829
12830                     // TODO: Find the correct place to introduce a general
12831                     // reverse copy prop for struct return values from newobj or
12832                     // any function returning structs.
12833
12834                     /* get a temporary for the new object */
12835                     lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12836                     if (compDonotInline())
12837                     {
12838                         // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
12839                         assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
12840                         return;
12841                     }
12842
12843                     // In the value class case we only need clsHnd for size calcs.
12844                     //
12845                     // The lookup of the code pointer will be handled by CALL in this case
12846                     if (clsFlags & CORINFO_FLG_VALUECLASS)
12847                     {
12848                         if (compIsForInlining())
12849                         {
12850                             // If value class has GC fields, inform the inliner. It may choose to
12851                             // bail out on the inline.
12852                             DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12853                             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12854                             {
12855                                 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12856                                 if (compInlineResult->IsFailure())
12857                                 {
12858                                     return;
12859                                 }
12860
12861                                 // Do further notification in the case where the call site is rare;
12862                                 // some policies do not track the relative hotness of call sites for
12863                                 // "always" inline cases.
12864                                 if (impInlineInfo->iciBlock->isRunRarely())
12865                                 {
12866                                     compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12867                                     if (compInlineResult->IsFailure())
12868                                     {
12869                                         return;
12870                                     }
12871                                 }
12872                             }
12873                         }
12874
12875                         CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12876                         unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);
12877
12878                         if (impIsPrimitive(jitTyp))
12879                         {
12880                             lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12881                         }
12882                         else
12883                         {
12884                             // The local variable itself is the allocated space.
12885                             // Here we need the unsafe value cls check, since the address of the struct is taken
12886                             // for further use and is potentially exploitable.
12887                             lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12888                         }
12889
12890                         // Append a tree to zero-out the temp
12891                         newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12892
12893                         newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
12894                                                        gtNewIconNode(0), // Value
12895                                                        size,             // Size
12896                                                        false,            // isVolatile
12897                                                        false);           // not copyBlock
12898                         impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12899
12900                         // Obtain the address of the temp
12901                         newObjThisPtr =
12902                             gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
12903                     }
12904                     else
12905                     {
12906 #ifdef FEATURE_READYTORUN_COMPILER
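                              // Try the ReadyToRun allocation helper first; if it is not available we
                              // fall back to the normal allocation path below.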
12907                         if (opts.IsReadyToRun())
12908                         {
12909                             op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12910                             usingReadyToRunHelper = (op1 != nullptr);
12911                         }
12912
12913                         if (!usingReadyToRunHelper)
12914 #endif
12915                         {
12916                             op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12917                             if (op1 == nullptr)
12918                             { // compDonotInline()
12919                                 return;
12920                             }
12921
12922                             // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12923                             // and the newfast call with a single call to a dynamic R2R cell that will:
12924                             //      1) Load the context
12925                             //      2) Perform the generic dictionary lookup and caching, and generate the appropriate
12926                             //      stub
12927                             //      3) Allocate and return the new object
12928                             // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12929
12930                             op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12931                                                     resolvedToken.hClass, TYP_REF, op1);
12932                         }
12933
12934                         // Remember that this basic block contains 'new' of an object
12935                         block->bbFlags |= BBF_HAS_NEWOBJ;
12936                         optMethodFlags |= OMF_HAS_NEWOBJ;
12937
12938                         // Append the assignment to the temp/local. We don't need to spill
12939                         // at all as we are just calling an EE-Jit helper which can only
12940                         // cause an (async) OutOfMemoryException.
12941
12942                         // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12943                         // to a temp. Note that the pattern "temp = allocObj" is required
12944                         // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12945                         // without an exhaustive walk over all expressions.
12946
12947                         impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12948                         lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
12949
12950                         newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
12951                     }
12952                 }
12953                 goto CALL;
12954
12955             case CEE_CALLI:
12956
12957                 /* CALLI does not respond to CONSTRAINED */
12958                 prefixFlags &= ~PREFIX_CONSTRAINED;
12959
12960                 if (compIsForInlining())
12961                 {
12962                     // CALLI doesn't have a method handle, so assume the worst.
12963                     if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12964                     {
12965                         compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12966                         return;
12967                     }
12968                 }
12969
12970             // fall through
12971
12972             case CEE_CALLVIRT:
12973             case CEE_CALL:
12974
12975                 // We can't call getCallInfo on the token from a CALLI, but we need it in
12976                 // many other places.  We unfortunately embed that knowledge here.
12977                 if (opcode != CEE_CALLI)
12978                 {
12979                     _impResolveToken(CORINFO_TOKENKIND_Method);
12980
12981                     eeGetCallInfo(&resolvedToken,
12982                                   (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12983                                   // this is how impImportCall invokes getCallInfo
12984                                   addVerifyFlag(
12985                                       combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12986                                               (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12987                                                                        : CORINFO_CALLINFO_NONE)),
12988                                   &callInfo);
12989                 }
12990                 else
12991                 {
12992                     // Suppress uninitialized use warning.
12993                     memset(&resolvedToken, 0, sizeof(resolvedToken));
12994                     memset(&callInfo, 0, sizeof(callInfo));
12995
12996                     resolvedToken.token = getU4LittleEndian(codeAddr);
12997                 }
12998
12999             CALL: // memberRef should be set.
13000                 // newObjThisPtr should be set for CEE_NEWOBJ
13001
13002                 JITDUMP(" %08X", resolvedToken.token);
13003                 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13004
13005                 bool newBBcreatedForTailcallStress;
13006
13007                 newBBcreatedForTailcallStress = false;
13008
13009                 if (compIsForInlining())
13010                 {
13011                     if (compDonotInline())
13012                     {
13013                         return;
13014                     }
13015                     // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13016                     assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13017                 }
13018                 else
13019                 {
13020                     if (compTailCallStress())
13021                     {
13022                         // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13023                         // Tail call stress only recognizes call+ret patterns and forces them to be
13024                         // explicit tail prefixed calls.  Also fgMakeBasicBlocks() under tail call stress
13025                         // doesn't import the 'ret' opcode following the call into the basic block containing
13026                         // the call; instead it imports it into a new basic block.  Note that fgMakeBasicBlocks()
13027                         // is already checking that there is an opcode following call and hence it is
13028                         // safe here to read next opcode without bounds check.
13029                         newBBcreatedForTailcallStress =
13030                             impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
13031                                                              // make it jump to RET.
13032                             (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
13033
13034                         if (newBBcreatedForTailcallStress &&
13035                             !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
13036                             verCheckTailCallConstraint(opcode, &resolvedToken,
13037                                                        constraintCall ? &constrainedResolvedToken : nullptr,
13038                                                        true) // Is it legal to do a tailcall?
13039                             )
13040                         {
13041                             // Stress the tailcall.
13042                             JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13043                             prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13044                         }
13045                     }
13046                 }
13047
13048                 // This is split up to avoid goto flow warnings.
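                      // A call is treated as recursive only when compiling the root method and the
                      // callee is that same method; calls inside inlinees are never flagged as recursive here.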
13049                 bool isRecursive;
13050                 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13051
13052                 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
13053                 // hence will not be considered for implicit tail calling.
13054                 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13055                 {
13056                     if (compIsForInlining())
13057                     {
13058 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13059                         // Are we inlining at an implicit tail call site? If so then we can flag
13060                         // implicit tail call sites in the inline body. These call sites
13061                         // often end up in non BBJ_RETURN blocks, so only flag them when
13062                         // we're able to handle shared returns.
13063                         if (impInlineInfo->iciCall->IsImplicitTailCall())
13064                         {
13065                             JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13066                             prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13067                         }
13068 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13069                     }
13070                     else
13071                     {
13072                         JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13073                         prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13074                     }
13075                 }
13076
13077                 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13078                 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13079                 readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;
13080
13081                 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13082                 {
13083                     // All calls and delegates need a security callout.
13084                     // For delegates, this is the call to the delegate constructor, not the access check on the
13085                     // LD(virt)FTN.
13086                     impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13087
13088 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
13089
13090                 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
13091                 // and the field it is reading, thus it is now unverifiable to not immediately precede with
13092                 // ldtoken <field token>, and we now check accessibility
13093                 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
13094                     (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
13095                 {
13096                     if (prevOpcode != CEE_LDTOKEN)
13097                     {
13098                         Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
13099                     }
13100                     else
13101                     {
13102                         assert(lastLoadToken != NULL);
13103                         // Now that we know we have a token, verify that it is accessible for loading
13104                         CORINFO_RESOLVED_TOKEN resolvedLoadField;
13105                         impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
13106                         eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
13107                         impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13108                     }
13109                 }
13110
13111 #endif // DevDiv 410397
13112                 }
13113
13114                 if (tiVerificationNeeded)
13115                 {
13116                     verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13117                                   explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13118                                   &callInfo DEBUGARG(info.compFullName));
13119                 }
13120
13121                 // Insert delegate callout here.
13122                 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
13123                 {
13124 #ifdef DEBUG
13125                     // We should do this only if verification is enabled
13126                     // If verification is disabled, delegateCreateStart will not be initialized correctly
13127                     if (tiVerificationNeeded)
13128                     {
13129                         mdMemberRef delegateMethodRef = mdMemberRefNil;
13130                         // We should get here only for well formed delegate creation.
13131                         assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
13132                     }
13133 #endif
13134                 }
13135
13136                 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13137                                         newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13138                 if (compDonotInline())
13139                 {
13140                     return;
13141                 }
13142
13143                 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
13144                                                                        // have created a new BB after the "call"
13145                 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
13146                 {
13147                     assert(!compIsForInlining());
13148                     goto RET;
13149                 }
13150
13151                 break;
13152
13153             case CEE_LDFLD:
13154             case CEE_LDSFLD:
13155             case CEE_LDFLDA:
13156             case CEE_LDSFLDA:
13157             {
13158
13159                 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
13160                 BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
13161
13162                 /* Get the CP_Fieldref index */
13163                 assertImp(sz == sizeof(unsigned));
13164
13165                 _impResolveToken(CORINFO_TOKENKIND_Field);
13166
13167                 JITDUMP(" %08X", resolvedToken.token);
13168
13169                 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
13170
13171                 GenTreePtr           obj     = nullptr;
13172                 typeInfo*            tiObj   = nullptr;
13173                 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
13174
13175                 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
13176                 {
13177                     tiObj         = &impStackTop().seTypeInfo;
13178                     StackEntry se = impPopStack();
13179                     objType       = se.seTypeInfo.GetClassHandle();
13180                     obj           = se.val;
13181
13182                     if (impIsThis(obj))
13183                     {
13184                         aflags |= CORINFO_ACCESS_THIS;
13185
13186                         // An optimization for Contextful classes:
13187                         // we unwrap the proxy when we have a 'this reference'
13188
13189                         if (info.compUnwrapContextful)
13190                         {
13191                             aflags |= CORINFO_ACCESS_UNWRAP;
13192                         }
13193                     }
13194                 }
13195
13196                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13197
13198                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13199                 // handle
13200                 CorInfoType ciType = fieldInfo.fieldType;
13201                 clsHnd             = fieldInfo.structType;
13202
13203                 lclTyp = JITtype2varType(ciType);
13204
13205 #ifdef _TARGET_AMD64_
13206                 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
13207 #endif // _TARGET_AMD64_
13208
13209                 if (compIsForInlining())
13210                 {
13211                     switch (fieldInfo.fieldAccessor)
13212                     {
13213                         case CORINFO_FIELD_INSTANCE_HELPER:
13214                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13215                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13216                         case CORINFO_FIELD_STATIC_TLS:
13217
13218                             compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
13219                             return;
13220
13221                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13222                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13223                             /* We may be able to inline the field accessors in specific instantiations of generic
13224                              * methods */
13225                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
13226                             return;
13227
13228                         default:
13229                             break;
13230                     }
13231
13232                     if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
13233                         clsHnd)
13234                     {
13235                         if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
13236                             !(info.compFlags & CORINFO_FLG_FORCEINLINE))
13237                         {
13238                             // Loading a static valuetype field usually will cause a JitHelper to be called
13239                             // for the static base. This will bloat the code.
13240                             compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
13241
13242                             if (compInlineResult->IsFailure())
13243                             {
13244                                 return;
13245                             }
13246                         }
13247                     }
13248                 }
13249
13250                 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
13251                 if (isLoadAddress)
13252                 {
13253                     tiRetVal.MakeByRef();
13254                 }
13255                 else
13256                 {
13257                     tiRetVal.NormaliseForStack();
13258                 }
13259
13260                 // Perform this check always to ensure that we get field access exceptions even with
13261                 // SkipVerification.
13262                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13263
13264                 if (tiVerificationNeeded)
13265                 {
13266                     // You can also pass the unboxed struct to LDFLD
13267                     BOOL bAllowPlainValueTypeAsThis = FALSE;
13268                     if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13269                     {
13270                         bAllowPlainValueTypeAsThis = TRUE;
13271                     }
13272
13273                     verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13274
13275                     // If we're doing this on a heap object or from a 'safe' byref
13276                     // then the result is a safe byref too
13277                     if (isLoadAddress) // load address
13278                     {
13279                         if (fieldInfo.fieldFlags &
13280                             CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
13281                         {
13282                             if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13283                             {
13284                                 tiRetVal.SetIsPermanentHomeByRef();
13285                             }
13286                         }
13287                         else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13288                         {
13289                             // ldflda of byref is safe if done on a gc object or on a
13290                             // safe byref
13291                             tiRetVal.SetIsPermanentHomeByRef();
13292                         }
13293                     }
13294                 }
13295                 else
13296                 {
13297                     // tiVerificationNeeded is false.
13298                     // Raise InvalidProgramException if static load accesses non-static field
13299                     if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13300                     {
13301                         BADCODE("static access on an instance field");
13302                     }
13303                 }
13304
13305                 // We are using ldfld/a on a static field. We allow it, but need to evaluate any side effects of obj.
13306                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13307                 {
13308                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13309                     {
13310                         obj = gtUnusedValNode(obj);
13311                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13312                     }
13313                     obj = nullptr;
13314                 }
13315
13316                 /* Preserve 'small' int types */
13317                 if (lclTyp > TYP_INT)
13318                 {
13319                     lclTyp = genActualType(lclTyp);
13320                 }
13321
13322                 bool usesHelper = false;
13323
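                      // fieldInfo.fieldAccessor (reported by the EE) determines how the load is expanded:
                      // a direct GT_FIELD at a known offset, a TLS-relative access, a helper call, a static
                      // address computation, or (for the intrinsic accessors below) a JIT-time constant.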
13324                 switch (fieldInfo.fieldAccessor)
13325                 {
13326                     case CORINFO_FIELD_INSTANCE:
13327 #ifdef FEATURE_READYTORUN_COMPILER
13328                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13329 #endif
13330                     {
13331                         bool nullcheckNeeded = false;
13332
13333                         obj = impCheckForNullPointer(obj);
13334
13335                         if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13336                         {
13337                             nullcheckNeeded = true;
13338                         }
13339
13340                         // If the object is a struct, what we really want is
13341                         // for the field to operate on the address of the struct.
13342                         if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13343                         {
13344                             assert(opcode == CEE_LDFLD && objType != nullptr);
13345
13346                             obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13347                         }
13348
13349                         /* Create the data member node */
13350                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13351
13352 #ifdef FEATURE_READYTORUN_COMPILER
13353                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13354                         {
13355                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13356                         }
13357 #endif
13358
13359                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13360
13361                         if (fgAddrCouldBeNull(obj))
13362                         {
13363                             op1->gtFlags |= GTF_EXCEPT;
13364                         }
13365
13366                         // If gtFldObj is a BYREF then our target is a value class and
13367                         // it could point anywhere, for example a boxed class static int
13368                         if (obj->gtType == TYP_BYREF)
13369                         {
13370                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13371                         }
13372
13373                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13374                         if (StructHasOverlappingFields(typeFlags))
13375                         {
13376                             op1->gtField.gtFldMayOverlap = true;
13377                         }
13378
13379                         // wrap it in an address-of operator if necessary
13380                         if (isLoadAddress)
13381                         {
13382                             op1 = gtNewOperNode(GT_ADDR,
13383                                                 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13384                         }
13385                         else
13386                         {
13387                             if (compIsForInlining() &&
13388                                 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13389                                                                                    impInlineInfo->inlArgInfo))
13390                             {
13391                                 impInlineInfo->thisDereferencedFirst = true;
13392                             }
13393                         }
13394                     }
13395                     break;
13396
13397                     case CORINFO_FIELD_STATIC_TLS:
13398 #ifdef _TARGET_X86_
13399                         // Legacy TLS access is implemented as intrinsic on x86 only
13400
13401                         /* Create the data member node */
13402                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13403                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13404
13405                         if (isLoadAddress)
13406                         {
13407                             op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13408                         }
13409                         break;
13410 #else
13411                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13412
13413                         __fallthrough;
13414 #endif
13415
13416                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13417                     case CORINFO_FIELD_INSTANCE_HELPER:
13418                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13419                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13420                                                clsHnd, nullptr);
13421                         usesHelper = true;
13422                         break;
13423
13424                     case CORINFO_FIELD_STATIC_ADDRESS:
13425                         // Replace static read-only fields with constant if possible
13426                         if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13427                             !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13428                             (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13429                         {
13430                             CorInfoInitClassResult initClassResult =
13431                                 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13432                                                             impTokenLookupContextHandle);
13433
13434                             if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13435                             {
13436                                 void** pFldAddr = nullptr;
13437                                 void*  fldAddr =
13438                                     info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13439
13440                                 // We should always be able to access this static's address directly
13441                                 assert(pFldAddr == nullptr);
13442
13443                                 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13444                                 goto FIELD_DONE;
13445                             }
13446                         }
13447
13448                         __fallthrough;
13449
13450                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13451                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13452                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13453                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13454                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13455                                                          lclTyp);
13456                         break;
13457
13458                     case CORINFO_FIELD_INTRINSIC_ZERO:
13459                     {
13460                         assert(aflags & CORINFO_ACCESS_GET);
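                          // Intrinsic "zero" field (e.g. IntPtr.Zero): the EE reports it as
                          // CORINFO_FIELD_INTRINSIC_ZERO, so the load folds to a constant 0.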
13461                         op1 = gtNewIconNode(0, lclTyp);
13462                         goto FIELD_DONE;
13463                     }
13464                     break;
13465
13466                     case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13467                     {
13468                         assert(aflags & CORINFO_ACCESS_GET);
13469
13470                         LPVOID         pValue;
13471                         InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13472                         op1                = gtNewStringLiteralNode(iat, pValue);
13473                         goto FIELD_DONE;
13474                     }
13475                     break;
13476
13477                     case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
13478                     {
13479                         assert(aflags & CORINFO_ACCESS_GET);
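                          // Endianness is a JIT-time constant, so reads of this intrinsic field
                          // (e.g. BitConverter.IsLittleEndian) fold to 0 or 1 for the target.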
13480 #if BIGENDIAN
13481                         op1 = gtNewIconNode(0, lclTyp);
13482 #else
13483                         op1                     = gtNewIconNode(1, lclTyp);
13484 #endif
13485                         goto FIELD_DONE;
13486                     }
13487                     break;
13488
13489                     default:
13490                         assert(!"Unexpected fieldAccessor");
13491                 }
13492
13493                 if (!isLoadAddress)
13494                 {
13495
13496                     if (prefixFlags & PREFIX_VOLATILE)
13497                     {
13498                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13499                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13500
13501                         if (!usesHelper)
13502                         {
13503                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13504                                    (op1->OperGet() == GT_OBJ));
13505                             op1->gtFlags |= GTF_IND_VOLATILE;
13506                         }
13507                     }
13508
13509                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13510                     {
13511                         if (!usesHelper)
13512                         {
13513                             assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13514                                    (op1->OperGet() == GT_OBJ));
13515                             op1->gtFlags |= GTF_IND_UNALIGNED;
13516                         }
13517                     }
13518                 }
13519
13520                 /* Check if the class needs explicit initialization */
13521
13522                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13523                 {
13524                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13525                     if (compDonotInline())
13526                     {
13527                         return;
13528                     }
13529                     if (helperNode != nullptr)
13530                     {
13531                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13532                     }
13533                 }
13534
13535             FIELD_DONE:
13536                 impPushOnStack(op1, tiRetVal);
13537             }
13538             break;
13539
13540             case CEE_STFLD:
13541             case CEE_STSFLD:
13542             {
13543
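                      // Field store: the value to store is popped first, then (for CEE_STFLD) the object;
                      // the field token is resolved, access is checked, and the assignment tree is built
                      // and appended below.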
13544                 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13545
13546                 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13547
13548                 /* Get the CP_Fieldref index */
13549
13550                 assertImp(sz == sizeof(unsigned));
13551
13552                 _impResolveToken(CORINFO_TOKENKIND_Field);
13553
13554                 JITDUMP(" %08X", resolvedToken.token);
13555
13556                 int        aflags = CORINFO_ACCESS_SET;
13557                 GenTreePtr obj    = nullptr;
13558                 typeInfo*  tiObj  = nullptr;
13559                 typeInfo   tiVal;
13560
13561                 /* Pull the value from the stack */
13562                 StackEntry se = impPopStack();
13563                 op2           = se.val;
13564                 tiVal         = se.seTypeInfo;
13565                 clsHnd        = tiVal.GetClassHandle();
13566
13567                 if (opcode == CEE_STFLD)
13568                 {
13569                     tiObj = &impStackTop().seTypeInfo;
13570                     obj   = impPopStack().val;
13571
13572                     if (impIsThis(obj))
13573                     {
13574                         aflags |= CORINFO_ACCESS_THIS;
13575
13576                         // An optimization for Contextful classes:
13577                         // we unwrap the proxy when we have a 'this reference'
13578
13579                         if (info.compUnwrapContextful)
13580                         {
13581                             aflags |= CORINFO_ACCESS_UNWRAP;
13582                         }
13583                     }
13584                 }
13585
13586                 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13587
13588                 // Figure out the type of the member.  We always call canAccessField, so you always need this
13589                 // handle
13590                 CorInfoType ciType = fieldInfo.fieldType;
13591                 fieldClsHnd        = fieldInfo.structType;
13592
13593                 lclTyp = JITtype2varType(ciType);
13594
13595                 if (compIsForInlining())
13596                 {
13597                     /* Is this a 'special' (COM) field? A TLS ref static field? A field stored in the GC heap? Or a
13598                      * per-inst static? */
13599
13600                     switch (fieldInfo.fieldAccessor)
13601                     {
13602                         case CORINFO_FIELD_INSTANCE_HELPER:
13603                         case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13604                         case CORINFO_FIELD_STATIC_ADDR_HELPER:
13605                         case CORINFO_FIELD_STATIC_TLS:
13606
13607                             compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13608                             return;
13609
13610                         case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13611                         case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13612                             /* We may be able to inline the field accessors in specific instantiations of generic
13613                              * methods */
13614                             compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13615                             return;
13616
13617                         default:
13618                             break;
13619                     }
13620                 }
13621
13622                 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13623
13624                 if (tiVerificationNeeded)
13625                 {
13626                     verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13627                     typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13628                     Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13629                 }
13630                 else
13631                 {
13632                     // tiVerificationNeeded is false.
13633                     // Raise InvalidProgramException if static store accesses non-static field
13634                     if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13635                     {
13636                         BADCODE("static access on an instance field");
13637                     }
13638                 }
13639
13640                 // We are using stfld on a static field.
13641                 // We allow it, but need to eval any side-effects for obj
13642                 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13643                 {
13644                     if (obj->gtFlags & GTF_SIDE_EFFECT)
13645                     {
13646                         obj = gtUnusedValNode(obj);
13647                         impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13648                     }
13649                     obj = nullptr;
13650                 }
13651
13652                 /* Preserve 'small' int types */
13653                 if (lclTyp > TYP_INT)
13654                 {
13655                     lclTyp = genActualType(lclTyp);
13656                 }
13657
13658                 switch (fieldInfo.fieldAccessor)
13659                 {
13660                     case CORINFO_FIELD_INSTANCE:
13661 #ifdef FEATURE_READYTORUN_COMPILER
13662                     case CORINFO_FIELD_INSTANCE_WITH_BASE:
13663 #endif
13664                     {
13665                         obj = impCheckForNullPointer(obj);
13666
13667                         /* Create the data member node */
13668                         op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13669                         DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13670                         if (StructHasOverlappingFields(typeFlags))
13671                         {
13672                             op1->gtField.gtFldMayOverlap = true;
13673                         }
13674
13675 #ifdef FEATURE_READYTORUN_COMPILER
13676                         if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13677                         {
13678                             op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13679                         }
13680 #endif
13681
13682                         op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13683
13684                         if (fgAddrCouldBeNull(obj))
13685                         {
13686                             op1->gtFlags |= GTF_EXCEPT;
13687                         }
13688
13689                         // If gtFldObj is a BYREF then our target is a value class and
13690                         // it could point anywhere, for example a boxed class static int
13691                         if (obj->gtType == TYP_BYREF)
13692                         {
13693                             op1->gtFlags |= GTF_IND_TGTANYWHERE;
13694                         }
13695
13696                         if (compIsForInlining() &&
13697                             impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13698                         {
13699                             impInlineInfo->thisDereferencedFirst = true;
13700                         }
13701                     }
13702                     break;
13703
13704                     case CORINFO_FIELD_STATIC_TLS:
13705 #ifdef _TARGET_X86_
13706                         // Legacy TLS access is implemented as intrinsic on x86 only
13707
13708                         /* Create the data member node */
13709                         op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13710                         op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13711
13712                         break;
13713 #else
13714                         fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13715
13716                         __fallthrough;
13717 #endif
13718
13719                     case CORINFO_FIELD_STATIC_ADDR_HELPER:
13720                     case CORINFO_FIELD_INSTANCE_HELPER:
13721                     case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13722                         op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13723                                                clsHnd, op2);
13724                         goto SPILL_APPEND;
13725
13726                     case CORINFO_FIELD_STATIC_ADDRESS:
13727                     case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13728                     case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13729                     case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13730                     case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13731                         op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13732                                                          lclTyp);
13733                         break;
13734
13735                     default:
13736                         assert(!"Unexpected fieldAccessor");
13737                 }
13738
13739                 // Create the member assignment, unless we have a struct.
13740                 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13741                 bool deferStructAssign = varTypeIsStruct(lclTyp);
13742
13743                 if (!deferStructAssign)
13744                 {
13745                     if (prefixFlags & PREFIX_VOLATILE)
13746                     {
13747                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13748                         op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
13749                         op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13750                         op1->gtFlags |= GTF_IND_VOLATILE;
13751                     }
13752                     if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13753                     {
13754                         assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13755                         op1->gtFlags |= GTF_IND_UNALIGNED;
13756                     }
13757
13758                     /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
13759                        trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during
13760                        importation and reads from the union as if it were a long during code generation. Though this
13761                        can potentially read garbage, one can get lucky to have this working correctly.
13762
13763                        This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
13764                        /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
13765                        dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
13766                        it works correctly always.
13767
13768                        Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
13769                        for V4.0.
13770                     */
13771                     CLANG_FORMAT_COMMENT_ANCHOR;
13772
13773 #ifndef _TARGET_64BIT_
13774                     // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
13775                     // generated for ARM as well as x86, so the following IR will be accepted:
13776                     //     *  STMT      void
13777                     //         |  /--*  CNS_INT   int    2
13778                     //         \--*  ASG       long
13779                     //            \--*  CLS_VAR   long
13780
13781                     if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13782                         varTypeIsLong(op1->TypeGet()))
13783                     {
13784                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13785                     }
13786 #endif
13787
13788 #ifdef _TARGET_64BIT_
13789                     // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13790                     if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13791                     {
13792                         op2->gtType = TYP_I_IMPL;
13793                     }
13794                     else
13795                     {
13796                         // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13797                         //
13798                         if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13799                         {
13800                             op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13801                         }
13802                         // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13803                         //
13804                         if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13805                         {
13806                             op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13807                         }
13808                     }
13809 #endif
13810
13811 #if !FEATURE_X87_DOUBLES
13812                     // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13813                     // We insert a cast to the dest 'op1' type
13814                     //
13815                     if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13816                         varTypeIsFloating(op2->gtType))
13817                     {
13818                         op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13819                     }
13820 #endif // !FEATURE_X87_DOUBLES
13821
13822                     op1 = gtNewAssignNode(op1, op2);
13823
13824                     /* Mark the expression as containing an assignment */
13825
13826                     op1->gtFlags |= GTF_ASG;
13827                 }
13828
13829                 /* Check if the class needs explicit initialization */
13830
13831                 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13832                 {
13833                     GenTreePtr helperNode = impInitClass(&resolvedToken);
13834                     if (compDonotInline())
13835                     {
13836                         return;
13837                     }
13838                     if (helperNode != nullptr)
13839                     {
13840                         op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13841                     }
13842                 }
13843
13844                 /* stfld can interfere with value classes (consider the sequence
13845                    ldloc, ldloca, ..., stfld, stloc).  We will be conservative and
13846                    spill all value class references from the stack. */
13847
13848                 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13849                 {
13850                     assert(tiObj);
13851
13852                     if (impIsValueType(tiObj))
13853                     {
13854                         impSpillEvalStack();
13855                     }
13856                     else
13857                     {
13858                         impSpillValueClasses();
13859                     }
13860                 }
13861
13862                 /* Spill any refs to the same member from the stack */
13863
13864                 impSpillLclRefs((ssize_t)resolvedToken.hField);
13865
13866                 /* stsfld also interferes with indirect accesses (for aliased
13867                    statics) and calls. But don't need to spill other statics
13868                    as we have explicitly spilled this particular static field. */
13869
13870                 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13871
13872                 if (deferStructAssign)
13873                 {
13874                     op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13875                 }
13876             }
13877                 goto APPEND;
13878
13879             case CEE_NEWARR:
13880             {
13881
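                      // newarr: resolve the element type token, pop the element count, and allocate via the
                      // array 'new' helper (or the ReadyToRun helper when available); the resulting TYP_REF
                      // is pushed on the stack.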
13882                 /* Get the class type index operand */
13883
13884                 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13885
13886                 JITDUMP(" %08X", resolvedToken.token);
13887
13888                 if (!opts.IsReadyToRun())
13889                 {
13890                     // Need to restore array classes before creating array objects on the heap
13891                     op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13892                     if (op1 == nullptr)
13893                     { // compDonotInline()
13894                         return;
13895                     }
13896                 }
13897
13898                 if (tiVerificationNeeded)
13899                 {
13900                     // As per ECMA 'numElems' specified can be either int32 or native int.
13901                     Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13902
13903                     CORINFO_CLASS_HANDLE elemTypeHnd;
13904                     info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13905                     Verify(elemTypeHnd == nullptr ||
13906                                !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13907                            "array of byref-like type");
13908                 }
13909
13910                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13911
13912                 accessAllowedResult =
13913                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13914                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13915
13916                 /* Form the arglist: array class handle, size */
13917                 op2 = impPopStack().val;
13918                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13919
13920 #ifdef FEATURE_READYTORUN_COMPILER
13921                 if (opts.IsReadyToRun())
13922                 {
13923                     op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13924                                                     gtNewArgList(op2));
13925                     usingReadyToRunHelper = (op1 != nullptr);
13926
13927                     if (!usingReadyToRunHelper)
13928                     {
13929                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13930                         // and the newarr call with a single call to a dynamic R2R cell that will:
13931                         //      1) Load the context
13932                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13933                         //      3) Allocate the new array
13934                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13935
13936                         // Need to restore array classes before creating array objects on the heap
13937                         op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13938                         if (op1 == nullptr)
13939                         { // compDonotInline()
13940                             return;
13941                         }
13942                     }
13943                 }
13944
13945                 if (!usingReadyToRunHelper)
13946 #endif
13947                 {
13948                     args = gtNewArgList(op1, op2);
13949
13950                     /* Create a call to 'new' */
13951
13952                     // Note that this only works for shared generic code because the same helper is used for all
13953                     // reference array types
13954                     op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
13955                 }
13956
13957                 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13958
13959                 /* Remember that this basic block contains a 'new' of a single-dimensional (SD) array */
13960
13961                 block->bbFlags |= BBF_HAS_NEWARRAY;
13962                 optMethodFlags |= OMF_HAS_NEWARRAY;
13963
13964                 /* Push the result of the call on the stack */
13965
13966                 impPushOnStack(op1, tiRetVal);
13967
13968                 callTyp = TYP_REF;
13969             }
13970             break;
13971
13972             case CEE_LOCALLOC:
13973                 assert(!compIsForInlining());
13974
13975                 if (tiVerificationNeeded)
13976                 {
13977                     Verify(false, "bad opcode");
13978                 }
13979
13980                 // We don't allow locallocs inside handlers
13981                 if (block->hasHndIndex())
13982                 {
13983                     BADCODE("Localloc can't be inside handler");
13984                 }
13985
13986                 /* The FP register may not be back to the original value at the end
13987                    of the method, even if the frame size is 0, as localloc may
13988                    have modified it. So we will HAVE to reset it */
13989
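                  // localloc makes the frame size dynamic, so force a GS security cookie so that overruns
                  // of the dynamically allocated region can be detected.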
13990                 compLocallocUsed = true;
13991                 setNeedsGSSecurityCookie();
13992
13993                 // Get the size to allocate
13994
13995                 op2 = impPopStack().val;
13996                 assertImp(genActualTypeIsIntOrI(op2->gtType));
13997
13998                 if (verCurrentState.esStackDepth != 0)
13999                 {
14000                     BADCODE("Localloc can only be used when the stack is empty");
14001                 }
14002
14003                 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14004
14005                 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14006
14007                 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14008
14009                 impPushOnStack(op1, tiRetVal);
14010                 break;
14011
14012             case CEE_ISINST:
14013
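                  // isinst: the type check is expanded either through the ReadyToRun isinstanceof helper
                  // or via impCastClassOrIsInstToTree, which builds the appropriate type-check tree for
                  // the resolved class.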
14014                 /* Get the type token */
14015                 assertImp(sz == sizeof(unsigned));
14016
14017                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14018
14019                 JITDUMP(" %08X", resolvedToken.token);
14020
14021                 if (!opts.IsReadyToRun())
14022                 {
14023                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14024                     if (op2 == nullptr)
14025                     { // compDonotInline()
14026                         return;
14027                     }
14028                 }
14029
14030                 if (tiVerificationNeeded)
14031                 {
14032                     Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14033                     // Even if this is a value class, we know it is boxed.
14034                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14035                 }
14036                 accessAllowedResult =
14037                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14038                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14039
14040                 op1 = impPopStack().val;
14041
14042 #ifdef FEATURE_READYTORUN_COMPILER
14043                 if (opts.IsReadyToRun())
14044                 {
14045                     GenTreeCall* opLookup =
14046                         impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14047                                                   gtNewArgList(op1));
14048                     usingReadyToRunHelper = (opLookup != nullptr);
14049                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
14050
14051                     if (!usingReadyToRunHelper)
14052                     {
14053                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14054                         // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14055                         //      1) Load the context
14056                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14057                         //      3) Perform the 'is instance' check on the input object
14058                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14059
14060                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14061                         if (op2 == nullptr)
14062                         { // compDonotInline()
14063                             return;
14064                         }
14065                     }
14066                 }
14067
14068                 if (!usingReadyToRunHelper)
14069 #endif
14070                 {
14071                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
14072                 }
14073                 if (compDonotInline())
14074                 {
14075                     return;
14076                 }
14077
14078                 impPushOnStack(op1, tiRetVal);
14079
14080                 break;
14081
14082             case CEE_REFANYVAL:
14083
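                  // refanyval: pops a TypedReference and pushes a byref to its value; the
                  // CORINFO_HELP_GETREFANY helper performs the type check against the supplied class handle.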
14084                 // get the class handle and make a ICON node out of it
14085
14086                 _impResolveToken(CORINFO_TOKENKIND_Class);
14087
14088                 JITDUMP(" %08X", resolvedToken.token);
14089
14090                 op2 = impTokenToHandle(&resolvedToken);
14091                 if (op2 == nullptr)
14092                 { // compDonotInline()
14093                     return;
14094                 }
14095
14096                 if (tiVerificationNeeded)
14097                 {
14098                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14099                            "need refany");
14100                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
14101                 }
14102
14103                 op1 = impPopStack().val;
14104                 // make certain it is normalized;
14105                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14106
14107                 // Call helper GETREFANY(classHandle, op1);
14108                 args = gtNewArgList(op2, op1);
14109                 op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
14110
14111                 impPushOnStack(op1, tiRetVal);
14112                 break;
14113
14114             case CEE_REFANYTYPE:
14115
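                  // refanytype: extract the type handle stored in the TypedReference and convert it to a
                  // RuntimeTypeHandle via the helper call below.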
14116                 if (tiVerificationNeeded)
14117                 {
14118                     Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14119                            "need refany");
14120                 }
14121
14122                 op1 = impPopStack().val;
14123
14124                 // make certain it is normalized;
14125                 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14126
14127                 if (op1->gtOper == GT_OBJ)
14128                 {
14129                     // Get the address of the refany
14130                     op1 = op1->gtOp.gtOp1;
14131
14132                     // Fetch the type from the correct slot
14133                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14134                                         gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
14135                     op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
14136                 }
14137                 else
14138                 {
14139                     assertImp(op1->gtOper == GT_MKREFANY);
14140
14141                     // The pointer may have side-effects
14142                     if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
14143                     {
14144                         impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14145 #ifdef DEBUG
14146                         impNoteLastILoffs();
14147 #endif
14148                     }
14149
14150                     // We already have the class handle
14151                     op1 = op1->gtOp.gtOp2;
14152                 }
14153
14154                 // convert native TypeHandle to RuntimeTypeHandle
14155                 {
14156                     GenTreeArgList* helperArgs = gtNewArgList(op1);
14157
14158                     op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, helperArgs);
14159
14160                     // The handle struct is returned in register
14161                     op1->gtCall.gtReturnType = TYP_REF;
14162
14163                     tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
14164                 }
14165
14166                 impPushOnStack(op1, tiRetVal);
14167                 break;
14168
14169             case CEE_LDTOKEN:
14170             {
14171                 /* Get the Class index */
14172                 assertImp(sz == sizeof(unsigned));
14173                 lastLoadToken = codeAddr;
14174                 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
14175
14176                 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
14177
14178                 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14179                 if (op1 == nullptr)
14180                 { // compDonotInline()
14181                     return;
14182                 }
14183
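                  // Choose the conversion helper based on the kind of token loaded: type handles use the
                  // TYPEHANDLE_TO_RUNTIMETYPE helper, while method and field tokens use the corresponding
                  // *_TO_STUBRUNTIME* helpers.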
14184                 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
14185                 assert(resolvedToken.hClass != nullptr);
14186
14187                 if (resolvedToken.hMethod != nullptr)
14188                 {
14189                     helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
14190                 }
14191                 else if (resolvedToken.hField != nullptr)
14192                 {
14193                     helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
14194                 }
14195
14196                 GenTreeArgList* helperArgs = gtNewArgList(op1);
14197
14198                 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
14199
14200                 // The handle struct is returned in register
14201                 op1->gtCall.gtReturnType = TYP_REF;
14202
14203                 tiRetVal = verMakeTypeInfo(tokenType);
14204                 impPushOnStack(op1, tiRetVal);
14205             }
14206             break;
14207
14208             case CEE_UNBOX:
14209             case CEE_UNBOX_ANY:
14210             {
14211                 /* Get the Class index */
14212                 assertImp(sz == sizeof(unsigned));
14213
14214                 _impResolveToken(CORINFO_TOKENKIND_Class);
14215
14216                 JITDUMP(" %08X", resolvedToken.token);
14217
14218                 BOOL runtimeLookup;
14219                 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
14220                 if (op2 == nullptr)
14221                 {
14222                     assert(compDonotInline());
14223                     return;
14224                 }
14225
14226                 // Run this always so we can get access exceptions even with SkipVerification.
14227                 accessAllowedResult =
14228                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14229                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14230
14231                 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
14232                 {
14233                     if (tiVerificationNeeded)
14234                     {
14235                         typeInfo tiUnbox = impStackTop().seTypeInfo;
14236                         Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
14237                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14238                         tiRetVal.NormaliseForStack();
14239                     }
14240                     JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
14241                     op1 = impPopStack().val;
14242                     goto CASTCLASS;
14243                 }
14244
14245                 /* Pop the object and create the unbox helper call */
14246                 /* You might think that for UNBOX_ANY we need to push a different */
14247                 /* (non-byref) type, but here we're making the tiRetVal that is used */
14248                 /* for the intermediate pointer which we then transfer onto the OBJ */
14249                 /* instruction.  OBJ then creates the appropriate tiRetVal. */
14250                 if (tiVerificationNeeded)
14251                 {
14252                     typeInfo tiUnbox = impStackTop().seTypeInfo;
14253                     Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
14254
14255                     tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14256                     Verify(tiRetVal.IsValueClass(), "not value class");
14257                     tiRetVal.MakeByRef();
14258
14259                     // We always come from an objref, so this is safe byref
14260                     tiRetVal.SetIsPermanentHomeByRef();
14261                     tiRetVal.SetIsReadonlyByRef();
14262                 }
14263
14264                 op1 = impPopStack().val;
14265                 assertImp(op1->gtType == TYP_REF);
14266
14267                 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
14268                 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
14269
14270                 // Check legality and profitability of inline expansion for unboxing.
14271                 const bool canExpandInline    = (helper == CORINFO_HELP_UNBOX);
14272                 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
14273
14274                 if (canExpandInline && shouldExpandInline)
14275                 {
14276                     JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
14277                     // we are doing normal unboxing
14278                     // inline the common case of the unbox helper
14279                     // UNBOX(exp) morphs into
14280                     // clone = pop(exp);
14281                     // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14282                     // push(clone + sizeof(void*))
14283                     //
14284                     GenTreePtr cloneOperand;
14285                     op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14286                                        nullptr DEBUGARG("inline UNBOX clone1"));
14287                     op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14288
14289                     GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14290
14291                     op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14292                                        nullptr DEBUGARG("inline UNBOX clone2"));
14293                     op2 = impTokenToHandle(&resolvedToken);
14294                     if (op2 == nullptr)
14295                     { // compDonotInline()
14296                         return;
14297                     }
14298                     args = gtNewArgList(op2, op1);
14299                     op1  = gtNewHelperCallNode(helper, TYP_VOID, args);
14300
14301                     op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14302                     op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14303                     condBox->gtFlags |= GTF_RELOP_QMARK;
14304
14305                     // QMARK nodes cannot reside on the evaluation stack. Because there
14306                     // may be other trees on the evaluation stack that side-effect the
14307                     // sources of the UNBOX operation we must spill the stack.
14308
14309                     impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14310
14311                     // Create the address-expression to reference past the object header
14312                     // to the beginning of the value-type. Today this means adjusting
14313                     // past the base of the object's vtable field, which is pointer sized.
14314
14315                     op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
14316                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14317                 }
14318                 else
14319                 {
14320                     JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
14321                             canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
14322
14323                     // Don't optimize, just call the helper and be done with it
14324                     args = gtNewArgList(op2, op1);
14325                     op1 =
14326                         gtNewHelperCallNode(helper,
14327                                             (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
14328                 }
14329
14330                 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
14331                        helper == CORINFO_HELP_UNBOX_NULLABLE &&
14332                            varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
14333                        );
14334
14335                 /*
14336                   ----------------------------------------------------------------------
14337                   | \ helper  |                         |                              |
14338                   |   \       |                         |                              |
14339                   |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14340                   |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
14341                   | opcode  \ |                         |                              |
14342                   |---------------------------------------------------------------------
14343                   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14344                   |           |                         | push the BYREF to this local |
14345                   |---------------------------------------------------------------------
14346                   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
14347                   |           | the BYREF               | For Linux when the           |
14348                   |           |                         |  struct is returned in two   |
14349                   |           |                         |  registers create a temp     |
14350                   |           |                         |  whose address is passed to  |
14351                   |           |                         |  the unbox_nullable helper.  |
14352                   |---------------------------------------------------------------------
14353                 */
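                      // A sketch of the interesting cells above (illustrative IL, not taken from the original comment):
                      //     unbox      valuetype [mscorlib]System.Nullable`1<int32>
                      //         -> CORINFO_HELP_UNBOX_NULLABLE returns a STRUCT; it is spilled to a temp and the
                      //            temp's address (a BYREF) is pushed.
                      //     unbox.any  valuetype [mscorlib]System.Nullable`1<int32>
                      //         -> the STRUCT itself is pushed (or, for multi-reg returns, spilled to a temp whose
                      //            address feeds a GT_OBJ reload).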
14354
14355                 if (opcode == CEE_UNBOX)
14356                 {
14357                     if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14358                     {
14359                         // Unbox nullable helper returns a struct type.
14360                         // We need to spill it to a temp so that we can take its address.
14361                         // Here we need an unsafe value cls check, since the address of the struct is taken to be
14362                         // used further along and is potentially exploitable.
14363
14364                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14365                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14366
14367                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14368                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14369                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14370
14371                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14372                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14373                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14374                     }
14375
14376                     assert(op1->gtType == TYP_BYREF);
14377                     assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14378                 }
14379                 else
14380                 {
14381                     assert(opcode == CEE_UNBOX_ANY);
14382
14383                     if (helper == CORINFO_HELP_UNBOX)
14384                     {
14385                         // Normal unbox helper returns a TYP_BYREF.
14386                         impPushOnStack(op1, tiRetVal);
14387                         oper = GT_OBJ;
14388                         goto OBJ;
14389                     }
14390
14391                     assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14392
14393 #if FEATURE_MULTIREG_RET
14394
14395                     if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14396                     {
14397                         // Unbox nullable helper returns a TYP_STRUCT.
14398                         // For the multi-reg case we need to spill it to a temp so that
14399                         // we can pass the address to the unbox_nullable jit helper.
14400
14401                         unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14402                         lvaTable[tmp].lvIsMultiRegArg = true;
14403                         lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14404
14405                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14406                         op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14407                         assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14408
14409                         op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14410                         op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14411                         op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14412
14413                         // In this case the return value of the unbox helper is TYP_BYREF.
14414                         // Make sure the right type is placed on the operand type stack.
14415                         impPushOnStack(op1, tiRetVal);
14416
14417                         // Load the struct.
14418                         oper = GT_OBJ;
14419
14420                         assert(op1->gtType == TYP_BYREF);
14421                         assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14422
14423                         goto OBJ;
14424                     }
14425                     else
14426
14427 #endif // FEATURE_MULTIREG_RET
14428
14429                     {
14430                         // If the struct is not returnable in registers, it is materialized in the RetBuf.
14431                         assert(op1->gtType == TYP_STRUCT);
14432                         tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14433                         assert(tiRetVal.IsValueClass());
14434                     }
14435                 }
14436
14437                 impPushOnStack(op1, tiRetVal);
14438             }
14439             break;
14440
14441             case CEE_BOX:
14442             {
14443                 /* Get the Class index */
14444                 assertImp(sz == sizeof(unsigned));
14445
14446                 _impResolveToken(CORINFO_TOKENKIND_Box);
14447
14448                 JITDUMP(" %08X", resolvedToken.token);
14449
14450                 if (tiVerificationNeeded)
14451                 {
14452                     typeInfo tiActual = impStackTop().seTypeInfo;
14453                     typeInfo tiBox    = verMakeTypeInfo(resolvedToken.hClass);
14454
14455                     Verify(verIsBoxable(tiBox), "boxable type expected");
14456
14457                     // check the class constraints of the boxed type in case we are boxing an uninitialized value
14458                     Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14459                            "boxed type has unsatisfied class constraints");
14460
14461                     Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14462
14463                     // Observation: the following code introduces a boxed value class on the stack, but,
14464                     // according to the ECMA spec, one would simply expect: tiRetVal =
14465                     // typeInfo(TI_REF,impGetObjectClass());
14466
14467                     // Push the result back on the stack,
14468                     // even if clsHnd is a value class we want the TI_REF;
14469                     // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14470                     tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14471                 }
14472
14473                 accessAllowedResult =
14474                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14475                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14476
14477                 // Note BOX can be used on things that are not value classes, in which
14478                 // case we get a NOP.  However the verifier's view of the type on the
14479                 // stack changes (in generic code a 'T' becomes a 'boxed T')
14480                 if (!eeIsValueClass(resolvedToken.hClass))
14481                 {
14482                     JITDUMP("\n Importing BOX(refClass) as NOP\n");
14483                     verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14484                     break;
14485                 }
14486
14487                 // Look ahead for unbox.any
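                      // For example (illustrative IL, not from the original comment), a sequence such as
                      //     box       !!T
                      //     unbox.any !!T
                      // is imported as a no-op when both tokens resolve to the same, non-shared class handle:
                      // boxing followed by an immediate unbox of the same type leaves the stack value unchanged.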
14488                 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14489                 {
14490                     DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14491                     if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14492                     {
14493                         CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14494
14495                         impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14496
14497                         if (unboxResolvedToken.hClass == resolvedToken.hClass)
14498                         {
14499                             JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
14500                             // Skip the next unbox.any instruction
14501                             sz += sizeof(mdToken) + 1;
14502                             break;
14503                         }
14504                     }
14505                 }
14506
14507                 impImportAndPushBox(&resolvedToken);
14508                 if (compDonotInline())
14509                 {
14510                     return;
14511                 }
14512             }
14513             break;
14514
14515             case CEE_SIZEOF:
14516
14517                 /* Get the Class index */
14518                 assertImp(sz == sizeof(unsigned));
14519
14520                 _impResolveToken(CORINFO_TOKENKIND_Class);
14521
14522                 JITDUMP(" %08X", resolvedToken.token);
14523
14524                 if (tiVerificationNeeded)
14525                 {
14526                     tiRetVal = typeInfo(TI_INT);
14527                 }
14528
14529                 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14530                 impPushOnStack(op1, tiRetVal);
14531                 break;
14532
14533             case CEE_CASTCLASS:
14534
14535                 /* Get the Class index */
14536
14537                 assertImp(sz == sizeof(unsigned));
14538
14539                 _impResolveToken(CORINFO_TOKENKIND_Casting);
14540
14541                 JITDUMP(" %08X", resolvedToken.token);
14542
14543                 if (!opts.IsReadyToRun())
14544                 {
14545                     op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14546                     if (op2 == nullptr)
14547                     { // compDonotInline()
14548                         return;
14549                     }
14550                 }
14551
14552                 if (tiVerificationNeeded)
14553                 {
14554                     Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14555                     // box it
14556                     tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14557                 }
14558
14559                 accessAllowedResult =
14560                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14561                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14562
14563                 op1 = impPopStack().val;
14564
14565             /* Pop the address and create the 'checked cast' helper call */
14566
14567             // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14568             // and op2 to contain code that creates the type handle corresponding to typeRef
14569             CASTCLASS:
14570
14571 #ifdef FEATURE_READYTORUN_COMPILER
14572                 if (opts.IsReadyToRun())
14573                 {
14574                     GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14575                                                                       TYP_REF, gtNewArgList(op1));
14576                     usingReadyToRunHelper = (opLookup != nullptr);
14577                     op1                   = (usingReadyToRunHelper ? opLookup : op1);
14578
14579                     if (!usingReadyToRunHelper)
14580                     {
14581                         // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14582                         // and the chkcastany call with a single call to a dynamic R2R cell that will:
14583                         //      1) Load the context
14584                         //      2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14585                         //      3) Check the object on the stack for the type-cast
14586                         // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14587
14588                         op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14589                         if (op2 == nullptr)
14590                         { // compDonotInline()
14591                             return;
14592                         }
14593                     }
14594                 }
14595
14596                 if (!usingReadyToRunHelper)
14597 #endif
14598                 {
14599                     op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14600                 }
14601                 if (compDonotInline())
14602                 {
14603                     return;
14604                 }
14605
14606                 /* Push the result back on the stack */
14607                 impPushOnStack(op1, tiRetVal);
14608                 break;
14609
14610             case CEE_THROW:
14611
14612                 if (compIsForInlining())
14613                 {
14614                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14615                     // TODO: Will this be too strict, given that we will inline many basic blocks?
14616                     // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14617
14618                     /* Do we have just the exception on the stack ?*/
14619
14620                     if (verCurrentState.esStackDepth != 1)
14621                     {
14622                         /* if not, just don't inline the method */
14623
14624                         compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14625                         return;
14626                     }
14627                 }
14628
14629                 if (tiVerificationNeeded)
14630                 {
14631                     tiRetVal = impStackTop().seTypeInfo;
14632                     Verify(tiRetVal.IsObjRef(), "object ref expected");
14633                     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14634                     {
14635                         Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14636                     }
14637                 }
14638
14639                 block->bbSetRunRarely(); // any block with a throw is rare
14640                 /* Pop the exception object and create the 'throw' helper call */
14641
14642                 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
14643
14644             EVAL_APPEND:
14645                 if (verCurrentState.esStackDepth > 0)
14646                 {
14647                     impEvalSideEffects();
14648                 }
14649
14650                 assert(verCurrentState.esStackDepth == 0);
14651
14652                 goto APPEND;
14653
14654             case CEE_RETHROW:
14655
14656                 assert(!compIsForInlining());
14657
14658                 if (info.compXcptnsCount == 0)
14659                 {
14660                     BADCODE("rethrow outside catch");
14661                 }
14662
14663                 if (tiVerificationNeeded)
14664                 {
14665                     Verify(block->hasHndIndex(), "rethrow outside catch");
14666                     if (block->hasHndIndex())
14667                     {
14668                         EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14669                         Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14670                         if (HBtab->HasFilter())
14671                         {
14672                             // we better be in the handler clause part, not the filter part
14673                             Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14674                                    "rethrow in filter");
14675                         }
14676                     }
14677                 }
14678
14679                 /* Create the 'rethrow' helper call */
14680
14681                 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
14682
14683                 goto EVAL_APPEND;
14684
14685             case CEE_INITOBJ:
14686
14687                 assertImp(sz == sizeof(unsigned));
14688
14689                 _impResolveToken(CORINFO_TOKENKIND_Class);
14690
14691                 JITDUMP(" %08X", resolvedToken.token);
14692
14693                 if (tiVerificationNeeded)
14694                 {
14695                     typeInfo tiTo    = impStackTop().seTypeInfo;
14696                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14697
14698                     Verify(tiTo.IsByRef(), "byref expected");
14699                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14700
14701                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14702                            "type operand incompatible with type of address");
14703                 }
14704
14705                 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14706                 op2  = gtNewIconNode(0);                                     // Value
14707                 op1  = impPopStack().val;                                    // Dest
14708                 op1  = gtNewBlockVal(op1, size);
14709                 op1  = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14710                 goto SPILL_APPEND;
14711
14712             case CEE_INITBLK:
14713
14714                 if (tiVerificationNeeded)
14715                 {
14716                     Verify(false, "bad opcode");
14717                 }
14718
14719                 op3 = impPopStack().val; // Size
14720                 op2 = impPopStack().val; // Value
14721                 op1 = impPopStack().val; // Dest
14722
14723                 if (op3->IsCnsIntOrI())
14724                 {
14725                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14726                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14727                 }
14728                 else
14729                 {
14730                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14731                     size = 0;
14732                 }
14733                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14734
14735                 goto SPILL_APPEND;
14736
14737             case CEE_CPBLK:
14738
14739                 if (tiVerificationNeeded)
14740                 {
14741                     Verify(false, "bad opcode");
14742                 }
14743                 op3 = impPopStack().val; // Size
14744                 op2 = impPopStack().val; // Src
14745                 op1 = impPopStack().val; // Dest
14746
14747                 if (op3->IsCnsIntOrI())
14748                 {
14749                     size = (unsigned)op3->AsIntConCommon()->IconValue();
14750                     op1  = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14751                 }
14752                 else
14753                 {
14754                     op1  = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14755                     size = 0;
14756                 }
14757                 if (op2->OperGet() == GT_ADDR)
14758                 {
14759                     op2 = op2->gtOp.gtOp1;
14760                 }
14761                 else
14762                 {
14763                     op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14764                 }
14765
14766                 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14767                 goto SPILL_APPEND;
14768
14769             case CEE_CPOBJ:
14770
14771                 assertImp(sz == sizeof(unsigned));
14772
14773                 _impResolveToken(CORINFO_TOKENKIND_Class);
14774
14775                 JITDUMP(" %08X", resolvedToken.token);
14776
14777                 if (tiVerificationNeeded)
14778                 {
14779                     typeInfo tiFrom  = impStackTop().seTypeInfo;
14780                     typeInfo tiTo    = impStackTop(1).seTypeInfo;
14781                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14782
14783                     Verify(tiFrom.IsByRef(), "expected byref source");
14784                     Verify(tiTo.IsByRef(), "expected byref destination");
14785
14786                     Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14787                            "type of source address incompatible with type operand");
14788                     Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14789                     Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14790                            "type operand incompatible with type of destination address");
14791                 }
14792
14793                 if (!eeIsValueClass(resolvedToken.hClass))
14794                 {
14795                     op1 = impPopStack().val; // address to load from
14796
14797                     impBashVarAddrsToI(op1);
14798
14799                     assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14800
14801                     op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14802                     op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14803
14804                     impPushOnStack(op1, typeInfo());
14805                     opcode = CEE_STIND_REF;
14806                     lclTyp = TYP_REF;
14807                     goto STIND_POST_VERIFY;
14808                 }
14809
14810                 op2 = impPopStack().val; // Src
14811                 op1 = impPopStack().val; // Dest
14812                 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14813                 goto SPILL_APPEND;
14814
14815             case CEE_STOBJ:
14816             {
14817                 assertImp(sz == sizeof(unsigned));
14818
14819                 _impResolveToken(CORINFO_TOKENKIND_Class);
14820
14821                 JITDUMP(" %08X", resolvedToken.token);
14822
14823                 if (eeIsValueClass(resolvedToken.hClass))
14824                 {
14825                     lclTyp = TYP_STRUCT;
14826                 }
14827                 else
14828                 {
14829                     lclTyp = TYP_REF;
14830                 }
14831
14832                 if (tiVerificationNeeded)
14833                 {
14834
14835                     typeInfo tiPtr = impStackTop(1).seTypeInfo;
14836
14837                     // Make sure we have a good looking byref
14838                     Verify(tiPtr.IsByRef(), "pointer not byref");
14839                     Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14840                     if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14841                     {
14842                         compUnsafeCastUsed = true;
14843                     }
14844
14845                     typeInfo ptrVal = DereferenceByRef(tiPtr);
14846                     typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14847
14848                     if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14849                     {
14850                         Verify(false, "type of value incompatible with type operand");
14851                         compUnsafeCastUsed = true;
14852                     }
14853
14854                     if (!tiCompatibleWith(argVal, ptrVal, false))
14855                     {
14856                         Verify(false, "type operand incompatible with type of address");
14857                         compUnsafeCastUsed = true;
14858                     }
14859                 }
14860                 else
14861                 {
14862                     compUnsafeCastUsed = true;
14863                 }
14864
14865                 if (lclTyp == TYP_REF)
14866                 {
14867                     opcode = CEE_STIND_REF;
14868                     goto STIND_POST_VERIFY;
14869                 }
14870
14871                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14872                 if (impIsPrimitive(jitTyp))
14873                 {
14874                     lclTyp = JITtype2varType(jitTyp);
14875                     goto STIND_POST_VERIFY;
14876                 }
14877
14878                 op2 = impPopStack().val; // Value
14879                 op1 = impPopStack().val; // Ptr
14880
14881                 assertImp(varTypeIsStruct(op2));
14882
14883                 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14884
14885                 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
14886                 {
14887                     op1->gtFlags |= GTF_BLK_UNALIGNED;
14888                 }
14889                 goto SPILL_APPEND;
14890             }
14891
14892             case CEE_MKREFANY:
14893
14894                 assert(!compIsForInlining());
14895
14896                 // Being lazy here. Refanys are tricky in terms of gc tracking.
14897                 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14898
14899                 JITDUMP("disabling struct promotion because of mkrefany\n");
14900                 fgNoStructPromotion = true;
14901
14902                 oper = GT_MKREFANY;
14903                 assertImp(sz == sizeof(unsigned));
14904
14905                 _impResolveToken(CORINFO_TOKENKIND_Class);
14906
14907                 JITDUMP(" %08X", resolvedToken.token);
14908
14909                 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14910                 if (op2 == nullptr)
14911                 { // compDonotInline()
14912                     return;
14913                 }
14914
14915                 if (tiVerificationNeeded)
14916                 {
14917                     typeInfo tiPtr   = impStackTop().seTypeInfo;
14918                     typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14919
14920                     Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14921                     Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14922                     Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14923                 }
14924
14925                 accessAllowedResult =
14926                     info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14927                 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14928
14929                 op1 = impPopStack().val;
14930
14931                 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14932                 // But JIT32 allowed it, so we continue to allow it.
14933                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14934
14935                 // MKREFANY returns a struct.  op2 is the class token.
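                      // For illustration only: C#'s undocumented "__makeref(x)" keyword emits "mkrefany <typetok>";
                      // the GT_MKREFANY node built below materializes the TypedReference pair (byref to x, type handle).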
14936                 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14937
14938                 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14939                 break;
14940
14941             case CEE_LDOBJ:
14942             {
14943                 oper = GT_OBJ;
14944                 assertImp(sz == sizeof(unsigned));
14945
14946                 _impResolveToken(CORINFO_TOKENKIND_Class);
14947
14948                 JITDUMP(" %08X", resolvedToken.token);
14949
14950             OBJ:
14951
14952                 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14953
14954                 if (tiVerificationNeeded)
14955                 {
14956                     typeInfo tiPtr = impStackTop().seTypeInfo;
14957
14958                     // Make sure we have a byref
14959                     if (!tiPtr.IsByRef())
14960                     {
14961                         Verify(false, "pointer not byref");
14962                         compUnsafeCastUsed = true;
14963                     }
14964                     typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14965
14966                     if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14967                     {
14968                         Verify(false, "type of address incompatible with type operand");
14969                         compUnsafeCastUsed = true;
14970                     }
14971                     tiRetVal.NormaliseForStack();
14972                 }
14973                 else
14974                 {
14975                     compUnsafeCastUsed = true;
14976                 }
14977
14978                 if (eeIsValueClass(resolvedToken.hClass))
14979                 {
14980                     lclTyp = TYP_STRUCT;
14981                 }
14982                 else
14983                 {
14984                     lclTyp = TYP_REF;
14985                     opcode = CEE_LDIND_REF;
14986                     goto LDIND_POST_VERIFY;
14987                 }
14988
14989                 op1 = impPopStack().val;
14990
14991                 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14992
14993                 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14994                 if (impIsPrimitive(jitTyp))
14995                 {
14996                     op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14997
14998                     // Could point anywhere, for example a boxed class static int
14999                     op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15000                     assertImp(varTypeIsArithmetic(op1->gtType));
15001                 }
15002                 else
15003                 {
15004                     // OBJ returns a struct
15005                     // and an inline argument which is the class token of the loaded obj
15006                     op1 = gtNewObjNode(resolvedToken.hClass, op1);
15007                 }
15008                 op1->gtFlags |= GTF_EXCEPT;
15009
15010                 if (prefixFlags & PREFIX_UNALIGNED)
15011                 {
15012                     op1->gtFlags |= GTF_IND_UNALIGNED;
15013                 }
15014
15015                 impPushOnStack(op1, tiRetVal);
15016                 break;
15017             }
15018
15019             case CEE_LDLEN:
15020                 if (tiVerificationNeeded)
15021                 {
15022                     typeInfo tiArray = impStackTop().seTypeInfo;
15023                     Verify(verIsSDArray(tiArray), "bad array");
15024                     tiRetVal = typeInfo(TI_INT);
15025                 }
15026
15027                 op1 = impPopStack().val;
15028                 if (!opts.MinOpts() && !opts.compDbgCode)
15029                 {
15030                     /* Use GT_ARR_LENGTH operator so rng check opts see this */
15031                     GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
15032
15033                     /* Mark the block as containing a length expression */
15034
15035                     if (op1->gtOper == GT_LCL_VAR)
15036                     {
15037                         block->bbFlags |= BBF_HAS_IDX_LEN;
15038                     }
15039
15040                     op1 = arrLen;
15041                 }
15042                 else
15043                 {
15044                     /* Create the expression "*(array_addr + ArrLenOffs)" */
15045                     op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15046                                         gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
15047                     op1 = gtNewIndir(TYP_INT, op1);
15048                     op1->gtFlags |= GTF_IND_ARR_LEN;
15049                 }
15050
15051                 /* Push the result back on the stack */
15052                 impPushOnStack(op1, tiRetVal);
15053                 break;
15054
15055             case CEE_BREAK:
15056                 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
15057                 goto SPILL_APPEND;
15058
15059             case CEE_NOP:
15060                 if (opts.compDbgCode)
15061                 {
15062                     op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
15063                     goto SPILL_APPEND;
15064                 }
15065                 break;
15066
15067             /******************************** NYI *******************************/
15068
15069             case 0xCC:
15070                 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
15071
15072             case CEE_ILLEGAL:
15073             case CEE_MACRO_END:
15074
15075             default:
15076                 BADCODE3("unknown opcode", ": %02X", (int)opcode);
15077         }
15078
15079         codeAddr += sz;
15080         prevOpcode = opcode;
15081
15082         prefixFlags = 0;
15083     }
15084
15085     return;
15086 #undef _impResolveToken
15087 }
15088 #ifdef _PREFAST_
15089 #pragma warning(pop)
15090 #endif
15091
15092 // Push a local/argument tree on the operand stack
15093 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
15094 {
15095     tiRetVal.NormaliseForStack();
15096
15097     if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
15098     {
15099         tiRetVal.SetUninitialisedObjRef();
15100     }
15101
15102     impPushOnStack(op, tiRetVal);
15103 }
15104
15105 // Load a local/argument on the operand stack
15106 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
15107 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
15108 {
15109     var_types lclTyp;
15110
15111     if (lvaTable[lclNum].lvNormalizeOnLoad())
15112     {
15113         lclTyp = lvaGetRealType(lclNum);
15114     }
15115     else
15116     {
15117         lclTyp = lvaGetActualType(lclNum);
15118     }
15119
15120     impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
15121 }
15122
15123 // Load an argument on the operand stack
15124 // Shared by the various CEE_LDARG opcodes
15125 // ilArgNum is the argument index as specified in IL.
15126 // It will be mapped to the correct lvaTable index
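// For example (an assumption about a typical layout, not stated in the original comment): in an instance
// method that also has a hidden return-buffer parameter, IL argument 1 (the first declared argument) can
// map to lvaTable index 2, because compMapILargNum accounts for 'this' and the inserted ret-buf param.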
15127 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
15128 {
15129     Verify(ilArgNum < info.compILargsCount, "bad arg num");
15130
15131     if (compIsForInlining())
15132     {
15133         if (ilArgNum >= info.compArgsCount)
15134         {
15135             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
15136             return;
15137         }
15138
15139         impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
15140                    impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
15141     }
15142     else
15143     {
15144         if (ilArgNum >= info.compArgsCount)
15145         {
15146             BADCODE("Bad IL");
15147         }
15148
15149         unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
15150
15151         if (lclNum == info.compThisArg)
15152         {
15153             lclNum = lvaArg0Var;
15154         }
15155
15156         impLoadVar(lclNum, offset);
15157     }
15158 }
15159
15160 // Load a local on the operand stack
15161 // Shared by the various CEE_LDLOC opcodes
15162 // ilLclNum is the local index as specified in IL.
15163 // It will be mapped to the correct lvaTable index
15164 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
15165 {
15166     if (tiVerificationNeeded)
15167     {
15168         Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
15169         Verify(info.compInitMem, "initLocals not set");
15170     }
15171
15172     if (compIsForInlining())
15173     {
15174         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15175         {
15176             compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
15177             return;
15178         }
15179
15180         // Get the local type
15181         var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
15182
15183         typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
15184
15185         /* Have we allocated a temp for this local? */
15186
15187         unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
15188
15189         // All vars of inlined methods should be !lvNormalizeOnLoad()
15190
15191         assert(!lvaTable[lclNum].lvNormalizeOnLoad());
15192         lclTyp = genActualType(lclTyp);
15193
15194         impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
15195     }
15196     else
15197     {
15198         if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15199         {
15200             BADCODE("Bad IL");
15201         }
15202
15203         unsigned lclNum = info.compArgsCount + ilLclNum;
15204
15205         impLoadVar(lclNum, offset);
15206     }
15207 }
15208
15209 #ifdef _TARGET_ARM_
15210 /**************************************************************************************
15211  *
15212  *  When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
15213  *  dst struct, because struct promotion will turn it into a float/double variable while
15214  *  the rhs will be an int/long variable. We don't generate code for an assignment of an int into
15215  *  a float, but there is nothing that would prevent such a tree from being created. The tree
15216  *  would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
15217  *
15218  *  tmpNum - the lcl dst variable num that is a struct.
15219  *  src    - the src tree assigned to the dest that is a struct/int (when varargs call.)
15220  *  hClass - the type handle for the struct variable.
15221  *
15222  *  TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
15223  *        however, we could do a codegen of transferring from int to float registers
15224  *        (transfer, not a cast.)
15225  *
15226  */
15227 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
15228 {
15229     if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15230     {
15231         int       hfaSlots = GetHfaCount(hClass);
15232         var_types hfaType  = GetHfaType(hClass);
15233
15234         // If we have varargs, then at the importer we morph the method's return type to be "int" irrespective of
15235         // its original type (struct/float), because the ABI specifies that the return comes back in integer registers.
15236         // We don't want struct promotion to replace an expression like this:
15237         //   lclFld_int = callvar_int() into lclFld_float = callvar_int();
15238         // This means an int is getting assigned to a float without a cast. Prevent the promotion.
15239         if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15240             (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15241         {
15242             // Make sure this struct type stays as struct so we can receive the call in a struct.
15243             lvaTable[tmpNum].lvIsMultiRegRet = true;
15244         }
15245     }
15246 }
15247 #endif // _TARGET_ARM_
15248
15249 #if FEATURE_MULTIREG_RET
15250 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
15251 {
15252     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
15253     impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
15254     GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
15255
15256     // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15257     ret->gtFlags |= GTF_DONT_CSE;
15258
15259     assert(IsMultiRegReturnedType(hClass));
15260
15261     // Mark the var so that fields are not promoted and stay together.
15262     lvaTable[tmpNum].lvIsMultiRegRet = true;
15263
15264     return ret;
15265 }
15266 #endif // FEATURE_MULTIREG_RET
15267
15268 // do import for a return
15269 // returns false if inlining was aborted
15270 // opcode can be ret or call in the case of a tail.call
15271 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15272 {
15273     if (tiVerificationNeeded)
15274     {
15275         verVerifyThisPtrInitialised();
15276
15277         unsigned expectedStack = 0;
15278         if (info.compRetType != TYP_VOID)
15279         {
15280             typeInfo tiVal = impStackTop().seTypeInfo;
15281             typeInfo tiDeclared =
15282                 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15283
15284             Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15285
15286             Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15287             expectedStack = 1;
15288         }
15289         Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15290     }
15291
15292 #ifdef DEBUG
15293     // If we are importing an inlinee and have GC ref locals we always
15294     // need to have a spill temp for the return value.  This temp
15295     // should have been set up in advance, over in fgFindBasicBlocks.
15296     if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
15297     {
15298         assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
15299     }
15300 #endif // DEBUG
15301
15302     GenTree*             op2       = nullptr;
15303     GenTree*             op1       = nullptr;
15304     CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15305
15306     if (info.compRetType != TYP_VOID)
15307     {
15308         StackEntry se = impPopStack();
15309         retClsHnd     = se.seTypeInfo.GetClassHandle();
15310         op2           = se.val;
15311
15312         if (!compIsForInlining())
15313         {
15314             impBashVarAddrsToI(op2);
15315             op2 = impImplicitIorI4Cast(op2, info.compRetType);
15316             op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15317             assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15318                       ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15319                       ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15320                       (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15321                       (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15322
15323 #ifdef DEBUG
15324             if (opts.compGcChecks && info.compRetType == TYP_REF)
15325             {
15326                 // DDB 3483  : JIT Stress: early termination of GC ref's life time in exception code path
15327                 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15328                 // one-return BB.
15329
15330                 assert(op2->gtType == TYP_REF);
15331
15332                 // confirm that the argument is a GC pointer (for debugging (GC stress))
15333                 GenTreeArgList* args = gtNewArgList(op2);
15334                 op2                  = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
15335
15336                 if (verbose)
15337                 {
15338                     printf("\ncompGcChecks tree:\n");
15339                     gtDispTree(op2);
15340                 }
15341             }
15342 #endif
15343         }
15344         else
15345         {
15346             // inlinee's stack should be empty now.
15347             assert(verCurrentState.esStackDepth == 0);
15348
15349 #ifdef DEBUG
15350             if (verbose)
15351             {
15352                 printf("\n\n    Inlinee Return expression (before normalization)  =>\n");
15353                 gtDispTree(op2);
15354             }
15355 #endif
15356
15357             // Make sure the type matches the original call.
15358
15359             var_types returnType       = genActualType(op2->gtType);
15360             var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15361             if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15362             {
15363                 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15364             }
15365
15366             if (returnType != originalCallType)
15367             {
15368                 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15369                 return false;
15370             }
15371
15372             // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15373             // expression. At this point, retExpr could already be set if there are multiple
15374             // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15375             // the other blocks already set it. If there is only a single return block,
15376             // retExpr shouldn't be set. However, this is not true if we reimport a block
15377             // with a return. In that case, retExpr will be set, then the block will be
15378             // reimported, but retExpr won't get cleared as part of setting the block to
15379             // be reimported. The reimported retExpr value should be the same, so even if
15380             // we don't unconditionally overwrite it, it shouldn't matter.
15381             if (info.compRetNativeType != TYP_STRUCT)
15382             {
15383                 // compRetNativeType is not TYP_STRUCT.
15384                 // This implies it could be either a scalar type or SIMD vector type or
15385                 // a struct type that can be normalized to a scalar type.
15386
15387                 if (varTypeIsStruct(info.compRetType))
15388                 {
15389                     noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15390                     // adjust the type away from struct to integral
15391                     // and no normalizing
15392                     op2 = impFixupStructReturnType(op2, retClsHnd);
15393                 }
15394                 else
15395                 {
15396                     // Do we have to normalize?
15397                     var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15398                     if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15399                         fgCastNeeded(op2, fncRealRetType))
15400                     {
15401                         // Small-typed return values are normalized by the callee
15402                         op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15403                     }
15404                 }
15405
15406                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15407                 {
15408                     assert(info.compRetNativeType != TYP_VOID &&
15409                            (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
15410
15411                     // This is a bit of a workaround...
15412                     // If we are inlining a call that returns a struct, where the actual "native" return type is
15413                     // not a struct (for example, the struct is composed of exactly one int, and the native
15414                     // return type is thus an int), and the inlinee has multiple return blocks (thus,
15415                     // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15416                     // to the *native* return type), and at least one of the return blocks is the result of
15417                     // a call, then we have a problem. The situation is like this (from a failed test case):
15418                     //
15419                     // inliner:
15420                     //      // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15421                     //      call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15422                     //      plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15423                     //
15424                     // inlinee:
15425                     //      ...
15426                     //      ldobj      !!T                 // this gets bashed to a GT_LCL_FLD, type TYP_INT
15427                     //      ret
15428                     //      ...
15429                     //      call       !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15430                     //      object&, class System.Func`1<!!0>)
15431                     //      ret
15432                     //
15433                     // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15434                     // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15435                     // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15436                     // inlining properly by leaving the correct type on the GT_CALL node through importing.
15437                     //
15438                     // To fix this, for this case, we temporarily change the GT_CALL node type to the
15439                     // native return type, which is what it will be set to eventually. We generate the
15440                     // assignment to the return temp, using the correct type, and then restore the GT_CALL
15441                     // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15442
15443                     bool restoreType = false;
15444                     if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15445                     {
15446                         noway_assert(op2->TypeGet() == TYP_STRUCT);
15447                         op2->gtType = info.compRetNativeType;
15448                         restoreType = true;
15449                     }
15450
15451                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15452                                      (unsigned)CHECK_SPILL_ALL);
15453
15454                     GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15455
15456                     if (restoreType)
15457                     {
15458                         op2->gtType = TYP_STRUCT; // restore it to what it was
15459                     }
15460
15461                     op2 = tmpOp2;
15462
15463 #ifdef DEBUG
15464                     if (impInlineInfo->retExpr)
15465                     {
15466                         // Some other block(s) have seen the CEE_RET first.
15467                         // Better they spilled to the same temp.
15468                         assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15469                         assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15470                     }
15471 #endif
15472                 }
15473
15474 #ifdef DEBUG
15475                 if (verbose)
15476                 {
15477                     printf("\n\n    Inlinee Return expression (after normalization) =>\n");
15478                     gtDispTree(op2);
15479                 }
15480 #endif
15481
15482                 // Report the return expression
15483                 impInlineInfo->retExpr = op2;
15484             }
15485             else
15486             {
15487                 // compRetNativeType is TYP_STRUCT.
15488                 // This implies that struct return via RetBuf arg or multi-reg struct return
15489
15490                 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
15491
15492                 // Assign the inlinee return into a spill temp.
15493                 // spill temp only exists if there are multiple return points
15494                 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15495                 {
15496                     // in this case we have to insert multiple struct copies to the temp
15497                     // and the retexpr is just the temp.
15498                     assert(info.compRetNativeType != TYP_VOID);
15499                     assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
15500
15501                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15502                                      (unsigned)CHECK_SPILL_ALL);
15503                 }
15504
15505 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15506 #if defined(_TARGET_ARM_)
15507                 // TODO-ARM64-NYI: HFA
15508                 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15509                 // next ifdefs could be refactored into a single method with the ifdef inside.
15510                 if (IsHfa(retClsHnd))
15511                 {
15512 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15513 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15514                 ReturnTypeDesc retTypeDesc;
15515                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15516                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15517
15518                 if (retRegCount != 0)
15519                 {
15520                     // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15521                     // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15522                     // max allowed.)
15523                     assert(retRegCount == MAX_RET_REG_COUNT);
15524                     // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15525                     CLANG_FORMAT_COMMENT_ANCHOR;
15526 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15527
15528                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15529                     {
15530                         if (!impInlineInfo->retExpr)
15531                         {
15532 #if defined(_TARGET_ARM_)
15533                             impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15534 #else  // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15535                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15536                             impInlineInfo->retExpr =
15537                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15538 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15539                         }
15540                     }
15541                     else
15542                     {
15543                         impInlineInfo->retExpr = op2;
15544                     }
15545                 }
15546                 else
15547 #elif defined(_TARGET_ARM64_)
15548                 ReturnTypeDesc retTypeDesc;
15549                 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15550                 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15551
15552                 if (retRegCount != 0)
15553                 {
15554                     assert(!iciCall->HasRetBufArg());
15555                     assert(retRegCount >= 2);
15556                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15557                     {
15558                         if (!impInlineInfo->retExpr)
15559                         {
15560                             // The inlinee compiler has figured out the type of the temp already. Use it here.
15561                             impInlineInfo->retExpr =
15562                                 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15563                         }
15564                     }
15565                     else
15566                     {
15567                         impInlineInfo->retExpr = op2;
15568                     }
15569                 }
15570                 else
15571 #endif // defined(_TARGET_ARM64_)
15572                 {
15573                     assert(iciCall->HasRetBufArg());
15574                     GenTreePtr dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
15575                     // spill temp only exists if there are multiple return points
15576                     if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15577                     {
15578                         // If this is the first return we have seen, set the retExpr.
15579                         if (!impInlineInfo->retExpr)
15580                         {
15581                             impInlineInfo->retExpr =
15582                                 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15583                                                    retClsHnd, (unsigned)CHECK_SPILL_ALL);
15584                         }
15585                     }
15586                     else
15587                     {
15588                         impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15589                     }
15590                 }
15591             }
15592         }
15593     }
15594
15595     if (compIsForInlining())
15596     {
15597         return true;
15598     }
15599
15600     if (info.compRetType == TYP_VOID)
15601     {
15602         // return void
15603         op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15604     }
15605     else if (info.compRetBuffArg != BAD_VAR_NUM)
15606     {
15607         // Assign value to return buff (first param)
15608         GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15609
15610         op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15611         impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15612
15613         // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15614         CLANG_FORMAT_COMMENT_ANCHOR;
15615
15616 #if defined(_TARGET_AMD64_)
15617
15618         // The x64 (System V and Win64) calling conventions require us to
15619         // return the implicit return buffer explicitly (in RAX).
15620         // Change the return type to be BYREF.
15621         op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15622 #else  // !defined(_TARGET_AMD64_)
15623         // On non-AMD64 targets the profiler hook requires us to return the implicit RetBuf explicitly (in RAX).
15624         // In that case the return type of the function is changed to BYREF.
15625         // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15626         if (compIsProfilerHookNeeded())
15627         {
15628             op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15629         }
15630         else
15631         {
15632             // return void
15633             op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15634         }
15635 #endif // !defined(_TARGET_AMD64_)
15636     }
15637     else if (varTypeIsStruct(info.compRetType))
15638     {
15639 #if !FEATURE_MULTIREG_RET
15640         // For both ARM architectures the HFA native types are maintained as structs.
15641         // On System V AMD64 the multireg struct returns are also left as structs.
15642         noway_assert(info.compRetNativeType != TYP_STRUCT);
15643 #endif
15644         op2 = impFixupStructReturnType(op2, retClsHnd);
15645         // return op2
15646         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15647     }
15648     else
15649     {
15650         // return op2
15651         op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15652     }
15653
15654     // We must have imported a tailcall and jumped to RET
15655     if (prefixFlags & PREFIX_TAILCALL)
15656     {
15657 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
15658         // Jit64 compat:
15659         // This cannot be asserted on Amd64 since we permit the following IL pattern:
15660         //      tail.call
15661         //      pop
15662         //      ret
15663         assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15664 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
15665
15666         opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15667
15668         // impImportCall() would have already appended TYP_VOID calls
15669         if (info.compRetType == TYP_VOID)
15670         {
15671             return true;
15672         }
15673     }
15674
15675     impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15676 #ifdef DEBUG
15677     // Remember at which BC offset the tree was finished
15678     impNoteLastILoffs();
15679 #endif
15680     return true;
15681 }
15682
15683 /*****************************************************************************
15684  *  Mark the block as unimported.
15685  *  Note that the caller is responsible for calling impImportBlockPending(),
15686  *  with the appropriate stack-state
15687  */
15688
15689 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15690 {
15691 #ifdef DEBUG
15692     if (verbose && (block->bbFlags & BBF_IMPORTED))
15693     {
15694         printf("\nBB%02u will be reimported\n", block->bbNum);
15695     }
15696 #endif
15697
15698     block->bbFlags &= ~BBF_IMPORTED;
15699 }
15700
15701 /*****************************************************************************
15702  *  Mark the successors of the given block as unimported.
15703  *  Note that the caller is responsible for calling impImportBlockPending()
15704  *  for all the successors, with the appropriate stack-state.
15705  */
15706
15707 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15708 {
15709     const unsigned numSuccs = block->NumSucc();
15710     for (unsigned i = 0; i < numSuccs; i++)
15711     {
15712         impReimportMarkBlock(block->GetSucc(i));
15713     }
15714 }
15715
15716 /*****************************************************************************
15717  *
15718  *  Filter wrapper that handles only the verification exception code
15719  *  (all other exception codes continue the search).
15720  */
15721
15722 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15723 {
15724     if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15725     {
15726         return EXCEPTION_EXECUTE_HANDLER;
15727     }
15728
15729     return EXCEPTION_CONTINUE_SEARCH;
15730 }
15731
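// Set up the verification entry state of the handlers (and filters) of every try region that
// encloses 'block', from innermost to outermost, and queue those handlers for importing. When
// tracking constructor 'this' initialization, fault handlers are also re-queued for each block
// of the try body so their entry state reflects the body's post-states.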
15732 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15733 {
15734     assert(block->hasTryIndex());
15735     assert(!compIsForInlining());
15736
15737     unsigned  tryIndex = block->getTryIndex();
15738     EHblkDsc* HBtab    = ehGetDsc(tryIndex);
15739
15740     if (isTryStart)
15741     {
15742         assert(block->bbFlags & BBF_TRY_BEG);
15743
15744         // The Stack must be empty
15745         //
15746         if (block->bbStkDepth != 0)
15747         {
15748             BADCODE("Evaluation stack must be empty on entry into a try block");
15749         }
15750     }
15751
15752     // Save the stack contents, we'll need to restore it later
15753     //
15754     SavedStack blockState;
15755     impSaveStackState(&blockState, false);
15756
15757     while (HBtab != nullptr)
15758     {
15759         if (isTryStart)
15760         {
15761             // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15762             //  We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15763             //
15764             if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15765             {
15766                 // We  trigger an invalid program exception here unless we have a try/fault region.
15767                 //
15768                 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15769                 {
15770                     BADCODE(
15771                         "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
15772                 }
15773                 else
15774                 {
15775                     // Allow a try/fault region to proceed.
15776                     assert(HBtab->HasFaultHandler());
15777                 }
15778             }
15779
15780             /* Recursively process the handler block */
15781             BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15782
15783             //  Construct the proper verification stack state
15784             //   either empty or one that contains just
15785             //   the Exception Object that we are dealing with
15786             //
15787             verCurrentState.esStackDepth = 0;
15788
15789             if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15790             {
15791                 CORINFO_CLASS_HANDLE clsHnd;
15792
15793                 if (HBtab->HasFilter())
15794                 {
15795                     clsHnd = impGetObjectClass();
15796                 }
15797                 else
15798                 {
15799                     CORINFO_RESOLVED_TOKEN resolvedToken;
15800
15801                     resolvedToken.tokenContext = impTokenLookupContextHandle;
15802                     resolvedToken.tokenScope   = info.compScopeHnd;
15803                     resolvedToken.token        = HBtab->ebdTyp;
15804                     resolvedToken.tokenType    = CORINFO_TOKENKIND_Class;
15805                     info.compCompHnd->resolveToken(&resolvedToken);
15806
15807                     clsHnd = resolvedToken.hClass;
15808                 }
15809
15810                 // Push the catch arg on the stack, spilling to a temp if necessary.
15811                 // Note: can update HBtab->ebdHndBeg!
15812                 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
15813             }
15814
15815             // Queue up the handler for importing
15816             //
15817             impImportBlockPending(hndBegBB);
15818
15819             if (HBtab->HasFilter())
15820             {
15821                 /* @VERIFICATION : Ideally the end of filter state should get
15822                    propagated to the catch handler, this is an incompleteness,
15823                    but is not a security/compliance issue, since the only
15824                    interesting state is the 'thisInit' state.
15825                    */
15826
15827                 verCurrentState.esStackDepth = 0;
15828
15829                 BasicBlock* filterBB = HBtab->ebdFilter;
15830
15831                 // Push the catch arg on the stack, spilling to a temp if necessary.
15832                 // Note: can update HBtab->ebdFilter!
15833                 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
15834                 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
15835
15836                 impImportBlockPending(filterBB);
15837             }
15838         }
15839         else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15840         {
15841             /* Recursively process the handler block */
15842
15843             verCurrentState.esStackDepth = 0;
15844
15845             // Queue up the fault handler for importing
15846             //
15847             impImportBlockPending(HBtab->ebdHndBeg);
15848         }
15849
15850         // Now process our enclosing try index (if any)
15851         //
15852         tryIndex = HBtab->ebdEnclosingTryIndex;
15853         if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15854         {
15855             HBtab = nullptr;
15856         }
15857         else
15858         {
15859             HBtab = ehGetDsc(tryIndex);
15860         }
15861     }
15862
15863     // Restore the stack contents
15864     impRestoreStackState(&blockState);
15865 }
15866
15867 //***************************************************************
15868 // Import the instructions for the given basic block.  Perform
15869 // verification, throwing an exception on failure.  Push any successor blocks that are enabled for the first
15870 // time, or whose verification pre-state is changed.
15871
15872 #ifdef _PREFAST_
15873 #pragma warning(push)
15874 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15875 #endif
15876 void Compiler::impImportBlock(BasicBlock* block)
15877 {
15878     // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15879     // handle them specially. In particular, there is no IL to import for them, but we do need
15880     // to mark them as imported and put their successors on the pending import list.
15881     if (block->bbFlags & BBF_INTERNAL)
15882     {
15883         JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15884         block->bbFlags |= BBF_IMPORTED;
15885
15886         const unsigned numSuccs = block->NumSucc();
15887         for (unsigned i = 0; i < numSuccs; i++)
15888         {
15889             impImportBlockPending(block->GetSucc(i));
15890         }
15891
15892         return;
15893     }
15894
15895     bool markImport;
15896
15897     assert(block);
15898
15899     /* Make the block globally available */
15900
15901     compCurBB = block;
15902
15903 #ifdef DEBUG
15904     /* Initialize the debug variables */
15905     impCurOpcName = "unknown";
15906     impCurOpcOffs = block->bbCodeOffs;
15907 #endif
15908
15909     /* Set the current stack state to the merged result */
15910     verResetCurrentState(block, &verCurrentState);
15911
15912     /* Now walk the code and import the IL into GenTrees */
15913
15914     struct FilterVerificationExceptionsParam
15915     {
15916         Compiler*   pThis;
15917         BasicBlock* block;
15918     };
15919     FilterVerificationExceptionsParam param;
15920
15921     param.pThis = this;
15922     param.block = block;
15923
15924     PAL_TRY(FilterVerificationExceptionsParam*, pParam, &param)
15925     {
15926         /* @VERIFICATION : For now, the only state propagation from try
15927            to its handler is "thisInit" state (stack is empty at start of try).
15928            In general, for state that we track in verification, we need to
15929            model the possibility that an exception might happen at any IL
15930            instruction, so we really need to merge all states that obtain
15931            between IL instructions in a try block into the start states of
15932            all handlers.
15933
15934            However we do not allow the 'this' pointer to be uninitialized when
15935            entering most kinds of try regions (only try/fault are allowed to have
15936            an uninitialized this pointer on entry to the try)
15937
15938            Fortunately, the stack is thrown away when an exception
15939            leads to a handler, so we don't have to worry about that.
15940            We DO, however, have to worry about the "thisInit" state.
15941            But only for the try/fault case.
15942
15943            The only allowed transition is from TIS_Uninit to TIS_Init.
15944
15945            So for a try/fault region, for the fault handler block
15946            we will merge the start state of the try begin
15947            and the post-state of each block that is part of this try region
15948         */
15949
15950         // merge the start state of the try begin
15951         //
15952         if (pParam->block->bbFlags & BBF_TRY_BEG)
15953         {
15954             pParam->pThis->impVerifyEHBlock(pParam->block, true);
15955         }
15956
15957         pParam->pThis->impImportBlockCode(pParam->block);
15958
15959         // As discussed above:
15960         // merge the post-state of each block that is part of this try region
15961         //
15962         if (pParam->block->hasTryIndex())
15963         {
15964             pParam->pThis->impVerifyEHBlock(pParam->block, false);
15965         }
15966     }
15967     PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15968     {
15969         verHandleVerificationFailure(block DEBUGARG(false));
15970     }
15971     PAL_ENDTRY
15972
15973     if (compDonotInline())
15974     {
15975         return;
15976     }
15977
15978     assert(!compDonotInline());
15979
15980     markImport = false;
15981
15982 SPILLSTACK:
15983
15984     unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
15985     bool        reimportSpillClique = false;
15986     BasicBlock* tgtBlock            = nullptr;
15987
15988     /* If the stack is non-empty, we might have to spill its contents */
15989
15990     if (verCurrentState.esStackDepth != 0)
15991     {
15992         impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15993                                   // on the stack, its lifetime is hard to determine, simply
15994                                   // don't reuse such temps.
15995
15996         GenTreePtr addStmt = nullptr;
15997
15998         /* Do the successors of 'block' have any other predecessors ?
15999            We do not want to do some of the optimizations related to multiRef
16000            if we can reimport blocks */
16001
16002         unsigned multRef = impCanReimport ? unsigned(~0) : 0;
16003
16004         switch (block->bbJumpKind)
16005         {
16006             case BBJ_COND:
16007
16008                 /* Temporarily remove the 'jtrue' from the end of the tree list */
16009
16010                 assert(impTreeLast);
16011                 assert(impTreeLast->gtOper == GT_STMT);
16012                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
16013
16014                 addStmt     = impTreeLast;
16015                 impTreeLast = impTreeLast->gtPrev;
16016
16017                 /* Note if the next block has more than one ancestor */
16018
16019                 multRef |= block->bbNext->bbRefs;
16020
16021                 /* Does the next block have temps assigned? */
16022
16023                 baseTmp  = block->bbNext->bbStkTempsIn;
16024                 tgtBlock = block->bbNext;
16025
16026                 if (baseTmp != NO_BASE_TMP)
16027                 {
16028                     break;
16029                 }
16030
16031                 /* Try the target of the jump then */
16032
16033                 multRef |= block->bbJumpDest->bbRefs;
16034                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16035                 tgtBlock = block->bbJumpDest;
16036                 break;
16037
16038             case BBJ_ALWAYS:
16039                 multRef |= block->bbJumpDest->bbRefs;
16040                 baseTmp  = block->bbJumpDest->bbStkTempsIn;
16041                 tgtBlock = block->bbJumpDest;
16042                 break;
16043
16044             case BBJ_NONE:
16045                 multRef |= block->bbNext->bbRefs;
16046                 baseTmp  = block->bbNext->bbStkTempsIn;
16047                 tgtBlock = block->bbNext;
16048                 break;
16049
16050             case BBJ_SWITCH:
16051
16052                 BasicBlock** jmpTab;
16053                 unsigned     jmpCnt;
16054
16055                 /* Temporarily remove the GT_SWITCH from the end of the tree list */
16056
16057                 assert(impTreeLast);
16058                 assert(impTreeLast->gtOper == GT_STMT);
16059                 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
16060
16061                 addStmt     = impTreeLast;
16062                 impTreeLast = impTreeLast->gtPrev;
16063
16064                 jmpCnt = block->bbJumpSwt->bbsCount;
16065                 jmpTab = block->bbJumpSwt->bbsDstTab;
16066
16067                 do
16068                 {
16069                     tgtBlock = (*jmpTab);
16070
16071                     multRef |= tgtBlock->bbRefs;
16072
16073                     // Thanks to spill cliques, we should have assigned all or none
16074                     assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
16075                     baseTmp = tgtBlock->bbStkTempsIn;
16076                     if (multRef > 1)
16077                     {
16078                         break;
16079                     }
16080                 } while (++jmpTab, --jmpCnt);
16081
16082                 break;
16083
16084             case BBJ_CALLFINALLY:
16085             case BBJ_EHCATCHRET:
16086             case BBJ_RETURN:
16087             case BBJ_EHFINALLYRET:
16088             case BBJ_EHFILTERRET:
16089             case BBJ_THROW:
16090                 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
16091                 break;
16092
16093             default:
16094                 noway_assert(!"Unexpected bbJumpKind");
16095                 break;
16096         }
16097
16098         assert(multRef >= 1);
16099
16100         /* Do we have a base temp number? */
16101
16102         bool newTemps = (baseTmp == NO_BASE_TMP);
16103
16104         if (newTemps)
16105         {
16106             /* Grab enough temps for the whole stack */
16107             baseTmp = impGetSpillTmpBase(block);
16108         }
16109
16110         /* Spill all stack entries into temps */
16111         unsigned level, tempNum;
16112
16113         JITDUMP("\nSpilling stack entries into temps\n");
16114         for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
16115         {
16116             GenTreePtr tree = verCurrentState.esStack[level].val;
16117
16118             /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
16119                the other. This should merge to a byref in unverifiable code.
16120                However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
16121                successor would be imported assuming there was a TYP_I_IMPL on
16122                the stack. Thus the value would not get GC-tracked. Hence,
16123                change the temp to TYP_BYREF and reimport the successors.
16124                Note: We should only allow this in unverifiable code.
16125             */
16126             if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
16127             {
16128                 lvaTable[tempNum].lvType = TYP_BYREF;
16129                 impReimportMarkSuccessors(block);
16130                 markImport = true;
16131             }
16132
16133 #ifdef _TARGET_64BIT_
16134             if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
16135             {
16136                 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
16137                     (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
16138                 {
16139                     // Merge the current state into the entry state of block;
16140                     // the call to verMergeEntryStates must have changed
16141                     // the entry state of the block by merging the int local var
16142                     // and the native-int stack entry.
16143                     bool changed = false;
16144                     if (verMergeEntryStates(tgtBlock, &changed))
16145                     {
16146                         impRetypeEntryStateTemps(tgtBlock);
16147                         impReimportBlockPending(tgtBlock);
16148                         assert(changed);
16149                     }
16150                     else
16151                     {
16152                         tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
16153                         break;
16154                     }
16155                 }
16156
16157                 // Some other block in the spill clique set this to "int", but now we have "native int".
16158                 // Change the type and go back to re-import any blocks that used the wrong type.
16159                 lvaTable[tempNum].lvType = TYP_I_IMPL;
16160                 reimportSpillClique      = true;
16161             }
16162             else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
16163             {
16164                 // Spill clique has decided this should be "native int", but this block only pushes an "int".
16165                 // Insert a sign-extension to "native int" so we match the clique.
16166                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
16167             }
16168
16169             // Consider the case where one branch left a 'byref' on the stack and the other leaves
16170             // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
16171             // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
16172             // behavior instead of asserting and then generating bad code (where we save/restore the
16173             // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
16174             // imported already, we need to change the type of the local and reimport the spill clique.
16175             // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
16176             // the 'byref' size.
16177             if (!tiVerificationNeeded)
16178             {
16179                 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
16180                 {
16181                     // Some other block in the spill clique set this to "int", but now we have "byref".
16182                     // Change the type and go back to re-import any blocks that used the wrong type.
16183                     lvaTable[tempNum].lvType = TYP_BYREF;
16184                     reimportSpillClique      = true;
16185                 }
16186                 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
16187                 {
16188                     // Spill clique has decided this should be "byref", but this block only pushes an "int".
16189                     // Insert a sign-extension to "native int" so we match the clique size.
16190                     verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
16191                 }
16192             }
16193 #endif // _TARGET_64BIT_
16194
16195 #if FEATURE_X87_DOUBLES
16196             // X87 stack doesn't differentiate between float/double
16197             // so promoting is no big deal.
16198             // For everybody else keep it as float until we have a collision and then promote
16199             // Just like for x64's TYP_INT<->TYP_I_IMPL
16200
16201             if (multRef > 1 && tree->gtType == TYP_FLOAT)
16202             {
16203                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16204             }
16205
16206 #else // !FEATURE_X87_DOUBLES
16207
16208             if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
16209             {
16210                 // Some other block in the spill clique set this to "float", but now we have "double".
16211                 // Change the type and go back to re-import any blocks that used the wrong type.
16212                 lvaTable[tempNum].lvType = TYP_DOUBLE;
16213                 reimportSpillClique      = true;
16214             }
16215             else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
16216             {
16217                 // Spill clique has decided this should be "double", but this block only pushes a "float".
16218                 // Insert a cast to "double" so we match the clique.
16219                 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16220             }
16221
16222 #endif // FEATURE_X87_DOUBLES
16223
16224             /* If addStmt has a reference to tempNum (can only happen if we
16225                are spilling to the temps already used by a previous block),
16226                we need to spill addStmt */
16227
16228             if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
16229             {
16230                 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
16231
16232                 if (addTree->gtOper == GT_JTRUE)
16233                 {
16234                     GenTreePtr relOp = addTree->gtOp.gtOp1;
16235                     assert(relOp->OperIsCompare());
16236
16237                     var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
16238
16239                     if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
16240                     {
16241                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
16242                         impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
16243                         type              = genActualType(lvaTable[temp].TypeGet());
16244                         relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
16245                     }
16246
16247                     if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
16248                     {
16249                         unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
16250                         impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
16251                         type              = genActualType(lvaTable[temp].TypeGet());
16252                         relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
16253                     }
16254                 }
16255                 else
16256                 {
16257                     assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
16258
16259                     unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
16260                     impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
16261                     addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
16262                 }
16263             }
16264
16265             /* Spill the stack entry, and replace with the temp */
16266
16267             if (!impSpillStackEntry(level, tempNum
16268 #ifdef DEBUG
16269                                     ,
16270                                     true, "Spill Stack Entry"
16271 #endif
16272                                     ))
16273             {
16274                 if (markImport)
16275                 {
16276                     BADCODE("bad stack state");
16277                 }
16278
16279                 // Oops. Something went wrong when spilling. Bad code.
16280                 verHandleVerificationFailure(block DEBUGARG(true));
16281
16282                 goto SPILLSTACK;
16283             }
16284         }
16285
16286         /* Put back the 'jtrue'/'switch' if we removed it earlier */
16287
16288         if (addStmt)
16289         {
16290             impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16291         }
16292     }
16293
16294     // Some of the append/spill logic works on compCurBB
16295
16296     assert(compCurBB == block);
16297
16298     /* Save the tree list in the block */
16299     impEndTreeList(block);
16300
16301     // impEndTreeList sets BBF_IMPORTED on the block
16302     // We do *NOT* want to set it later than this because
16303     // impReimportSpillClique might clear it if this block is both a
16304     // predecessor and successor in the current spill clique
16305     assert(block->bbFlags & BBF_IMPORTED);
16306
16307     // If we had a int/native int, or float/double collision, we need to re-import
16308     if (reimportSpillClique)
16309     {
16310         // This will re-import all the successors of block (as well as each of their predecessors)
16311         impReimportSpillClique(block);
16312
16313         // For blocks that haven't been imported yet, we still need to mark them as pending import.
16314         const unsigned numSuccs = block->NumSucc();
16315         for (unsigned i = 0; i < numSuccs; i++)
16316         {
16317             BasicBlock* succ = block->GetSucc(i);
16318             if ((succ->bbFlags & BBF_IMPORTED) == 0)
16319             {
16320                 impImportBlockPending(succ);
16321             }
16322         }
16323     }
16324     else // the normal case
16325     {
16326         // otherwise just import the successors of block
16327
16328         /* Does this block jump to any other blocks? */
16329         const unsigned numSuccs = block->NumSucc();
16330         for (unsigned i = 0; i < numSuccs; i++)
16331         {
16332             impImportBlockPending(block->GetSucc(i));
16333         }
16334     }
16335 }
16336 #ifdef _PREFAST_
16337 #pragma warning(pop)
16338 #endif
16339
16340 /*****************************************************************************/
16341 //
16342 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16343 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16344 // impPendingBlockMembers).  Merges the current verification state into the verification state of "block"
16345 // (its "pre-state").
16346
16347 void Compiler::impImportBlockPending(BasicBlock* block)
16348 {
16349 #ifdef DEBUG
16350     if (verbose)
16351     {
16352         printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16353     }
16354 #endif
16355
16356     // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16357     // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16358     // (When we're doing verification, we always attempt the merge to detect verification errors.)
16359
16360     // If the block has not been imported, add to pending set.
16361     bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16362
16363     // Initialize bbEntryState just the first time we try to add this block to the pending list
16364     // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set.
16365     // We use NULL to indicate the 'common' state to avoid memory allocation
16366     if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16367         (impGetPendingBlockMember(block) == 0))
16368     {
16369         verInitBBEntryState(block, &verCurrentState);
16370         assert(block->bbStkDepth == 0);
16371         block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16372         assert(addToPending);
16373         assert(impGetPendingBlockMember(block) == 0);
16374     }
16375     else
16376     {
16377         // The stack should have the same height on entry to the block from all its predecessors.
16378         if (block->bbStkDepth != verCurrentState.esStackDepth)
16379         {
16380 #ifdef DEBUG
16381             char buffer[400];
16382             sprintf_s(buffer, sizeof(buffer),
16383                       "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16384                       "Previous depth was %d, current depth is %d",
16385                       block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16386                       verCurrentState.esStackDepth);
16387             buffer[400 - 1] = 0;
16388             NO_WAY(buffer);
16389 #else
16390             NO_WAY("Block entered with different stack depths");
16391 #endif
16392         }
16393
16394         // Additionally, if we need to verify, merge the verification state.
16395         if (tiVerificationNeeded)
16396         {
16397             // Merge the current state into the entry state of block; if this does not change the entry state
16398             // by merging, do not add the block to the pending-list.
16399             bool changed = false;
16400             if (!verMergeEntryStates(block, &changed))
16401             {
16402                 block->bbFlags |= BBF_FAILED_VERIFICATION;
16403                 addToPending = true; // We will pop it off, and check the flag set above.
16404             }
16405             else if (changed)
16406             {
16407                 addToPending = true;
16408
16409                 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16410             }
16411         }
16412
16413         if (!addToPending)
16414         {
16415             return;
16416         }
16417
16418         if (block->bbStkDepth > 0)
16419         {
16420             // We need to fix the types of any spill temps that might have changed:
16421             //   int->native int, float->double, int->byref, etc.
16422             impRetypeEntryStateTemps(block);
16423         }
16424
16425         // OK, we must add to the pending list, if it's not already in it.
16426         if (impGetPendingBlockMember(block) != 0)
16427         {
16428             return;
16429         }
16430     }
16431
16432     // Get an entry to add to the pending list
16433
16434     PendingDsc* dsc;
16435
16436     if (impPendingFree)
16437     {
16438         // We can reuse one of the freed up dscs.
16439         dsc            = impPendingFree;
16440         impPendingFree = dsc->pdNext;
16441     }
16442     else
16443     {
16444         // We have to create a new dsc
16445         dsc = new (this, CMK_Unknown) PendingDsc;
16446     }
16447
16448     dsc->pdBB                 = block;
16449     dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16450     dsc->pdThisPtrInit        = verCurrentState.thisInitialized;
16451
16452     // Save the stack trees for later
16453
16454     if (verCurrentState.esStackDepth)
16455     {
16456         impSaveStackState(&dsc->pdSavedStack, false);
16457     }
16458
16459     // Add the entry to the pending list
16460
16461     dsc->pdNext    = impPendingList;
16462     impPendingList = dsc;
16463     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16464
16465     // Various assertions require us now to consider the block as not imported (at least for
16466     // the final time...)
16467     block->bbFlags &= ~BBF_IMPORTED;
16468
16469 #ifdef DEBUG
16470     if (verbose && 0)
16471     {
16472         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16473     }
16474 #endif
16475 }
16476
16477 /*****************************************************************************/
16478 //
16479 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16480 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16481 // impPendingBlockMembers).  Does *NOT* change the existing "pre-state" of the block.
16482
16483 void Compiler::impReimportBlockPending(BasicBlock* block)
16484 {
16485     JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16486
16487     assert(block->bbFlags & BBF_IMPORTED);
16488
16489     // OK, we must add to the pending list, if it's not already in it.
16490     if (impGetPendingBlockMember(block) != 0)
16491     {
16492         return;
16493     }
16494
16495     // Get an entry to add to the pending list
16496
16497     PendingDsc* dsc;
16498
16499     if (impPendingFree)
16500     {
16501         // We can reuse one of the freed up dscs.
16502         dsc            = impPendingFree;
16503         impPendingFree = dsc->pdNext;
16504     }
16505     else
16506     {
16507         // We have to create a new dsc
16508         dsc = new (this, CMK_ImpStack) PendingDsc;
16509     }
16510
16511     dsc->pdBB = block;
16512
16513     if (block->bbEntryState)
16514     {
16515         dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
16516         dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16517         dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16518     }
16519     else
16520     {
16521         dsc->pdThisPtrInit        = TIS_Bottom;
16522         dsc->pdSavedStack.ssDepth = 0;
16523         dsc->pdSavedStack.ssTrees = nullptr;
16524     }
16525
16526     // Add the entry to the pending list
16527
16528     dsc->pdNext    = impPendingList;
16529     impPendingList = dsc;
16530     impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16531
16532     // Various assertions require us now to consider the block as not imported (at least for
16533     // the final time...)
16534     block->bbFlags &= ~BBF_IMPORTED;
16535
16536 #ifdef DEBUG
16537     if (verbose && 0)
16538     {
16539         printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16540     }
16541 #endif
16542 }
16543
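// Allocate a BlockListNode, reusing a node from the compiler's free list when one is available.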
16544 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16545 {
16546     if (comp->impBlockListNodeFreeList == nullptr)
16547     {
16548         return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16549     }
16550     else
16551     {
16552         BlockListNode* res             = comp->impBlockListNodeFreeList;
16553         comp->impBlockListNodeFreeList = res->m_next;
16554         return res;
16555     }
16556 }
16557
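// Return a BlockListNode to the free list so it can be reused by a later allocation.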
16558 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16559 {
16560     node->m_next             = impBlockListNodeFreeList;
16561     impBlockListNodeFreeList = node;
16562 }
16563
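// Compute the spill clique that contains 'block' as a predecessor: alternately add the successors
// of known predecessor members and the (cheap) predecessors of known successor members until no
// new members are found, invoking 'callback' for each block the first time it is added in each role.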
16564 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16565 {
16566     bool toDo = true;
16567
16568     noway_assert(!fgComputePredsDone);
16569     if (!fgCheapPredsValid)
16570     {
16571         fgComputeCheapPreds();
16572     }
16573
16574     BlockListNode* succCliqueToDo = nullptr;
16575     BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16576     while (toDo)
16577     {
16578         toDo = false;
16579         // Look at the successors of every member of the predecessor to-do list.
16580         while (predCliqueToDo != nullptr)
16581         {
16582             BlockListNode* node = predCliqueToDo;
16583             predCliqueToDo      = node->m_next;
16584             BasicBlock* blk     = node->m_blk;
16585             FreeBlockListNode(node);
16586
16587             const unsigned numSuccs = blk->NumSucc();
16588             for (unsigned succNum = 0; succNum < numSuccs; succNum++)
16589             {
16590                 BasicBlock* succ = blk->GetSucc(succNum);
16591                 // If it's not already in the clique, add it, and also add it
16592                 // as a member of the successor "toDo" set.
16593                 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16594                 {
16595                     callback->Visit(SpillCliqueSucc, succ);
16596                     impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16597                     succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16598                     toDo           = true;
16599                 }
16600             }
16601         }
16602         // Look at the predecessors of every member of the successor to-do list.
16603         while (succCliqueToDo != nullptr)
16604         {
16605             BlockListNode* node = succCliqueToDo;
16606             succCliqueToDo      = node->m_next;
16607             BasicBlock* blk     = node->m_blk;
16608             FreeBlockListNode(node);
16609
16610             for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16611             {
16612                 BasicBlock* predBlock = pred->block;
16613                 // If it's not already in the clique, add it, and also add it
16614                 // as a member of the predecessor "toDo" set.
16615                 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16616                 {
16617                     callback->Visit(SpillCliquePred, predBlock);
16618                     impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16619                     predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16620                     toDo           = true;
16621                 }
16622             }
16623         }
16624     }
16625
16626     // If this fails, it means we didn't walk the spill clique properly and somehow managed
16627     // to miss walking back to include the predecessor we started from.
16628     // The most likely cause: missing or out-of-date bbPreds.
16629     assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16630 }
16631
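// Record the chosen base spill temp on each clique member: successors receive it as their
// incoming temps (bbStkTempsIn), predecessors as their outgoing temps (bbStkTempsOut).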
16632 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16633 {
16634     if (predOrSucc == SpillCliqueSucc)
16635     {
16636         assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16637         blk->bbStkTempsIn = m_baseTmp;
16638     }
16639     else
16640     {
16641         assert(predOrSucc == SpillCliquePred);
16642         assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16643         blk->bbStkTempsOut = m_baseTmp;
16644     }
16645 }
16646
16647 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16648 {
16649     // For Preds we could be a little smarter and just find the existing store
16650     // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16651     // just re-import the whole block (just like we do for successors)
16652
16653     if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16654     {
16655         // If we haven't imported this block and we're not going to (because it isn't on
16656         // the pending list) then just ignore it for now.
16657
16658         // This block has either never been imported (EntryState == NULL) or it failed
16659         // verification. Neither state requires us to force it to be imported now.
16660         assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16661         return;
16662     }
16663
16664     // For successors we have a valid verCurrentState, so just mark them for reimport
16665     // the 'normal' way
16666     // Unlike predecessors, we *DO* need to reimport the current block because the
16667     // initial import had the wrong entry state types.
16668     // Similarly, blocks that are currently on the pending list, still need to call
16669     // impImportBlockPending to fixup their entry state.
16670     if (predOrSucc == SpillCliqueSucc)
16671     {
16672         m_pComp->impReimportMarkBlock(blk);
16673
16674         // Set the current stack state to that of the blk->bbEntryState
16675         m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16676         assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16677
16678         m_pComp->impImportBlockPending(blk);
16679     }
16680     else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16681     {
16682         // As described above, we are only visiting predecessors so they can
16683         // add the appropriate casts, since we have already done that for the current
16684         // block, it does not need to be reimported.
16685         // Nor do we need to reimport blocks that are still pending, but not yet
16686         // imported.
16687         //
16688         // For predecessors, we have no state to seed the EntryState, so we just have
16689         // to assume the existing one is correct.
16690         // If the block is also a successor, it will get the EntryState properly
16691         // updated when it is visited as a successor in the above "if" block.
16692         assert(predOrSucc == SpillCliquePred);
16693         m_pComp->impReimportBlockPending(blk);
16694     }
16695 }
16696
16697 // Re-type the incoming lclVar nodes to match the varDsc.
16698 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16699 {
16700     if (blk->bbEntryState != nullptr)
16701     {
16702         EntryState* es = blk->bbEntryState;
16703         for (unsigned level = 0; level < es->esStackDepth; level++)
16704         {
16705             GenTreePtr tree = es->esStack[level].val;
16706             if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16707             {
16708                 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16709                 noway_assert(lclNum < lvaCount);
16710                 LclVarDsc* varDsc              = lvaTable + lclNum;
16711                 es->esStack[level].val->gtType = varDsc->TypeGet();
16712             }
16713         }
16714     }
16715 }
16716
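// Return the base temp number used to spill the stack on exit from 'block'. If no base has been
// assigned yet, grab enough temps for the whole stack and propagate the base to every member of
// the block's spill clique.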
16717 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16718 {
16719     if (block->bbStkTempsOut != NO_BASE_TMP)
16720     {
16721         return block->bbStkTempsOut;
16722     }
16723
16724 #ifdef DEBUG
16725     if (verbose)
16726     {
16727         printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16728     }
16729 #endif // DEBUG
16730
16731     // Otherwise, choose one, and propagate to all members of the spill clique.
16732     // Grab enough temps for the whole stack.
16733     unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16734     SetSpillTempsBase callback(baseTmp);
16735
16736     // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16737     // to one spill clique, and similarly can only be the successor to one spill clique
16738     impWalkSpillCliqueFromPred(block, &callback);
16739
16740     return baseTmp;
16741 }
16742
16743 void Compiler::impReimportSpillClique(BasicBlock* block)
16744 {
16745 #ifdef DEBUG
16746     if (verbose)
16747     {
16748         printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16749     }
16750 #endif // DEBUG
16751
16752     // If we get here, it is because this block is already part of a spill clique
16753     // and one predecessor had an outgoing live stack slot of type int, and this
16754     // block has an outgoing live stack slot of type native int.
16755     // We need to reset these before traversal because they have already been set
16756     // by the previous walk to determine all the members of the spill clique.
16757     impInlineRoot()->impSpillCliquePredMembers.Reset();
16758     impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16759
16760     ReimportSpillClique callback(this);
16761
16762     impWalkSpillCliqueFromPred(block, &callback);
16763 }
16764
16765 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16766 // a copy of "srcState", cloning tree pointers as required.
16767 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16768 {
16769     if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16770     {
16771         block->bbEntryState = nullptr;
16772         return;
16773     }
16774
16775     block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16776
16777     // block->bbEntryState.esRefcount = 1;
16778
16779     block->bbEntryState->esStackDepth    = srcState->esStackDepth;
16780     block->bbEntryState->thisInitialized = TIS_Bottom;
16781
16782     if (srcState->esStackDepth > 0)
16783     {
16784         block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16785         unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16786
16787         memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16788         for (unsigned level = 0; level < srcState->esStackDepth; level++)
16789         {
16790             GenTreePtr tree                         = srcState->esStack[level].val;
16791             block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16792         }
16793     }
16794
16795     if (verTrackObjCtorInitState)
16796     {
16797         verSetThisInit(block, srcState->thisInitialized);
16798     }
16799
16800     return;
16801 }
16802
16803 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16804 {
16805     assert(tis != TIS_Bottom); // Precondition.
16806     if (block->bbEntryState == nullptr)
16807     {
16808         block->bbEntryState = new (this, CMK_Unknown) EntryState();
16809     }
16810
16811     block->bbEntryState->thisInitialized = tis;
16812 }
16813
16814 /*
16815  * Resets the current state to the state at the start of the basic block
16816  */
16817 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16818 {
16819
16820     if (block->bbEntryState == nullptr)
16821     {
16822         destState->esStackDepth    = 0;
16823         destState->thisInitialized = TIS_Bottom;
16824         return;
16825     }
16826
16827     destState->esStackDepth = block->bbEntryState->esStackDepth;
16828
16829     if (destState->esStackDepth > 0)
16830     {
16831         unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16832
16833         memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16834     }
16835
16836     destState->thisInitialized = block->bbThisOnEntry();
16837
16838     return;
16839 }
16840
16841 ThisInitState BasicBlock::bbThisOnEntry()
16842 {
16843     return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16844 }
16845
16846 unsigned BasicBlock::bbStackDepthOnEntry()
16847 {
16848     return (bbEntryState ? bbEntryState->esStackDepth : 0);
16849 }
16850
16851 void BasicBlock::bbSetStack(void* stackBuffer)
16852 {
16853     assert(bbEntryState);
16854     assert(stackBuffer);
16855     bbEntryState->esStack = (StackEntry*)stackBuffer;
16856 }
16857
16858 StackEntry* BasicBlock::bbStackOnEntry()
16859 {
16860     assert(bbEntryState);
16861     return bbEntryState->esStack;
16862 }
16863
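// Initialize the verifier's current state for the start of the method: the stack is empty, and
// for verified instance constructors the 'this' initialization state is tracked starting from
// TIS_Uninit.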
16864 void Compiler::verInitCurrentState()
16865 {
16866     verTrackObjCtorInitState        = FALSE;
16867     verCurrentState.thisInitialized = TIS_Bottom;
16868
16869     if (tiVerificationNeeded)
16870     {
16871         // Track this ptr initialization
16872         if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16873         {
16874             verTrackObjCtorInitState        = TRUE;
16875             verCurrentState.thisInitialized = TIS_Uninit;
16876         }
16877     }
16878
16879     // initialize stack info
16880
16881     verCurrentState.esStackDepth = 0;
16882     assert(verCurrentState.esStack != nullptr);
16883
16884     // copy current state to entry state of first BB
16885     verInitBBEntryState(fgFirstBB, &verCurrentState);
16886 }
16887
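// Return the root compiler instance of the current inlining tree; this is the compiler itself
// when we are not inlining.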
16888 Compiler* Compiler::impInlineRoot()
16889 {
16890     if (impInlineInfo == nullptr)
16891     {
16892         return this;
16893     }
16894     else
16895     {
16896         return impInlineInfo->InlineRoot;
16897     }
16898 }
16899
16900 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16901 {
16902     if (predOrSucc == SpillCliquePred)
16903     {
16904         return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16905     }
16906     else
16907     {
16908         assert(predOrSucc == SpillCliqueSucc);
16909         return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16910     }
16911 }
16912
16913 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16914 {
16915     if (predOrSucc == SpillCliquePred)
16916     {
16917         impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16918     }
16919     else
16920     {
16921         assert(predOrSucc == SpillCliqueSucc);
16922         impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16923     }
16924 }
16925
16926 /*****************************************************************************
16927  *
16928  *  Convert the instrs ("import") into our internal format (trees). The
16929  *  basic flowgraph has already been constructed and is passed in.
16930  */
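// A minimal sketch of the worker-list pattern implemented below (illustrative
// only; the real loop also restores the verifier stack state for each pending
// block, recycles the pending descriptors, and converts unverifiable blocks
// into throws):
//
//     impImportBlockPending(method);         // seed the pending list
//     while (impPendingList != nullptr)
//     {
//         PendingDsc* dsc = impPendingList;  // pop the head entry
//         impPendingList  = dsc->pdNext;
//         impImportBlock(dsc->pdBB);         // import; may re-queue successors
//     }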
16931
16932 void Compiler::impImport(BasicBlock* method)
16933 {
16934 #ifdef DEBUG
16935     if (verbose)
16936     {
16937         printf("*************** In impImport() for %s\n", info.compFullName);
16938     }
16939 #endif
16940
16941     /* Allocate the stack contents */
16942
16943     if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16944     {
16945         /* Use local variable, don't waste time allocating on the heap */
16946
16947         impStkSize              = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16948         verCurrentState.esStack = impSmallStack;
16949     }
16950     else
16951     {
16952         impStkSize              = info.compMaxStack;
16953         verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16954     }
16955
16956     // initialize the entry state at start of method
16957     verInitCurrentState();
16958
16959     // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16960     Compiler* inlineRoot = impInlineRoot();
16961     if (this == inlineRoot) // These are only used on the root of the inlining tree.
16962     {
16963         // We have initialized these previously, but to size 0.  Make them larger.
16964         impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16965         impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16966         impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16967     }
16968     inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16969     inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16970     inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16971     impBlockListNodeFreeList = nullptr;
16972
16973 #ifdef DEBUG
16974     impLastILoffsStmt   = nullptr;
16975     impNestedStackSpill = false;
16976 #endif
16977     impBoxTemp = BAD_VAR_NUM;
16978
16979     impPendingList = impPendingFree = nullptr;
16980
16981     /* Add the entry-point to the worker-list */
16982
16983     // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16984     // from EH normalization.
16985     // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
16986     // out.
16987     for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16988     {
16989         // Treat these as imported.
16990         assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16991         JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16992         method->bbFlags |= BBF_IMPORTED;
16993     }
16994
16995     impImportBlockPending(method);
16996
16997     /* Import blocks in the worker-list until there are no more */
16998
16999     while (impPendingList)
17000     {
17001         /* Remove the entry at the front of the list */
17002
17003         PendingDsc* dsc = impPendingList;
17004         impPendingList  = impPendingList->pdNext;
17005         impSetPendingBlockMember(dsc->pdBB, 0);
17006
17007         /* Restore the stack state */
17008
17009         verCurrentState.thisInitialized = dsc->pdThisPtrInit;
17010         verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
17011         if (verCurrentState.esStackDepth)
17012         {
17013             impRestoreStackState(&dsc->pdSavedStack);
17014         }
17015
17016         /* Add the entry to the free list for reuse */
17017
17018         dsc->pdNext    = impPendingFree;
17019         impPendingFree = dsc;
17020
17021         /* Now import the block */
17022
17023         if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
17024         {
17025
17026 #ifdef _TARGET_64BIT_
17027             // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
17028             // coupled with the JIT64 IL Verification logic.  Look inside verHandleVerificationFailure
17029             // method for further explanation on why we raise this exception instead of making the jitted
17030             // code throw the verification exception during execution.
17031             if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
17032             {
17033                 BADCODE("Basic block marked as not verifiable");
17034             }
17035             else
17036 #endif // _TARGET_64BIT_
17037             {
17038                 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
17039                 impEndTreeList(dsc->pdBB);
17040             }
17041         }
17042         else
17043         {
17044             impImportBlock(dsc->pdBB);
17045
17046             if (compDonotInline())
17047             {
17048                 return;
17049             }
17050             if (compIsForImportOnly() && !tiVerificationNeeded)
17051             {
17052                 return;
17053             }
17054         }
17055     }
17056
17057 #ifdef DEBUG
17058     if (verbose && info.compXcptnsCount)
17059     {
17060         printf("\nAfter impImport() added block for try,catch,finally");
17061         fgDispBasicBlocks();
17062         printf("\n");
17063     }
17064
17065     // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
17066     for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
17067     {
17068         block->bbFlags &= ~BBF_VISITED;
17069     }
17070 #endif
17071
17072     assert(!compIsForInlining() || !tiVerificationNeeded);
17073 }
17074
17075 // Checks if a typeinfo (usually stored in the type stack) is a struct.
17076 // The invariant here is that if it's not a ref or a method and it has a class handle,
17077 // then it's a valuetype.
17078 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
17079 {
17080     if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
17081     {
17082         return true;
17083     }
17084     else
17085     {
17086         return false;
17087     }
17088 }
17089
17090 /*****************************************************************************
17091  *  Check to see if the tree is the address of a local or
17092     the address of a field in a local.
17093
17094     *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
17095
17096  */
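// Illustrative tree shapes (local variable numbers are hypothetical):
//
//     ADDR(LCL_VAR V02)               -> TRUE,  *lclVarTreeOut = LCL_VAR V02
//     ADDR(FIELD(ADDR(LCL_VAR V02)))  -> TRUE,  address of a field in a local
//     ADDR(FIELD(<null gtFldObj>))    -> FALSE, static field, no enclosing local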
17097
17098 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
17099 {
17100     if (tree->gtOper != GT_ADDR)
17101     {
17102         return FALSE;
17103     }
17104
17105     GenTreePtr op = tree->gtOp.gtOp1;
17106     while (op->gtOper == GT_FIELD)
17107     {
17108         op = op->gtField.gtFldObj;
17109         if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
17110         {
17111             op = op->gtOp.gtOp1;
17112         }
17113         else
17114         {
17115             return FALSE;
17116         }
17117     }
17118
17119     if (op->gtOper == GT_LCL_VAR)
17120     {
17121         *lclVarTreeOut = op;
17122         return TRUE;
17123     }
17124     else
17125     {
17126         return FALSE;
17127     }
17128 }
17129
17130 //------------------------------------------------------------------------
17131 // impMakeDiscretionaryInlineObservations: make observations that help
17132 // determine the profitability of a discretionary inline
17133 //
17134 // Arguments:
17135 //    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
17136 //    inlineResult -- InlineResult accumulating information about this inline
17137 //
17138 // Notes:
17139 //    If inlining or prejitting the root, this method also makes
17140 //    various observations about the method that factor into inline
17141 //    decisions. It sets `compNativeSizeEstimate` as a side effect.
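//
//    Hedged example: when invoked for a prejit root (pInlineInfo == nullptr),
//    the method ends up noting roughly
//
//        inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, (int)InlineCallsiteFrequency::HOT);
//        inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, (int)BB_MAX_WEIGHT);
//
//    plus any CALLER_HAS_NEWOBJ / CALLER_HAS_NEWARRAY / CALLEE_CLASS_PROMOTABLE
//    observations that apply.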
17142
17143 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
17144 {
17145     assert((pInlineInfo != nullptr && compIsForInlining()) || // Perform the actual inlining.
17146            (pInlineInfo == nullptr && !compIsForInlining())   // Calculate the static inlining hint for ngen.
17147            );
17148
17149     // If we're really inlining, we should just have one result in play.
17150     assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
17151
17152     // If this is a "forceinline" method, the JIT probably shouldn't have gone
17153     // to the trouble of estimating the native code size. Even if it did, it
17154     // shouldn't be relying on the result of this method.
17155     assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
17156
17157     // Note if the caller contains NEWOBJ or NEWARR.
17158     Compiler* rootCompiler = impInlineRoot();
17159
17160     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
17161     {
17162         inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
17163     }
17164
17165     if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
17166     {
17167         inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
17168     }
17169
17170     bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
17171     bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
17172
17173     if (isSpecialMethod)
17174     {
17175         if (calleeIsStatic)
17176         {
17177             inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
17178         }
17179         else
17180         {
17181             inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
17182         }
17183     }
17184     else if (!calleeIsStatic)
17185     {
17186         // Callee is an instance method.
17187         //
17188         // Check if the callee has the same 'this' as the root.
17189         if (pInlineInfo != nullptr)
17190         {
17191             GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
17192             assert(thisArg);
17193             bool isSameThis = impIsThis(thisArg);
17194             inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
17195         }
17196     }
17197
17198     // Note if the callee's class is a promotable struct
17199     if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
17200     {
17201         lvaStructPromotionInfo structPromotionInfo;
17202         lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
17203         if (structPromotionInfo.canPromote)
17204         {
17205             inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
17206         }
17207     }
17208
17209 #ifdef FEATURE_SIMD
17210
17211     // Note if this method has SIMD args or a SIMD return value
17212     if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
17213     {
17214         inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
17215     }
17216
17217 #endif // FEATURE_SIMD
17218
17219     // Roughly classify callsite frequency.
17220     InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
17221
17222     // If this is a prejit root, or a maximally hot block...
17223     if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
17224     {
17225         frequency = InlineCallsiteFrequency::HOT;
17226     }
17227     // No training data.  Look for loop-like things.
17228     // We consider a recursive call loop-like.  Do not give the inlining boost to the method itself.
17229     // However, give it to things nearby.
17230     else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
17231              (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
17232     {
17233         frequency = InlineCallsiteFrequency::LOOP;
17234     }
17235     else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
17236     {
17237         frequency = InlineCallsiteFrequency::WARM;
17238     }
17239     // Check if the call site is in a rarely run block, or the caller is a class constructor.
17240     else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
17241     {
17242         frequency = InlineCallsiteFrequency::RARE;
17243     }
17244     else
17245     {
17246         frequency = InlineCallsiteFrequency::BORING;
17247     }
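    // Hedged summary of the buckets above: a prejit root or a maximally hot block
    // is HOT; a loop-like (backward-jump) block, excluding self-recursive calls,
    // is LOOP; a profiled block with non-zero weight is WARM; a rarely run block
    // or a class-constructor caller is RARE; everything else is BORING.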
17248
17249     // Also capture the block weight of the call site.  In the prejit
17250     // root case, assume there's some hot call site for this method.
17251     unsigned weight = 0;
17252
17253     if (pInlineInfo != nullptr)
17254     {
17255         weight = pInlineInfo->iciBlock->bbWeight;
17256     }
17257     else
17258     {
17259         weight = BB_MAX_WEIGHT;
17260     }
17261
17262     inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
17263     inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
17264 }
17265
17266 /*****************************************************************************
17267  This method makes STATIC inlining decision based on the IL code.
17268  It should not make any inlining decision based on the context.
17269  If forceInline is true, then the inlining decision should not depend on
17270  performance heuristics (code size, etc.).
17271  */
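/* In summary, the hard (non-heuristic) rejections below are: EH clauses, an
   empty method body, managed varargs, more than MAX_INL_LCLS locals, and more
   than MAX_INL_ARGS arguments. IL code size and maxstack are then reported to
   the inline policy, which may also fail the candidate. */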
17272
17273 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
17274                               CORINFO_METHOD_INFO*  methInfo,
17275                               bool                  forceInline,
17276                               InlineResult*         inlineResult)
17277 {
17278     unsigned codeSize = methInfo->ILCodeSize;
17279
17280     // We shouldn't have made up our minds yet...
17281     assert(!inlineResult->IsDecided());
17282
17283     if (methInfo->EHcount)
17284     {
17285         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
17286         return;
17287     }
17288
17289     if ((methInfo->ILCode == nullptr) || (codeSize == 0))
17290     {
17291         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17292         return;
17293     }
17294
17295     // For now we don't inline varargs (import code can't handle it)
17296
17297     if (methInfo->args.isVarArg())
17298     {
17299         inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17300         return;
17301     }
17302
17303     // Reject if it has too many locals.
17304     // This is currently an implementation limit due to fixed-size arrays in the
17305     // inline info, rather than a performance heuristic.
17306
17307     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17308
17309     if (methInfo->locals.numArgs > MAX_INL_LCLS)
17310     {
17311         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17312         return;
17313     }
17314
17315     // Make sure there aren't too many arguments.
17316     // This is currently an implementation limit due to fixed-size arrays in the
17317     // inline info, rather than a performance heuristic.
17318
17319     inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17320
17321     if (methInfo->args.numArgs > MAX_INL_ARGS)
17322     {
17323         inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17324         return;
17325     }
17326
17327     // Note force inline state
17328
17329     inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17330
17331     // Note IL code size
17332
17333     inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17334
17335     if (inlineResult->IsFailure())
17336     {
17337         return;
17338     }
17339
17340     // Make sure maxstack is not too big
17341
17342     inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17343
17344     if (inlineResult->IsFailure())
17345     {
17346         return;
17347     }
17348 }
17349
17350 /*****************************************************************************
17351  */
17352
17353 void Compiler::impCheckCanInline(GenTreePtr             call,
17354                                  CORINFO_METHOD_HANDLE  fncHandle,
17355                                  unsigned               methAttr,
17356                                  CORINFO_CONTEXT_HANDLE exactContextHnd,
17357                                  InlineCandidateInfo**  ppInlineCandidateInfo,
17358                                  InlineResult*          inlineResult)
17359 {
17360     // Either EE or JIT might throw exceptions below.
17361     // If that happens, just don't inline the method.
17362
17363     struct Param
17364     {
17365         Compiler*              pThis;
17366         GenTreePtr             call;
17367         CORINFO_METHOD_HANDLE  fncHandle;
17368         unsigned               methAttr;
17369         CORINFO_CONTEXT_HANDLE exactContextHnd;
17370         InlineResult*          result;
17371         InlineCandidateInfo**  ppInlineCandidateInfo;
17372     } param;
17373     memset(&param, 0, sizeof(param));
17374
17375     param.pThis                 = this;
17376     param.call                  = call;
17377     param.fncHandle             = fncHandle;
17378     param.methAttr              = methAttr;
17379     param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17380     param.result                = inlineResult;
17381     param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17382
17383     bool success = eeRunWithErrorTrap<Param>(
17384         [](Param* pParam) {
17385             DWORD                  dwRestrictions = 0;
17386             CorInfoInitClassResult initClassResult;
17387
17388 #ifdef DEBUG
17389             const char* methodName;
17390             const char* className;
17391             methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17392
17393             if (JitConfig.JitNoInline())
17394             {
17395                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17396                 goto _exit;
17397             }
17398 #endif
17399
17400             /* Try to get the code address/size for the method */
17401
17402             CORINFO_METHOD_INFO methInfo;
17403             if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17404             {
17405                 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17406                 goto _exit;
17407             }
17408
17409             bool forceInline;
17410             forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17411
17412             pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17413
17414             if (pParam->result->IsFailure())
17415             {
17416                 assert(pParam->result->IsNever());
17417                 goto _exit;
17418             }
17419
17420             // Speculatively check if initClass() can be done.
17421             // If it can be done, we will try to inline the method. If inlining
17422             // succeeds, then we will do the non-speculative initClass() and commit it.
17423             // If this speculative call to initClass() fails, there is no point
17424             // trying to inline this method.
17425             initClassResult =
17426                 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17427                                                            pParam->exactContextHnd /* context */,
17428                                                            TRUE /* speculative */);
17429
17430             if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17431             {
17432                 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17433                 goto _exit;
17434             }
17435
17436             // Give the EE the final say in whether to inline or not.
17437             // This should be last since, for verifiable code, this can be expensive.
17438
17439             /* VM Inline check also ensures that the method is verifiable if needed */
17440             CorInfoInline vmResult;
17441             vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17442                                                                   &dwRestrictions);
17443
17444             if (vmResult == INLINE_FAIL)
17445             {
17446                 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17447             }
17448             else if (vmResult == INLINE_NEVER)
17449             {
17450                 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17451             }
17452
17453             if (pParam->result->IsFailure())
17454             {
17455                 // Make sure not to report this one.  It was already reported by the VM.
17456                 pParam->result->SetReported();
17457                 goto _exit;
17458             }
17459
17460             // check for unsupported inlining restrictions
17461             assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17462
17463             if (dwRestrictions & INLINE_SAME_THIS)
17464             {
17465                 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17466                 assert(thisArg);
17467
17468                 if (!pParam->pThis->impIsThis(thisArg))
17469                 {
17470                     pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17471                     goto _exit;
17472                 }
17473             }
17474
17475             /* Get the method properties */
17476
17477             CORINFO_CLASS_HANDLE clsHandle;
17478             clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17479             unsigned clsAttr;
17480             clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17481
17482             /* Get the return type */
17483
17484             var_types fncRetType;
17485             fncRetType = pParam->call->TypeGet();
17486
17487 #ifdef DEBUG
17488             var_types fncRealRetType;
17489             fncRealRetType = JITtype2varType(methInfo.args.retType);
17490
17491             assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17492                    // <BUGNUM> VSW 288602 </BUGNUM>
17493                    // In case of IJW, we allow to assign a native pointer to a BYREF.
17494                    (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17495                    (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17496 #endif
17497
17498             //
17499             // Allocate an InlineCandidateInfo structure
17500             //
17501             InlineCandidateInfo* pInfo;
17502             pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17503
17504             pInfo->dwRestrictions  = dwRestrictions;
17505             pInfo->methInfo        = methInfo;
17506             pInfo->methAttr        = pParam->methAttr;
17507             pInfo->clsHandle       = clsHandle;
17508             pInfo->clsAttr         = clsAttr;
17509             pInfo->fncRetType      = fncRetType;
17510             pInfo->exactContextHnd = pParam->exactContextHnd;
17511             pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
17512             pInfo->initClassResult = initClassResult;
17513
17514             *(pParam->ppInlineCandidateInfo) = pInfo;
17515
17516         _exit:;
17517         },
17518         &param);
17519     if (!success)
17520     {
17521         param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17522     }
17523 }
17524
17525 void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
17526                                       GenTreePtr    curArgVal,
17527                                       unsigned      argNum,
17528                                       InlineResult* inlineResult)
17529 {
17530     InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17531
17532     if (curArgVal->gtOper == GT_MKREFANY)
17533     {
17534         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17535         return;
17536     }
17537
17538     inlCurArgInfo->argNode = curArgVal;
17539
17540     GenTreePtr lclVarTree;
17541     if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17542     {
17543         inlCurArgInfo->argIsByRefToStructLocal = true;
17544 #ifdef FEATURE_SIMD
17545         if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17546         {
17547             pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17548         }
17549 #endif // FEATURE_SIMD
17550     }
17551
17552     if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17553     {
17554         inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17555         inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17556     }
17557
17558     if (curArgVal->gtOper == GT_LCL_VAR)
17559     {
17560         inlCurArgInfo->argIsLclVar = true;
17561
17562         /* Remember the "original" argument number */
17563         curArgVal->gtLclVar.gtLclILoffs = argNum;
17564     }
17565
17566     if ((curArgVal->OperKind() & GTK_CONST) ||
17567         ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17568     {
17569         inlCurArgInfo->argIsInvariant = true;
17570         if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17571         {
17572             /* Abort, but do not mark as not inlinable */
17573             inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17574             return;
17575         }
17576     }
17577
17578     if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17579     {
17580         inlCurArgInfo->argHasLdargaOp = true;
17581     }
17582
17583 #ifdef DEBUG
17584     if (verbose)
17585     {
17586         if (inlCurArgInfo->argIsThis)
17587         {
17588             printf("thisArg:");
17589         }
17590         else
17591         {
17592             printf("\nArgument #%u:", argNum);
17593         }
17594         if (inlCurArgInfo->argIsLclVar)
17595         {
17596             printf(" is a local var");
17597         }
17598         if (inlCurArgInfo->argIsInvariant)
17599         {
17600             printf(" is a constant");
17601         }
17602         if (inlCurArgInfo->argHasGlobRef)
17603         {
17604             printf(" has global refs");
17605         }
17606         if (inlCurArgInfo->argHasSideEff)
17607         {
17608             printf(" has side effects");
17609         }
17610         if (inlCurArgInfo->argHasLdargaOp)
17611         {
17612             printf(" has ldarga effect");
17613         }
17614         if (inlCurArgInfo->argHasStargOp)
17615         {
17616             printf(" has starg effect");
17617         }
17618         if (inlCurArgInfo->argIsByRefToStructLocal)
17619         {
17620             printf(" is byref to a struct local");
17621         }
17622
17623         printf("\n");
17624         gtDispTree(curArgVal);
17625         printf("\n");
17626     }
17627 #endif
17628 }
17629
17630 /*****************************************************************************
17631  *
17632  */
17633
17634 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17635 {
17636     assert(!compIsForInlining());
17637
17638     GenTreePtr           call         = pInlineInfo->iciCall;
17639     CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
17640     unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
17641     InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
17642     InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
17643     InlineResult*        inlineResult = pInlineInfo->inlineResult;
17644
17645     const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17646
17647     /* init the argument struct */
17648
17649     memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17650
17651     /* Get hold of the 'this' pointer and the argument list proper */
17652
17653     GenTreePtr thisArg = call->gtCall.gtCallObjp;
17654     GenTreePtr argList = call->gtCall.gtCallArgs;
17655     unsigned   argCnt  = 0; // Count of the arguments
17656
17657     assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17658
17659     if (thisArg)
17660     {
17661         inlArgInfo[0].argIsThis = true;
17662
17663         impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17664
17665         if (inlineResult->IsFailure())
17666         {
17667             return;
17668         }
17669
17670         /* Increment the argument count */
17671         argCnt++;
17672     }
17673
17674     /* Record some information about each of the arguments */
17675     bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17676
17677 #if USER_ARGS_COME_LAST
17678     unsigned typeCtxtArg = thisArg ? 1 : 0;
17679 #else  // USER_ARGS_COME_LAST
17680     unsigned typeCtxtArg = methInfo->args.totalILArgs();
17681 #endif // USER_ARGS_COME_LAST
17682
17683     for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17684     {
17685         if (argTmp == argList && hasRetBuffArg)
17686         {
17687             continue;
17688         }
17689
17690         // Ignore the type context argument
17691         if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17692         {
17693             pInlineInfo->typeContextArg = typeCtxtArg;
17694             typeCtxtArg                 = 0xFFFFFFFF;
17695             continue;
17696         }
17697
17698         assert(argTmp->gtOper == GT_LIST);
17699         GenTreePtr argVal = argTmp->gtOp.gtOp1;
17700
17701         impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17702
17703         if (inlineResult->IsFailure())
17704         {
17705             return;
17706         }
17707
17708         /* Increment the argument count */
17709         argCnt++;
17710     }
17711
17712     /* Make sure we got the arg number right */
17713     assert(argCnt == methInfo->args.totalILArgs());
17714
17715 #ifdef FEATURE_SIMD
17716     bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17717 #endif // FEATURE_SIMD
17718
17719     /* We have typeless opcodes, get type information from the signature */
17720
17721     if (thisArg)
17722     {
17723         var_types sigType;
17724
17725         if (clsAttr & CORINFO_FLG_VALUECLASS)
17726         {
17727             sigType = TYP_BYREF;
17728         }
17729         else
17730         {
17731             sigType = TYP_REF;
17732         }
17733
17734         lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17735         lclVarInfo[0].lclHasLdlocaOp = false;
17736
17737 #ifdef FEATURE_SIMD
17738         // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17739         // the inlining multiplier) for anything in that assembly.
17740         // But we only need to normalize it if it is a TYP_STRUCT
17741         // (which we need to do even if we have already set foundSIMDType).
17742         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17743         {
17744             if (sigType == TYP_STRUCT)
17745             {
17746                 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17747             }
17748             foundSIMDType = true;
17749         }
17750 #endif // FEATURE_SIMD
17751         lclVarInfo[0].lclTypeInfo = sigType;
17752
17753         assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
17754                (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17755                 (clsAttr & CORINFO_FLG_VALUECLASS)));
17756
17757         if (genActualType(thisArg->gtType) != genActualType(sigType))
17758         {
17759             if (sigType == TYP_REF)
17760             {
17761                 /* The argument cannot be bashed into a ref (see bug 750871) */
17762                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17763                 return;
17764             }
17765
17766             /* This can only happen with byrefs <-> ints/shorts */
17767
17768             assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17769             assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17770
17771             if (sigType == TYP_BYREF)
17772             {
17773                 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17774             }
17775             else if (thisArg->gtType == TYP_BYREF)
17776             {
17777                 assert(sigType == TYP_I_IMPL);
17778
17779                 /* If possible change the BYREF to an int */
17780                 if (thisArg->IsVarAddr())
17781                 {
17782                     thisArg->gtType              = TYP_I_IMPL;
17783                     lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17784                 }
17785                 else
17786                 {
17787                     /* Arguments 'int <- byref' cannot be bashed */
17788                     inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17789                     return;
17790                 }
17791             }
17792         }
17793     }
17794
17795     /* Init the types of the arguments and make sure the types
17796      * from the trees match the types in the signature */
17797
17798     CORINFO_ARG_LIST_HANDLE argLst;
17799     argLst = methInfo->args.args;
17800
17801     unsigned i;
17802     for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17803     {
17804         var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17805
17806         lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17807
17808 #ifdef FEATURE_SIMD
17809         if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17810         {
17811             // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17812             // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17813             // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17814             foundSIMDType = true;
17815             if (sigType == TYP_STRUCT)
17816             {
17817                 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17818                 sigType              = structType;
17819             }
17820         }
17821 #endif // FEATURE_SIMD
17822
17823         lclVarInfo[i].lclTypeInfo    = sigType;
17824         lclVarInfo[i].lclHasLdlocaOp = false;
17825
17826         /* Does the tree type match the signature type? */
17827
17828         GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17829
17830         if (sigType != inlArgNode->gtType)
17831         {
17832             /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17833                but in bad IL cases with caller-callee signature mismatches we can see other types.
17834                Intentionally reject cases with mismatches so the jit is more flexible when
17835                encountering bad IL. */
17836
17837             bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17838                                         (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17839                                         (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17840
17841             if (!isPlausibleTypeMatch)
17842             {
17843                 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17844                 return;
17845             }
17846
17847             /* Is it a narrowing or widening cast?
17848              * Widening casts are ok since the value computed is already
17849              * normalized to an int (on the IL stack) */
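            // For example (hedged): if the signature says int8 but the caller passes
            // a 32-bit int, a cast node to the small type is inserted below, and it is
            // folded away by gtFoldExprConst when the argument is a constant.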
17850
17851             if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17852             {
17853                 if (sigType == TYP_BYREF)
17854                 {
17855                     lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17856                 }
17857                 else if (inlArgNode->gtType == TYP_BYREF)
17858                 {
17859                     assert(varTypeIsIntOrI(sigType));
17860
17861                     /* If possible bash the BYREF to an int */
17862                     if (inlArgNode->IsVarAddr())
17863                     {
17864                         inlArgNode->gtType           = TYP_I_IMPL;
17865                         lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17866                     }
17867                     else
17868                     {
17869                         /* Arguments 'int <- byref' cannot be changed */
17870                         inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17871                         return;
17872                     }
17873                 }
17874                 else if (genTypeSize(sigType) < EA_PTRSIZE)
17875                 {
17876                     /* Narrowing cast */
17877
17878                     if (inlArgNode->gtOper == GT_LCL_VAR &&
17879                         !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17880                         sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17881                     {
17882                         /* We don't need to insert a cast here as the variable
17883                            was assigned a normalized value of the right type */
17884
17885                         continue;
17886                     }
17887
17888                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17889
17890                     inlArgInfo[i].argIsLclVar = false;
17891
17892                     /* Try to fold the node in case we have constant arguments */
17893
17894                     if (inlArgInfo[i].argIsInvariant)
17895                     {
17896                         inlArgNode            = gtFoldExprConst(inlArgNode);
17897                         inlArgInfo[i].argNode = inlArgNode;
17898                         assert(inlArgNode->OperIsConst());
17899                     }
17900                 }
17901 #ifdef _TARGET_64BIT_
17902                 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17903                 {
17904                     // This should only happen for int -> native int widening
17905                     inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17906
17907                     inlArgInfo[i].argIsLclVar = false;
17908
17909                     /* Try to fold the node in case we have constant arguments */
17910
17911                     if (inlArgInfo[i].argIsInvariant)
17912                     {
17913                         inlArgNode            = gtFoldExprConst(inlArgNode);
17914                         inlArgInfo[i].argNode = inlArgNode;
17915                         assert(inlArgNode->OperIsConst());
17916                     }
17917                 }
17918 #endif // _TARGET_64BIT_
17919             }
17920         }
17921     }
17922
17923     /* Init the types of the local variables */
17924
17925     CORINFO_ARG_LIST_HANDLE localsSig;
17926     localsSig = methInfo->locals.args;
17927
17928     for (i = 0; i < methInfo->locals.numArgs; i++)
17929     {
17930         bool      isPinned;
17931         var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17932
17933         lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17934         lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
17935         lclVarInfo[i + argCnt].lclTypeInfo    = type;
17936
17937         if (varTypeIsGC(type))
17938         {
17939             pInlineInfo->numberOfGcRefLocals++;
17940         }
17941
17942         if (isPinned)
17943         {
17944             // Pinned locals may cause inlines to fail.
17945             inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17946             if (inlineResult->IsFailure())
17947             {
17948                 return;
17949             }
17950         }
17951
17952         lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17953
17954         // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17955         // out on the inline.
17956         if (type == TYP_STRUCT)
17957         {
17958             CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17959             DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17960             if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17961             {
17962                 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17963                 if (inlineResult->IsFailure())
17964                 {
17965                     return;
17966                 }
17967
17968                 // Do further notification in the case where the call site is rare; some policies do
17969                 // not track the relative hotness of call sites for "always" inline cases.
17970                 if (pInlineInfo->iciBlock->isRunRarely())
17971                 {
17972                     inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17973                     if (inlineResult->IsFailure())
17974                     {
17975
17976                         return;
17977                     }
17978                 }
17979             }
17980         }
17981
17982         localsSig = info.compCompHnd->getArgNext(localsSig);
17983
17984 #ifdef FEATURE_SIMD
17985         if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17986         {
17987             foundSIMDType = true;
17988             if (featureSIMD && type == TYP_STRUCT)
17989             {
17990                 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17991                 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17992             }
17993         }
17994 #endif // FEATURE_SIMD
17995     }
17996
17997 #ifdef FEATURE_SIMD
17998     if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17999     {
18000         foundSIMDType = true;
18001     }
18002     pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
18003 #endif // FEATURE_SIMD
18004 }
18005
18006 //------------------------------------------------------------------------
18007 // impInlineFetchLocal: get a local var that represents an inlinee local
18008 //
18009 // Arguments:
18010 //    lclNum -- number of the inlinee local
18011 //    reason -- debug string describing purpose of the local var
18012 //
18013 // Returns:
18014 //    Number of the local to use
18015 //
18016 // Notes:
18017 //    This method is invoked only for locals actually used in the
18018 //    inlinee body.
18019 //
18020 //    Allocates a new temp if necessary, and copies key properties
18021 //    over from the inlinee local var info.
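//
//    Hedged usage sketch: when the importer sees an inlinee 'ldloc.1', it does
//    something along the lines of
//
//        unsigned   tmpNum = impInlineFetchLocal(1 DEBUGARG("inlinee local"));
//        GenTreePtr use    = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
//
//    The temp is allocated lazily on the first use and reused afterwards.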
18022
18023 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
18024 {
18025     assert(compIsForInlining());
18026
18027     unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
18028
18029     if (tmpNum == BAD_VAR_NUM)
18030     {
18031         const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
18032         const var_types      lclTyp       = inlineeLocal.lclTypeInfo;
18033
18034         // The lifetime of this local might span multiple BBs.
18035         // So it is a long lifetime local.
18036         impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
18037
18038         // Copy over key info
18039         lvaTable[tmpNum].lvType                 = lclTyp;
18040         lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
18041         lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
18042         lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
18043         lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
18044
18045         // Copy over class handle for ref types. Note this may be a
18046         // shared type -- someday perhaps we can get the exact
18047         // signature and pass in a more precise type.
18048         if (lclTyp == TYP_REF)
18049         {
18050             lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
18051         }
18052
18053         if (inlineeLocal.lclVerTypeInfo.IsStruct())
18054         {
18055             if (varTypeIsStruct(lclTyp))
18056             {
18057                 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18058             }
18059             else
18060             {
18061                 // This is a wrapped primitive.  Make sure the verstate knows that
18062                 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
18063             }
18064         }
18065
18066 #ifdef DEBUG
18067         // Sanity check that we're properly prepared for gc ref locals.
18068         if (varTypeIsGC(lclTyp))
18069         {
18070             // Since there are gc locals we should have seen them earlier
18071             // and if there was a return value, set up the spill temp.
18072             assert(impInlineInfo->HasGcRefLocals());
18073             assert((info.compRetNativeType == TYP_VOID) || (lvaInlineeReturnSpillTemp != BAD_VAR_NUM));
18074         }
18075         else
18076         {
18077             // Make sure all pinned locals count as gc refs.
18078             assert(!inlineeLocal.lclIsPinned);
18079         }
18080 #endif // DEBUG
18081     }
18082
18083     return tmpNum;
18084 }
18085
18086 //------------------------------------------------------------------------
18087 // impInlineFetchArg: return tree node for argument value in an inlinee
18088 //
18089 // Arguments:
18090 //    lclNum -- argument number in inlinee IL
18091 //    inlArgInfo -- argument info for inlinee
18092 //    lclVarInfo -- var info for inlinee
18093 //
18094 // Returns:
18095 //    Tree for the argument's value. Often an inlinee-scoped temp
18096 //    GT_LCL_VAR but can be other tree kinds, if the argument
18097 //    expression from the caller can be directly substituted into the
18098 //    inlinee body.
18099 //
18100 // Notes:
18101 //    Must be used only for arguments -- use impInlineFetchLocal for
18102 //    inlinee locals.
18103 //
18104 //    Direct substitution is performed when the formal argument cannot
18105 //    change value in the inlinee body (no starg or ldarga), and the
18106 //    actual argument expression's value cannot be changed if it is
18107 //    substituted it into the inlinee body.
18108 //
18109 //    Even if an inlinee-scoped temp is returned here, it may later be
18110 //    "bashed" to a caller-supplied tree when arguments are actually
18111 //    passed (see fgInlinePrependStatements). Bashing can happen if
18112 //    the argument ends up being single use and other conditions are
18113 //    met. So the contents of the tree returned here may not end up
18114 //    being the ones ultimately used for the argument.
18115 //
18116 //    This method will side effect inlArgInfo. It should only be called
18117 //    for actual uses of the argument in the inlinee.
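//
//    Hedged example of the substitution choices: a constant actual argument is
//    cloned directly into the inlinee body; a caller local passed by value is
//    substituted as a GT_LCL_VAR of that local (copied/retyped on later uses);
//    an argument with side effects or global references is spilled to a fresh
//    temp obtained from lvaGrabTemp and referenced through that temp instead.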
18118
18119 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
18120 {
18121     // Cache the relevant arg and lcl info for this argument.
18122     // We will modify argInfo but not lclVarInfo.
18123     InlArgInfo&          argInfo          = inlArgInfo[lclNum];
18124     const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
18125     const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
18126     const var_types      lclTyp           = lclInfo.lclTypeInfo;
18127     GenTreePtr           op1              = nullptr;
18128
18129     if (argInfo.argIsInvariant && !argCanBeModified)
18130     {
18131         // Directly substitute constants or addresses of locals
18132         //
18133         // Clone the constant. Note that we cannot directly use
18134         // argNode in the trees even if !argInfo.argIsUsed as this
18135         // would introduce aliasing between inlArgInfo[].argNode and
18136         // impInlineExpr. Then gtFoldExpr() could change it, causing
18137         // further references to the argument working off of the
18138         // bashed copy.
18139         op1 = gtCloneExpr(argInfo.argNode);
18140         PREFIX_ASSUME(op1 != nullptr);
18141         argInfo.argTmpNum = BAD_VAR_NUM;
18142     }
18143     else if (argInfo.argIsLclVar && !argCanBeModified)
18144     {
18145         // Directly substitute caller locals
18146         //
18147         // Use the caller-supplied node if this is the first use.
18148         op1               = argInfo.argNode;
18149         argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
18150
18151         // Use an equivalent copy if this is the second or subsequent
18152         // use, or if we need to retype.
18153         //
18154         // Note argument type mismatches that prevent inlining should
18155         // have been caught in impInlineInitVars.
18156         if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
18157         {
18158             assert(op1->gtOper == GT_LCL_VAR);
18159             assert(lclNum == op1->gtLclVar.gtLclILoffs);
18160
18161             var_types newTyp = lclTyp;
18162
18163             if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
18164             {
18165                 newTyp = genActualType(lclTyp);
18166             }
18167
18168             // Create a new lcl var node - remember the argument lclNum
18169             op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
18170         }
18171     }
18172     else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
18173     {
18174         /* Argument is a by-ref address to a struct, a normed struct, or its field.
18175            In these cases, don't spill the byref to a local, simply clone the tree and use it.
18176            This way we will increase the chance for this byref to be optimized away by
18177            a subsequent "dereference" operation.
18178
18179            From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
18180            (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
18181            For example, if the caller is:
18182                 ldloca.s   V_1  // V_1 is a local struct
18183                 call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
18184            and the callee being inlined has:
18185                 .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
18186                     ldarga.s   ptrToInts
18187                     call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
18188            then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
18189            soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
18190         */
18191         assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
18192         op1 = gtCloneExpr(argInfo.argNode);
18193     }
18194     else
18195     {
18196         /* Argument is a complex expression - it must be evaluated into a temp */
18197
18198         if (argInfo.argHasTmp)
18199         {
18200             assert(argInfo.argIsUsed);
18201             assert(argInfo.argTmpNum < lvaCount);
18202
18203             /* Create a new lcl var node - remember the argument lclNum */
18204             op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
18205
18206             /* This is the second or later use of the this argument,
18207             so we have to use the temp (instead of the actual arg) */
18208             argInfo.argBashTmpNode = nullptr;
18209         }
18210         else
18211         {
18212             /* First time use */
18213             assert(!argInfo.argIsUsed);
18214
18215             /* Reserve a temp for the expression.
18216             * Use a large size node as we may change it later */
18217
18218             const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
18219
18220             lvaTable[tmpNum].lvType = lclTyp;
18221
18222             // For ref types, determine the type of the temp.
18223             if (lclTyp == TYP_REF)
18224             {
18225                 if (!argCanBeModified)
18226                 {
18227                     // If the arg can't be modified in the method
18228                     // body, use the type of the value, if
18229                     // known. Otherwise, use the declared type.
18230                     lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18231                 }
18232                 else
18233                 {
18234                     // Arg might be modified, use the declared type of
18235                     // the argument.
18236                     lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18237                 }
18238             }
18239
18240             assert(lvaTable[tmpNum].lvAddrExposed == 0);
18241             if (argInfo.argHasLdargaOp)
18242             {
18243                 lvaTable[tmpNum].lvHasLdAddrOp = 1;
18244             }
18245
18246             if (lclInfo.lclVerTypeInfo.IsStruct())
18247             {
18248                 if (varTypeIsStruct(lclTyp))
18249                 {
18250                     lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18251                 }
18252                 else
18253                 {
18254                     // This is a wrapped primitive.  Make sure the verstate knows that
18255                     lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
18256                 }
18257             }
18258
18259             argInfo.argHasTmp = true;
18260             argInfo.argTmpNum = tmpNum;
18261
18262             // If we require strict exception order, then arguments must
18263             // be evaluated in sequence before the body of the inlined method.
18264             // So we need to evaluate them to a temp.
18265             // Also, if arguments have global references, we need to
18266             // evaluate them to a temp before the inlined body as the
18267             // inlined body may be modifying the global ref.
18268             // TODO-1stClassStructs: We currently do not reuse an existing lclVar
18269             // if it is a struct, because it requires some additional handling.
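                  // For example (illustrative): if an argument expression reads a
                  // static field that the inlinee body later writes, substituting the
                  // expression at its use point could observe the post-write value
                  // rather than the value at the call site, so it must be spilled
                  // to a temp ahead of the inlined body.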
18270
18271             if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef)
18272             {
18273                 /* Get a *LARGE* LCL_VAR node */
18274                 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
18275
18276                 /* Record op1 as the very first use of this argument.
18277                 If there are no further uses of the arg, we may be
18278                 able to use the actual arg node instead of the temp.
18279                 If we do see any further uses, we will clear this. */
18280                 argInfo.argBashTmpNode = op1;
18281             }
18282             else
18283             {
18284                 /* Get a small LCL_VAR node */
18285                 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
18286                 /* No bashing of this argument */
18287                 argInfo.argBashTmpNode = nullptr;
18288             }
18289         }
18290     }
18291
18292     // Mark this argument as used.
18293     argInfo.argIsUsed = true;
18294
18295     return op1;
18296 }
18297
18298 /******************************************************************************
18299  Is this the original "this" argument to the call being inlined?
18300
18301  Note that we do not inline methods with "starg 0", and so we do not need to
18302  worry about 'this' being reassigned within the inlinee.
18303 */
18304
18305 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
18306 {
18307     assert(compIsForInlining());
18308     return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
18309 }
18310
18311 //-----------------------------------------------------------------------------
18312 // This function checks whether a dereference in the inlinee can guarantee that
18313 // the "this" pointer is non-NULL.
18314 // If we haven't hit a branch or a side effect, and we are dereferencing
18315 // from 'this' to access a field or to make a GTF_CALL_NULLCHECK call,
18316 // then we can avoid a separate null pointer check.
18317 //
18318 // "additionalTreesToBeEvaluatedBefore"
18319 // is the set of pending trees that have not yet been added to the statement list,
18320 // and which have been removed from verCurrentState.esStack[]
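      // For example (illustrative): if the first thing the inlinee body does is
      // dereference 'this' (say, to load an instance field), that access will fault
      // on a null 'this' anyway, so the explicit null check that would otherwise be
      // generated for the inlined 'this' argument is redundant and can be skipped.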
18321
18322 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
18323                                                                   GenTreePtr  variableBeingDereferenced,
18324                                                                   InlArgInfo* inlArgInfo)
18325 {
18326     assert(compIsForInlining());
18327     assert(opts.OptEnabled(CLFLG_INLINING));
18328
18329     BasicBlock* block = compCurBB;
18330
18331     GenTreePtr stmt;
18332     GenTreePtr expr;
18333
18334     if (block != fgFirstBB)
18335     {
18336         return FALSE;
18337     }
18338
18339     if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
18340     {
18341         return FALSE;
18342     }
18343
18344     if (additionalTreesToBeEvaluatedBefore &&
18345         GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
18346     {
18347         return FALSE;
18348     }
18349
18350     for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
18351     {
18352         expr = stmt->gtStmt.gtStmtExpr;
18353
18354         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
18355         {
18356             return FALSE;
18357         }
18358     }
18359
18360     for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
18361     {
18362         unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
18363         if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
18364         {
18365             return FALSE;
18366         }
18367     }
18368
18369     return TRUE;
18370 }
18371
18372 //------------------------------------------------------------------------
18373 // impMarkInlineCandidate: determine if this call can be subsequently inlined
18374 //
18375 // Arguments:
18376 //    callNode -- call under scrutiny
18377 //    exactContextHnd -- context handle for inlining
18378 //    exactContextNeedsRuntimeLookup -- true if context required runtime lookup
18379 //    callInfo -- call info from VM
18380 //
18381 // Notes:
18382 //    If callNode is an inline candidate, this method sets the flag
18383 //    GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
18384 //    filled in the associated InlineCandidateInfo.
18385 //
18386 //    If callNode is not an inline candidate, and the reason is
18387 //    something that is inherent to the method being called, the
18388 //    method may be marked as "noinline" to short-circuit any
18389 //    future assessments of calls to this method.
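      //    For example (illustrative): an observation such as CALLEE_IS_SYNCHRONIZED
      //    is inherent to the callee and may cause it to be marked "noinline", while
      //    an observation such as CALLSITE_IS_WITHIN_FILTER only rejects this
      //    particular call site.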
18390
18391 void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
18392                                       CORINFO_CONTEXT_HANDLE exactContextHnd,
18393                                       bool                   exactContextNeedsRuntimeLookup,
18394                                       CORINFO_CALL_INFO*     callInfo)
18395 {
18396     // Let the strategy know there's another call
18397     impInlineRoot()->m_inlineStrategy->NoteCall();
18398
18399     if (!opts.OptEnabled(CLFLG_INLINING))
18400     {
18401         /* XXX Mon 8/18/2008
18402          * This assert is misleading.  The caller does not ensure that we have CLFLG_INLINING set before
18403          * calling impMarkInlineCandidate.  However, if this assert trips it means that we're an inlinee and
18404          * CLFLG_MINOPT is set.  That doesn't make a lot of sense.  If you hit this assert, work back and
18405          * figure out why we did not set MAXOPT for this compile.
18406          */
18407         assert(!compIsForInlining());
18408         return;
18409     }
18410
18411     if (compIsForImportOnly())
18412     {
18413         // Don't bother creating the inline candidate during verification.
18414         // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18415         // that leads to the creation of multiple instances of Compiler.
18416         return;
18417     }
18418
18419     GenTreeCall* call = callNode->AsCall();
18420     InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18421
18422     // Don't inline if not optimizing root method
18423     if (opts.compDbgCode)
18424     {
18425         inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18426         return;
18427     }
18428
18429     // Don't inline if inlining into root method is disabled.
18430     if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18431     {
18432         inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18433         return;
18434     }
18435
18436     // Inlining candidate determination needs to honor only the IL tail prefix.
18437     // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18438     if (call->IsTailPrefixedCall())
18439     {
18440         inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18441         return;
18442     }
18443
18444     // Tail recursion elimination takes precedence over inlining.
18445     // TODO: We may want to do some of the additional checks from fgMorphCall
18446     // here, to reduce the chance of rejecting an inline for a call that will
18447     // not actually be optimized as a fast tail call or turned into a loop.
18448     if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18449     {
18450         inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18451         return;
18452     }
18453
18454     if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
18455     {
18456         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18457         return;
18458     }
18459
18460     /* Ignore helper calls */
18461
18462     if (call->gtCallType == CT_HELPER)
18463     {
18464         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18465         return;
18466     }
18467
18468     /* Ignore indirect calls */
18469     if (call->gtCallType == CT_INDIRECT)
18470     {
18471         inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
18472         return;
18473     }
18474
18475     /* I removed the check for BBJ_THROW.  BBJ_THROW is usually marked as rarely run.  This more or less
18476      * restricts the inliner to non-expanding inlines.  I removed the check to allow for non-expanding
18477      * inlining in throw blocks.  I should consider the same thing for catch and filter regions. */
18478
18479     CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
18480     unsigned              methAttr;
18481
18482     // Reuse method flags from the original callInfo if possible
18483     if (fncHandle == callInfo->hMethod)
18484     {
18485         methAttr = callInfo->methodFlags;
18486     }
18487     else
18488     {
18489         methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
18490     }
18491
18492 #ifdef DEBUG
18493     if (compStressCompile(STRESS_FORCE_INLINE, 0))
18494     {
18495         methAttr |= CORINFO_FLG_FORCEINLINE;
18496     }
18497 #endif
18498
18499     // Check for COMPlus_AggressiveInlining
18500     if (compDoAggressiveInlining)
18501     {
18502         methAttr |= CORINFO_FLG_FORCEINLINE;
18503     }
18504
18505     if (!(methAttr & CORINFO_FLG_FORCEINLINE))
18506     {
18507         /* Don't bother inlining blocks that are in the catch handler region */
18508         if (bbInCatchHandlerILRange(compCurBB))
18509         {
18510 #ifdef DEBUG
18511             if (verbose)
18512             {
18513                 printf("\nWill not inline blocks that are in the catch handler region\n");
18514             }
18515
18516 #endif
18517
18518             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18519             return;
18520         }
18521
18522         if (bbInFilterILRange(compCurBB))
18523         {
18524 #ifdef DEBUG
18525             if (verbose)
18526             {
18527                 printf("\nWill not inline blocks that are in the filter region\n");
18528             }
18529 #endif
18530
18531             inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
18532             return;
18533         }
18534     }
18535
18536     /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
18537
18538     if (opts.compNeedSecurityCheck)
18539     {
18540         inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
18541         return;
18542     }
18543
18544     /* Check if we tried to inline this method before */
18545
18546     if (methAttr & CORINFO_FLG_DONT_INLINE)
18547     {
18548         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
18549         return;
18550     }
18551
18552     /* Cannot inline synchronized methods */
18553
18554     if (methAttr & CORINFO_FLG_SYNCH)
18555     {
18556         inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
18557         return;
18558     }
18559
18560     /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
18561
18562     if (methAttr & CORINFO_FLG_SECURITYCHECK)
18563     {
18564         inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
18565         return;
18566     }
18567
18568     InlineCandidateInfo* inlineCandidateInfo = nullptr;
18569     impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
18570
18571     if (inlineResult.IsFailure())
18572     {
18573         return;
18574     }
18575
18576     // The old value should be NULL
18577     assert(call->gtInlineCandidateInfo == nullptr);
18578
18579     // The new value should not be NULL.
18580     assert(inlineCandidateInfo != nullptr);
18581     inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
18582
18583     call->gtInlineCandidateInfo = inlineCandidateInfo;
18584
18585     // Mark the call node as inline candidate.
18586     call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
18587
18588     // Let the strategy know there's another candidate.
18589     impInlineRoot()->m_inlineStrategy->NoteCandidate();
18590
18591     // Since we're not actually inlining yet, and this call site is
18592     // still just an inline candidate, there's nothing to report.
18593     inlineResult.SetReported();
18594 }
18595
18596 /******************************************************************************/
18597 // Returns true if the given intrinsic will be implemented by target-specific
18598 // instructions
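      // For example (illustrative): on xarch, CORINFO_INTRINSIC_Sqrt expands to the
      // SSE2 sqrtsd/sqrtss instructions rather than a call to System.Math.Sqrt.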
18599
18600 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
18601 {
18602 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
18603     switch (intrinsicId)
18604     {
18605         // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
18606         //
18607         // TODO: Because the x86 backend only targets SSE for floating-point code,
18608         //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
18609         //       implemented those intrinsics as x87 instructions). If this poses
18610         //       a CQ problem, it may be necessary to change the implementation of
18611         //       the helper calls to decrease call overhead or switch back to the
18612         //       x87 instructions. This is tracked by #7097.
18613         case CORINFO_INTRINSIC_Sqrt:
18614         case CORINFO_INTRINSIC_Abs:
18615             return true;
18616
18617         default:
18618             return false;
18619     }
18620 #elif defined(_TARGET_ARM64_)
18621     switch (intrinsicId)
18622     {
18623         case CORINFO_INTRINSIC_Sqrt:
18624         case CORINFO_INTRINSIC_Abs:
18625         case CORINFO_INTRINSIC_Round:
18626             return true;
18627
18628         default:
18629             return false;
18630     }
18631 #elif defined(_TARGET_ARM_)
18632     switch (intrinsicId)
18633     {
18634         case CORINFO_INTRINSIC_Sqrt:
18635         case CORINFO_INTRINSIC_Abs:
18636         case CORINFO_INTRINSIC_Round:
18637             return true;
18638
18639         default:
18640             return false;
18641     }
18642 #elif defined(_TARGET_X86_)
18643     switch (intrinsicId)
18644     {
18645         case CORINFO_INTRINSIC_Sin:
18646         case CORINFO_INTRINSIC_Cos:
18647         case CORINFO_INTRINSIC_Sqrt:
18648         case CORINFO_INTRINSIC_Abs:
18649         case CORINFO_INTRINSIC_Round:
18650             return true;
18651
18652         default:
18653             return false;
18654     }
18655 #else
18656     // TODO: This portion of logic is not implemented for other architectures.
18657     // The reason for returning true is that on all other architectures the only
18658     // intrinsics enabled are target intrinsics.
18659     return true;
18660 #endif //_TARGET_AMD64_
18661 }
18662
18663 /******************************************************************************/
18664 // Returns true if the given intrinsic will be implemented by calling System.Math
18665 // methods.
18666
18667 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
18668 {
18669     // Currently, if a math intrinsic is not implemented by target-specific
18670     // instructions, it will be implemented by a System.Math call. In the
18671     // future, if we turn to implementing some of them with helper calls,
18672     // this predicate needs to be revisited.
18673     return !IsTargetIntrinsic(intrinsicId);
18674 }
18675
18676 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
18677 {
18678     switch (intrinsicId)
18679     {
18680         case CORINFO_INTRINSIC_Sin:
18681         case CORINFO_INTRINSIC_Sqrt:
18682         case CORINFO_INTRINSIC_Abs:
18683         case CORINFO_INTRINSIC_Cos:
18684         case CORINFO_INTRINSIC_Round:
18685         case CORINFO_INTRINSIC_Cosh:
18686         case CORINFO_INTRINSIC_Sinh:
18687         case CORINFO_INTRINSIC_Tan:
18688         case CORINFO_INTRINSIC_Tanh:
18689         case CORINFO_INTRINSIC_Asin:
18690         case CORINFO_INTRINSIC_Acos:
18691         case CORINFO_INTRINSIC_Atan:
18692         case CORINFO_INTRINSIC_Atan2:
18693         case CORINFO_INTRINSIC_Log10:
18694         case CORINFO_INTRINSIC_Pow:
18695         case CORINFO_INTRINSIC_Exp:
18696         case CORINFO_INTRINSIC_Ceiling:
18697         case CORINFO_INTRINSIC_Floor:
18698             return true;
18699         default:
18700             return false;
18701     }
18702 }
18703
18704 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
18705 {
18706     return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
18707 }
18708
18709 //------------------------------------------------------------------------
18710 // impDevirtualizeCall: Attempt to change a virtual vtable call into a
18711 //   normal call
18712 //
18713 // Arguments:
18714 //     call -- the call node to examine/modify
18715 //     thisObj  -- the value of 'this' for the call
18716 //     method   -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
18717 //     methodAttribs -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
18718 //     contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
18719 //     exactContextHnd -- [OUT] updated context handle iff call devirtualized
18720 //
18721 // Notes:
18722 //     Virtual calls in IL will always "invoke" the base class method.
18723 //
18724 //     This transformation looks for evidence that the type of 'this'
18725 //     in the call is exactly known, is a final class or would invoke
18726 //     a final method, and if that and other safety checks pan out,
18727 //     modifies the call and the call info to create a direct call.
18728 //
18729 //     This transformation is initially done in the importer and not
18730 //     in some subsequent optimization pass because we want it to be
18731 //     upstream of inline candidate identification.
18732 //
18733 //     However, later phases may supply improved type information that
18734 //     can enable further devirtualization. We currently reinvoke this
18735 //     code after inlining, if the return value of the inlined call is
18736 //     the 'this obj' of a subsequent virtual call.
18737 //
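      //     For example (an illustrative sketch with hypothetical types C and D): given
      //         callvirt   instance int32 C::GetValue()
      //     where the type of 'this' is known exactly to be a sealed class D, the
      //     call is rewritten as a direct (CT_USER_FUNC) call to the D method that
      //     the vtable lookup would have selected, and GTF_CALL_NULLCHECK is added
      //     if 'this' is not known to be non-null.
      //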
18738 void Compiler::impDevirtualizeCall(GenTreeCall*            call,
18739                                    GenTreePtr              thisObj,
18740                                    CORINFO_METHOD_HANDLE*  method,
18741                                    unsigned*               methodFlags,
18742                                    CORINFO_CONTEXT_HANDLE* contextHandle,
18743                                    CORINFO_CONTEXT_HANDLE* exactContextHandle)
18744 {
18745     assert(call != nullptr);
18746     assert(method != nullptr);
18747     assert(methodFlags != nullptr);
18748     assert(contextHandle != nullptr);
18749
18750     // This should be a virtual vtable or virtual stub call.
18751     assert(call->IsVirtual());
18752
18753     // Bail if not optimizing
18754     if (opts.MinOpts())
18755     {
18756         return;
18757     }
18758
18759     // Bail if debuggable codegen
18760     if (opts.compDbgCode)
18761     {
18762         return;
18763     }
18764
18765 #if defined(DEBUG)
18766     // Bail if devirt is disabled.
18767     if (JitConfig.JitEnableDevirtualization() == 0)
18768     {
18769         return;
18770     }
18771
18772     const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
18773 #endif // DEBUG
18774
18775     // Fetch information about the virtual method we're calling.
18776     CORINFO_METHOD_HANDLE baseMethod        = *method;
18777     unsigned              baseMethodAttribs = *methodFlags;
18778
18779     if (baseMethodAttribs == 0)
18780     {
18781         // For late devirt we may not have method attributes, so fetch them.
18782         baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
18783     }
18784     else
18785     {
18786 #if defined(DEBUG)
18787         // Validate that callInfo has up to date method flags
18788         const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
18789
18790         // All the base method attributes should agree, save that
18791         // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
18792         // because of concurrent jitting activity.
18793         //
18794         // Note we don't look at this particular flag bit below, and
18795         // later on (if we do try and inline) we will rediscover why
18796         // the method can't be inlined, so there's no danger here in
18797         // seeing this particular flag bit in different states between
18798         // the cached and fresh values.
18799         if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
18800         {
18801             assert(!"mismatched method attributes");
18802         }
18803 #endif // DEBUG
18804     }
18805
18806     // In R2R mode, we might see virtual stub calls to
18807     // non-virtuals. For instance, cases where the non-virtual method
18808     // is in a different assembly but is called via CALLVIRT. For
18809     // version resilience we must allow for the fact that the method
18810     // might become virtual in some update.
18811     //
18812     // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
18813     // regular call+nullcheck upstream, so we won't reach this
18814     // point.
18815     if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
18816     {
18817         assert(call->IsVirtualStub());
18818         assert(opts.IsReadyToRun());
18819         JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
18820         return;
18821     }
18822
18823     // See what we know about the type of 'this' in the call.
18824     bool                 isExact      = false;
18825     bool                 objIsNonNull = false;
18826     CORINFO_CLASS_HANDLE objClass     = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
18827
18828     // Bail if we know nothing.
18829     if (objClass == nullptr)
18830     {
18831         JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
18832         return;
18833     }
18834
18835     // Fetch information about the class that introduced the virtual method.
18836     CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
18837     const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
18838
18839 #if !defined(FEATURE_CORECLR)
18840     // If the base class is not beforefieldinit, then devirtualizing may
18841     // cause us to miss a base class init trigger. The spec says we don't
18842     // need a trigger for ref class callvirts, but desktop seems to
18843     // have one anyway. So defer.
18844     if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
18845     {
18846         JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
18847         return;
18848     }
18849 #endif // FEATURE_CORECLR
18850
18851     // Is the call an interface call?
18852     const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
18853
18854     // If the objClass is sealed (final), then we may be able to devirtualize.
18855     const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
18856     const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
18857
18858 #if defined(DEBUG)
18859     const char* callKind       = isInterface ? "interface" : "virtual";
18860     const char* objClassNote   = "[?]";
18861     const char* objClassName   = "?objClass";
18862     const char* baseClassName  = "?baseClass";
18863     const char* baseMethodName = "?baseMethod";
18864
18865     if (verbose || doPrint)
18866     {
18867         objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
18868         objClassName   = info.compCompHnd->getClassName(objClass);
18869         baseClassName  = info.compCompHnd->getClassName(baseClass);
18870         baseMethodName = eeGetMethodName(baseMethod, nullptr);
18871
18872         if (verbose)
18873         {
18874             printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
18875                    "    class for 'this' is %s%s (attrib %08x)\n"
18876                    "    base method is %s::%s\n",
18877                    callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
18878         }
18879     }
18880 #endif // defined(DEBUG)
18881
18882     // Bail if obj class is an interface.
18883     // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
18884     //   IL_021d:  ldloc.0
18885     //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
18886     if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
18887     {
18888         JITDUMP("--- obj class is interface, sorry\n");
18889         return;
18890     }
18891
18892     if (isInterface)
18893     {
18894         assert(call->IsVirtualStub());
18895         JITDUMP("--- base class is interface\n");
18896     }
18897
18898     // Fetch the method that would be called based on the declared type of 'this'
18899     CORINFO_CONTEXT_HANDLE ownerType     = *contextHandle;
18900     CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
18901
18902     // If we failed to get a handle, we can't devirtualize.  This can
18903     // happen when prejitting, if the devirtualization crosses
18904     // servicing bubble boundaries.
18905     if (derivedMethod == nullptr)
18906     {
18907         JITDUMP("--- no derived method, sorry\n");
18908         return;
18909     }
18910
18911     // Fetch method attributes to see if method is marked final.
18912     const DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
18913     const bool  derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
18914
18915 #if defined(DEBUG)
18916     const char* derivedClassName  = "?derivedClass";
18917     const char* derivedMethodName = "?derivedMethod";
18918
18919     const char* note = "speculative";
18920     if (isExact)
18921     {
18922         note = "exact";
18923     }
18924     else if (objClassIsFinal)
18925     {
18926         note = "final class";
18927     }
18928     else if (derivedMethodIsFinal)
18929     {
18930         note = "final method";
18931     }
18932
18933     if (verbose || doPrint)
18934     {
18935         derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
18936         if (verbose)
18937         {
18938             printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
18939             gtDispTree(call);
18940         }
18941     }
18942 #endif // defined(DEBUG)
18943
18944     if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
18945     {
18946         // Type is not exact, and neither the class nor the method is final.
18947         //
18948         // We could speculatively devirtualize, but there's no
18949         // reason to believe the derived method is the one that
18950         // is likely to be invoked.
18951         //
18952         // If there's currently no further overriding (that is, at
18953         // the time of jitting, objClass has no subclasses that
18954         // override this method), then perhaps we'd be willing to
18955         // make a bet...?
18956         JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
18957         return;
18958     }
18959
18960     // For interface calls we must have an exact type or final class.
18961     if (isInterface && !isExact && !objClassIsFinal)
18962     {
18963         JITDUMP("    Class not final or exact for interface, no devirtualization\n");
18964         return;
18965     }
18966
18967     JITDUMP("    %s; can devirtualize\n", note);
18968
18969     // Make the updates.
18970     call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
18971     call->gtFlags &= ~GTF_CALL_VIRT_STUB;
18972     call->gtCallMethHnd = derivedMethod;
18973     call->gtCallType    = CT_USER_FUNC;
18974
18975     // Virtual calls include an implicit null check, which we may
18976     // now need to make explicit.
18977     if (!objIsNonNull)
18978     {
18979         call->gtFlags |= GTF_CALL_NULLCHECK;
18980     }
18981
18982     // Clear the inline candidate info (may be non-null since
18983     // it's a union field used for other things by virtual
18984     // stubs)
18985     call->gtInlineCandidateInfo = nullptr;
18986
18987     // Fetch the class that introduced the derived method.
18988     //
18989     // Note this may not equal objClass, if there is a
18990     // final method that objClass inherits.
18991     CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
18992
18993 #ifdef FEATURE_READYTORUN_COMPILER
18994     if (opts.IsReadyToRun())
18995     {
18996         // For R2R, getCallInfo triggers bookkeeping on the zap
18997         // side so we need to call it here.
18998         //
18999         // First, cons up a suitable resolved token.
19000         CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
19001
19002         derivedResolvedToken.tokenScope   = info.compScopeHnd;
19003         derivedResolvedToken.tokenContext = *contextHandle;
19004         derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
19005         derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
19006         derivedResolvedToken.hClass       = derivedClass;
19007         derivedResolvedToken.hMethod      = derivedMethod;
19008
19009         // Look up the new call info.
19010         CORINFO_CALL_INFO derivedCallInfo;
19011         eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
19012
19013         // Update the call.
19014         call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
19015         call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
19016         call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
19017     }
19018 #endif // FEATURE_READYTORUN_COMPILER
19019
19020     // Need to update call info too. This is fragile
19021     // but hopefully the derived method conforms to
19022     // the base in most other ways.
19023     *method        = derivedMethod;
19024     *methodFlags   = derivedMethodAttribs;
19025     *contextHandle = MAKE_METHODCONTEXT(derivedMethod);
19026
19027     // Update context handle.
19028     if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
19029     {
19030         *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
19031     }
19032
19033 #if defined(DEBUG)
19034     if (verbose)
19035     {
19036         printf("... after devirt...\n");
19037         gtDispTree(call);
19038     }
19039
19040     if (doPrint)
19041     {
19042         printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
19043                baseMethodName, derivedClassName, derivedMethodName, note);
19044     }
19045 #endif // defined(DEBUG)
19046 }
19047
19048 //------------------------------------------------------------------------
19049 // impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it.
19050 //
19051 // Arguments:
19052 //    token - init value for the allocated token.
19053 //
19054 // Return Value:
19055 //    pointer to the token copy in jit-allocated memory.
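      //
      // Notes:
      //    The token is taken by value and copied into memory obtained from
      //    compGetMem, so the copy's lifetime is that of the jit allocator rather
      //    than the caller's stack frame.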
19056 CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
19057 {
19058     CORINFO_RESOLVED_TOKEN* memory = (CORINFO_RESOLVED_TOKEN*)compGetMem(sizeof(token));
19059     *memory                        = token;
19060     return memory;
19061 }