1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
12 XX Postconditions (for the nodes currently handled): XX
13 XX - All operands requiring a register are explicit in the graph XX
15 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
16 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
26 #if !defined(_TARGET_64BIT_)
27 #include "decomposelongs.h"
28 #endif // !defined(_TARGET_64BIT_)
30 //------------------------------------------------------------------------
31 // MakeSrcContained: Make "childNode" a contained node
34 // parentNode - is a non-leaf node that can contain its 'childNode'
35 // childNode - is an op that will now be contained by its parent.
38 If 'childNode' has any existing sources, they will now be sources for the parent.
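//
//    For illustration (a hypothetical xarch example): given ADD(lclVar, CNS_INT 8), calling
//    MakeSrcContained(add, cnsInt) marks the constant as contained so it is not given its own
//    register; codegen can then fold it into the parent's instruction (e.g. "add reg, 8")
//    instead of materializing the immediate in a register first.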
40 void Lowering::MakeSrcContained(GenTree* parentNode, GenTree* childNode)
42 assert(!parentNode->OperIsLeaf());
43 assert(childNode->canBeContained());
44 childNode->SetContained();
45 assert(childNode->isContained());
48 //------------------------------------------------------------------------
49 // CheckImmedAndMakeContained: Checks if the 'childNode' is a containable immediate
50 // and, if so, makes it contained.
53 // parentNode - is any non-leaf node
54 childNode - is a child op of 'parentNode'
57 // true if we are able to make childNode a contained immediate
59 bool Lowering::CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode)
61 assert(!parentNode->OperIsLeaf());
62 // If childNode is a containable immediate
63 if (IsContainableImmed(parentNode, childNode))
65 // then make it contained within the parentNode
66 MakeSrcContained(parentNode, childNode);
72 //------------------------------------------------------------------------
73 // IsSafeToContainMem: Checks for conflicts between childNode and parentNode,
74 // and returns 'true' iff memory operand childNode can be contained in parentNode.
77 // parentNode - any non-leaf node
78 // childNode - some node that is an input to `parentNode`
81 // true if it is safe to make childNode a contained memory operand.
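//
// For illustration (a hypothetical case): given ADD(t1, IND(addr)) with a STOREIND to the same
// address sequenced between the IND and the ADD, containing the IND would effectively move its
// evaluation down to the ADD and read the updated memory. The side-effect set collected from
// childNode is therefore checked for interference against every node between childNode and
// parentNode, and containment is rejected on any conflict.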
83 bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode)
85 m_scratchSideEffects.Clear();
86 m_scratchSideEffects.AddNode(comp, childNode);
88 for (GenTree* node = childNode->gtNext; node != parentNode; node = node->gtNext)
90 const bool strict = true;
91 if (m_scratchSideEffects.InterferesWith(comp, node, strict))
100 //------------------------------------------------------------------------
102 // This is the main entry point for Lowering.
103 GenTree* Lowering::LowerNode(GenTree* node)
105 assert(node != nullptr);
106 switch (node->gtOper)
109 TryCreateAddrMode(LIR::Use(BlockRange(), &node->gtOp.gtOp1, node), true);
110 ContainCheckIndir(node->AsIndir());
114 TryCreateAddrMode(LIR::Use(BlockRange(), &node->gtOp.gtOp1, node), true);
115 if (!comp->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(node))
117 LowerStoreIndir(node->AsIndir());
123 GenTree* afterTransform = LowerAdd(node);
124 if (afterTransform != nullptr)
126 return afterTransform;
131 #if !defined(_TARGET_64BIT_)
141 ContainCheckBinary(node->AsOp());
146 #if defined(_TARGET_X86_)
149 ContainCheckMul(node->AsOp());
154 if (!LowerUnsignedDivOrMod(node->AsOp()))
156 ContainCheckDivOrMod(node->AsOp());
162 return LowerSignedDivOrMod(node);
165 return LowerSwitch(node);
180 return LowerCompare(node);
183 return LowerJTrue(node->AsOp());
186 LowerJmpMethod(node);
194 ContainCheckReturnTrap(node->AsOp());
201 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
202 case GT_ARR_BOUNDS_CHECK:
205 #endif // FEATURE_SIMD
206 #ifdef FEATURE_HW_INTRINSICS
207 case GT_HW_INTRINSIC_CHK:
208 #endif // FEATURE_HW_INTRINSICS
209 ContainCheckBoundsChk(node->AsBoundsChk());
211 #endif // _TARGET_XARCH_
213 return LowerArrElem(node);
216 ContainCheckArrOffset(node->AsArrOffs());
224 #ifndef _TARGET_64BIT_
227 ContainCheckShiftRotate(node->AsOp());
229 #endif // !_TARGET_64BIT_
234 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
235 LowerShift(node->AsOp());
237 ContainCheckShiftRotate(node->AsOp());
243 case GT_STORE_DYN_BLK:
245 GenTreeBlk* blkNode = node->AsBlk();
246 TryCreateAddrMode(LIR::Use(BlockRange(), &blkNode->Addr(), blkNode), false);
247 LowerBlockStore(blkNode);
252 ContainCheckLclHeap(node->AsOp());
255 #ifdef _TARGET_XARCH_
257 ContainCheckIntrinsic(node->AsOp());
259 #endif // _TARGET_XARCH_
263 LowerSIMD(node->AsSIMD());
265 #endif // FEATURE_SIMD
267 #ifdef FEATURE_HW_INTRINSICS
269 LowerHWIntrinsic(node->AsHWIntrinsic());
271 #endif // FEATURE_HW_INTRINSICS
275 // We should only encounter this for lclVars that are lvDoNotEnregister.
276 verifyLclFldDoNotEnregister(node->AsLclVarCommon()->gtLclNum);
281 WidenSIMD12IfNecessary(node->AsLclVarCommon());
284 case GT_STORE_LCL_VAR:
285 WidenSIMD12IfNecessary(node->AsLclVarCommon());
288 case GT_STORE_LCL_FLD:
290 #if defined(_TARGET_AMD64_) && defined(FEATURE_SIMD)
291 GenTreeLclVarCommon* const store = node->AsLclVarCommon();
292 if ((store->TypeGet() == TYP_SIMD8) != (store->gtOp1->TypeGet() == TYP_SIMD8))
294 GenTreeUnOp* bitcast =
295 new (comp, GT_BITCAST) GenTreeOp(GT_BITCAST, store->TypeGet(), store->gtOp1, nullptr);
296 store->gtOp1 = bitcast;
297 BlockRange().InsertBefore(store, bitcast);
299 #endif // _TARGET_AMD64_
300 // TODO-1stClassStructs: Once we remove the requirement that all struct stores
301 // are block stores (GT_STORE_BLK or GT_STORE_OBJ), here is where we would put the local
302 // store under a block store if codegen will require it.
303 if ((node->TypeGet() == TYP_STRUCT) && (node->gtGetOp1()->OperGet() != GT_PHI))
305 #if FEATURE_MULTIREG_RET
306 GenTree* src = node->gtGetOp1();
307 assert((src->OperGet() == GT_CALL) && src->AsCall()->HasMultiRegRetVal());
308 #else // !FEATURE_MULTIREG_RET
309 assert(!"Unexpected struct local store in Lowering");
310 #endif // !FEATURE_MULTIREG_RET
312 LowerStoreLoc(node->AsLclVarCommon());
316 #if defined(_TARGET_ARM64_)
318 CheckImmedAndMakeContained(node, node->AsCmpXchg()->gtOpComparand);
322 CheckImmedAndMakeContained(node, node->gtOp.gtOp2);
324 #elif defined(_TARGET_XARCH_)
326 if (node->IsUnusedValue())
328 node->ClearUnusedValue();
329 // Make sure the types are identical, since the node type is changed to VOID
330 // CodeGen relies on op2's type to determine the instruction size.
331 // Note that the node type cannot be a small int but the data operand can.
332 assert(genActualType(node->gtGetOp2()->TypeGet()) == node->TypeGet());
333 node->SetOper(GT_LOCKADD);
334 node->gtType = TYP_VOID;
335 CheckImmedAndMakeContained(node, node->gtGetOp2());
340 #ifndef _TARGET_ARMARCH_
341 // TODO-ARMARCH-CQ: We should contain this as long as the offset fits.
343 if (node->AsObj()->Addr()->OperIsLocalAddr())
345 node->AsObj()->Addr()->SetContained();
348 #endif // !_TARGET_ARMARCH_
357 /** -- Switch Lowering --
358 * The main idea of switch lowering is to keep the register requirements of this node transparent to
359 * LSRA downstream. The switch instruction is inherently a control statement that the JIT represents
360 * as a simple tree node, yet at the time we actually generate code for it we end up
361 * generating instructions that modify the flow of execution, which imposes complicated
362 * register requirements and lifetimes.
364 * So, for the purposes of LSRA, we want a more detailed specification of what a switch node actually
365 * means and, more importantly, which registers we need and when, for each instruction we want to issue,
366 * so that they can be allocated correctly downstream.
368 * For this purpose, this procedure performs switch lowering in two different ways:
370 * a) Represent the switch statement as a zero-indexed jump table construct. This means that for every
371 * destination of the switch we will store this destination in an array of addresses; the code generator
372 * will emit a data section where this array will live and will emit code that, based on the switch index,
373 * indirects and jumps to the destination specified in the jump table.
375 * For this transformation we introduce a new GT node called GT_SWITCH_TABLE that is a specialization of the switch
376 * node for jump table based switches.
377 * The overall structure of a GT_SWITCH_TABLE is:
380 * |_________ localVar (a temporary local that holds the switch index)
381 * |_________ jumpTable (this is a special node that holds the address of the jump table array)
383 * Now, the way we morph a GT_SWITCH node into this lowered switch table node form is the following:
385 * Input: GT_SWITCH (inside a basic block whose Branch Type is BBJ_SWITCH)
386 * |_____ expr (an arbitrarily complex GT_NODE that represents the switch index)
388 * This gets transformed into the following statements inside a BBJ_COND basic block (the target would be
389 * the default case of the switch in case the conditional is evaluated to true).
391 * ----- original block, transformed
392 * GT_STORE_LCL_VAR tempLocal (a new temporary local variable used to store the switch index)
393 * |_____ expr (the index expression)
398 * |___ Int_Constant (This constant is the index of the default case
399 * that happens to be the highest index in the jump table).
400 * |___ tempLocal (The local variable where we stored the index expression).
402 * ----- new basic block
405 * |_____ jumpTable (a new jump table node that now LSRA can allocate registers for explicitly
406 * and LinearCodeGen will be responsible for generating downstream).
408 * This way there are no implicit temporaries.
410 * b) For small-sized switches, we will actually morph them into a series of conditionals of the form
411 * if (case falls into the default){ goto jumpTable[size]; // last entry in the jump table is the default case }
412 * (For the default case conditional, we'll be constructing the exact same code as the jump table case one).
413 * else if (case == firstCase){ goto jumpTable[1]; }
414 * else if (case == secondCase) { goto jumptable[2]; } and so on.
416 * This transformation is of course made in JIT-IR, not downstream at the CodeGen level, so we no longer
417 * require internal temporaries to maintain the index we're evaluating, and we reuse existing code from
418 * LinearCodeGen to implement this instead of implementing all the control flow constructs using InstrDscs and
419 * InstrGroups downstream.
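 *
 * For illustration only (a hypothetical example, not taken from any particular dump): a small
 *     switch (x) { case 0: ...; case 1: ...; default: ...; }
 * expanded along the compare/branch path (b) becomes roughly:
 *     tempLocal = x                                 (store of the switch index)
 *     if ((unsigned)tempLocal > 1) goto DEFAULT     (shared default-case conditional, BBJ_COND)
 *     if (tempLocal == 0) goto CASE0                (one BBJ_COND block per remaining case)
 *     goto CASE1                                    (last case needs no compare, BBJ_ALWAYS)
 * whereas the jump table path (a) would instead end in GT_SWITCH_TABLE(tempLocal, jumpTable).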
422 GenTree* Lowering::LowerSwitch(GenTree* node)
426 BasicBlock** jumpTab;
428 assert(node->gtOper == GT_SWITCH);
430 // The first step is to build the default case conditional construct that is
431 // shared between both kinds of expansion of the switch node.
433 // To avoid confusion, we'll alias m_block to originalSwitchBB
434 // that represents the node we're morphing.
435 BasicBlock* originalSwitchBB = m_block;
436 LIR::Range& switchBBRange = LIR::AsRange(originalSwitchBB);
438 // jumpCnt is the number of elements in the jump table array.
439 // jumpTab is the actual pointer to the jump table array.
440 // targetCnt is the number of unique targets in the jump table array.
441 jumpCnt = originalSwitchBB->bbJumpSwt->bbsCount;
442 jumpTab = originalSwitchBB->bbJumpSwt->bbsDstTab;
443 targetCnt = originalSwitchBB->NumSucc(comp);
445 // GT_SWITCH must be a top-level node with no use.
449 assert(!switchBBRange.TryGetUse(node, &use));
453 JITDUMP("Lowering switch " FMT_BB ", %d cases\n", originalSwitchBB->bbNum, jumpCnt);
455 // Handle a degenerate case: if the switch has only a default case, just convert it
456 // to an unconditional branch. This should only happen in minopts or with debuggable code.
460 JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum);
461 noway_assert(comp->opts.OptimizationDisabled());
462 if (originalSwitchBB->bbNext == jumpTab[0])
464 originalSwitchBB->bbJumpKind = BBJ_NONE;
465 originalSwitchBB->bbJumpDest = nullptr;
469 originalSwitchBB->bbJumpKind = BBJ_ALWAYS;
470 originalSwitchBB->bbJumpDest = jumpTab[0];
472 // Remove extra predecessor links if there was more than one case.
473 for (unsigned i = 1; i < jumpCnt; ++i)
475 (void)comp->fgRemoveRefPred(jumpTab[i], originalSwitchBB);
478 // We have to get rid of the GT_SWITCH node but a child might have side effects so just assign
479 // the result of the child subtree to a temp.
480 GenTree* rhs = node->gtOp.gtOp1;
482 unsigned lclNum = comp->lvaGrabTemp(true DEBUGARG("Lowering is creating a new local variable"));
483 comp->lvaTable[lclNum].lvType = rhs->TypeGet();
485 GenTreeLclVar* store = new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, rhs->TypeGet(), lclNum);
487 store->gtFlags = (rhs->gtFlags & GTF_COMMON_MASK);
488 store->gtFlags |= GTF_VAR_DEF;
490 switchBBRange.InsertAfter(node, store);
491 switchBBRange.Remove(node);
496 noway_assert(jumpCnt >= 2);
498 // Spill the argument to the switch node into a local so that it can be used later.
499 unsigned blockWeight = originalSwitchBB->getBBWeight(comp);
501 LIR::Use use(switchBBRange, &(node->gtOp.gtOp1), node);
502 ReplaceWithLclVar(use);
504 // GT_SWITCH(indexExpression) is now two statements:
505 // 1. a statement containing 'asg' (for temp = indexExpression)
506 // 2. and a statement with GT_SWITCH(temp)
508 assert(node->gtOper == GT_SWITCH);
509 GenTree* temp = node->gtOp.gtOp1;
510 assert(temp->gtOper == GT_LCL_VAR);
511 unsigned tempLclNum = temp->gtLclVarCommon.gtLclNum;
512 LclVarDsc* tempVarDsc = comp->lvaTable + tempLclNum;
513 var_types tempLclType = temp->TypeGet();
515 BasicBlock* defaultBB = jumpTab[jumpCnt - 1];
516 BasicBlock* followingBB = originalSwitchBB->bbNext;
518 /* Is the number of cases right for a test and jump switch? */
519 const bool fFirstCaseFollows = (followingBB == jumpTab[0]);
520 const bool fDefaultFollows = (followingBB == defaultBB);
522 unsigned minSwitchTabJumpCnt = 2; // table is better than just 2 cmp/jcc
524 // This means really just a single cmp/jcc (aka a simple if/else)
525 if (fFirstCaseFollows || fDefaultFollows)
527 minSwitchTabJumpCnt++;
530 #if defined(_TARGET_ARM_)
531 // On ARM for small switch tables we will
532 // generate a sequence of compare and branch instructions
533 // because the code to load the base of the switch
534 // table is huge and hideous due to the relocation... :(
535 minSwitchTabJumpCnt += 2;
536 #endif // _TARGET_ARM_
538 // Once we have the temporary variable, we construct the conditional branch for
539 // the default case. As stated above, this conditional is being shared between
540 // both GT_SWITCH lowering code paths.
541 // This condition is of the form: if (temp > jumpTableLength - 2){ goto jumpTable[jumpTableLength - 1]; }
542 GenTree* gtDefaultCaseCond = comp->gtNewOperNode(GT_GT, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
543 comp->gtNewIconNode(jumpCnt - 2, genActualType(tempLclType)));
545 // Make sure we perform an unsigned comparison, just in case the switch index in 'temp'
546 // is now less than zero (that would also hit the default case).
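// For example (hypothetical values): with jumpCnt == 5 the condition is "(unsigned)temp > 3",
// so temp == -1 is compared as 0xFFFFFFFF and correctly routed to the default case.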
547 gtDefaultCaseCond->gtFlags |= GTF_UNSIGNED;
549 GenTree* gtDefaultCaseJump = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtDefaultCaseCond);
550 gtDefaultCaseJump->gtFlags = node->gtFlags;
552 LIR::Range condRange = LIR::SeqTree(comp, gtDefaultCaseJump);
553 switchBBRange.InsertAtEnd(std::move(condRange));
555 BasicBlock* afterDefaultCondBlock = comp->fgSplitBlockAfterNode(originalSwitchBB, condRange.LastNode());
557 // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor.
558 // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock
559 // representing the fall-through flow from originalSwitchBB.
560 assert(originalSwitchBB->bbJumpKind == BBJ_NONE);
561 assert(originalSwitchBB->bbNext == afterDefaultCondBlock);
562 assert(afterDefaultCondBlock->bbJumpKind == BBJ_SWITCH);
563 assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault);
564 assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet.
566 // The GT_SWITCH code is still in originalSwitchBB (it will be removed later).
568 // Turn originalSwitchBB into a BBJ_COND.
569 originalSwitchBB->bbJumpKind = BBJ_COND;
570 originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1];
572 // Fix the pred for the default case: the default block target still has originalSwitchBB
573 // as a predecessor, but the fgSplitBlockAfterNode() call moved all predecessors to point
574 // to afterDefaultCondBlock.
575 flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[jumpCnt - 1], afterDefaultCondBlock);
576 comp->fgAddRefPred(jumpTab[jumpCnt - 1], originalSwitchBB, oldEdge);
578 bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt;
580 #if defined(_TARGET_UNIX_) && defined(_TARGET_ARM_)
581 // Force using an inlined jump sequence instead of switch table generation.
582 // The switch jump table is generated with incorrect values in the CoreRT case,
583 // so any large switch will crash after loading such a value into the PC.
584 // I think this is due to the fact that we use absolute addressing
585 // instead of relative. But CoreRT, as a rule, uses relative
586 // addressing when we generate an executable.
587 // See also https://github.com/dotnet/coreclr/issues/13194
588 // Also https://github.com/dotnet/coreclr/pull/13197
589 useJumpSequence = useJumpSequence || comp->IsTargetAbi(CORINFO_CORERT_ABI);
590 #endif // defined(_TARGET_UNIX_) && defined(_TARGET_ARM_)
592 // If we originally had 2 unique successors, check to see whether there is a unique
593 // non-default case, in which case we can eliminate the switch altogether.
594 // Note that the single unique successor case is handled above.
595 BasicBlock* uniqueSucc = nullptr;
598 uniqueSucc = jumpTab[0];
599 noway_assert(jumpCnt >= 2);
600 for (unsigned i = 1; i < jumpCnt - 1; i++)
602 if (jumpTab[i] != uniqueSucc)
604 uniqueSucc = nullptr;
609 if (uniqueSucc != nullptr)
611 // If the unique successor immediately follows this block, we have nothing to do -
612 // it will simply fall-through after we remove the switch, below.
613 // Otherwise, make this a BBJ_ALWAYS.
614 // Now, fixup the predecessor links to uniqueSucc. In the original jumpTab:
615 // jumpTab[jumpCnt - 1] was the default target, which we handled above,
616 // jumpTab[0] is the first target, and we'll leave that predecessor link.
617 // Remove any additional predecessor links to uniqueSucc.
618 for (unsigned i = 1; i < jumpCnt - 1; ++i)
620 assert(jumpTab[i] == uniqueSucc);
621 (void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock);
623 if (afterDefaultCondBlock->bbNext == uniqueSucc)
625 afterDefaultCondBlock->bbJumpKind = BBJ_NONE;
626 afterDefaultCondBlock->bbJumpDest = nullptr;
630 afterDefaultCondBlock->bbJumpKind = BBJ_ALWAYS;
631 afterDefaultCondBlock->bbJumpDest = uniqueSucc;
634 // If the number of possible destinations is small enough, we proceed to expand the switch
635 // into a series of conditional branches; otherwise we follow the jump table based switch transformation.
637 else if (useJumpSequence || comp->compStressCompile(Compiler::STRESS_SWITCH_CMP_BR_EXPANSION, 50))
639 // Lower the switch into a series of compare and branch IR trees.
641 // In this case we will morph the node in the following way:
642 // 1. Generate a JTRUE statement to evaluate the default case. (This happens above.)
643 // 2. Start splitting the switch basic block into subsequent basic blocks, each of which will contain
644 // a statement that is responsible for performing a comparison of the table index and conditional
647 JITDUMP("Lowering switch " FMT_BB ": using compare/branch expansion\n", originalSwitchBB->bbNum);
649 // We'll use 'afterDefaultCondBlock' for the first conditional. After that, we'll add new
650 // blocks. If we end up not needing it at all (say, if all the non-default cases just fall through), we'll delete it.
652 bool fUsedAfterDefaultCondBlock = false;
653 BasicBlock* currentBlock = afterDefaultCondBlock;
654 LIR::Range* currentBBRange = &LIR::AsRange(currentBlock);
656 // Walk through entries 0 to jumpCnt - 2. If a case target follows, ignore it and let it fall through.
657 // If no case target follows, the last one doesn't need to be a compare/branch: it can be an
658 // unconditional branch.
659 bool fAnyTargetFollows = false;
660 for (unsigned i = 0; i < jumpCnt - 1; ++i)
662 assert(currentBlock != nullptr);
664 // Remove the switch from the predecessor list of this case target's block.
665 // We'll add the proper new predecessor edge later.
666 flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[i], afterDefaultCondBlock);
668 if (jumpTab[i] == followingBB)
670 // This case label follows the switch; let it fall through.
671 fAnyTargetFollows = true;
675 // We need a block to put in the new compare and/or branch.
676 // If we haven't used the afterDefaultCondBlock yet, then use that.
677 if (fUsedAfterDefaultCondBlock)
679 BasicBlock* newBlock = comp->fgNewBBafter(BBJ_NONE, currentBlock, true);
680 comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor.
681 currentBlock = newBlock;
682 currentBBRange = &LIR::AsRange(currentBlock);
686 assert(currentBlock == afterDefaultCondBlock);
687 fUsedAfterDefaultCondBlock = true;
690 // We're going to have a branch, either a conditional or unconditional,
691 // to the target. Set the target.
692 currentBlock->bbJumpDest = jumpTab[i];
694 // Wire up the predecessor list for the "branch" case.
695 comp->fgAddRefPred(jumpTab[i], currentBlock, oldEdge);
697 if (!fAnyTargetFollows && (i == jumpCnt - 2))
699 // We're processing the last one, and there is no fall through from any case
700 // to the following block, so we can use an unconditional branch to the final
701 // case: there is no need to compare against the case index, since it's
702 // guaranteed to be taken (since the default case was handled first, above).
704 currentBlock->bbJumpKind = BBJ_ALWAYS;
708 // Otherwise, it's a conditional branch. Set the branch kind, then add the
709 // condition statement.
710 currentBlock->bbJumpKind = BBJ_COND;
712 // Now, build the conditional statement for the current case that is being evaluated:
//     GT_JTRUE
//       |__ GT_EQ
717 //          |____ (switchIndex) (The temp variable)
718 //          |____ (ICon) (The actual case constant)
719 GenTree* gtCaseCond = comp->gtNewOperNode(GT_EQ, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
720 comp->gtNewIconNode(i, tempLclType));
721 GenTree* gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond);
722 LIR::Range caseRange = LIR::SeqTree(comp, gtCaseBranch);
723 currentBBRange->InsertAtEnd(std::move(caseRange));
727 if (fAnyTargetFollows)
729 // There is a fall-through to the following block. In the loop
730 // above, we deleted all the predecessor edges from the switch.
731 // In this case, we need to add one back.
732 comp->fgAddRefPred(currentBlock->bbNext, currentBlock);
735 if (!fUsedAfterDefaultCondBlock)
737 // All the cases were fall-through! We don't need this block.
738 // Convert it from BBJ_SWITCH to BBJ_NONE and unset the BBF_DONT_REMOVE flag
739 // so fgRemoveBlock() doesn't complain.
740 JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum);
741 assert(currentBlock == afterDefaultCondBlock);
742 assert(currentBlock->bbJumpKind == BBJ_SWITCH);
743 currentBlock->bbJumpKind = BBJ_NONE;
744 currentBlock->bbFlags &= ~BBF_DONT_REMOVE;
745 comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block.
750 // At this point the default case has already been handled and we need to generate a jump
751 // table based switch or a bit test based switch at the end of afterDefaultCondBlock. Both
752 // switch variants need the switch value so create the necessary LclVar node here.
753 GenTree* switchValue = comp->gtNewLclvNode(tempLclNum, tempLclType);
754 LIR::Range& switchBlockRange = LIR::AsRange(afterDefaultCondBlock);
755 switchBlockRange.InsertAtEnd(switchValue);
757 // Try generating a bit test based switch first,
758 // if that's not possible a jump table based switch will be generated.
759 if (!TryLowerSwitchToBitTest(jumpTab, jumpCnt, targetCnt, afterDefaultCondBlock, switchValue))
761 JITDUMP("Lowering switch " FMT_BB ": using jump table expansion\n", originalSwitchBB->bbNum);
763 #ifdef _TARGET_64BIT_
764 if (tempLclType != TYP_I_IMPL)
766 // SWITCH_TABLE expects the switch value (the index into the jump table) to be TYP_I_IMPL.
767 // Note that the switch value is unsigned so the cast should be unsigned as well.
768 switchValue = comp->gtNewCastNode(TYP_I_IMPL, switchValue, true, TYP_U_IMPL);
769 switchBlockRange.InsertAtEnd(switchValue);
773 GenTree* switchTable = comp->gtNewJmpTableNode();
774 GenTree* switchJump = comp->gtNewOperNode(GT_SWITCH_TABLE, TYP_VOID, switchValue, switchTable);
775 switchBlockRange.InsertAfter(switchValue, switchTable, switchJump);
777 // this block no longer branches to the default block
778 afterDefaultCondBlock->bbJumpSwt->removeDefault();
781 comp->fgInvalidateSwitchDescMapEntry(afterDefaultCondBlock);
784 GenTree* next = node->gtNext;
786 // Get rid of the GT_SWITCH(temp).
787 switchBBRange.Remove(node->gtOp.gtOp1);
788 switchBBRange.Remove(node);
793 //------------------------------------------------------------------------
794 // TryLowerSwitchToBitTest: Attempts to transform a jump table switch into a bit test.
797 // jumpTable - The jump table
798 // jumpCount - The number of blocks in the jump table
799 // targetCount - The number of distinct blocks in the jump table
800 // bbSwitch - The switch block
801 // switchValue - A LclVar node that provides the switch value
804 // true if the switch has been lowered to a bit test
807 // If the jump table contains less than 32 (64 on 64 bit targets) entries and there
808 // are at most 2 distinct jump targets then the jump table can be converted to a word
809 // of bits where a 0 bit corresponds to one jump target and a 1 bit corresponds to the
810 // other jump target. Instead of the indirect jump a BT-JCC sequence is used to jump
811 // to the appropriate target:
812 // mov eax, 245 ; jump table converted to a "bit table"
813 // bt eax, ebx ; ebx is supposed to contain the switch value
818 // Such code is both shorter and faster (in part due to the removal of a memory load)
819 // than the traditional jump table based code. And of course, it also avoids the need
820 // to emit the jump table itself that can reach up to 256 bytes (for 64 entries).
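//
//    A hypothetical worked example (illustration only): for a jump table [B1, B0, B1, B0, DEFAULT],
//    the default entry has already been peeled off by LowerSwitch, bbCase1 is jumpTable[0] (B1), and
//    the bit table becomes 0b0101 (bits 0 and 2 set for B1, bits 1 and 3 clear for B0). The BT/JCC
//    pair then jumps to B1 when the bit selected by the switch value is set, and to B0 otherwise.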
822 bool Lowering::TryLowerSwitchToBitTest(
823 BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue)
825 #ifndef _TARGET_XARCH_
826 // Other architectures may use this if they substitute GT_BT with equivalent code.
829 assert(jumpCount >= 2);
830 assert(targetCount >= 2);
831 assert(bbSwitch->bbJumpKind == BBJ_SWITCH);
832 assert(switchValue->OperIs(GT_LCL_VAR));
835 // Quick check to see if it's worth going through the jump table. The bit test switch supports
836 // up to 2 targets but targetCount also includes the default block so we need to allow 3 targets.
837 // We'll ensure that there are only 2 targets when building the bit table.
846 // The number of bits in the bit table is the same as the number of jump table entries. But the
847 // jump table also includes the default target (at the end) so we need to ignore it. The default
848 // has already been handled by a JTRUE(GT(switchValue, jumpCount - 2)) that LowerSwitch generates.
851 const unsigned bitCount = jumpCount - 1;
853 if (bitCount > (genTypeSize(TYP_I_IMPL) * 8))
859 // Build a bit table where a bit set to 0 corresponds to bbCase0 and a bit set to 1 corresponds to
860 // bbCase1. Simply use the first block in the jump table as bbCase1, later we can invert the bit
861 // table and/or swap the blocks if it's beneficial.
864 BasicBlock* bbCase0 = nullptr;
865 BasicBlock* bbCase1 = jumpTable[0];
868 for (unsigned bitIndex = 1; bitIndex < bitCount; bitIndex++)
870 if (jumpTable[bitIndex] == bbCase1)
872 bitTable |= (size_t(1) << bitIndex);
874 else if (bbCase0 == nullptr)
876 bbCase0 = jumpTable[bitIndex];
878 else if (jumpTable[bitIndex] != bbCase0)
880 // If it's neither bbCase0 nor bbCase1 then it means we have 3 targets. There can't be more
881 // than 3 because of the check at the start of the function.
882 assert(targetCount == 3);
888 // One of the case blocks has to follow the switch block. This requirement could be avoided
889 // by adding a BBJ_ALWAYS block after the switch block but doing that sometimes negatively
890 // impacts register allocation.
893 if ((bbSwitch->bbNext != bbCase0) && (bbSwitch->bbNext != bbCase1))
898 #ifdef _TARGET_64BIT_
900 // See if we can avoid an 8 byte immediate on 64 bit targets. If all upper 32 bits are 1
901 // then inverting the bit table will make them 0 so that the table now fits in 32 bits.
902 // Note that this does not change the number of bits in the bit table, it just takes
903 // advantage of the fact that loading a 32 bit immediate into a 64 bit register zero
904 // extends the immediate value to 64 bit.
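// For example (hypothetical value): bitTable = 0xFFFFFFFF00000005 inverts to 0x00000000FFFFFFFA,
// which fits in 32 bits once bbCase0 and bbCase1 are swapped to preserve the branch meaning.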
907 if (~bitTable <= UINT32_MAX)
909 bitTable = ~bitTable;
910 std::swap(bbCase0, bbCase1);
915 // Rewire the blocks as needed and figure out the condition to use for JCC.
918 GenCondition bbSwitchCondition;
919 bbSwitch->bbJumpKind = BBJ_COND;
921 comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);
922 comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);
924 if (bbSwitch->bbNext == bbCase0)
926 // GenCondition::C generates JC so we jump to bbCase1 when the bit is set
927 bbSwitchCondition = GenCondition::C;
928 bbSwitch->bbJumpDest = bbCase1;
930 comp->fgAddRefPred(bbCase0, bbSwitch);
931 comp->fgAddRefPred(bbCase1, bbSwitch);
935 assert(bbSwitch->bbNext == bbCase1);
937 // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set
938 bbSwitchCondition = GenCondition::NC;
939 bbSwitch->bbJumpDest = bbCase0;
941 comp->fgAddRefPred(bbCase0, bbSwitch);
942 comp->fgAddRefPred(bbCase1, bbSwitch);
946 // Append BT(bitTable, switchValue) and JCC(condition) to the switch block.
949 var_types bitTableType = (bitCount <= (genTypeSize(TYP_INT) * 8)) ? TYP_INT : TYP_LONG;
950 GenTree* bitTableIcon = comp->gtNewIconNode(bitTable, bitTableType);
951 GenTree* bitTest = comp->gtNewOperNode(GT_BT, TYP_VOID, bitTableIcon, switchValue);
952 bitTest->gtFlags |= GTF_SET_FLAGS;
953 GenTreeCC* jcc = new (comp, GT_JCC) GenTreeCC(GT_JCC, bbSwitchCondition);
954 jcc->gtFlags |= GTF_USE_FLAGS;
956 LIR::AsRange(bbSwitch).InsertAfter(switchValue, bitTableIcon, bitTest, jcc);
959 #endif // _TARGET_XARCH_
962 // NOTE: this method deliberately does not update the call arg table. It must only
963 // be used by NewPutArg and LowerArg; these functions are responsible for updating
964 // the call arg table as necessary.
965 void Lowering::ReplaceArgWithPutArgOrBitcast(GenTree** argSlot, GenTree* putArgOrBitcast)
967 assert(argSlot != nullptr);
968 assert(*argSlot != nullptr);
969 assert(putArgOrBitcast->OperIsPutArg() || putArgOrBitcast->OperIs(GT_BITCAST));
971 GenTree* arg = *argSlot;
973 // Replace the argument with the putarg/copy
974 *argSlot = putArgOrBitcast;
975 putArgOrBitcast->gtOp.gtOp1 = arg;
977 // Insert the putarg/copy into the block
978 BlockRange().InsertAfter(arg, putArgOrBitcast);
981 //------------------------------------------------------------------------
982 // NewPutArg: rewrites the tree to put an arg in a register or on the stack.
985 // call - the call whose arg is being rewritten.
986 // arg - the arg being rewritten.
987 // info - the fgArgTabEntry information for the argument.
988 // type - the type of the argument.
991 // The new tree that was created to put the arg in the right place
992 // or the incoming arg if the arg tree was not rewritten.
995 // call, arg, and info must be non-null.
998 // For System V systems with native struct passing (i.e. UNIX_AMD64_ABI defined)
999 // this method allocates a single GT_PUTARG_REG for one-eightbyte structs and a GT_FIELD_LIST of two GT_PUTARG_REGs
1000 // for two-eightbyte structs.
1002 // For STK passed structs the method generates a GT_PUTARG_STK tree. For System V systems with native struct passing
1003 // (i.e. UNIX_AMD64_ABI defined) this method also sets the GC pointers count and the pointers
1004 // layout object, so the codegen of the GT_PUTARG_STK could use this for optimizing copying to the stack by value.
1005 // (using block copy primitives for non GC pointers and a single TARGET_POINTER_SIZE copy with recording GC info.)
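//
// For illustration (a hypothetical UNIX_AMD64_ABI case): a 16-byte struct passed in two registers is
// expected to arrive here as a GT_FIELD_LIST of its two eightbytes; each field gets wrapped in a
// GT_PUTARG_REG carrying its assigned register. An 8-byte struct passed in one register gets a single
// GT_PUTARG_REG, and a struct passed on the stack gets a GT_PUTARG_STK instead.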
1007 GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type)
1009 assert(call != nullptr);
1010 assert(arg != nullptr);
1011 assert(info != nullptr);
1013 GenTree* putArg = nullptr;
1014 bool updateArgTable = true;
1016 bool isOnStack = true;
1017 isOnStack = info->regNum == REG_STK;
1019 #ifdef _TARGET_ARMARCH_
1020 // Mark the argument contained when we pass a struct.
1021 // A GT_FIELD_LIST is always marked contained when it is generated.
1022 if (type == TYP_STRUCT)
1024 arg->SetContained();
1025 if ((arg->OperGet() == GT_OBJ) && (arg->AsObj()->Addr()->OperGet() == GT_LCL_VAR_ADDR))
1027 MakeSrcContained(arg, arg->AsObj()->Addr());
1032 #if FEATURE_ARG_SPLIT
1033 // A struct can be split between register(s) and the stack on ARM
1036 assert(arg->OperGet() == GT_OBJ || arg->OperGet() == GT_FIELD_LIST);
1037 // TODO: Need to check correctness for FastTailCall
1038 if (call->IsFastTailCall())
1041 NYI_ARM("lower: struct argument by fast tail call");
1042 #endif // _TARGET_ARM_
1045 putArg = new (comp, GT_PUTARG_SPLIT)
1046 GenTreePutArgSplit(arg, info->slotNum PUT_STRUCT_ARG_STK_ONLY_ARG(info->numSlots), info->numRegs,
1047 call->IsFastTailCall(), call);
1049 // If the struct argument has been morphed into GT_FIELD_LIST node(s),
1050 // the GC info is known from the type of each GT_FIELD_LIST node,
1051 // so we skip setting GC pointer info.
1053 GenTreePutArgSplit* argSplit = putArg->AsPutArgSplit();
1054 for (unsigned regIndex = 0; regIndex < info->numRegs; regIndex++)
1056 argSplit->SetRegNumByIdx(info->getRegNum(regIndex), regIndex);
1059 if (arg->OperGet() == GT_OBJ)
1061 BYTE* gcLayout = nullptr;
1062 unsigned numRefs = 0;
1063 GenTreeObj* argObj = arg->AsObj();
1065 if (argObj->IsGCInfoInitialized())
1067 gcLayout = argObj->gtGcPtrs;
1068 numRefs = argObj->GetGcPtrCount();
1072 // Set GC Pointer info
1073 gcLayout = new (comp, CMK_Codegen) BYTE[info->numSlots + info->numRegs];
1074 numRefs = comp->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout);
1075 argSplit->setGcPointers(numRefs, gcLayout);
1078 // Set type of registers
1079 for (unsigned index = 0; index < info->numRegs; index++)
1081 var_types regType = comp->getJitGCType(gcLayout[index]);
1082 // Account for the possibility that float fields may be passed in integer registers.
1083 if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(index)))
1085 regType = (regType == TYP_FLOAT) ? TYP_INT : TYP_LONG;
1087 argSplit->m_regType[index] = regType;
1092 GenTreeFieldList* fieldListPtr = arg->AsFieldList();
1093 for (unsigned index = 0; index < info->numRegs; fieldListPtr = fieldListPtr->Rest(), index++)
1095 var_types regType = fieldListPtr->gtGetOp1()->TypeGet();
1096 // Account for the possibility that float fields may be passed in integer registers.
1097 if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(index)))
1099 regType = (regType == TYP_FLOAT) ? TYP_INT : TYP_LONG;
1101 argSplit->m_regType[index] = regType;
1103 // Clear the register assignments on the fieldList nodes, as these are contained.
1104 fieldListPtr->gtRegNum = REG_NA;
1109 #endif // FEATURE_ARG_SPLIT
1113 #if FEATURE_MULTIREG_ARGS
1114 if ((info->numRegs > 1) && (arg->OperGet() == GT_FIELD_LIST))
1116 assert(arg->OperGet() == GT_FIELD_LIST);
1118 assert(arg->AsFieldList()->IsFieldListHead());
1119 unsigned int regIndex = 0;
1120 for (GenTreeFieldList* fieldListPtr = arg->AsFieldList(); fieldListPtr != nullptr;
1121 fieldListPtr = fieldListPtr->Rest())
1123 regNumber argReg = info->getRegNum(regIndex);
1124 GenTree* curOp = fieldListPtr->gtOp.gtOp1;
1125 var_types curTyp = curOp->TypeGet();
1127 // Create a new GT_PUTARG_REG node with op1
1128 GenTree* newOper = comp->gtNewPutArgReg(curTyp, curOp, argReg);
1130 // Splice in the new GT_PUTARG_REG node in the GT_FIELD_LIST
1131 ReplaceArgWithPutArgOrBitcast(&fieldListPtr->gtOp.gtOp1, newOper);
1134 // Initialize all the gtRegNum's since the list won't be traversed in an LIR traversal.
1135 fieldListPtr->gtRegNum = REG_NA;
1138 // Just return arg. The GT_FIELD_LIST is not replaced.
1139 // Nothing more to do.
1143 #endif // FEATURE_MULTIREG_ARGS
1145 putArg = comp->gtNewPutArgReg(type, arg, info->regNum);
1150 // Mark this one as a tail call arg if it is a fast tail call.
1151 // This provides the info to put this argument in the incoming arg area slot
1152 // instead of in the outgoing arg area slot.
1154 // Make sure state is correct. The PUTARG_STK has TYP_VOID, as it doesn't produce
1155 // a result. So the type of its operand must be the correct type to push on the stack.
1156 // For a FIELD_LIST, this will be the type of the field (not the type of the arg),
1157 // but otherwise it is generally the type of the operand.
1158 info->checkIsStruct();
1159 if ((arg->OperGet() != GT_FIELD_LIST))
1161 #if defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
1162 if (type == TYP_SIMD12)
1164 assert(info->numSlots == 3);
1167 #endif // defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
1169 assert(genActualType(arg->TypeGet()) == type);
1174 new (comp, GT_PUTARG_STK) GenTreePutArgStk(GT_PUTARG_STK, TYP_VOID, arg,
1175 info->slotNum PUT_STRUCT_ARG_STK_ONLY_ARG(info->numSlots),
1176 call->IsFastTailCall(), call);
1178 #ifdef FEATURE_PUT_STRUCT_ARG_STK
1179 // If the ArgTabEntry indicates that this arg is a struct,
1180 // get and store the number of slots that are references.
1181 // This is later used by the codegen of the PUT_ARG_STK implementation
1182 // for structs to decide whether, and how many, single eight-byte copies
1183 // need to be done (only for reference slots), so gcinfo is emitted.
1184 // For non-reference slots faster/smaller instructions are used -
1185 // pair copying using XMM registers or rep mov instructions.
1188 // We use GT_OBJ only for non-lclVar, non-SIMD, non-FIELD_LIST struct arguments.
1189 if (arg->OperIsLocal())
1191 // This must have a type with a known size (SIMD or has been morphed to a primitive type).
1192 assert(arg->TypeGet() != TYP_STRUCT);
1194 else if (arg->OperIs(GT_OBJ))
1196 unsigned numRefs = 0;
1197 BYTE* gcLayout = new (comp, CMK_Codegen) BYTE[info->numSlots];
1198 assert(!varTypeIsSIMD(arg));
1199 numRefs = comp->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout);
1200 putArg->AsPutArgStk()->setGcPointers(numRefs, gcLayout);
1203 // On x86 the VM lies about the type of a struct containing a pointer sized
1204 // integer field by returning the type of its field as the type of the struct.
1205 // Such a struct can be passed in a register depending on its position in the
1206 // parameter list. The VM does this unwrapping only one level deep and therefore
1207 // a type like Struct Foo { Struct Bar { int f}} always needs to be
1208 // passed on the stack. Also, the VM doesn't lie about the type of such a struct
1209 // when it is a field of another struct. That is, the VM doesn't lie about
1210 // the type of Foo.Bar.
1212 // We now support the promotion of fields that are of type struct.
1213 // However we only support a limited case where the struct field has a
1214 // single field and that single field must be a scalar type. Say Foo.Bar
1215 // field is getting passed as a parameter to a call. Since it is a TYP_STRUCT,
1216 // as per x86 ABI it should always be passed on stack. Therefore GenTree
1217 // node under a PUTARG_STK could be GT_OBJ(GT_LCL_VAR_ADDR(v1)), where
1218 // local v1 could be a promoted field standing for Foo.Bar. Note that
1219 // the type of v1 will be the type of field of Foo.Bar.f when Foo is
1220 // promoted. That is v1 will be a scalar type. In this case we need to
1221 // pass v1 on stack instead of in a register.
1223 // TODO-PERF: replace GT_OBJ(GT_LCL_VAR_ADDR(v1)) with v1 if v1 is
1224 // a scalar type and the width of GT_OBJ matches the type size of v1.
1225 // Note that this cannot be done till call node arguments are morphed
1226 // because we should not lose the fact that the type of argument is
1227 // a struct so that the arg gets correctly marked to be passed on stack.
1228 GenTree* objOp1 = arg->gtGetOp1();
1229 if (objOp1->OperGet() == GT_LCL_VAR_ADDR)
1231 unsigned lclNum = objOp1->AsLclVarCommon()->GetLclNum();
1232 if (comp->lvaTable[lclNum].lvType != TYP_STRUCT)
1234 comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(Compiler::DNER_VMNeedsStackAddr));
1237 #endif // _TARGET_X86_
1239 else if (!arg->OperIs(GT_FIELD_LIST))
1241 assert(varTypeIsSIMD(arg) || (info->numSlots == 1));
1244 #endif // FEATURE_PUT_STRUCT_ARG_STK
1248 JITDUMP("new node is : ");
1252 if (arg->gtFlags & GTF_LATE_ARG)
1254 putArg->gtFlags |= GTF_LATE_ARG;
1256 else if (updateArgTable)
1258 info->node = putArg;
1263 //------------------------------------------------------------------------
1264 // LowerArg: Lower one argument of a call. This entails splicing a "putarg" node between
1265 // the argument evaluation and the call. This is the point at which the source is
1266 // consumed and the value transitions from control of the register allocator to the calling convention.
1270 // call - The call node
1271 // ppArg - Pointer to the call argument pointer. We might replace the call argument by
1277 void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg)
1279 GenTree* arg = *ppArg;
1281 JITDUMP("lowering arg : ");
1284 // No assignments should remain by Lowering.
1285 assert(!arg->OperIs(GT_ASG));
1286 assert(!arg->OperIsPutArgStk());
1288 // Assignments/stores at this level are not really placing an argument.
1289 // They are setting up temporary locals that will later be placed into
1290 // outgoing regs or stack.
1291 // Note that atomic ops may be stores and still produce a value.
1292 if (!arg->IsValue())
1294 assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() ||
1295 arg->OperIsCopyBlkOp());
1299 fgArgTabEntry* info = comp->gtArgEntryByNode(call, arg);
1300 assert(info->node == arg);
1301 var_types type = arg->TypeGet();
1303 if (varTypeIsSmall(type))
1305 // Normalize 'type', it represents the item that we will be storing in the Outgoing Args
1309 #if defined(FEATURE_SIMD)
1310 #if defined(_TARGET_X86_)
1311 // Non-param TYP_SIMD12 local var nodes are massaged in Lower to TYP_SIMD16 to match their
1312 // allocated size (see lvSize()). However, when passing the variables as arguments, and
1313 // storing the variables to the outgoing argument area on the stack, we must use their
1314 // actual TYP_SIMD12 type, so exactly 12 bytes is allocated and written.
1315 if (type == TYP_SIMD16)
1317 if ((arg->OperGet() == GT_LCL_VAR) || (arg->OperGet() == GT_STORE_LCL_VAR))
1319 unsigned varNum = arg->AsLclVarCommon()->GetLclNum();
1320 LclVarDsc* varDsc = &comp->lvaTable[varNum];
1321 type = varDsc->lvType;
1323 else if (arg->OperGet() == GT_SIMD)
1325 assert((arg->AsSIMD()->gtSIMDSize == 16) || (arg->AsSIMD()->gtSIMDSize == 12));
1327 if (arg->AsSIMD()->gtSIMDSize == 12)
1333 #elif defined(_TARGET_AMD64_)
1334 // TYP_SIMD8 parameters that are passed as longs
1335 if (type == TYP_SIMD8 && genIsValidIntReg(info->regNum))
1337 GenTreeUnOp* bitcast = new (comp, GT_BITCAST) GenTreeOp(GT_BITCAST, TYP_LONG, arg, nullptr);
1338 BlockRange().InsertAfter(arg, bitcast);
1340 info->node = *ppArg = arg = bitcast;
1343 #endif // defined(_TARGET_X86_)
1344 #endif // defined(FEATURE_SIMD)
1346 // If we hit this we are probably double-lowering.
1347 assert(!arg->OperIsPutArg());
1349 #if !defined(_TARGET_64BIT_)
1350 if (varTypeIsLong(type))
1352 bool isReg = (info->regNum != REG_STK);
1355 noway_assert(arg->OperGet() == GT_LONG);
1356 assert(info->numRegs == 2);
1358 GenTree* argLo = arg->gtGetOp1();
1359 GenTree* argHi = arg->gtGetOp2();
1361 GenTreeFieldList* fieldList = new (comp, GT_FIELD_LIST) GenTreeFieldList(argLo, 0, TYP_INT, nullptr);
1362 // Only the first fieldList node (GTF_FIELD_LIST_HEAD) is in the instruction sequence.
1363 (void)new (comp, GT_FIELD_LIST) GenTreeFieldList(argHi, 4, TYP_INT, fieldList);
1364 GenTree* putArg = NewPutArg(call, fieldList, info, type);
1366 BlockRange().InsertBefore(arg, putArg);
1367 BlockRange().Remove(arg);
1369 info->node = fieldList;
1373 assert(arg->OperGet() == GT_LONG);
1374 // For longs, we will replace the GT_LONG with a GT_FIELD_LIST, and put that under a PUTARG_STK.
1375 // Although the hi argument needs to be pushed first, that will be handled by the general case,
1376 // in which the fields will be reversed.
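// For illustration (hypothetical x86 case): GT_LONG(lo, hi) passed on the stack becomes
// PUTARG_STK(FIELD_LIST(lo at offset 0: TYP_INT, hi at offset 4: TYP_INT)); the fields are later
// pushed in reverse order so the hi half lands at the higher address.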
1377 assert(info->numSlots == 2);
1378 GenTree* argLo = arg->gtGetOp1();
1379 GenTree* argHi = arg->gtGetOp2();
1380 GenTreeFieldList* fieldList = new (comp, GT_FIELD_LIST) GenTreeFieldList(argLo, 0, TYP_INT, nullptr);
1381 // Only the first fieldList node (GTF_FIELD_LIST_HEAD) is in the instruction sequence.
1382 (void)new (comp, GT_FIELD_LIST) GenTreeFieldList(argHi, 4, TYP_INT, fieldList);
1383 GenTree* putArg = NewPutArg(call, fieldList, info, type);
1384 putArg->gtRegNum = info->regNum;
1386 // We can't call ReplaceArgWithPutArgOrBitcast here because it presumes that we are keeping the original arg.
1388 BlockRange().InsertBefore(arg, fieldList, putArg);
1389 BlockRange().Remove(arg);
1394 #endif // !defined(_TARGET_64BIT_)
1397 #ifdef _TARGET_ARMARCH_
1398 if (call->IsVarargs() || comp->opts.compUseSoftFP)
1400 // For a vararg call, or on armel, reg args should all be integer.
1401 // Insert copies as needed to move float values to integer registers.
1402 GenTree* newNode = LowerFloatArg(ppArg, info);
1403 if (newNode != nullptr)
1405 type = newNode->TypeGet();
1408 #endif // _TARGET_ARMARCH_
1410 GenTree* putArg = NewPutArg(call, arg, info, type);
1412 // In the case of a register passable struct (in one or two registers),
1413 // NewPutArg returns a new node (GT_PUTARG_REG or a GT_FIELD_LIST with two GT_PUTARG_REGs).
1414 // If an extra node is returned, splice it in the right place in the tree.
1417 ReplaceArgWithPutArgOrBitcast(ppArg, putArg);
1422 #ifdef _TARGET_ARMARCH_
1423 //------------------------------------------------------------------------
1424 // LowerFloatArg: Lower float call arguments on the arm platform.
1427 // arg - The arg node
1428 // info - call argument info
1431 // Returns nullptr if no transformation was done;
1432 // returns arg if the transformation was done in place;
1433 // returns a new tree if the root was changed.
1436 // This must handle scalar float arguments as well as GT_FIELD_LISTs
1437 // with floating point fields.
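//
// For illustration (a hypothetical softfp/vararg case): a TYP_DOUBLE argument assigned to r0 is wrapped
// in a GT_BITCAST to TYP_LONG whose result is pinned to r0 (with gtOtherReg set to r1 by
// LowerFloatArgReg), moving the value from the float register file into the integer register pair
// that the ABI expects.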
1439 GenTree* Lowering::LowerFloatArg(GenTree** pArg, fgArgTabEntry* info)
1441 GenTree* arg = *pArg;
1442 if (info->regNum != REG_STK)
1444 if (arg->OperIsFieldList())
1446 GenTreeFieldList* currListNode = arg->AsFieldList();
1447 regNumber currRegNumber = info->regNum;
1449 // Transform, in place, the fields that are passed in registers.
1450 unsigned fieldRegCount;
1451 for (unsigned i = 0; i < info->numRegs; i += fieldRegCount)
1453 assert(currListNode != nullptr);
1454 GenTree* node = currListNode->Current();
1455 if (varTypeIsFloating(node))
1457 GenTree* intNode = LowerFloatArgReg(node, currRegNumber);
1458 assert(intNode != nullptr);
1460 ReplaceArgWithPutArgOrBitcast(currListNode->pCurrent(), intNode);
1461 currListNode->ChangeType(intNode->TypeGet());
1464 if (node->TypeGet() == TYP_DOUBLE)
1466 currRegNumber = REG_NEXT(REG_NEXT(currRegNumber));
1471 currRegNumber = REG_NEXT(currRegNumber);
1474 currListNode = currListNode->Rest();
1476 // List fields were replaced in place.
1479 else if (varTypeIsFloating(arg))
1481 GenTree* intNode = LowerFloatArgReg(arg, info->regNum);
1482 assert(intNode != nullptr);
1483 ReplaceArgWithPutArgOrBitcast(pArg, intNode);
1490 //------------------------------------------------------------------------
1491 // LowerFloatArgReg: Lower the float call argument node that is passed via register.
1494 // arg - The arg node
1495 // regNum - register number
1498 // Returns a new bitcast node that moves the float to an int register.
1500 GenTree* Lowering::LowerFloatArgReg(GenTree* arg, regNumber regNum)
1502 var_types floatType = arg->TypeGet();
1503 assert(varTypeIsFloating(floatType));
1504 var_types intType = (floatType == TYP_DOUBLE) ? TYP_LONG : TYP_INT;
1505 GenTree* intArg = comp->gtNewBitCastNode(intType, arg);
1506 intArg->gtRegNum = regNum;
1508 if (floatType == TYP_DOUBLE)
1510 regNumber nextReg = REG_NEXT(regNum);
1511 intArg->AsMultiRegOp()->gtOtherReg = nextReg;
1518 // do lowering steps for each arg of a call
1519 void Lowering::LowerArgsForCall(GenTreeCall* call)
1521 JITDUMP("objp:\n======\n");
1522 if (call->gtCallObjp)
1524 LowerArg(call, &call->gtCallObjp);
1527 GenTreeArgList* args = call->gtCallArgs;
1529 JITDUMP("\nargs:\n======\n");
1530 for (; args; args = args->Rest())
1532 LowerArg(call, &args->Current());
1535 JITDUMP("\nlate:\n======\n");
1536 for (args = call->gtCallLateArgs; args; args = args->Rest())
1538 LowerArg(call, &args->Current());
1542 // helper that creates a node representing a relocatable physical address computation
1543 GenTree* Lowering::AddrGen(ssize_t addr)
1545 // this should end up in codegen as : instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg, addr)
1546 GenTree* result = comp->gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
1550 // variant that takes a void*
1551 GenTree* Lowering::AddrGen(void* addr)
1553 return AddrGen((ssize_t)addr);
1556 // do lowering steps for a call
1558 // - adding the placement nodes (either stack or register variety) for arguments
1559 // - lowering the expression that calculates the target address
1560 // - adding nodes for other operations that occur after the call sequence starts and before
1561 // control transfer occurs (profiling and tail call helpers, pinvoke incantations)
1563 void Lowering::LowerCall(GenTree* node)
1565 GenTreeCall* call = node->AsCall();
1567 JITDUMP("lowering call (before):\n");
1568 DISPTREERANGE(BlockRange(), call);
1571 call->ClearOtherRegs();
1572 LowerArgsForCall(call);
1574 // note that everything generated from this point on runs AFTER the outgoing args are placed
1575 GenTree* controlExpr = nullptr;
1577 // for x86, this is where we record ESP for checking later to make sure stack is balanced
1579 // Check for Delegate.Invoke(). If so, we inline it. We get the
1580 // target-object and target-function from the delegate-object, and do
1581 // an indirect call.
1582 if (call->IsDelegateInvoke())
1584 controlExpr = LowerDelegateInvoke(call);
1588 // Virtual and interface calls
1589 switch (call->gtFlags & GTF_CALL_VIRT_KIND_MASK)
1591 case GTF_CALL_VIRT_STUB:
1592 controlExpr = LowerVirtualStubCall(call);
1595 case GTF_CALL_VIRT_VTABLE:
1596 // stub dispatching is off or this is not a virtual call (could be a tailcall)
1597 controlExpr = LowerVirtualVtableCall(call);
1600 case GTF_CALL_NONVIRT:
1601 if (call->IsUnmanaged())
1603 controlExpr = LowerNonvirtPinvokeCall(call);
1605 else if (call->gtCallType == CT_INDIRECT)
1607 controlExpr = LowerIndirectNonvirtCall(call);
1611 controlExpr = LowerDirectCall(call);
1616 noway_assert(!"strange call type");
1621 if (call->IsTailCallViaHelper())
1623 // Either controlExpr or gtCallAddr must contain real call target.
1624 if (controlExpr == nullptr)
1626 assert(call->gtCallType == CT_INDIRECT);
1627 assert(call->gtCallAddr != nullptr);
1628 controlExpr = call->gtCallAddr;
1631 controlExpr = LowerTailCallViaHelper(call, controlExpr);
1634 if (controlExpr != nullptr)
1636 LIR::Range controlExprRange = LIR::SeqTree(comp, controlExpr);
1638 JITDUMP("results of lowering call:\n");
1639 DISPRANGE(controlExprRange);
1641 GenTree* insertionPoint = call;
1642 if (!call->IsTailCallViaHelper())
1644 // The controlExpr should go before the gtCallCookie and the gtCallAddr, if they exist
1646 // TODO-LIR: find out what's really required here, as this is currently a tree order
1648 if (call->gtCallType == CT_INDIRECT)
1650 bool isClosed = false;
1651 if (call->gtCallCookie != nullptr)
1654 GenTree* firstCallAddrNode = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode();
1656 assert(call->gtCallCookie->Precedes(firstCallAddrNode));
1659 insertionPoint = BlockRange().GetTreeRange(call->gtCallCookie, &isClosed).FirstNode();
1662 else if (call->gtCallAddr != nullptr)
1664 insertionPoint = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode();
1670 ContainCheckRange(controlExprRange);
1671 BlockRange().InsertBefore(insertionPoint, std::move(controlExprRange));
1673 call->gtControlExpr = controlExpr;
1675 if (call->IsFastTailCall())
1677 // Lowering a fast tail call can introduce new temps to set up args correctly for the callee.
1678 // This involves patching LCL_VAR and LCL_VAR_ADDR nodes holding caller stack args
1679 // and replacing them with a new temp. The control expr can also contain nodes that need to be patched.
1681 // Therefore lowering a fast tail call must be done after controlExpr is inserted into LIR.
1682 // There is one side effect which is flipping the order of PME and control expression
1683 // since LowerFastTailCall calls InsertPInvokeMethodEpilog.
1684 LowerFastTailCall(call);
1687 if (comp->opts.IsJit64Compat())
1689 CheckVSQuirkStackPaddingNeeded(call);
1692 ContainCheckCallOperands(call);
1693 JITDUMP("lowering call (after):\n");
1694 DISPTREERANGE(BlockRange(), call);
1698 // Though the issue described below gets fixed in the intellitrace dll of VS2015 (a.k.a. Dev14),
1699 // we still need this quirk for desktop so that older versions of VS (e.g. VS2010/2012)
1700 // continue to work.
1701 // This quirk is excluded from other targets that have no back compat burden.
1703 // Quirk for VS debug-launch scenario to work:
1704 // See if this is a PInvoke call with exactly one param that is the address of a struct local.
1705 // In such a case indicate to frame-layout logic to add 16-bytes of padding
1706 // between save-reg area and locals. This is to protect against the buffer
1707 // overrun bug in microsoft.intellitrace.11.0.0.dll!ProfilerInterop.InitInterop().
1709 // A work-around to this bug is to disable IntelliTrace debugging
1710 // (VS->Tools->Options->IntelliTrace->Enable IntelliTrace - uncheck this option).
1711 // The reason why this works on Jit64 is that at the point of AV the call stack is
1713 // GetSystemInfo() Native call
1714 // IL_Stub generated for PInvoke declaration.
1715 // ProfilerInterface::InitInterop()
1716 // ProfilerInterface.Cctor()
1719 // The cctor body has just the call to InitInterop(). VM asm worker is holding
1720 // something in rbx that is used immediately after the Cctor call. Jit64 generated
1721 // InitInterop() method is pushing the registers in the following order
1731 // Due to buffer overrun, rbx doesn't get impacted. Whereas RyuJIT jitted code of
1732 // the same method is pushing regs in the following order
1740 // Therefore as a fix, we add padding between save-reg area and locals to
1741 // make this scenario work against JB.
1743 // Note: If this quirk gets broken due to other JIT optimizations, we should consider
1744 // a more tolerant fix. One such fix is to pad the struct.
1745 void Lowering::CheckVSQuirkStackPaddingNeeded(GenTreeCall* call)
1747 assert(comp->opts.IsJit64Compat());
1749 #ifdef _TARGET_AMD64_
1750 // Confine this to IL stub calls which aren't marked as unmanaged.
1751 if (call->IsPInvoke() && !call->IsUnmanaged())
1753 bool paddingNeeded = false;
1754 GenTree* firstPutArgReg = nullptr;
1755 for (GenTreeArgList* args = call->gtCallLateArgs; args; args = args->Rest())
1757 GenTree* tmp = args->Current();
1758 if (tmp->OperGet() == GT_PUTARG_REG)
1760 if (firstPutArgReg == nullptr)
1762 firstPutArgReg = tmp;
1763 GenTree* op1 = firstPutArgReg->gtOp.gtOp1;
1765 if (op1->OperGet() == GT_LCL_VAR_ADDR)
1767 unsigned lclNum = op1->AsLclVarCommon()->GetLclNum();
1768 // TODO-1stClassStructs: This is here to duplicate previous behavior,
1769 // but is not needed because the scenario being quirked did not involve
1770 // a SIMD or enregisterable struct.
1771 // if(comp->lvaTable[lclNum].TypeGet() == TYP_STRUCT)
1772 if (varTypeIsStruct(comp->lvaTable[lclNum].TypeGet()))
1774 // First arg is addr of a struct local.
1775 paddingNeeded = true;
1779 // Not a struct local.
1780 assert(paddingNeeded == false);
1786 // First arg is not a local var addr.
1787 assert(paddingNeeded == false);
1793 // Has more than one arg.
1794 paddingNeeded = false;
1802 comp->compVSQuirkStackPaddingNeeded = VSQUIRK_STACK_PAD;
1805 #endif // _TARGET_AMD64_
1808 // Inserts profiler hook, GT_PROF_HOOK for a tail call node.
1811 // We need to insert this after all nested calls, but before all the arguments to this call have been set up.
1812 // To do this, we look for the first GT_PUTARG_STK or GT_PUTARG_REG, and insert the hook immediately before
1813 // that. If there are no args, then it should be inserted before the call node.
1816 // * stmtExpr void (top level) (IL 0x000...0x010)
1817 // arg0 SETUP | /--* argPlace ref REG NA $c5
1818 // this in rcx | | /--* argPlace ref REG NA $c1
1819 // | | | /--* call ref System.Globalization.CultureInfo.get_InvariantCulture $c2
1820 // arg1 SETUP | | +--* st.lclVar ref V02 tmp1 REG NA $c2
1821 // | | | /--* lclVar ref V02 tmp1 u : 2 (last use) REG NA $c2
1822 // arg1 in rdx | | +--* putarg_reg ref REG NA
1823 // | | | /--* lclVar ref V00 arg0 u : 2 (last use) REG NA $80
1824 // this in rcx | | +--* putarg_reg ref REG NA
1825 // | | /--* call nullcheck ref System.String.ToLower $c5
1826 // | | { * stmtExpr void (embedded)(IL 0x000... ? ? ? )
1827 // | | { \--* prof_hook void REG NA
1828 // arg0 in rcx | +--* putarg_reg ref REG NA
1829 // control expr | +--* const(h) long 0x7ffe8e910e98 ftn REG NA
1830 // \--* call void System.Runtime.Remoting.Identity.RemoveAppNameOrAppGuidIfNecessary $VN.Void
1832 // In this case, the GT_PUTARG_REG src is a nested call. We need to put the instructions after that call
1833 // (as shown). We assume that of all the GT_PUTARG_*, only the first one can have a nested call.
1836 // Insert the profiler hook immediately before the call. The profiler hook will preserve
1837 // all argument registers (ECX, EDX), but nothing else.
1840 // callNode - tail call node
1841 // insertionPoint - if non-null, insert the profiler hook before this point.
1842 // If null, insert the profiler hook before args are setup
1843 // but after all arg side effects are computed.
1845 void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree* insertionPoint)
1847 assert(call->IsTailCall());
1848 assert(comp->compIsProfilerHookNeeded());
1850 #if defined(_TARGET_X86_)
1852 if (insertionPoint == nullptr)
1854 insertionPoint = call;
1857 #else // !defined(_TARGET_X86_)
1859 if (insertionPoint == nullptr)
1861 GenTree* tmp = nullptr;
1862 for (GenTreeArgList* args = call->gtCallArgs; args; args = args->Rest())
1864 tmp = args->Current();
1865 assert(tmp->OperGet() != GT_PUTARG_REG); // We don't expect to see these in gtCallArgs
1866 if (tmp->OperGet() == GT_PUTARG_STK)
1869 insertionPoint = tmp;
1874 if (insertionPoint == nullptr)
1876 for (GenTreeArgList* args = call->gtCallLateArgs; args; args = args->Rest())
1878 tmp = args->Current();
1879 if ((tmp->OperGet() == GT_PUTARG_REG) || (tmp->OperGet() == GT_PUTARG_STK))
1882 insertionPoint = tmp;
1887 // If there are no args, insert before the call node
1888 if (insertionPoint == nullptr)
1890 insertionPoint = call;
1895 #endif // !defined(_TARGET_X86_)
1897 assert(insertionPoint != nullptr);
1898 GenTree* profHookNode = new (comp, GT_PROF_HOOK) GenTree(GT_PROF_HOOK, TYP_VOID);
1899 BlockRange().InsertBefore(insertionPoint, profHookNode);
1902 // Lower fast tail call implemented as epilog+jmp.
1903 // Also inserts PInvoke method epilog if required.
1904 void Lowering::LowerFastTailCall(GenTreeCall* call)
1906 #if FEATURE_FASTTAILCALL
1907 // Tail call restrictions i.e. conditions under which tail prefix is ignored.
1908 // Most of these checks are already done by importer or fgMorphTailCall().
1909 // This serves as a double sanity check.
1910 assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
1911 assert(!comp->opts.compNeedSecurityCheck); // tail call from methods that need security check
1912 assert(!call->IsUnmanaged()); // tail calls to unmanaged methods
1913 assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
1915 #ifdef _TARGET_AMD64_
1916 assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
1917 #endif // _TARGET_AMD64_
1919 // We expect to see a call that meets the following conditions
1920 assert(call->IsFastTailCall());
1922 // VM cannot use return address hijacking when A() and B() tail call each
1923 // other in mutual recursion. Therefore, this block is reachable through
1924 // a GC-safe point or the whole method is marked as fully interruptible.
1927 // optReachWithoutCall() depends on the fact that loop header blocks
1928 // will have a block number > fgLastBB. These loop headers get added
1929 // after dominator computation and get skipped by OptReachWithoutCall().
1930 // The below condition cannot be asserted in lower because fgSimpleLowering()
1931 // can add a new basic block for range check failure which becomes
1932 // fgLastBB with block number > loop header block number.
1933 // assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
1934 // !comp->optReachWithoutCall(comp->fgFirstBB, comp->compCurBB) || comp->genInterruptible);
1936 // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
1937 // a method returns. This covers the case where the caller method has both PInvokes and tail calls.
1938 if (comp->info.compCallUnmanaged)
1940 InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
1943 // Args for the tail call are set up in the incoming arg area. The gc-ness of args of the
1944 // caller and the callee (which is being tail called) may not match. Therefore, everything
1945 // from arg setup until the epilog needs to be non-interruptible by GC. This is
1946 // achieved by inserting GT_START_NONGC before the very first GT_PUTARG_STK node
1947 // of the call. Note that once a stack arg is set up, it cannot be followed (in
1948 // execution order) by nested calls that set up other args, because the nested
1949 // call could over-write the stack arg that was set up earlier.
1950 GenTree* firstPutArgStk = nullptr;
1951 GenTreeArgList* args;
1952 ArrayStack<GenTree*> putargs(comp->getAllocator(CMK_ArrayStack));
1954 for (args = call->gtCallArgs; args; args = args->Rest())
1956 GenTree* tmp = args->Current();
1957 if (tmp->OperGet() == GT_PUTARG_STK)
1963 for (args = call->gtCallLateArgs; args; args = args->Rest())
1965 GenTree* tmp = args->Current();
1966 if (tmp->OperGet() == GT_PUTARG_STK)
1972 if (!putargs.Empty())
1974 firstPutArgStk = putargs.Bottom();
1977 // If we have a putarg_stk node, also count the number of non-standard args the
1978 // call node has. Note that while determining whether a tail call can be fast
1979 // tail called, we don't count non-standard args (passed in R10 or R11) since they
1980 // don't contribute to outgoing arg space. These non-standard args are not
1981 // accounted in caller's arg count but accounted in callee's arg count after
1982 // fgMorphArgs(). Therefore, exclude callee's non-standard args while mapping
1983 // callee's stack arg num to corresponding caller's stack arg num.
1984 unsigned calleeNonStandardArgCount = call->GetNonStandardAddedArgCount(comp);
1986 // Say Caller(a, b, c, d, e) fast tail calls Callee(e, d, c, b, a)
1987 // i.e. passes its arguments in reverse to Callee. During call site
1988 // setup, after computing argument side effects, stack args are setup
1989 // first and reg args next. In the above example, both Caller's and
1990 // Callee's stack args (e and a respectively) share the same stack slot
1991 // and are alive at the same time. The act of setting up Callee's
1992 // stack arg will over-write the stack arg of Caller and if there are
1993 // further uses of Caller stack arg we have to make sure that we move
1994 // it to a temp before over-writing its slot and use temp in place of
1995 // the corresponding Caller stack arg.
1997 // For the above example, conceptually this is what is done
1999 // Stack slot of e = a
2000 // R9 = b, R8 = c, RDX = d
2003 // The below logic is meant to detect cases like this and introduce
2004 // temps to set up args correctly for Callee.
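// For illustration only (a rough sketch with placeholder names, not exact LIR): if the Caller's
// incoming stack arg V05 is still used after the PUTARG_STK that overwrites its slot, the logic
// below rewrites the tree roughly as
//     tmpN = V05             ; assignment inserted before the first PUTARG_STK
//     PUTARG_STK ...         ; overwrites V05's incoming arg slot
//     ... later uses of V05 become uses of tmpN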
2006 for (int i = 0; i < putargs.Height(); i++)
2008 GenTree* putArgStkNode = putargs.Bottom(i);
2010 assert(putArgStkNode->OperGet() == GT_PUTARG_STK);
2012 // Get the caller arg num corresponding to this callee arg.
2013 // Note that these two args share the same stack slot. Therefore,
2014 // if there are further uses of corresponding caller arg, we need
2015 // to move it to a temp and use the temp in this call tree.
2017 // Note that Caller is guaranteed to have a param corresponding to
2018 // this Callee's arg since fast tail call mechanism counts the
2019 // stack slots required for both Caller and Callee for passing params
2020 // and allow fast tail call only if stack slots required by Caller >= Callee.
2022 fgArgTabEntry* argTabEntry = comp->gtArgEntryByNode(call, putArgStkNode);
2023 assert(argTabEntry);
2024 unsigned callerArgNum = argTabEntry->argNum - calleeNonStandardArgCount;
2025 noway_assert(callerArgNum < comp->info.compArgsCount);
2027 unsigned callerArgLclNum = callerArgNum;
2028 LclVarDsc* callerArgDsc = comp->lvaTable + callerArgLclNum;
2029 if (callerArgDsc->lvPromoted)
2032 callerArgLclNum = callerArgDsc->lvFieldLclStart; // update the callerArgNum to the promoted struct field's lclNum
2033 callerArgDsc = comp->lvaTable + callerArgLclNum;
2035 noway_assert(callerArgDsc->lvIsParam);
2037 // Start searching in execution order list till we encounter call node
2038 unsigned tmpLclNum = BAD_VAR_NUM;
2039 var_types tmpType = TYP_UNDEF;
2040 for (GenTree* treeNode = putArgStkNode->gtNext; treeNode != call; treeNode = treeNode->gtNext)
2042 if (treeNode->OperIsLocal() || treeNode->OperIsLocalAddr())
2044 // This should not be a GT_PHI_ARG.
2045 assert(treeNode->OperGet() != GT_PHI_ARG);
2047 GenTreeLclVarCommon* lcl = treeNode->AsLclVarCommon();
2048 LclVarDsc* lclVar = &comp->lvaTable[lcl->gtLclNum];
2050 // Fast tail calling criteria permits passing of structs of size 1, 2, 4 and 8 as args.
2051 // It is possible that the callerArgLclNum corresponds to such a struct whose stack slot
2052 // is getting over-written by setting up of a stack arg and there are further uses of
2053 // any of its fields if such a struct is type-dependently promoted. In this case too
2054 // we need to introduce a temp.
2055 if ((lcl->gtLclNum == callerArgNum) || (lcl->gtLclNum == callerArgLclNum))
2057 // Create tmp and use it in place of callerArgDsc
2058 if (tmpLclNum == BAD_VAR_NUM)
2060 // Set tmpType first before calling lvaGrabTemp, as that call invalidates callerArgDsc
2061 tmpType = genActualType(callerArgDsc->lvaArgType());
2062 tmpLclNum = comp->lvaGrabTemp(
2063 true DEBUGARG("Fast tail call lowering is creating a new local variable"));
2065 comp->lvaTable[tmpLclNum].lvType = tmpType;
2066 comp->lvaTable[tmpLclNum].lvDoNotEnregister = comp->lvaTable[lcl->gtLclNum].lvDoNotEnregister;
2069 lcl->SetLclNum(tmpLclNum);
2074 // If we have created a temp, insert an embedded assignment stmnt before
2075 // the first putargStkNode i.e.
2076 // tmpLcl = CallerArg
2077 if (tmpLclNum != BAD_VAR_NUM)
2079 assert(tmpType != TYP_UNDEF);
2080 GenTreeLclVar* local = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, tmpType, callerArgLclNum);
2081 GenTree* assignExpr = comp->gtNewTempAssign(tmpLclNum, local);
2082 ContainCheckRange(local, assignExpr);
2083 BlockRange().InsertBefore(firstPutArgStk, LIR::SeqTree(comp, assignExpr));
2087 // Insert GT_START_NONGC node before the first GT_PUTARG_STK node.
2088 // Note that if there are no args to be set up on the stack, there is no need to
2089 // insert a GT_START_NONGC node.
2090 GenTree* startNonGCNode = nullptr;
2091 if (firstPutArgStk != nullptr)
2093 startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID);
2094 BlockRange().InsertBefore(firstPutArgStk, startNonGCNode);
2096 // Gc-interruptability in the following case:
2097 // foo(a, b, c, d, e) { bar(a, b, c, d, e); }
2098 // bar(a, b, c, d, e) { foo(a, b, d, d, e); }
2100 // Since the instruction group starting from the instruction that sets up first
2101 // stack arg to the end of the tail call is marked as non-gc interruptible,
2102 // this will form a non-interruptible tight loop causing gc-starvation. To fix
2103 // this we insert GT_NO_OP as embedded stmt before GT_START_NONGC, if the method
2104 // has a single basic block and is not a GC-safe point. The presence of a single
2105 // nop outside non-gc interruptible region will prevent gc starvation.
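// For illustration only (a sketch of the resulting ordering, not exact LIR): a single-block
// self-recursive fast tail call ends up roughly as
//     GT_NO_OP               ; GC-interruptible point outside the non-gc region
//     GT_START_NONGC
//     PUTARG_STK ...         ; overwrite incoming arg slots
//     CALL                   ; the fast tail call (epilog + jmp)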
2106 if ((comp->fgBBcount == 1) && !(comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT))
2108 assert(comp->fgFirstBB == comp->compCurBB);
2109 GenTree* noOp = new (comp, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
2110 BlockRange().InsertBefore(startNonGCNode, noOp);
2114 // Insert GT_PROF_HOOK node to emit profiler tail call hook. This should be
2115 // inserted before the args are set up but after the side effects of the args are
2116 // computed. That is, GT_PROF_HOOK node needs to be inserted before GT_START_NONGC
2117 // node if one exists.
2118 if (comp->compIsProfilerHookNeeded())
2120 InsertProfTailCallHook(call, startNonGCNode);
2123 #else // !FEATURE_FASTTAILCALL
2125 // The platform chose not to implement the fast tail call mechanism.
2126 // In such a case we should never be reaching this method as
2127 // the expectation is that IsTailCallViaHelper() will always
2128 // be true on such a platform.
2133 //------------------------------------------------------------------------
2134 // LowerTailCallViaHelper: lower a call via the tailcall helper. Morph
2135 // has already inserted tailcall helper special arguments. This function
2136 // inserts actual data for some placeholders.
2138 // For ARM32, AMD64, lower
2139 // tail.call(void* copyRoutine, void* dummyArg, ...)
2140 // as
2141 // Jit_TailCall(void* copyRoutine, void* callTarget, ...)
2142 //
2143 // For x86, lower
2144 // tail.call(<function args>, int numberOfOldStackArgs, int dummyNumberOfNewStackArgs, int flags, void* dummyArg)
2145 // as
2146 // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void*
2147 // callTarget)
2148 // Note that the special arguments are on the stack, whereas the function arguments follow the normal convention.
2150 // Also inserts PInvoke method epilog if required.
2153 // call - The call node
2154 // callTarget - The real call target. This is used to replace the dummyArg during lowering.
2157 // Returns control expression tree for making a call to helper Jit_TailCall.
2159 GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree* callTarget)
2161 // Tail call restrictions i.e. conditions under which tail prefix is ignored.
2162 // Most of these checks are already done by importer or fgMorphTailCall().
2163 // This serves as a double sanity check.
2164 assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
2165 assert(!comp->opts.compNeedSecurityCheck); // tail call from methods that need security check
2166 assert(!call->IsUnmanaged()); // tail calls to unmanaged methods
2167 assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
2169 #ifdef _TARGET_AMD64_
2170 assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
2171 #endif // _TARGET_AMD64_
2173 // We expect to see a call that meets the following conditions
2174 assert(call->IsTailCallViaHelper());
2175 assert(callTarget != nullptr);
2177 // The TailCall helper call never returns to the caller and is not GC interruptible.
2178 // Therefore the block containing the tail call should be a GC safe point to avoid
2179 // GC starvation. It is legal for the block to be unmarked iff the entry block is a
2180 // GC safe point, as the entry block trivially dominates every reachable block.
2181 assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (comp->fgFirstBB->bbFlags & BBF_GC_SAFE_POINT));
2183 // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
2184 // a method returns. This covers the case where the caller method has both PInvokes and tail calls.
2185 if (comp->info.compCallUnmanaged)
2187 InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
2190 // Remove gtCallAddr from execution order if present.
2191 if (call->gtCallType == CT_INDIRECT)
2193 assert(call->gtCallAddr != nullptr);
2196 LIR::ReadOnlyRange callAddrRange = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed);
2199 BlockRange().Remove(std::move(callAddrRange));
2202 // The callTarget tree needs to be sequenced.
2203 LIR::Range callTargetRange = LIR::SeqTree(comp, callTarget);
2205 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
2207 // For ARM32 and AMD64, first argument is CopyRoutine and second argument is a place holder node.
2208 fgArgTabEntry* argEntry;
2211 argEntry = comp->gtArgEntryByArgNum(call, 0);
2212 assert(argEntry != nullptr);
2213 assert(argEntry->node->gtOper == GT_PUTARG_REG);
2214 GenTree* firstArg = argEntry->node->gtOp.gtOp1;
2215 assert(firstArg->gtOper == GT_CNS_INT);
2218 // Replace second arg by callTarget.
2219 argEntry = comp->gtArgEntryByArgNum(call, 1);
2220 assert(argEntry != nullptr);
2221 assert(argEntry->node->gtOper == GT_PUTARG_REG);
2222 GenTree* secondArg = argEntry->node->gtOp.gtOp1;
2224 ContainCheckRange(callTargetRange);
2225 BlockRange().InsertAfter(secondArg, std::move(callTargetRange));
2228 LIR::ReadOnlyRange secondArgRange = BlockRange().GetTreeRange(secondArg, &isClosed);
2231 BlockRange().Remove(std::move(secondArgRange));
2233 argEntry->node->gtOp.gtOp1 = callTarget;
2235 #elif defined(_TARGET_X86_)
2237 // Verify the special args are what we expect, and replace the dummy args with real values.
2238 // We need to figure out the size of the outgoing stack arguments, not including the special args.
2239 // The number of 4-byte words is passed to the helper for the incoming and outgoing argument sizes.
2240 // This number is exactly the next slot number in the call's argument info struct.
2241 unsigned nNewStkArgsWords = call->fgArgInfo->GetNextSlotNum();
2242 assert(nNewStkArgsWords >= 4); // There must be at least the four special stack args.
2243 nNewStkArgsWords -= 4;
2245 unsigned numArgs = call->fgArgInfo->ArgCount();
2247 fgArgTabEntry* argEntry;
2249 // arg 0 == callTarget.
2250 argEntry = comp->gtArgEntryByArgNum(call, numArgs - 1);
2251 assert(argEntry != nullptr);
2252 assert(argEntry->node->gtOper == GT_PUTARG_STK);
2253 GenTree* arg0 = argEntry->node->gtOp.gtOp1;
2255 ContainCheckRange(callTargetRange);
2256 BlockRange().InsertAfter(arg0, std::move(callTargetRange));
2259 LIR::ReadOnlyRange secondArgRange = BlockRange().GetTreeRange(arg0, &isClosed);
2261 BlockRange().Remove(std::move(secondArgRange));
2263 argEntry->node->gtOp.gtOp1 = callTarget;
2266 argEntry = comp->gtArgEntryByArgNum(call, numArgs - 2);
2267 assert(argEntry != nullptr);
2268 assert(argEntry->node->gtOper == GT_PUTARG_STK);
2269 GenTree* arg1 = argEntry->node->gtOp.gtOp1;
2270 assert(arg1->gtOper == GT_CNS_INT);
2272 ssize_t tailCallHelperFlags = 1 | // always restore EDI,ESI,EBX
2273 (call->IsVirtualStub() ? 0x2 : 0x0); // Stub dispatch flag
2274 arg1->gtIntCon.gtIconVal = tailCallHelperFlags;
2276 // arg 2 == numberOfNewStackArgsWords
2277 argEntry = comp->gtArgEntryByArgNum(call, numArgs - 3);
2278 assert(argEntry != nullptr);
2279 assert(argEntry->node->gtOper == GT_PUTARG_STK);
2280 GenTree* arg2 = argEntry->node->gtOp.gtOp1;
2281 assert(arg2->gtOper == GT_CNS_INT);
2283 arg2->gtIntCon.gtIconVal = nNewStkArgsWords;
2286 // arg 3 == numberOfOldStackArgsWords
2287 argEntry = comp->gtArgEntryByArgNum(call, numArgs - 4);
2288 assert(argEntry != nullptr);
2289 assert(argEntry->node->gtOper == GT_PUTARG_STK);
2290 GenTree* arg3 = argEntry->node->gtOp.gtOp1;
2291 assert(arg3->gtOper == GT_CNS_INT);
2295 NYI("LowerTailCallViaHelper");
2298 // Transform this call node into a call to Jit tail call helper.
2299 call->gtCallType = CT_HELPER;
2300 call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_TAILCALL);
2301 call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK;
2303 // Lower this as if it were a pure helper call.
2304 call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_HELPER);
2305 GenTree* result = LowerDirectCall(call);
2307 // Now add back tail call flags for identifying this node as tail call dispatched via helper.
2308 call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_HELPER;
2310 #ifdef PROFILING_SUPPORTED
2311 // Insert profiler tail call hook if needed.
2312 // Since we don't know the insertion point, pass null for second param.
2313 if (comp->compIsProfilerHookNeeded())
2315 InsertProfTailCallHook(call, nullptr);
2317 #endif // PROFILING_SUPPORTED
2319 assert(call->IsTailCallViaHelper());
2324 #ifndef _TARGET_64BIT_
2325 //------------------------------------------------------------------------
2326 // Lowering::DecomposeLongCompare: Decomposes a TYP_LONG compare node.
2329 // cmp - the compare node
2332 // The next node to lower.
2335 // This is done during lowering because DecomposeLongs handles only nodes
2336 // that produce TYP_LONG values. Compare nodes may consume TYP_LONG values
2337 // but produce TYP_INT values.
2339 GenTree* Lowering::DecomposeLongCompare(GenTree* cmp)
2341 assert(cmp->gtGetOp1()->TypeGet() == TYP_LONG);
2343 GenTree* src1 = cmp->gtGetOp1();
2344 GenTree* src2 = cmp->gtGetOp2();
2345 assert(src1->OperIs(GT_LONG));
2346 assert(src2->OperIs(GT_LONG));
2347 GenTree* loSrc1 = src1->gtGetOp1();
2348 GenTree* hiSrc1 = src1->gtGetOp2();
2349 GenTree* loSrc2 = src2->gtGetOp1();
2350 GenTree* hiSrc2 = src2->gtGetOp2();
2351 BlockRange().Remove(src1);
2352 BlockRange().Remove(src2);
2354 genTreeOps condition = cmp->OperGet();
2358 if (cmp->OperIs(GT_EQ, GT_NE))
2361 // Transform (x EQ|NE y) into (((x.lo XOR y.lo) OR (x.hi XOR y.hi)) EQ|NE 0). If y is 0 then this can
2362 // be reduced to just ((x.lo OR x.hi) EQ|NE 0). The OR is expected to set the condition flags so we
2363 // don't need to generate a redundant compare against 0, we only generate a SETCC|JCC instruction.
2365 // XOR is used rather than SUB because it is commutative and thus allows swapping the operands when
2366 // the first happens to be a constant. Usually only the second compare operand is a constant but it's
2367 // still possible to have a constant on the left side. For example, when src1 is a uint->ulong cast
2368 // then hiSrc1 would be 0.
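// For illustration only (a sketch, not exact LIR): with y == 0, (x NE y) for a TYP_LONG x
// decomposes roughly into
//     t1 = OR x.lo, x.hi     ; sets the condition flags, the OR value itself is unused
//     SETCC/JCC NE           ; consumes the flags, no explicit compare against 0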
2371 if (loSrc1->OperIs(GT_CNS_INT))
2373 std::swap(loSrc1, loSrc2);
2376 if (loSrc2->IsIntegralConst(0))
2378 BlockRange().Remove(loSrc2);
2383 loCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, loSrc1, loSrc2);
2384 BlockRange().InsertBefore(cmp, loCmp);
2385 ContainCheckBinary(loCmp->AsOp());
2388 if (hiSrc1->OperIs(GT_CNS_INT))
2390 std::swap(hiSrc1, hiSrc2);
2393 if (hiSrc2->IsIntegralConst(0))
2395 BlockRange().Remove(hiSrc2);
2400 hiCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, hiSrc1, hiSrc2);
2401 BlockRange().InsertBefore(cmp, hiCmp);
2402 ContainCheckBinary(hiCmp->AsOp());
2405 hiCmp = comp->gtNewOperNode(GT_OR, TYP_INT, loCmp, hiCmp);
2406 BlockRange().InsertBefore(cmp, hiCmp);
2407 ContainCheckBinary(hiCmp->AsOp());
2411 assert(cmp->OperIs(GT_LT, GT_LE, GT_GE, GT_GT));
2414 // If the compare is signed then (x LT|GE y) can be transformed into ((x SUB y) LT|GE 0).
2415 // If the compare is unsigned we can still use SUB but we need to check the Carry flag,
2416 // not the actual result. In both cases we can simply check the appropriate condition flags
2417 // and ignore the actual result:
2418 // SUB_LO loSrc1, loSrc2
2419 // SUB_HI hiSrc1, hiSrc2
2420 // SETCC|JCC (signed|unsigned LT|GE)
2421 // If loSrc2 happens to be 0 then the first SUB can be eliminated and the second one can
2422 // be turned into a CMP because the first SUB would have set carry to 0. This effectively
2423 // transforms a long compare against 0 into an int compare of the high part against 0.
2425 // (x LE|GT y) can be transformed into ((x SUB y) LE|GT 0) but checking that a long value
2426 // is greater than 0 is not so easy. We need to turn this into a positive/negative check
2427 // like the one we get for LT|GE compares; this can be achieved by swapping the compare:
2428 // (x LE|GT y) becomes (y GE|LT x)
2430 // Having to swap operands is problematic when the second operand is a constant. The constant
2431 // moves to the first operand where it cannot be contained and thus needs a register. This can
2432 // be avoided by changing the constant such that LE|GT becomes LT|GE:
2433 // (x LE|GT 41) becomes (x LT|GE 42)
2436 if (cmp->OperIs(GT_LE, GT_GT))
2438 bool mustSwap = true;
2440 if (loSrc2->OperIs(GT_CNS_INT) && hiSrc2->OperIs(GT_CNS_INT))
2442 uint32_t loValue = static_cast<uint32_t>(loSrc2->AsIntCon()->IconValue());
2443 uint32_t hiValue = static_cast<uint32_t>(hiSrc2->AsIntCon()->IconValue());
2444 uint64_t value = static_cast<uint64_t>(loValue) | (static_cast<uint64_t>(hiValue) << 32);
2445 uint64_t maxValue = cmp->IsUnsigned() ? UINT64_MAX : INT64_MAX;
2447 if (value != maxValue)
2450 loValue = value & UINT32_MAX;
2451 hiValue = (value >> 32) & UINT32_MAX;
2452 loSrc2->AsIntCon()->SetIconValue(loValue);
2453 hiSrc2->AsIntCon()->SetIconValue(hiValue);
2455 condition = cmp->OperIs(GT_LE) ? GT_LT : GT_GE;
2462 std::swap(loSrc1, loSrc2);
2463 std::swap(hiSrc1, hiSrc2);
2464 condition = GenTree::SwapRelop(condition);
2468 assert((condition == GT_LT) || (condition == GT_GE));
2470 if (loSrc2->IsIntegralConst(0))
2472 BlockRange().Remove(loSrc2);
2474 // Very conservative dead code removal... but it helps.
2476 if (loSrc1->OperIs(GT_CNS_INT, GT_LCL_VAR, GT_LCL_FLD))
2478 BlockRange().Remove(loSrc1);
2482 loSrc1->SetUnusedValue();
2485 hiCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, hiSrc1, hiSrc2);
2486 BlockRange().InsertBefore(cmp, hiCmp);
2487 ContainCheckCompare(hiCmp->AsOp());
2491 loCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, loSrc1, loSrc2);
2492 hiCmp = comp->gtNewOperNode(GT_SUB_HI, TYP_INT, hiSrc1, hiSrc2);
2493 BlockRange().InsertBefore(cmp, loCmp, hiCmp);
2494 ContainCheckCompare(loCmp->AsOp());
2495 ContainCheckBinary(hiCmp->AsOp());
2498 // Try to move the first SUB_HI operands right in front of it, this allows using
2499 // a single temporary register instead of 2 (one for CMP and one for SUB_HI). Do
2500 // this only for locals as they won't change condition flags. Note that we could
2501 // move constants (except 0 which generates XOR reg, reg) but it's extremely rare
2502 // to have a constant as the first operand.
2505 if (hiSrc1->OperIs(GT_LCL_VAR, GT_LCL_FLD))
2507 BlockRange().Remove(hiSrc1);
2508 BlockRange().InsertBefore(hiCmp, hiSrc1);
2513 hiCmp->gtFlags |= GTF_SET_FLAGS;
2514 if (hiCmp->IsValue())
2516 hiCmp->SetUnusedValue();
2520 if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE))
2522 BlockRange().Remove(cmp);
2524 GenTree* jcc = cmpUse.User();
2525 jcc->gtOp.gtOp1 = nullptr;
2526 jcc->ChangeOper(GT_JCC);
2527 jcc->gtFlags |= GTF_USE_FLAGS;
2528 jcc->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned());
2532 cmp->gtOp.gtOp1 = nullptr;
2533 cmp->gtOp.gtOp2 = nullptr;
2534 cmp->ChangeOper(GT_SETCC);
2535 cmp->gtFlags |= GTF_USE_FLAGS;
2536 cmp->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned());
2541 #endif // !_TARGET_64BIT_
2543 //------------------------------------------------------------------------
2544 // Lowering::OptimizeConstCompare: Performs various "compare with const" optimizations.
2547 // cmp - the compare node
2550 // The original compare node if lowering should proceed as usual or the next node
2551 // to lower if the compare node was changed in such a way that lowering is no longer needed.
2555 // - Narrow operands to enable memory operand containment (XARCH specific).
2556 // - Transform cmp(and(x, y), 0) into test(x, y) (XARCH/Arm64 specific but could
2557 // be used for ARM as well if support for GT_TEST_EQ/GT_TEST_NE is added).
2558 // - Transform TEST(x, LSH(1, y)) into BT(x, y) (XARCH specific)
2559 // - Transform RELOP(OP, 0) into SETCC(OP) or JCC(OP) if OP can set the
2560 // condition flags appropriately (XARCH/ARM64 specific but could be extended
2561 // to ARM32 as well if ARM32 codegen supports GTF_SET_FLAGS).
2563 GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
2565 assert(cmp->gtGetOp2()->IsIntegralConst());
2567 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
2568 GenTree* op1 = cmp->gtGetOp1();
2569 var_types op1Type = op1->TypeGet();
2570 GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon();
2571 ssize_t op2Value = op2->IconValue();
2573 #ifdef _TARGET_XARCH_
2574 if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && genSmallTypeCanRepresentValue(op1Type, op2Value))
2577 // If op1's type is small then try to narrow op2 so it has the same type as op1.
2578 // Small types are usually used by memory loads and if both compare operands have
2579 // the same type then the memory load can be contained. In certain situations
2580 // (e.g "cmp ubyte, 200") we also get a smaller instruction encoding.
2583 op2->gtType = op1Type;
2587 if (op1->OperIs(GT_CAST) && !op1->gtOverflow())
2589 GenTreeCast* cast = op1->AsCast();
2590 var_types castToType = cast->CastToType();
2591 GenTree* castOp = cast->gtGetOp1();
2593 if (((castToType == TYP_BOOL) || (castToType == TYP_UBYTE)) && FitsIn<UINT8>(op2Value))
2596 // Since we're going to remove the cast we need to be able to narrow the cast operand
2597 // to the cast type. This can be done safely only for certain opers (e.g AND, OR, XOR).
2598 // Some opers just can't be narrowed (e.g. DIV, MUL) while others could be narrowed but
2599 // doing so would produce incorrect results (e.g. RSZ, RSH).
2601 // The below list of handled opers is conservative but enough to handle the most common
2602 // situations. In particular this includes CALL; sometimes the JIT unnecessarily widens
2603 // the result of bool returning calls.
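// For illustration only (a rough XARCH-flavored sketch, Foo is a placeholder): a widened
// bool-returning call such as
//     CMP(CAST<ubyte>(CALL bool Foo()), 1)
// becomes
//     CMP(CALL bool Foo(), 1)
// with both operands retyped to the small cast-to type, so codegen can emit a byte-sized compare.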
2606 #ifdef _TARGET_ARM64_
2607 (op2Value == 0) && cmp->OperIs(GT_EQ, GT_NE, GT_GT) &&
2609 (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical()
2610 #ifdef _TARGET_XARCH_
2611 || IsContainableMemoryOp(castOp)
2617 assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation
2619 #ifdef _TARGET_ARM64_
2620 bool cmpEq = cmp->OperIs(GT_EQ);
2622 cmp->SetOperRaw(cmpEq ? GT_TEST_EQ : GT_TEST_NE);
2623 op2->SetIconValue(0xff);
2624 op2->gtType = castOp->gtType;
2626 castOp->gtType = castToType;
2627 op2->gtType = castToType;
2629 // If we have any contained memory ops on castOp, they must now not be contained.
2630 if (castOp->OperIsLogical())
2632 GenTree* op1 = castOp->gtGetOp1();
2633 if ((op1 != nullptr) && !op1->IsCnsIntOrI())
2635 op1->ClearContained();
2637 GenTree* op2 = castOp->gtGetOp2();
2638 if ((op2 != nullptr) && !op2->IsCnsIntOrI())
2640 op2->ClearContained();
2643 cmp->gtOp.gtOp1 = castOp;
2645 BlockRange().Remove(cast);
2649 else if (op1->OperIs(GT_AND) && cmp->OperIs(GT_EQ, GT_NE))
2652 // Transform ((x AND y) EQ|NE 0) into (x TEST_EQ|TEST_NE y) when possible.
2655 GenTree* andOp1 = op1->gtGetOp1();
2656 GenTree* andOp2 = op1->gtGetOp2();
2661 // If we don't have a 0 compare we can get one by transforming ((x AND mask) EQ|NE mask)
2662 // into ((x AND mask) NE|EQ 0) when mask is a single bit.
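// For example (illustrative only): ((x AND 0x10) EQ 0x10) is first rewritten as
// ((x AND 0x10) NE 0), which the code below then turns into TEST_NE(x, 0x10).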
2665 if (isPow2(static_cast<size_t>(op2Value)) && andOp2->IsIntegralConst(op2Value))
2668 op2->SetIconValue(0);
2669 cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet()));
2675 BlockRange().Remove(op1);
2676 BlockRange().Remove(op2);
2678 cmp->SetOperRaw(cmp->OperIs(GT_EQ) ? GT_TEST_EQ : GT_TEST_NE);
2679 cmp->gtOp.gtOp1 = andOp1;
2680 cmp->gtOp.gtOp2 = andOp2;
2681 // We will re-evaluate containment below
2682 andOp1->ClearContained();
2683 andOp2->ClearContained();
2685 #ifdef _TARGET_XARCH_
2686 if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
2689 // For "test" we only care about the bits that are set in the second operand (mask).
2690 // If the mask fits in a small type then we can narrow both operands to generate a "test"
2691 // instruction with a smaller encoding ("test" does not have a r/m32, imm8 form) and avoid
2692 // a widening load in some cases.
2694 // For 16 bit operands we narrow only if the memory operand is already 16 bit. This matches
2695 // the behavior of a previous implementation and avoids adding more cases where we generate
2696 // 16 bit instructions that require a length changing prefix (0x66). These suffer from
2697 // significant decoder stalls on Intel CPUs.
2699 // We could also do this for 64 bit masks that fit into 32 bit but it doesn't help.
2700 // In such cases morph narrows down the existing GT_AND by inserting a cast between it and
2701 // the memory operand so we'd need to add more code to recognize and eliminate that cast.
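// For example (illustrative only, addr is a placeholder): TEST_NE(IND<int>(addr), 0x20) can have
// both operands narrowed to TYP_UBYTE so that codegen emits
//     test byte ptr [addr], 0x20
// instead of a 4-byte load followed by a 32-bit test.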
2704 size_t mask = static_cast<size_t>(andOp2->AsIntCon()->IconValue());
2706 if (FitsIn<UINT8>(mask))
2708 andOp1->gtType = TYP_UBYTE;
2709 andOp2->gtType = TYP_UBYTE;
2711 else if (FitsIn<UINT16>(mask) && genTypeSize(andOp1) == 2)
2713 andOp1->gtType = TYP_USHORT;
2714 andOp2->gtType = TYP_USHORT;
2721 if (cmp->OperIs(GT_TEST_EQ, GT_TEST_NE))
2723 #ifdef _TARGET_XARCH_
2725 // Transform TEST_EQ|NE(x, LSH(1, y)) into BT(x, y) when possible. Using BT
2726 // results in smaller and faster code. It also doesn't have special register
2727 // requirements, unlike LSH that requires the shift count to be in ECX.
2728 // Note that BT has the same behavior as LSH when the bit index exceeds the
2729 // operand bit size - it uses (bit_index MOD bit_size).
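// For illustration only (a sketch): TEST_NE(x, LSH(1, y)) whose user is a JTRUE lowers to roughly
//     BT x, y                ; copies the tested bit into the carry flag
//     JCC C                  ; jump if the bit was set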
2732 GenTree* lsh = cmp->gtGetOp2();
2735 if (lsh->OperIs(GT_LSH) && varTypeIsIntOrI(lsh->TypeGet()) && lsh->gtGetOp1()->IsIntegralConst(1) &&
2736 BlockRange().TryGetUse(cmp, &cmpUse))
2738 GenCondition condition = cmp->OperIs(GT_TEST_NE) ? GenCondition::C : GenCondition::NC;
2740 cmp->SetOper(GT_BT);
2741 cmp->gtType = TYP_VOID;
2742 cmp->gtFlags |= GTF_SET_FLAGS;
2743 cmp->gtOp.gtOp2 = lsh->gtGetOp2();
2744 cmp->gtGetOp2()->ClearContained();
2746 BlockRange().Remove(lsh->gtGetOp1());
2747 BlockRange().Remove(lsh);
2751 if (cmpUse.User()->OperIs(GT_JTRUE))
2753 cmpUse.User()->ChangeOper(GT_JCC);
2754 cc = cmpUse.User()->AsCC();
2755 cc->gtCondition = condition;
2759 cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT);
2760 BlockRange().InsertAfter(cmp, cc);
2761 cmpUse.ReplaceWith(comp, cc);
2764 cc->gtFlags |= GTF_USE_FLAGS;
2768 #endif // _TARGET_XARCH_
2770 else if (cmp->OperIs(GT_EQ, GT_NE))
2772 GenTree* op1 = cmp->gtGetOp1();
2773 GenTree* op2 = cmp->gtGetOp2();
2775 // TODO-CQ: right now the below peep is inexpensive and gets the benefit in most
2776 // cases because in majority of cases op1, op2 and cmp would be in that order in
2777 // execution. In general we should be able to check that all the nodes that come
2778 // after op1 do not modify the flags so that it is safe to avoid generating a
2779 // test instruction.
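// For illustration only (a sketch): NE(AND(x, y), 0) immediately followed by its JTRUE user
// becomes roughly
//     AND x, y               ; marked GTF_SET_FLAGS, its value is unused
//     JCC NE                 ; the constant 0 and the compare node are removed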
2781 if (op2->IsIntegralConst(0) && (op1->gtNext == op2) && (op2->gtNext == cmp) &&
2782 #ifdef _TARGET_XARCH_
2783 op1->OperIs(GT_AND, GT_OR, GT_XOR, GT_ADD, GT_SUB, GT_NEG))
2784 #else // _TARGET_ARM64_
2785 op1->OperIs(GT_AND, GT_ADD, GT_SUB))
2788 op1->gtFlags |= GTF_SET_FLAGS;
2789 op1->SetUnusedValue();
2791 BlockRange().Remove(op2);
2793 GenTree* next = cmp->gtNext;
2798 // Fast check for the common case - relop used by a JTRUE that immediately follows it.
2799 if ((next != nullptr) && next->OperIs(GT_JTRUE) && (next->gtGetOp1() == cmp))
2804 BlockRange().Remove(cmp);
2806 else if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE))
2811 BlockRange().Remove(cmp);
2813 else // The relop is not used by a JTRUE or it is not used at all.
2815 // Transform the relop node into a SETCC. If it's not used we could remove
2816 // it completely but that means doing more work to handle a rare case.
2821 GenCondition condition = GenCondition::FromIntegralRelop(cmp);
2822 cc->ChangeOper(ccOp);
2823 cc->AsCC()->gtCondition = condition;
2824 cc->gtFlags |= GTF_USE_FLAGS;
2829 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
2834 //------------------------------------------------------------------------
2835 // Lowering::LowerCompare: Lowers a compare node.
2838 // cmp - the compare node
2841 // The next node to lower.
2843 GenTree* Lowering::LowerCompare(GenTree* cmp)
2845 #ifndef _TARGET_64BIT_
2846 if (cmp->gtGetOp1()->TypeGet() == TYP_LONG)
2848 return DecomposeLongCompare(cmp);
2852 if (cmp->gtGetOp2()->IsIntegralConst() && !comp->opts.MinOpts())
2854 GenTree* next = OptimizeConstCompare(cmp);
2856 // If OptimizeConstCompare returns the compare node as "next" then we need to continue lowering.
2863 #ifdef _TARGET_XARCH_
2864 if (cmp->gtGetOp1()->TypeGet() == cmp->gtGetOp2()->TypeGet())
2866 if (varTypeIsSmall(cmp->gtGetOp1()->TypeGet()) && varTypeIsUnsigned(cmp->gtGetOp1()->TypeGet()))
2869 // If both operands have the same type then codegen will use the common operand type to
2870 // determine the instruction type. For small types this would result in performing a
2871 // signed comparison of two small unsigned values without zero extending them to TYP_INT
2872 // which is incorrect. Note that making the comparison unsigned doesn't imply that codegen
2873 // has to generate a small comparison, it can still correctly generate a TYP_INT comparison.
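// For example (illustrative only): for two TYP_UBYTE operands 0x80 and 0x01, a signed byte
// compare would treat 0x80 as -128 and report "less than", whereas the correct unsigned
// result is "greater than".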
2876 cmp->gtFlags |= GTF_UNSIGNED;
2879 #endif // _TARGET_XARCH_
2880 ContainCheckCompare(cmp->AsOp());
2884 //------------------------------------------------------------------------
2885 // Lowering::LowerJTrue: Lowers a JTRUE node.
2888 // jtrue - the JTRUE node
2891 // The next node to lower (usually nullptr).
2894 // On ARM64 this may remove the JTRUE node and transform its associated
2895 // relop into a JCMP node.
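// For illustration only (a sketch): JTRUE(NE(x, 0)) becomes a JCMP that codegen emits as
//     cbnz x, <target>
// while JTRUE(TEST_NE(x, 8)) becomes a JCMP with GTF_JCMP_TST, emitted as
//     tbnz x, #3, <target>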
2897 GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue)
2899 #ifdef _TARGET_ARM64_
2900 GenTree* relop = jtrue->gtGetOp1();
2901 GenTree* relopOp2 = relop->gtOp.gtGetOp2();
2903 if ((relop->gtNext == jtrue) && relopOp2->IsCnsIntOrI())
2905 bool useJCMP = false;
2908 if (relop->OperIs(GT_EQ, GT_NE) && relopOp2->IsIntegralConst(0))
2910 // Codegen will use cbz or cbnz, which do not affect the flag register
2911 flags = relop->OperIs(GT_EQ) ? GTF_JCMP_EQ : 0;
2914 else if (relop->OperIs(GT_TEST_EQ, GT_TEST_NE) && isPow2(relopOp2->AsIntCon()->IconValue()))
2916 // Codegen will use tbz or tbnz, which do not affect the flag register
2917 flags = GTF_JCMP_TST | (relop->OperIs(GT_TEST_EQ) ? GTF_JCMP_EQ : 0);
2923 relop->SetOper(GT_JCMP);
2924 relop->gtFlags &= ~(GTF_JCMP_TST | GTF_JCMP_EQ);
2925 relop->gtFlags |= flags;
2926 relop->gtType = TYP_VOID;
2928 relopOp2->SetContained();
2930 BlockRange().Remove(jtrue);
2932 assert(relop->gtNext == nullptr);
2936 #endif // _TARGET_ARM64_
2938 ContainCheckJTrue(jtrue);
2940 assert(jtrue->gtNext == nullptr);
2944 // Lower "jmp <method>" tail call to insert PInvoke method epilog if required.
2945 void Lowering::LowerJmpMethod(GenTree* jmp)
2947 assert(jmp->OperGet() == GT_JMP);
2949 JITDUMP("lowering GT_JMP\n");
2951 JITDUMP("============");
2953 // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
2954 // a method returns.
2955 if (comp->info.compCallUnmanaged)
2957 InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(jmp));
2961 // Lower GT_RETURN node to insert PInvoke method epilog if required.
2962 void Lowering::LowerRet(GenTree* ret)
2964 assert(ret->OperGet() == GT_RETURN);
2966 JITDUMP("lowering GT_RETURN\n");
2968 JITDUMP("============");
2970 #if defined(_TARGET_AMD64_) && defined(FEATURE_SIMD)
2971 GenTreeUnOp* const unOp = ret->AsUnOp();
2972 if ((unOp->TypeGet() == TYP_LONG) && (unOp->gtOp1->TypeGet() == TYP_SIMD8))
2974 GenTreeUnOp* bitcast = new (comp, GT_BITCAST) GenTreeOp(GT_BITCAST, TYP_LONG, unOp->gtOp1, nullptr);
2975 unOp->gtOp1 = bitcast;
2976 BlockRange().InsertBefore(unOp, bitcast);
2978 #endif // _TARGET_AMD64_
2980 // Method doing PInvokes has exactly one return block unless it has tail calls.
2981 if (comp->info.compCallUnmanaged && (comp->compCurBB == comp->genReturnBB))
2983 InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(ret));
2985 ContainCheckRet(ret->AsOp());
2988 GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
2990 noway_assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_HELPER);
2992 // Don't support tail calling helper methods.
2993 // But we might encounter tail calls dispatched via the JIT helper, which appear as a tail call to a helper.
2994 noway_assert(!call->IsTailCall() || call->IsTailCallViaHelper() || call->gtCallType == CT_USER_FUNC);
2996 // Non-virtual direct/indirect calls: Work out if the address of the
2997 // call is known at JIT time. If not it is either an indirect call
2998 // or the address must be accessed via a single/double indirection.
3001 InfoAccessType accessType;
3002 CorInfoHelpFunc helperNum = comp->eeGetHelperNum(call->gtCallMethHnd);
3004 #ifdef FEATURE_READYTORUN_COMPILER
3005 if (call->gtEntryPoint.addr != nullptr)
3007 accessType = call->gtEntryPoint.accessType;
3008 addr = call->gtEntryPoint.addr;
3012 if (call->gtCallType == CT_HELPER)
3014 noway_assert(helperNum != CORINFO_HELP_UNDEF);
3016 // the convention on getHelperFtn seems to be (it's not documented)
3017 // that it returns an address or if it returns null, pAddr is set to
3018 // another address, which requires an indirection
3020 addr = comp->info.compCompHnd->getHelperFtn(helperNum, (void**)&pAddr);
3022 if (addr != nullptr)
3024 assert(pAddr == nullptr);
3025 accessType = IAT_VALUE;
3029 accessType = IAT_PVALUE;
3035 noway_assert(helperNum == CORINFO_HELP_UNDEF);
3037 CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
3039 if (call->IsSameThis())
3041 aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
3044 if (!call->NeedsNullCheck())
3046 aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
3049 CORINFO_CONST_LOOKUP addrInfo;
3050 comp->info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo, aflags);
3052 accessType = addrInfo.accessType;
3053 addr = addrInfo.addr;
3056 GenTree* result = nullptr;
3060 // Non-virtual direct call to known address
3061 if (!IsCallTargetInRange(addr) || call->IsTailCall())
3063 result = AddrGen(addr);
3067 // a direct call within range of hardware relative call instruction
3068 // stash the address for codegen
3069 call->gtDirectCallAddress = addr;
3075 // Non-virtual direct calls to addresses accessed by
3076 // a single indirection.
3077 GenTree* cellAddr = AddrGen(addr);
3078 GenTree* indir = Ind(cellAddr);
3084 // Non-virtual direct calls to addresses accessed by
3085 // a double indirection.
3087 // Double-indirection. Load the address into a register
3088 // and call indirectly through the register
3089 noway_assert(helperNum == CORINFO_HELP_UNDEF);
3090 result = AddrGen(addr);
3091 result = Ind(Ind(result));
3096 // Non-virtual direct calls to addresses accessed by
3097 // a single relative indirection.
3098 GenTree* cellAddr = AddrGen(addr);
3099 GenTree* indir = Ind(cellAddr);
3100 result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, indir, AddrGen(addr));
3105 noway_assert(!"Bad accessType");
3112 GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call)
3114 noway_assert(call->gtCallType == CT_USER_FUNC);
3116 assert((comp->info.compCompHnd->getMethodAttribs(call->gtCallMethHnd) &
3117 (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)) == (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL));
3119 GenTree* thisArgNode;
3120 if (call->IsTailCallViaHelper())
3122 #ifdef _TARGET_X86_ // x86 tailcall via helper follows normal calling convention, but with extra stack args.
3123 const unsigned argNum = 0;
3124 #else // !_TARGET_X86_
3125 // In case of helper dispatched tail calls, "thisptr" will be the third arg.
3126 // The first two args are: real call target and addr of args copy routine.
3127 const unsigned argNum = 2;
3128 #endif // !_TARGET_X86_
3130 fgArgTabEntry* thisArgTabEntry = comp->gtArgEntryByArgNum(call, argNum);
3131 thisArgNode = thisArgTabEntry->node;
3135 thisArgNode = comp->gtGetThisArg(call);
3138 assert(thisArgNode->gtOper == GT_PUTARG_REG);
3139 GenTree* originalThisExpr = thisArgNode->gtOp.gtOp1;
3140 GenTree* thisExpr = originalThisExpr;
3142 // We're going to use the 'this' expression multiple times, so make a local to copy it.
3147 if (call->IsTailCallViaHelper() && originalThisExpr->IsLocal())
3149 // For ordering purposes for the special tailcall arguments on x86, we forced the
3150 // 'this' pointer in this case to a local in Compiler::fgMorphTailCall().
3151 // We could possibly use this case to remove copies for all architectures and non-tailcall
3152 // calls by creating a new lcl var or lcl field reference, as is done in the
3153 // LowerVirtualVtableCall() code.
3154 assert(originalThisExpr->OperGet() == GT_LCL_VAR);
3155 lclNum = originalThisExpr->AsLclVarCommon()->GetLclNum();
3158 #endif // _TARGET_X86_
3160 unsigned delegateInvokeTmp = comp->lvaGrabTemp(true DEBUGARG("delegate invoke call"));
3162 LIR::Use thisExprUse(BlockRange(), &thisArgNode->gtOp.gtOp1, thisArgNode);
3163 ReplaceWithLclVar(thisExprUse, delegateInvokeTmp);
3165 thisExpr = thisExprUse.Def(); // it's changed; reload it.
3166 lclNum = delegateInvokeTmp;
3169 // replace original expression feeding into thisPtr with
3170 // [originalThis + offsetOfDelegateInstance]
3172 GenTree* newThisAddr = new (comp, GT_LEA)
3173 GenTreeAddrMode(TYP_BYREF, thisExpr, nullptr, 0, comp->eeGetEEInfo()->offsetOfDelegateInstance);
3175 GenTree* newThis = comp->gtNewOperNode(GT_IND, TYP_REF, newThisAddr);
3177 BlockRange().InsertAfter(thisExpr, newThisAddr, newThis);
3179 thisArgNode->gtOp.gtOp1 = newThis;
3180 ContainCheckIndir(newThis->AsIndir());
3182 // the control target is
3183 // [originalThis + firstTgtOffs]
3185 GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(originalThisExpr->TypeGet(), lclNum);
3187 unsigned targetOffs = comp->eeGetEEInfo()->offsetOfDelegateFirstTarget;
3188 GenTree* result = new (comp, GT_LEA) GenTreeAddrMode(TYP_REF, base, nullptr, 0, targetOffs);
3189 GenTree* callTarget = Ind(result);
3191 // don't need to sequence and insert this tree, caller will do it
3196 GenTree* Lowering::LowerIndirectNonvirtCall(GenTreeCall* call)
3199 if (call->gtCallCookie != nullptr)
3201 NYI_X86("Morphing indirect non-virtual call with non-standard args");
3205 // Indirect cookie calls get transformed by fgMorphArgs into an indirect call with non-standard args.
3206 // Hence we should never see this type of call in lower.
3208 noway_assert(call->gtCallCookie == nullptr);
3213 //------------------------------------------------------------------------
3214 // CreateReturnTrapSeq: Create a tree to perform a "return trap", used in PInvoke
3215 // epilogs to invoke a GC under a condition. The return trap checks some global
3216 // location (the runtime tells us where that is and how many indirections to make),
3217 // then, based on the result, conditionally calls a GC helper. We use a special node
3218 // for this because at this time (late in the compilation phases), introducing flow
3219 // is tedious/difficult.
3221 // This is used for PInvoke inlining.
3224 // Code tree to perform the action.
3226 GenTree* Lowering::CreateReturnTrapSeq()
3228 // The GT_RETURNTRAP node expands to this:
3229 // if (g_TrapReturningThreads)
3231 // RareDisablePreemptiveGC();
3234 // The only thing to do here is build up the expression that evaluates 'g_TrapReturningThreads'.
3236 void* pAddrOfCaptureThreadGlobal = nullptr;
3237 LONG* addrOfCaptureThreadGlobal = comp->info.compCompHnd->getAddrOfCaptureThreadGlobal(&pAddrOfCaptureThreadGlobal);
3240 if (addrOfCaptureThreadGlobal != nullptr)
3242 testTree = Ind(AddrGen(addrOfCaptureThreadGlobal));
3246 testTree = Ind(Ind(AddrGen(pAddrOfCaptureThreadGlobal)));
3248 return comp->gtNewOperNode(GT_RETURNTRAP, TYP_INT, testTree);
3251 //------------------------------------------------------------------------
3252 // SetGCState: Create a tree that stores the given constant (0 or 1) into the
3253 // thread's GC state field.
3255 // This is used for PInvoke inlining.
3258 // state - constant (0 or 1) to store into the thread's GC state field.
3261 // Code tree to perform the action.
3263 GenTree* Lowering::SetGCState(int state)
3265 // Thread.offsetOfGcState = 0/1
3267 assert(state == 0 || state == 1);
3269 const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
3271 GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(TYP_I_IMPL, comp->info.compLvFrameListRoot);
3273 GenTree* stateNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_BYTE, state);
3274 GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, base, nullptr, 1, pInfo->offsetOfGCState);
3275 GenTree* storeGcState = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_BYTE, addr, stateNode);
3276 return storeGcState;
3279 //------------------------------------------------------------------------
3280 // CreateFrameLinkUpdate: Create a tree that either links or unlinks the
3281 // locally-allocated InlinedCallFrame from the Frame list.
3283 // This is used for PInvoke inlining.
3286 // action - whether to link (push) or unlink (pop) the Frame
3289 // Code tree to perform the action.
3291 GenTree* Lowering::CreateFrameLinkUpdate(FrameLinkAction action)
3293 const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
3294 const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo;
3296 GenTree* TCB = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot);
3299 GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, TCB, nullptr, 1, pInfo->offsetOfThreadFrame);
3301 GenTree* data = nullptr;
3303 if (action == PushFrame)
3305 // Thread->m_pFrame = &inlinedCallFrame;
3306 data = new (comp, GT_LCL_FLD_ADDR)
3307 GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr);
3311 assert(action == PopFrame);
3312 // Thread->m_pFrame = inlinedCallFrame.m_pNext;
3314 data = new (comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar,
3315 pInfo->inlinedCallFrameInfo.offsetOfFrameLink);
3317 GenTree* storeInd = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_I_IMPL, addr, data);
3321 //------------------------------------------------------------------------
3322 // InsertPInvokeMethodProlog: Create the code that runs at the start of
3323 // every method that has PInvoke calls.
3325 // Initialize the TCB local and the InlinedCallFrame object. Then link ("push")
3326 // the InlinedCallFrame object on the Frame chain. The layout of InlinedCallFrame
3327 // is defined in vm/frames.h. See also vm/jitinterface.cpp for more information.
3328 // The offsets of these fields are returned by the VM in a call to ICorStaticInfo::getEEInfo().
3330 // The (current) layout is as follows:
3332 // 64-bit 32-bit CORINFO_EE_INFO
3333 // offset offset field name offset when set
3334 // -----------------------------------------------------------------------------------------
3335 // +00h +00h GS cookie offsetOfGSCookie
3336 // +08h +04h vptr for class InlinedCallFrame offsetOfFrameVptr method prolog
3337 // +10h +08h m_Next offsetOfFrameLink method prolog
3338 // +18h +0Ch m_Datum offsetOfCallTarget call site
3339 // +20h n/a m_StubSecretArg not set by JIT
3340 // +28h +10h m_pCallSiteSP offsetOfCallSiteSP x86: call site, and zeroed in method
3342 // non-x86: method prolog (SP remains
3343 // constant in function, after prolog: no
3344 // localloc and PInvoke in same function)
3345 // +30h +14h m_pCallerReturnAddress offsetOfReturnAddress call site
3346 // +38h +18h m_pCalleeSavedFP offsetOfCalleeSavedFP not set by JIT
3347 // +1Ch JIT retval spill area (int) before call_gc ???
3348 // +20h JIT retval spill area (long) before call_gc ???
3349 // +24h Saved value of EBP method prolog ???
3351 // Note that in the VM, InlinedCallFrame is a C++ class whose objects have a 'this' pointer that points
3352 // to the InlinedCallFrame vptr (the 2nd field listed above), and the GS cookie is stored *before*
3353 // the object. When we link the InlinedCallFrame onto the Frame chain, we must point at this location,
3354 // and not at the beginning of the InlinedCallFrame local, which is actually the GS cookie.
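// For illustration only (a rough sketch of the generated prolog, not exact LIR):
//     t = CORINFO_HELP_INIT_PINVOKE_FRAME(&inlinedCallFrame.vptr [, secretArg])
//     compLvFrameListRoot = t                      ; cache the TCB returned by the helper
//     inlinedCallFrame.m_pCallSiteSP = SP          ; not on x86/arm32
//     inlinedCallFrame.m_pCalleeSavedFP = FP       ; not on arm32
//     Thread->m_pFrame = &inlinedCallFrame.vptr    ; 64-bit IL stubs only (the "push")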
3359 void Lowering::InsertPInvokeMethodProlog()
3361 noway_assert(comp->info.compCallUnmanaged);
3362 noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
3364 if (comp->opts.ShouldUsePInvokeHelpers())
3369 JITDUMP("======= Inserting PInvoke method prolog\n");
3371 // The first BB must be a scratch BB in order for us to be able to safely insert the P/Invoke prolog.
3372 assert(comp->fgFirstBBisScratch());
3374 LIR::Range& firstBlockRange = LIR::AsRange(comp->fgFirstBB);
3376 const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
3377 const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo;
3379 // First arg: &compiler->lvaInlinedPInvokeFrameVar + callFrameInfo.offsetOfFrameVptr
3381 GenTree* frameAddr = new (comp, GT_LCL_FLD_ADDR)
3382 GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr);
3384 // Call runtime helper to fill in our InlinedCallFrame and push it on the Frame list:
3385 // TCB = CORINFO_HELP_INIT_PINVOKE_FRAME(&symFrameStart, secretArg);
3386 // for x86, don't pass the secretArg.
3387 CLANG_FORMAT_COMMENT_ANCHOR;
3389 #if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
3390 GenTreeArgList* argList = comp->gtNewArgList(frameAddr);
3392 GenTreeArgList* argList = comp->gtNewArgList(frameAddr, PhysReg(REG_SECRET_STUB_PARAM));
3395 GenTree* call = comp->gtNewHelperCallNode(CORINFO_HELP_INIT_PINVOKE_FRAME, TYP_I_IMPL, argList);
3397 // some sanity checks on the frame list root vardsc
3398 LclVarDsc* varDsc = &comp->lvaTable[comp->info.compLvFrameListRoot];
3399 noway_assert(!varDsc->lvIsParam);
3400 noway_assert(varDsc->lvType == TYP_I_IMPL);
3403 new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot);
3404 store->gtOp.gtOp1 = call;
3405 store->gtFlags |= GTF_VAR_DEF;
3407 GenTree* const insertionPoint = firstBlockRange.FirstNonPhiOrCatchArgNode();
3409 comp->fgMorphTree(store);
3410 firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, store));
3411 DISPTREERANGE(firstBlockRange, store);
3413 #if !defined(_TARGET_X86_) && !defined(_TARGET_ARM_)
3414 // For x86, this step is done at the call site (due to stack pointer not being static in the function).
3415 // For arm32, CallSiteSP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.
3417 // --------------------------------------------------------
3418 // InlinedCallFrame.m_pCallSiteSP = @RSP;
3420 GenTreeLclFld* storeSP = new (comp, GT_STORE_LCL_FLD)
3421 GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
3422 storeSP->gtOp1 = PhysReg(REG_SPBASE);
3423 storeSP->gtFlags |= GTF_VAR_DEF;
3425 firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeSP));
3426 DISPTREERANGE(firstBlockRange, storeSP);
3428 #endif // !defined(_TARGET_X86_) && !defined(_TARGET_ARM_)
3430 #if !defined(_TARGET_ARM_)
3431 // For arm32, CalleeSavedFP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.
3433 // --------------------------------------------------------
3434 // InlinedCallFrame.m_pCalleeSavedEBP = @RBP;
3436 GenTreeLclFld* storeFP =
3437 new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
3438 callFrameInfo.offsetOfCalleeSavedFP);
3439 storeFP->gtOp1 = PhysReg(REG_FPBASE);
3440 storeFP->gtFlags |= GTF_VAR_DEF;
3442 firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeFP));
3443 DISPTREERANGE(firstBlockRange, storeFP);
3444 #endif // !defined(_TARGET_ARM_)
3446 // --------------------------------------------------------
3447 // On 32-bit targets, CORINFO_HELP_INIT_PINVOKE_FRAME initializes the PInvoke frame and then pushes it onto
3448 // the current thread's Frame stack. On 64-bit targets, it only initializes the PInvoke frame.
3449 CLANG_FORMAT_COMMENT_ANCHOR;
3451 #ifdef _TARGET_64BIT_
3452 if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
3454 // Push a frame - if we are NOT in an IL stub, this is done right before the call
3455 // The init routine sets InlinedCallFrame's m_pNext, so we just set the thread's top-of-stack
3456 GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame);
3457 firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
3458 ContainCheckStoreIndir(frameUpd->AsIndir());
3459 DISPTREERANGE(firstBlockRange, frameUpd);
3461 #endif // _TARGET_64BIT_
3464 //------------------------------------------------------------------------
3465 // InsertPInvokeMethodEpilog: Code that needs to be run when exiting any method
3466 // that has PInvoke inlines. This needs to be inserted any place you can exit the
3467 // function: returns, tailcalls and jmps.
3470 // returnBB - basic block from which a method can return
3471 // lastExpr - GenTree of the last top level stmnt of returnBB (debug only arg)
3474 // Code tree to perform the action.
3476 void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr))
3478 assert(returnBB != nullptr);
3479 assert(comp->info.compCallUnmanaged);
3481 if (comp->opts.ShouldUsePInvokeHelpers())
3486 JITDUMP("======= Inserting PInvoke method epilog\n");
3488 // A method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls.
3489 assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) ||
3490 returnBB->endsWithTailCallOrJmp(comp));
3492 LIR::Range& returnBlockRange = LIR::AsRange(returnBB);
3494 GenTree* insertionPoint = returnBlockRange.LastNode();
3495 assert(insertionPoint == lastExpr);
3497 // Note: PInvoke Method Epilog (PME) needs to be inserted just before GT_RETURN, GT_JMP or GT_CALL node in execution
3498 // order so that it is guaranteed that there will be no further PInvokes after that point in the method.
3500 // Example1: GT_RETURN(op1) - say execution order is: Op1, GT_RETURN. After inserting PME, execution order would be
3501 // Op1, PME, GT_RETURN
3503 // Example2: GT_CALL(arg side effect computing nodes, Stk Args Setup, Reg Args setup). The execution order would be
3504 // arg side effect computing nodes, Stk Args setup, Reg Args setup, GT_CALL
3505 // After inserting PME execution order would be:
3506 // arg side effect computing nodes, Stk Args setup, Reg Args setup, PME, GT_CALL
3508 // Example3: GT_JMP. After inserting PME execution order would be: PME, GT_JMP
3509 // That is, after the PME, args for the GT_JMP call will be set up.
3511 // TODO-Cleanup: setting GCState to 1 seems to be redundant as InsertPInvokeCallProlog will set it to zero before a
3512 // PInvoke call and InsertPInvokeCallEpilog() will set it back to 1 after the PInvoke. Though this is redundant, it is harmless.
3514 // Note that liveness is artificially extending the life of compLvFrameListRoot var if the method being compiled has
3515 // PInvokes. Deleting the statement below would cause an assert in lsra.cpp::SetLastUses() since compLvFrameListRoot
3516 // will be live-in to a BBJ_RETURN block without any uses. Long term we need to fix liveness for x64 case to
3517 // properly extend the life of compLvFrameListRoot var.
3519 // Thread.offsetOfGcState = 0/1
3520 // That is [tcb + offsetOfGcState] = 1
3521 GenTree* storeGCState = SetGCState(1);
3522 returnBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeGCState));
3523 ContainCheckStoreIndir(storeGCState->AsIndir());
3525 // Pop the frame if necessary. This always happens in the epilog on 32-bit targets. For 64-bit targets, we only do
3526 // this in the epilog for IL stubs; for non-IL stubs the frame is popped after every PInvoke call.
3527 CLANG_FORMAT_COMMENT_ANCHOR;
3529 #ifdef _TARGET_64BIT_
3530 if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
3531 #endif // _TARGET_64BIT_
3533 GenTree* frameUpd = CreateFrameLinkUpdate(PopFrame);
3534 returnBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
3535 ContainCheckStoreIndir(frameUpd->AsIndir());
3539 //------------------------------------------------------------------------
3540 // InsertPInvokeCallProlog: Emit the call-site prolog for direct calls to unmanaged code.
3541 // It does all the necessary call-site setup of the InlinedCallFrame.
3544 // call - the call for which we are inserting the PInvoke prolog.
3549 void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
3551 JITDUMP("======= Inserting PInvoke call prolog\n");
3553 GenTree* insertBefore = call;
3554 if (call->gtCallType == CT_INDIRECT)
3557 insertBefore = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode();
3561 const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;
3563 gtCallTypes callType = (gtCallTypes)call->gtCallType;
3565 noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
3567 if (comp->opts.ShouldUsePInvokeHelpers())
3569 // First argument is the address of the frame variable.
3570 GenTree* frameAddr =
3571 new (comp, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar);
3573 // Insert call to CORINFO_HELP_JIT_PINVOKE_BEGIN
3574 GenTree* helperCall =
3575 comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_BEGIN, TYP_VOID, comp->gtNewArgList(frameAddr));
3577 comp->fgMorphTree(helperCall);
3578 BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, helperCall));
3579 LowerNode(helperCall); // helper call is inserted before current node and should be lowered here.
3583 // Emit the following sequence:
3585 // InlinedCallFrame.callTarget = methodHandle // stored in m_Datum
3586 // InlinedCallFrame.m_pCallSiteSP = SP // x86 only
3587 // InlinedCallFrame.m_pCallerReturnAddress = return address
3588 // GT_START_PREEMPTGC
3589 // Thread.gcState = 0
3590 // (non-stub) - update top Frame on TCB // 64-bit targets only
3592 // ----------------------------------------------------------------------------------
3593 // Setup InlinedCallFrame.callSiteTarget (which is how the JIT refers to it).
3594 // The actual field is InlinedCallFrame.m_Datum which has many different uses and meanings.
3596 GenTree* src = nullptr;
3598 if (callType == CT_INDIRECT)
3600 #if !defined(_TARGET_64BIT_)
3601 // On 32-bit targets, indirect calls need the size of the stack args in InlinedCallFrame.m_Datum.
3602 const unsigned numStkArgBytes = call->fgArgInfo->GetNextSlotNum() * TARGET_POINTER_SIZE;
3604 src = comp->gtNewIconNode(numStkArgBytes, TYP_INT);
3606 // On 64-bit targets, indirect calls may need the stub parameter value in InlinedCallFrame.m_Datum.
3607 // If the stub parameter value is not needed, m_Datum will be initialized by the VM.
3608 if (comp->info.compPublishStubParam)
3610 src = comp->gtNewLclvNode(comp->lvaStubArgumentVar, TYP_I_IMPL);
3612 #endif // !defined(_TARGET_64BIT_)
3616 assert(callType == CT_USER_FUNC);
3618 void* pEmbedMethodHandle = nullptr;
3619 CORINFO_METHOD_HANDLE embedMethodHandle =
3620 comp->info.compCompHnd->embedMethodHandle(call->gtCallMethHnd, &pEmbedMethodHandle);
3622 noway_assert((!embedMethodHandle) != (!pEmbedMethodHandle));
3624 if (embedMethodHandle != nullptr)
3626 // InlinedCallFrame.callSiteTarget = methodHandle
3627 src = AddrGen(embedMethodHandle);
3631 // InlinedCallFrame.callSiteTarget = *pEmbedMethodHandle
3632 src = Ind(AddrGen(pEmbedMethodHandle));
3638 // Store into InlinedCallFrame.m_Datum, the offset of which is given by offsetOfCallTarget.
3639 GenTreeLclFld* store =
3640 new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
3641 callFrameInfo.offsetOfCallTarget);
3643 store->gtFlags |= GTF_VAR_DEF;
3645 InsertTreeBeforeAndContainCheck(insertBefore, store);
3650 // ----------------------------------------------------------------------------------
3651 // InlinedCallFrame.m_pCallSiteSP = SP
3653 GenTreeLclFld* storeCallSiteSP = new (comp, GT_STORE_LCL_FLD)
3654 GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
3656 storeCallSiteSP->gtOp1 = PhysReg(REG_SPBASE);
3657 storeCallSiteSP->gtFlags |= GTF_VAR_DEF;
3659 InsertTreeBeforeAndContainCheck(insertBefore, storeCallSiteSP);
3663 // ----------------------------------------------------------------------------------
3664 // InlinedCallFrame.m_pCallerReturnAddress = &label (the address of the instruction immediately following the call)
3666 GenTreeLclFld* storeLab =
3667 new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
3668 callFrameInfo.offsetOfReturnAddress);
3670 storeLab->gtOp1 = new (comp, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL);
3671 storeLab->gtFlags |= GTF_VAR_DEF;
3673 InsertTreeBeforeAndContainCheck(insertBefore, storeLab);
3675 // Push the PInvoke frame if necessary. On 32-bit targets this only happens in the method prolog if a method
3676 // contains PInvokes; on 64-bit targets this is necessary in non-stubs.
3677 CLANG_FORMAT_COMMENT_ANCHOR;
3679 #ifdef _TARGET_64BIT_
3680 if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
3682 // Set the TCB's frame to be the one we just created.
3683 // Note the init routine for the InlinedCallFrame (CORINFO_HELP_INIT_PINVOKE_FRAME)
3684 // has prepended it to the linked list to maintain the stack of Frames.
3686 // Stubs do this once per stub, not once per call.
3687 GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame);
3688 BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, frameUpd));
3689 ContainCheckStoreIndir(frameUpd->AsIndir());
3691 #endif // _TARGET_64BIT_
3693 // IMPORTANT **** This instruction must be the last real instruction ****
3694 // It changes the thread's state to Preemptive mode
3695 // ----------------------------------------------------------------------------------
3696 // [tcb + offsetOfGcState] = 0
3697 GenTree* storeGCState = SetGCState(0);
3698 BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, storeGCState));
3699 ContainCheckStoreIndir(storeGCState->AsIndir());
3701 // Indicate that codegen has switched this thread to preemptive GC.
3702 // This tree node doesn't generate any code, but impacts LSRA and gc reporting.
3703 // This tree node is simple so doesn't require sequencing.
3704 GenTree* preemptiveGCNode = new (comp, GT_START_PREEMPTGC) GenTree(GT_START_PREEMPTGC, TYP_VOID);
3705 BlockRange().InsertBefore(insertBefore, preemptiveGCNode);
3708 //------------------------------------------------------------------------
3709 // InsertPInvokeCallEpilog: Insert the code that goes after every inlined pinvoke call.
3712 // call - the call for which we are inserting the PInvoke epilog.
3717 void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call)
3719 JITDUMP("======= Inserting PInvoke call epilog\n");
3721 if (comp->opts.ShouldUsePInvokeHelpers())
3723 noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
3725 // First argument is the address of the frame variable.
3726 GenTree* frameAddr =
3727 new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar);
3728 frameAddr->SetOperRaw(GT_LCL_VAR_ADDR);
3730 // Insert call to CORINFO_HELP_JIT_PINVOKE_END
3731 GenTreeCall* helperCall =
3732 comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_END, TYP_VOID, comp->gtNewArgList(frameAddr));
3734 comp->fgMorphTree(helperCall);
3735 BlockRange().InsertAfter(call, LIR::SeqTree(comp, helperCall));
3736 ContainCheckCallOperands(helperCall);
3741 GenTree* insertionPoint = call->gtNext;
3743 GenTree* tree = SetGCState(1);
3744 BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
3745 ContainCheckStoreIndir(tree->AsIndir());
3747 tree = CreateReturnTrapSeq();
3748 BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
3749 ContainCheckReturnTrap(tree->AsOp());
3751 // Pop the frame if necessary. On 32-bit targets this only happens in the method epilog; on 64-bit targets this
3752 // happens after every PInvoke call in non-stubs. 32-bit targets instead mark the frame as inactive.
3753 CLANG_FORMAT_COMMENT_ANCHOR;
3755 #ifdef _TARGET_64BIT_
3756 if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
3758 tree = CreateFrameLinkUpdate(PopFrame);
3759 BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
3760 ContainCheckStoreIndir(tree->AsIndir());
3763 const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;
3765 // ----------------------------------------------------------------------------------
3766 // InlinedCallFrame.m_pCallerReturnAddress = nullptr
3768 GenTreeLclFld* const storeCallSiteTracker =
3769 new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
3770 callFrameInfo.offsetOfReturnAddress);
3772 GenTreeIntCon* const constantZero = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);
3774 storeCallSiteTracker->gtOp1 = constantZero;
3775 storeCallSiteTracker->gtFlags |= GTF_VAR_DEF;
3777 BlockRange().InsertBefore(insertionPoint, constantZero, storeCallSiteTracker);
3778 ContainCheckStoreLoc(storeCallSiteTracker);
3779 #endif // _TARGET_64BIT_
3782 //------------------------------------------------------------------------
3783 // LowerNonvirtPinvokeCall: Lower a non-virtual / indirect PInvoke call
3786 // call - The call to lower.
3789 // The lowered call tree.
3791 GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
3793 // PInvoke lowering varies depending on the flags passed in by the EE. By default,
3794 // GC transitions are generated inline; if CORJIT_FLAG_USE_PINVOKE_HELPERS is specified,
3795 // GC transitions are instead performed using helper calls. Examples of each case are given
3796 // below. Note that the data structure that is used to store information about a call frame
3797 // containing any P/Invoke calls is initialized in the method prolog (see
3798 // InsertPInvokeMethod{Prolog,Epilog} for details).
3800 // Inline transitions:
3801 // InlinedCallFrame inlinedCallFrame;
3805 // // Set up frame information
3806 // inlinedCallFrame.callTarget = methodHandle; // stored in m_Datum
3807 // inlinedCallFrame.m_pCallSiteSP = SP; // x86 only
3808 // inlinedCallFrame.m_pCallerReturnAddress = &label; (the address of the instruction immediately following the call)
3810 // Thread.m_pFrame = &inlinedCallFrame; (non-IL-stub only)
3812 // // Switch the thread's GC mode to preemptive mode
3813 // thread->m_fPreemptiveGCDisabled = 0;
3815 // // Call the unmanaged method
3818 // // Switch the thread's GC mode back to cooperative mode
3819 // thread->m_fPreemptiveGCDisabled = 1;
3821 // // Rendezvous with a running collection if necessary
3822 // if (g_TrapReturningThreads)
3823 // RareDisablePreemptiveGC();
3825 // Transitions using helpers:
3827 // OpaqueFrame opaqueFrame;
3831 // // Call the JIT_PINVOKE_BEGIN helper
3832 // JIT_PINVOKE_BEGIN(&opaqueFrame);
3834 // // Call the unmanaged method
3837 // // Call the JIT_PINVOKE_END helper
3838 // JIT_PINVOKE_END(&opaqueFrame);
3840 // Note that the JIT_PINVOKE_{BEGIN,END} helpers currently use the default calling convention for the target
3841 // platform. They may be changed in the future such that they preserve all register values.
3843 GenTree* result = nullptr;
3844 void* addr = nullptr;
3846 // assert we have seen one of these
3847 noway_assert(comp->info.compCallUnmanaged != 0);
3849 // All code generated by this function must not contain the randomly-inserted NOPs
3850 // that we insert to inhibit JIT spraying in partial trust scenarios.
3851 // The PINVOKE_PROLOG op signals this to the code generator/emitter.
3853 GenTree* prolog = new (comp, GT_NOP) GenTree(GT_PINVOKE_PROLOG, TYP_VOID);
3854 BlockRange().InsertBefore(call, prolog);
3856 InsertPInvokeCallProlog(call);
3858 if (call->gtCallType != CT_INDIRECT)
3860 noway_assert(call->gtCallType == CT_USER_FUNC);
3861 CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd;
3863 CORINFO_CONST_LOOKUP lookup;
3864 comp->info.compCompHnd->getAddressOfPInvokeTarget(methHnd, &lookup);
3866 void* addr = lookup.addr;
3867 switch (lookup.accessType)
3870 if (!IsCallTargetInRange(addr))
3872 result = AddrGen(addr);
3876 // a direct call within range of hardware relative call instruction
3877 // stash the address for codegen
3878 call->gtDirectCallAddress = addr;
3879 #ifdef FEATURE_READYTORUN_COMPILER
3880 call->gtEntryPoint.addr = nullptr;
3881 call->gtEntryPoint.accessType = IAT_VALUE;
3887 result = Ind(AddrGen(addr));
3891 result = Ind(Ind(AddrGen(addr)));
3899 InsertPInvokeCallEpilog(call);
3904 // Expand the code necessary to calculate the control target.
3905 // Returns: the expression needed to calculate the control target
3906 // May insert embedded statements
3907 GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
3909 noway_assert(call->gtCallType == CT_USER_FUNC);
3911 // If this is a tail call via helper, thisPtr will be the third argument.
3913 regNumber thisPtrArgReg;
3915 #ifndef _TARGET_X86_ // x86 tailcall via helper follows normal calling convention, but with extra stack args.
3916 if (call->IsTailCallViaHelper())
3919 thisPtrArgReg = REG_ARG_2;
3922 #endif // !_TARGET_X86_
3925 thisPtrArgReg = comp->codeGen->genGetThisArgReg(call);
3928 // get a reference to the thisPtr being passed
3929 fgArgTabEntry* argEntry = comp->gtArgEntryByArgNum(call, thisPtrArgNum);
3930 assert(argEntry->regNum == thisPtrArgReg);
3931 assert(argEntry->node->gtOper == GT_PUTARG_REG);
3932 GenTree* thisPtr = argEntry->node->gtOp.gtOp1;
3934 // If what we are passing as the thisptr is not already a local, make a new local to place it in
3935 // because we will be creating expressions based on it.
3937 if (thisPtr->IsLocal())
3939 lclNum = thisPtr->gtLclVarCommon.gtLclNum;
3943 // Split off the thisPtr and store to a temporary variable.
3944 if (vtableCallTemp == BAD_VAR_NUM)
3946 vtableCallTemp = comp->lvaGrabTemp(true DEBUGARG("virtual vtable call"));
3949 LIR::Use thisPtrUse(BlockRange(), &(argEntry->node->gtOp.gtOp1), argEntry->node);
3950 ReplaceWithLclVar(thisPtrUse, vtableCallTemp);
3952 lclNum = vtableCallTemp;
3955 // Get hold of the vtable offset (note: this might be expensive)
3956 unsigned vtabOffsOfIndirection;
3957 unsigned vtabOffsAfterIndirection;
3959 comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
3960 &vtabOffsAfterIndirection, &isRelative);
3962 // If the thisPtr is a local field, then construct a local field type node
3964 if (thisPtr->isLclField())
3966 local = new (comp, GT_LCL_FLD)
3967 GenTreeLclFld(GT_LCL_FLD, thisPtr->TypeGet(), lclNum, thisPtr->AsLclFld()->gtLclOffs);
3971 local = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, thisPtr->TypeGet(), lclNum);
3974 // pointer to virtual table = [REG_CALL_THIS + offs]
3975 GenTree* result = Ind(Offset(local, VPTR_OFFS));
3977 // Get the appropriate vtable chunk
3978 if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK)
3982 // MethodTable offset is a relative pointer.
3984 // Additional temporary variable is used to store virtual table pointer.
3985 // The address of the method is obtained by the following computations:
3987 // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of
3988 // vtable-1st-level-indirection):
3991 // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection):
3992 // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]]
3995 // If relative pointers are also in second level indirection, additional temporary is used:
3997 // tmp2 = tmp1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp1 + vtabOffsOfIndirection]
3998 // result = tmp2 + [tmp2]
4000 unsigned lclNumTmp = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp"));
4001 unsigned lclNumTmp2 = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp2"));
4003 GenTree* lclvNodeStore = comp->gtNewTempAssign(lclNumTmp, result);
4005 GenTree* tmpTree = comp->gtNewLclvNode(lclNumTmp, result->TypeGet());
4006 tmpTree = Offset(tmpTree, vtabOffsOfIndirection);
4008 tmpTree = comp->gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree, false);
4009 GenTree* offs = comp->gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_INT);
4010 result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, comp->gtNewLclvNode(lclNumTmp, result->TypeGet()), offs);
4012 GenTree* base = OffsetByIndexWithScale(result, tmpTree, 1);
4013 GenTree* lclvNodeStore2 = comp->gtNewTempAssign(lclNumTmp2, base);
4015 LIR::Range range = LIR::SeqTree(comp, lclvNodeStore);
4016 JITDUMP("result of obtaining pointer to virtual table:\n");
4018 BlockRange().InsertBefore(call, std::move(range));
4020 LIR::Range range2 = LIR::SeqTree(comp, lclvNodeStore2);
4021 JITDUMP("result of obtaining pointer to virtual table 2nd level indirection:\n");
4023 BlockRange().InsertAfter(lclvNodeStore, std::move(range2));
4025 result = Ind(comp->gtNewLclvNode(lclNumTmp2, result->TypeGet()));
4027 comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, result, comp->gtNewLclvNode(lclNumTmp2, result->TypeGet()));
4031 // result = [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection]
4032 result = Ind(Offset(result, vtabOffsOfIndirection));
4037 assert(!isRelative);
4040 // Load the function address
4041 // result = [reg+vtabOffs]
4044 result = Ind(Offset(result, vtabOffsAfterIndirection));
4050 // Lower stub dispatched virtual calls.
4051 GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call)
4053 assert(call->IsVirtualStub());
4055 // An x86 JIT which uses full stub dispatch must generate only
4056 // the following stub dispatch calls:
4058 // (1) isCallRelativeIndirect:
4059 // call dword ptr [rel32] ; FF 15 ---rel32----
4060 // (2) isCallRelative:
4061 // call abc ; E8 ---rel32----
4062 // (3) isCallRegisterIndirect:
4064 // call dword ptr [eax] ; FF 10
4066 // THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN
4067 // vm\i386\cGenCpu.h, esp. isCallRegisterIndirect.
4069 GenTree* result = nullptr;
4071 #ifdef _TARGET_64BIT_
4072 // Non-tail calls: Jump Stubs are not taken into account by VM for mapping an AV into a NullRef
4073 // exception. Therefore, JIT needs to emit an explicit null check. Note that Jit64 too generates
4074 // an explicit null check.
4076 // Tail calls: fgMorphTailCall() materializes null check explicitly and hence no need to emit
4079 // Non-64-bit: No need to null check the this pointer - the dispatch code will deal with this.
4080 // The VM considers exceptions that occur in stubs on 64-bit not to be managed exceptions and
4081 // it would be difficult to change this in a way so that it affects only the right stubs.
4083 if (!call->IsTailCallViaHelper())
4085 call->gtFlags |= GTF_CALL_NULLCHECK;
4089 // This is code to set up an indirect call to a stub address computed
4090 // via dictionary lookup.
4091 if (call->gtCallType == CT_INDIRECT)
4093 // The importer decided we needed a stub call via a computed
4094 // stub dispatch address, i.e. an address which came from a dictionary lookup.
4095 // - The dictionary lookup produces an indirected address, suitable for call
4096 // via "call [VirtualStubParam.reg]"
4098 // This combination will only be generated for shared generic code and when
4099 // stub dispatch is active.
4101 // fgMorphArgs will have created trees to pass the address in VirtualStubParam.reg.
4102 // All we have to do here is add an indirection to generate the actual call target.
4104 GenTree* ind = Ind(call->gtCallAddr);
4105 BlockRange().InsertAfter(call->gtCallAddr, ind);
4106 call->gtCallAddr = ind;
4108 ind->gtFlags |= GTF_IND_REQ_ADDR_IN_REG;
4110 ContainCheckIndir(ind->AsIndir());
4114 // Direct stub call.
4115 // Get stub addr. This will return NULL if virtual call stubs are not active
4116 void* stubAddr = call->gtStubCallStubAddr;
4117 noway_assert(stubAddr != nullptr);
4119 // If not CT_INDIRECT, then it should always be relative indir call.
4120 // This is ensured by VM.
4121 noway_assert(call->IsVirtualStubRelativeIndir());
4123 // Direct stub calls, though the stubAddr itself may still need to be
4124 // accessed via an indirection.
4125 GenTree* addr = AddrGen(stubAddr);
4128 // On x86, for tailcall via helper, the JIT_TailCall helper takes the stubAddr as
4129 // the target address, and we set a flag that it's a VSD call. The helper then
4130 // handles any necessary indirection.
4131 if (call->IsTailCallViaHelper())
4135 #endif // _TARGET_X86_
4137 if (result == nullptr)
4143 // TODO-Cleanup: start emitting random NOPS
4147 //------------------------------------------------------------------------
4148 // AddrModeCleanupHelper: Remove the nodes that are no longer used after an
4149 // addressing mode is constructed
4152 // addrMode - A pointer to a new GenTreeAddrMode
4153 // node - The node currently being considered for removal
4159 // 'addrMode' and 'node' must be contained in the current block
4161 void Lowering::AddrModeCleanupHelper(GenTreeAddrMode* addrMode, GenTree* node)
4163 if (node == addrMode->Base() || node == addrMode->Index())
4168 // TODO-LIR: change this to use the LIR mark bit and iterate instead of recursing
4169 node->VisitOperands([this, addrMode](GenTree* operand) -> GenTree::VisitResult {
4170 AddrModeCleanupHelper(addrMode, operand);
4171 return GenTree::VisitResult::Continue;
4174 BlockRange().Remove(node);
4177 //------------------------------------------------------------------------
4178 // Lowering::AreSourcesPossibleModifiedLocals:
4179 // Given two nodes which will be used in an addressing mode (base,
4180 // index), check to see if they are lclVar reads, and if so, walk
4181 // backwards from the use until both reads have been visited to
4182 // determine if they are potentially modified in that range.
4185 // addr - the node that uses the base and index nodes
4186 // base - the base node
4187 // index - the index node
4189 // Returns: true if either the base or index may be modified between the use and def.
4192 bool Lowering::AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index)
4194 assert(addr != nullptr);
4196 unsigned markCount = 0;
4198 SideEffectSet baseSideEffects;
4199 if (base != nullptr)
4201 if (base->OperIsLocalRead())
4203 baseSideEffects.AddNode(comp, base);
4211 SideEffectSet indexSideEffects;
4212 if (index != nullptr)
4214 if (index->OperIsLocalRead())
4216 indexSideEffects.AddNode(comp, index);
4224 for (GenTree* cursor = addr;; cursor = cursor->gtPrev)
4226 assert(cursor != nullptr);
4233 if (cursor == index)
4238 if ((base == nullptr) && (index == nullptr))
4243 m_scratchSideEffects.Clear();
4244 m_scratchSideEffects.AddNode(comp, cursor);
4245 if ((base != nullptr) && m_scratchSideEffects.InterferesWith(baseSideEffects, false))
4250 if ((index != nullptr) && m_scratchSideEffects.InterferesWith(indexSideEffects, false))
4257 //------------------------------------------------------------------------
4258 // TryCreateAddrMode: recognize trees which can be implemented using an
4259 // addressing mode and transform them to a GT_LEA
4262 // use: the use of the address we want to transform
4263 // isIndir: true if this addressing mode is the child of an indir
4266 // The created LEA node or the original address node if an LEA could not be created.
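// As an illustrative sketch (one recognizable shape; genCreateAddrMode accepts others as well):
//     ADD(base, ADD(LSH(index, 2), 16))
// can be folded into GT_LEA(base, index, scale = 4, offset = 16), provided the base and index
// locals are not possibly modified between their reads and this use (see
// AreSourcesPossiblyModifiedLocals above).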
4269 GenTree* Lowering::TryCreateAddrMode(LIR::Use&& use, bool isIndir)
4271 GenTree* addr = use.Def();
4272 GenTree* base = nullptr;
4273 GenTree* index = nullptr;
4278 // TODO-1stClassStructs: This logic is here to preserve prior behavior. Note that previously
4279 // block ops were not considered for addressing modes, but an add under it may have been.
4280 // This should be replaced with logic that more carefully determines when an addressing mode
4281 // would be beneficial for a block op.
4284 GenTree* indir = use.User();
4285 if (indir->TypeGet() == TYP_STRUCT)
4289 else if (varTypeIsStruct(indir))
4291 // We can have an indirection on the rhs of a block copy (it is the source
4292 // object). This is not a "regular" indirection.
4293 // (Note that the user check could be costly.)
4295 if (BlockRange().TryGetUse(indir, &indirUse) && indirUse.User()->OperIsIndir())
4301 isIndir = !indir->OperIsBlk();
4306 // Find out if an addressing mode can be constructed
4307 bool doAddrMode = comp->codeGen->genCreateAddrMode(addr, // address
4309 &rev, // reverse ops
4311 &index, // index val
4312 #if SCALED_ADDR_MODES
4314 #endif // SCALED_ADDR_MODES
4315 &offset); // displacement
4324 // this is just a reg-const add
4325 if (index == nullptr)
4330 // this is just a reg-reg add
4331 if (scale == 1 && offset == 0)
4338 // make sure there are no side effects between the defs of the leaves and the use
4338 if (!doAddrMode || AreSourcesPossiblyModifiedLocals(addr, base, index))
4340 JITDUMP("No addressing mode:\n ");
4345 GenTree* arrLength = nullptr;
4347 JITDUMP("Addressing mode:\n");
4348 JITDUMP(" Base\n ");
4350 if (index != nullptr)
4352 JITDUMP(" + Index * %u + %d\n ", scale, offset);
4357 JITDUMP(" + %d\n", offset);
4360 var_types addrModeType = addr->TypeGet();
4361 if (addrModeType == TYP_REF)
4363 addrModeType = TYP_BYREF;
4366 GenTreeAddrMode* addrMode = new (comp, GT_LEA) GenTreeAddrMode(addrModeType, base, index, scale, offset);
4368 // Neither the base nor the index should now be contained.
4369 if (base != nullptr)
4371 base->ClearContained();
4373 if (index != nullptr)
4375 index->ClearContained();
4377 addrMode->gtFlags |= (addr->gtFlags & GTF_IND_FLAGS);
4378 addrMode->gtFlags &= ~GTF_ALL_EFFECT; // LEAs are side-effect-free.
4380 JITDUMP("New addressing mode node:\n");
4384 BlockRange().InsertAfter(addr, addrMode);
4386 // Now we need to remove all the nodes subsumed by the addrMode
4387 AddrModeCleanupHelper(addrMode, addr);
4389 // Replace the original address node with the addrMode.
4390 use.ReplaceWith(comp, addrMode);
4395 //------------------------------------------------------------------------
4396 // LowerAdd: turn this add into a GT_LEA if that would be profitable
4399 // node - the node we care about
4402 // The next node to lower if we have transformed the ADD; nullptr otherwise.
4404 GenTree* Lowering::LowerAdd(GenTree* node)
4406 GenTree* next = node->gtNext;
4408 #ifndef _TARGET_ARMARCH_
4409 if (varTypeIsIntegralOrI(node))
4412 if (BlockRange().TryGetUse(node, &use))
4414 // If this is a child of an indir, let the parent handle it.
4415 // If there is a chain of adds, only look at the topmost one.
4416 GenTree* parent = use.User();
4417 if (!parent->OperIsIndir() && (parent->gtOper != GT_ADD))
4419 GenTree* addr = TryCreateAddrMode(std::move(use), false);
4422 return addr->gtNext;
4427 #endif // !_TARGET_ARMARCH_
4432 //------------------------------------------------------------------------
4433 // LowerUnsignedDivOrMod: Lowers a GT_UDIV/GT_UMOD node.
4436 // divMod - pointer to the GT_UDIV/GT_UMOD node to be lowered
4439 // Returns a boolean indicating whether the node was transformed.
4442 // - Transform UDIV/UMOD by power of 2 into RSZ/AND
4443 // - Transform UDIV by constant >= 2^(N-1) into GE
4444 // - Transform UDIV/UMOD by constant >= 3 into "magic division"
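// For example (a sketch of the power-of-2 case, assuming TYP_INT operands):
// x UDIV 8 becomes x RSZ 3, and x UMOD 8 becomes x AND 7; examples of the other
// two transforms appear next to the corresponding code below.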
4447 bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod)
4449 assert(divMod->OperIs(GT_UDIV, GT_UMOD));
4451 #if defined(USE_HELPERS_FOR_INT_DIV)
4452 if (!varTypeIsIntegral(divMod->TypeGet()))
4454 assert(!"unreachable: integral GT_UDIV/GT_UMOD should get morphed into helper calls");
4456 assert(varTypeIsFloating(divMod->TypeGet()));
4457 #endif // USE_HELPERS_FOR_INT_DIV
4458 #if defined(_TARGET_ARM64_)
4459 assert(divMod->OperGet() != GT_UMOD);
4460 #endif // _TARGET_ARM64_
4462 GenTree* next = divMod->gtNext;
4463 GenTree* dividend = divMod->gtGetOp1();
4464 GenTree* divisor = divMod->gtGetOp2();
4466 #if !defined(_TARGET_64BIT_)
4467 if (dividend->OperIs(GT_LONG))
4473 if (!divisor->IsCnsIntOrI())
4478 if (dividend->IsCnsIntOrI())
4480 // We shouldn't see a divmod with constant operands here but if we do then it's likely
4481 // because optimizations are disabled or it's a case that's supposed to throw an exception.
4482 // Don't optimize this.
4486 const var_types type = divMod->TypeGet();
4487 assert((type == TYP_INT) || (type == TYP_I_IMPL));
4489 size_t divisorValue = static_cast<size_t>(divisor->AsIntCon()->IconValue());
4491 if (type == TYP_INT)
4493 // Clear up the upper 32 bits of the value, they may be set to 1 because constants
4494 // are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets.
4495 divisorValue &= UINT32_MAX;
4498 if (divisorValue == 0)
4503 const bool isDiv = divMod->OperIs(GT_UDIV);
4505 if (isPow2(divisorValue))
4512 divisorValue = genLog2(divisorValue);
4520 divMod->SetOper(newOper);
4521 divisor->gtIntCon.SetIconValue(divisorValue);
4522 ContainCheckNode(divMod);
4527 // If the divisor is greater than or equal to 2^(N - 1) then the result is 1
4528 // iff the dividend is greater than or equal to the divisor.
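// For example (TYP_INT, illustrative value): x UDIV 0xD0000000 can only be 0 or 1, and it is 1
// exactly when x >= 0xD0000000, so the division is replaced by an unsigned GE compare.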
4529 if (((type == TYP_INT) && (divisorValue > (UINT32_MAX / 2))) ||
4530 ((type == TYP_LONG) && (divisorValue > (UINT64_MAX / 2))))
4532 divMod->SetOper(GT_GE);
4533 divMod->gtFlags |= GTF_UNSIGNED;
4534 ContainCheckNode(divMod);
4539 // TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32
4540 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
4541 if (!comp->opts.MinOpts() && (divisorValue >= 3))
4547 if (type == TYP_INT)
4549 magic = MagicDivide::GetUnsigned32Magic(static_cast<uint32_t>(divisorValue), &add, &shift);
4553 #ifdef _TARGET_64BIT_
4554 magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &add, &shift);
4560 // Depending on the "add" flag returned by GetUnsigned32Magic/GetUnsigned64Magic we need to generate:
4561 // add == false (when divisor == 3 for example):
4562 // div = (dividend MULHI magic) RSZ shift
4563 // add == true (when divisor == 7 for example):
4564 // mulhi = dividend MULHI magic
4565 // div = (((dividend SUB mulhi) RSZ 1) ADD mulhi)) RSZ (shift - 1)
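// As a concrete sketch (the magic constant below is quoted from the standard unsigned
// magic-number derivation, not computed here): for TYP_INT and divisorValue == 3,
// GetUnsigned32Magic yields magic == 0xAAAAAAAB with add == false and shift == 1, so the
// division lowers to (dividend MULHI 0xAAAAAAAB) RSZ 1.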
4566 const bool requiresAdjustment = add;
4567 const bool requiresDividendMultiuse = requiresAdjustment || !isDiv;
4568 const unsigned curBBWeight = m_block->getBBWeight(comp);
4570 if (requiresDividendMultiuse)
4572 LIR::Use dividendUse(BlockRange(), &divMod->gtOp1, divMod);
4573 dividend = ReplaceWithLclVar(dividendUse);
4576 // Insert a new GT_MULHI node before the existing GT_UDIV/GT_UMOD node.
4577 // The existing node will later be transformed into a GT_RSZ/GT_SUB that
4578 // computes the final result. This way we don't need to find and change the use
4579 // of the existing node.
4580 GenTree* mulhi = comp->gtNewOperNode(GT_MULHI, type, dividend, divisor);
4581 mulhi->gtFlags |= GTF_UNSIGNED;
4582 divisor->AsIntCon()->SetIconValue(magic);
4583 BlockRange().InsertBefore(divMod, mulhi);
4584 GenTree* firstNode = mulhi;
4586 if (requiresAdjustment)
4588 dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
4589 GenTree* sub = comp->gtNewOperNode(GT_SUB, type, dividend, mulhi);
4590 BlockRange().InsertBefore(divMod, dividend, sub);
4592 GenTree* one = comp->gtNewIconNode(1, TYP_INT);
4593 GenTree* rsz = comp->gtNewOperNode(GT_RSZ, type, sub, one);
4594 BlockRange().InsertBefore(divMod, one, rsz);
4596 LIR::Use mulhiUse(BlockRange(), &sub->gtOp.gtOp2, sub);
4597 mulhi = ReplaceWithLclVar(mulhiUse);
4599 mulhi = comp->gtNewLclvNode(mulhi->AsLclVar()->GetLclNum(), mulhi->TypeGet());
4600 GenTree* add = comp->gtNewOperNode(GT_ADD, type, rsz, mulhi);
4601 BlockRange().InsertBefore(divMod, mulhi, add);
4607 GenTree* shiftBy = comp->gtNewIconNode(shift, TYP_INT);
4608 BlockRange().InsertBefore(divMod, shiftBy);
4612 divMod->SetOper(GT_RSZ);
4613 divMod->gtOp1 = mulhi;
4614 divMod->gtOp2 = shiftBy;
4618 GenTree* div = comp->gtNewOperNode(GT_RSZ, type, mulhi, shiftBy);
4620 // dividend UMOD divisor = dividend SUB (div MUL divisor)
4621 GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
4622 GenTree* mul = comp->gtNewOperNode(GT_MUL, type, div, divisor);
4623 dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
4625 divMod->SetOper(GT_SUB);
4626 divMod->gtOp1 = dividend;
4627 divMod->gtOp2 = mul;
4629 BlockRange().InsertBefore(divMod, div, divisor, mul, dividend);
4631 ContainCheckRange(firstNode, divMod);
4639 // LowerConstIntDivOrMod: Transform integer GT_DIV/GT_MOD nodes with a constant
4640 //     divisor into equivalent but faster sequences.
4643 // node - pointer to the DIV or MOD node
4646 // nullptr if no transformation is done, or the next node in the transformed node sequence that
4647 // needs to be lowered.
4649 GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node)
4651 assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));
4652 GenTree* divMod = node;
4653 GenTree* dividend = divMod->gtGetOp1();
4654 GenTree* divisor = divMod->gtGetOp2();
4656 const var_types type = divMod->TypeGet();
4657 assert((type == TYP_INT) || (type == TYP_LONG));
4659 #if defined(USE_HELPERS_FOR_INT_DIV)
4660 assert(!"unreachable: integral GT_DIV/GT_MOD should get morphed into helper calls");
4661 #endif // USE_HELPERS_FOR_INT_DIV
4662 #if defined(_TARGET_ARM64_)
4663 assert(node->OperGet() != GT_MOD);
4664 #endif // _TARGET_ARM64_
4666 if (!divisor->IsCnsIntOrI())
4668 return nullptr; // no transformations to make
4671 if (dividend->IsCnsIntOrI())
4673 // We shouldn't see a divmod with constant operands here but if we do then it's likely
4674 // because optimizations are disabled or it's a case that's supposed to throw an exception.
4675 // Don't optimize this.
4679 ssize_t divisorValue = divisor->gtIntCon.IconValue();
4681 if (divisorValue == -1 || divisorValue == 0)
4683 // x / 0 and x % 0 can't be optimized because they are required to throw an exception.
4685 // x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.
4687 // x % -1 is always 0 and the IL spec says that the rem instruction "can" throw an exception if x is
4688 // the minimum representable integer. However, the C# spec says that an exception "is" thrown in this
4689 // case so optimizing this case would break C# code.
4691 // A runtime check could be used to handle this case but it's probably too rare to matter.
4695 bool isDiv = divMod->OperGet() == GT_DIV;
4699 if ((type == TYP_INT && divisorValue == INT_MIN) || (type == TYP_LONG && divisorValue == INT64_MIN))
4701 // If the divisor is the minimum representable integer value then we can use a compare;
4702 // the result is 1 iff the dividend equals the divisor.
4703 divMod->SetOper(GT_EQ);
4708 size_t absDivisorValue =
4709 (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue));
4711 if (!isPow2(absDivisorValue))
4713 if (comp->opts.MinOpts())
4718 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
4722 if (type == TYP_INT)
4724 magic = MagicDivide::GetSigned32Magic(static_cast<int32_t>(divisorValue), &shift);
4728 #ifdef _TARGET_64BIT_
4729 magic = MagicDivide::GetSigned64Magic(static_cast<int64_t>(divisorValue), &shift);
4730 #else // !_TARGET_64BIT_
4732 #endif // !_TARGET_64BIT_
4735 divisor->gtIntConCommon.SetIconValue(magic);
4737 // Insert a new GT_MULHI node in front of the existing GT_DIV/GT_MOD node.
4738 // The existing node will later be transformed into a GT_ADD/GT_SUB that
4739 // computes the final result. This way we don't need to find and change the
4740 // use of the existing node.
4741 GenTree* mulhi = comp->gtNewOperNode(GT_MULHI, type, divisor, dividend);
4742 BlockRange().InsertBefore(divMod, mulhi);
4744 // mulhi was the easy part. Now we need to generate different code depending
4745 // on the divisor value:
4747 // div = signbit(mulhi) + mulhi
4749 // div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
4751 // mulhi += dividend ; requires add adjust
4752 // div = signbit(mulhi) + sar(mulhi, 2) ; requires shift adjust
4754 // mulhi -= dividend ; requires sub adjust
4755 // div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
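// As a concrete sketch (the magic constant below is quoted from the standard signed
// magic-number derivation, not computed here): for TYP_INT and divisorValue == 3,
// GetSigned32Magic yields magic == 0x55555556 with shift == 0, so neither adjustment is
// needed and the division lowers to
//     div = signbit(mulhi) + mulhi
// where mulhi = dividend MULHI 0x55555556.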
4756 bool requiresAddSubAdjust = signum(divisorValue) != signum(magic);
4757 bool requiresShiftAdjust = shift != 0;
4758 bool requiresDividendMultiuse = requiresAddSubAdjust || !isDiv;
4759 unsigned curBBWeight = comp->compCurBB->getBBWeight(comp);
4761 if (requiresDividendMultiuse)
4763 LIR::Use dividendUse(BlockRange(), &mulhi->gtOp.gtOp2, mulhi);
4764 dividend = ReplaceWithLclVar(dividendUse);
4769 if (requiresAddSubAdjust)
4771 dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
4772 adjusted = comp->gtNewOperNode(divisorValue > 0 ? GT_ADD : GT_SUB, type, mulhi, dividend);
4773 BlockRange().InsertBefore(divMod, dividend, adjusted);
4780 GenTree* shiftBy = comp->gtNewIconNode(genTypeSize(type) * 8 - 1, type);
4781 GenTree* signBit = comp->gtNewOperNode(GT_RSZ, type, adjusted, shiftBy);
4782 BlockRange().InsertBefore(divMod, shiftBy, signBit);
4784 LIR::Use adjustedUse(BlockRange(), &signBit->gtOp.gtOp1, signBit);
4785 adjusted = ReplaceWithLclVar(adjustedUse);
4786 adjusted = comp->gtNewLclvNode(adjusted->AsLclVar()->GetLclNum(), adjusted->TypeGet());
4787 BlockRange().InsertBefore(divMod, adjusted);
4789 if (requiresShiftAdjust)
4791 shiftBy = comp->gtNewIconNode(shift, TYP_INT);
4792 adjusted = comp->gtNewOperNode(GT_RSH, type, adjusted, shiftBy);
4793 BlockRange().InsertBefore(divMod, shiftBy, adjusted);
4798 divMod->SetOperRaw(GT_ADD);
4799 divMod->gtOp.gtOp1 = adjusted;
4800 divMod->gtOp.gtOp2 = signBit;
4804 GenTree* div = comp->gtNewOperNode(GT_ADD, type, adjusted, signBit);
4806 dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
4808 // dividend % divisor = dividend - divisor x div
4809 GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
4810 GenTree* mul = comp->gtNewOperNode(GT_MUL, type, div, divisor);
4811 BlockRange().InsertBefore(divMod, dividend, div, divisor, mul);
4813 divMod->SetOperRaw(GT_SUB);
4814 divMod->gtOp.gtOp1 = dividend;
4815 divMod->gtOp.gtOp2 = mul;
4819 #elif defined(_TARGET_ARM_)
4820 // Currently there's no GT_MULHI for ARM32
4823 #error Unsupported or unset target architecture
4827 // We're committed to the conversion now. Go find the use if any.
4829 if (!BlockRange().TryGetUse(node, &use))
4834 // We need to use the dividend node multiple times so its value needs to be
4835 // computed once and stored in a temp variable.
4837 unsigned curBBWeight = comp->compCurBB->getBBWeight(comp);
4839 LIR::Use opDividend(BlockRange(), &divMod->gtOp.gtOp1, divMod);
4840 dividend = ReplaceWithLclVar(opDividend);
4842 GenTree* adjustment = comp->gtNewOperNode(GT_RSH, type, dividend, comp->gtNewIconNode(type == TYP_INT ? 31 : 63));
4844 if (absDivisorValue == 2)
4846 // If the divisor is +/-2 then we'd end up with a bitwise and between 0/-1 and 1.
4847 // We can get the same result by using GT_RSZ instead of GT_RSH.
4848 adjustment->SetOper(GT_RSZ);
4852 adjustment = comp->gtNewOperNode(GT_AND, type, adjustment, comp->gtNewIconNode(absDivisorValue - 1, type));
4855 GenTree* adjustedDividend =
4856 comp->gtNewOperNode(GT_ADD, type, adjustment,
4857 comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()));
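// As an illustration of the sequence built here (a sketch for TYP_INT with divisor == +4):
//     adj = (dividend RSH 31) AND 3             // 0 for non-negative dividends, 3 for negative ones
//     div = (dividend ADD adj) RSH 2             // GT_DIV case; rounds toward zero
//     mod = dividend SUB ((dividend ADD adj) AND ~3)   // GT_MOD case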
4863 // perform the division by right shifting the adjusted dividend
4864 divisor->gtIntCon.SetIconValue(genLog2(absDivisorValue));
4866 newDivMod = comp->gtNewOperNode(GT_RSH, type, adjustedDividend, divisor);
4867 ContainCheckShiftRotate(newDivMod->AsOp());
4869 if (divisorValue < 0)
4871 // negate the result if the divisor is negative
4872 newDivMod = comp->gtNewOperNode(GT_NEG, type, newDivMod);
4873 ContainCheckNode(newDivMod);
4878 // dividend % divisor = dividend - divisor x (dividend / divisor)
4879 // divisor x (dividend / divisor) translates to (dividend >> log2(divisor)) << log2(divisor)
4880 // which simply discards the low log2(divisor) bits, that's just dividend & ~(divisor - 1)
4881 divisor->gtIntCon.SetIconValue(~(absDivisorValue - 1));
4883 newDivMod = comp->gtNewOperNode(GT_SUB, type,
4884 comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()),
4885 comp->gtNewOperNode(GT_AND, type, adjustedDividend, divisor));
4888 // Remove the divisor and dividend nodes from the linear order,
4889 // since we have reused them and will resequence the tree
4890 BlockRange().Remove(divisor);
4891 BlockRange().Remove(dividend);
4893 // linearize and insert the new tree before the original divMod node
4894 InsertTreeBeforeAndContainCheck(divMod, newDivMod);
4895 BlockRange().Remove(divMod);
4897 // replace the original divmod node with the new divmod tree
4898 use.ReplaceWith(comp, newDivMod);
4900 return newDivMod->gtNext;
4902 //------------------------------------------------------------------------
4903 // LowerSignedDivOrMod: transform integer GT_DIV/GT_MOD nodes with a constant
4904 //     divisor into equivalent but faster sequences.
4907 // node - the DIV or MOD node
4910 // The next node to lower.
4912 GenTree* Lowering::LowerSignedDivOrMod(GenTree* node)
4914 assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));
4915 GenTree* next = node->gtNext;
4916 GenTree* divMod = node;
4917 GenTree* dividend = divMod->gtGetOp1();
4918 GenTree* divisor = divMod->gtGetOp2();
4920 if (varTypeIsIntegral(node->TypeGet()))
4922 // LowerConstIntDivOrMod will return nullptr if it doesn't transform the node.
4923 GenTree* newNode = LowerConstIntDivOrMod(node);
4924 if (newNode != nullptr)
4929 ContainCheckDivOrMod(node->AsOp());
4934 //------------------------------------------------------------------------
4935 // LowerShift: Lower shift nodes
4938 // shift - the shift node (GT_LSH, GT_RSH or GT_RSZ)
4941 // Remove unnecessary shift count masking; xarch shift instructions
4942 // mask the shift count to 5 bits (or 6 bits for 64 bit operations).
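// For example (a 32-bit shift sketch): x LSH (count AND 31) can be reduced to x LSH count,
// because the hardware instruction ignores all but the low 5 bits of the count anyway.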
4944 void Lowering::LowerShift(GenTreeOp* shift)
4946 assert(shift->OperIs(GT_LSH, GT_RSH, GT_RSZ));
4949 #ifdef _TARGET_64BIT_
4950 if (varTypeIsLong(shift->TypeGet()))
4955 assert(!varTypeIsLong(shift->TypeGet()));
4958 for (GenTree* andOp = shift->gtGetOp2(); andOp->OperIs(GT_AND); andOp = andOp->gtGetOp1())
4960 GenTree* maskOp = andOp->gtGetOp2();
4962 if (!maskOp->IsCnsIntOrI())
4967 if ((static_cast<size_t>(maskOp->AsIntCon()->IconValue()) & mask) != mask)
4972 shift->gtOp2 = andOp->gtGetOp1();
4973 BlockRange().Remove(andOp);
4974 BlockRange().Remove(maskOp);
4975 // The parent was replaced, clear contain and regOpt flag.
4976 shift->gtOp2->ClearContained();
4978 ContainCheckShiftRotate(shift);
4981 void Lowering::WidenSIMD12IfNecessary(GenTreeLclVarCommon* node)
4984 if (node->TypeGet() == TYP_SIMD12)
4987 // RyuJit backend depends on the assumption that on 64-Bit targets Vector3 size is rounded off
4988 // to TARGET_POINTER_SIZE and hence Vector3 locals on stack can be treated as TYP_SIMD16 for
4989 // reading and writing purposes.
4992 // RyuJit backend makes another implicit assumption: when Vector3 type args are passed in
4993 // registers or on the stack, the upper-most 4 bytes will be zero.
4995 // For P/Invoke return and Reverse P/Invoke argument passing, native compiler doesn't guarantee
4996 // that the upper 4 bytes of a Vector3 type struct are zero initialized and hence assumption 2 is invalid.
4999 // RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12
5000 // bytes. In case of Vector3 returns, Caller allocates a zero initialized Vector3 local and
5001 // passes it retBuf arg and Callee method writes only 12 bytes to retBuf. For this reason,
5002 // there is no need to clear upper 4-bytes of Vector3 type args.
5004 // RyuJIT x64 Unix: arguments are treated as passed by value and read/written as if TYP_SIMD16.
5005 // Vector3 return values are returned in two return registers and the caller assembles them into a
5006 // single xmm reg. Hence RyuJIT explicitly generates code to clear the upper 4 bytes of Vector3
5007 // type args in the prolog and of the Vector3 type return value of a call
5009 // RyuJIT x86 Windows: all non-param Vector3 local vars are allocated as 16 bytes. Vector3 arguments
5010 // are pushed as 12 bytes. For return values, a 16-byte local is allocated and the address passed
5011 // as a return buffer pointer. The callee doesn't write the high 4 bytes, and we don't need to clear
5014 unsigned varNum = node->AsLclVarCommon()->GetLclNum();
5015 LclVarDsc* varDsc = &comp->lvaTable[varNum];
5017 if (comp->lvaMapSimd12ToSimd16(varDsc))
5019 JITDUMP("Mapping TYP_SIMD12 lclvar node to TYP_SIMD16:\n");
5021 JITDUMP("============");
5023 node->gtType = TYP_SIMD16;
5026 #endif // FEATURE_SIMD
5029 //------------------------------------------------------------------------
5030 // LowerArrElem: Lower a GT_ARR_ELEM node
5033 // node - the GT_ARR_ELEM node to lower.
5036 // The next node to lower.
5039 // 'node' must be a GT_ARR_ELEM node.
5042 // This performs the following lowering. We start with a node of the form:
5048 // First, we create temps for arrObj if it is not already a lclVar, and for any of the index
5049 // expressions that have side-effects.
5050 // We then transform the tree into:
5051 // <offset is null - no accumulated offset for the first index>
5054 // /--* ArrIndex[i, ]
5056 // /--| arrOffs[i, ]
5059 // +--* ArrIndex[*,j]
5061 // /--| arrOffs[*,j]
5062 // +--* lclVar NewTemp
5063 // /--* lea (scale = element size, offset = offset of first element)
5065 // The new stmtExpr may be omitted if the <arrObj> is a lclVar.
5066 // The new stmtExpr may be embedded if the <arrObj> is not the first tree in linear order for
5067 // the statement containing the original arrMD.
5068 // Note that the arrMDOffs is the INDEX of the lea, but is evaluated before the BASE (which is the second
5069 // reference to NewTemp), because that provides more accurate lifetimes.
5070 // There may be 1, 2 or 3 dimensions, with 1, 2 or 3 arrMDIdx nodes, respectively.
5072 GenTree* Lowering::LowerArrElem(GenTree* node)
5074 // This will assert if we don't have an ArrElem node
5075 GenTreeArrElem* arrElem = node->AsArrElem();
5076 const unsigned char rank = arrElem->gtArrElem.gtArrRank;
5077 const unsigned blockWeight = m_block->getBBWeight(comp);
5079 JITDUMP("Lowering ArrElem\n");
5080 JITDUMP("============\n");
5081 DISPTREERANGE(BlockRange(), arrElem);
5084 assert(arrElem->gtArrObj->TypeGet() == TYP_REF);
5086 // We need to have the array object in a lclVar.
5087 if (!arrElem->gtArrObj->IsLocal())
5089 LIR::Use arrObjUse(BlockRange(), &arrElem->gtArrObj, arrElem);
5090 ReplaceWithLclVar(arrObjUse);
5093 GenTree* arrObjNode = arrElem->gtArrObj;
5094 assert(arrObjNode->IsLocal());
5096 LclVarDsc* const varDsc = &comp->lvaTable[arrElem->gtArrObj->AsLclVarCommon()->gtLclNum];
5098 GenTree* insertionPoint = arrElem;
5100 // The first ArrOffs node will have 0 for the offset of the previous dimension.
5101 GenTree* prevArrOffs = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);
5102 BlockRange().InsertBefore(insertionPoint, prevArrOffs);
5103 GenTree* nextToLower = prevArrOffs;
5105 for (unsigned char dim = 0; dim < rank; dim++)
5107 GenTree* indexNode = arrElem->gtArrElem.gtArrInds[dim];
5109 // Use the original arrObjNode on the 0th ArrIndex node, and clone it for subsequent ones.
5110 GenTree* idxArrObjNode;
5113 idxArrObjNode = arrObjNode;
5117 idxArrObjNode = comp->gtClone(arrObjNode);
5118 BlockRange().InsertBefore(insertionPoint, idxArrObjNode);
5121 // Next comes the GT_ARR_INDEX node.
5122 GenTreeArrIndex* arrMDIdx = new (comp, GT_ARR_INDEX)
5123 GenTreeArrIndex(TYP_INT, idxArrObjNode, indexNode, dim, rank, arrElem->gtArrElem.gtArrElemType);
5124 arrMDIdx->gtFlags |= ((idxArrObjNode->gtFlags | indexNode->gtFlags) & GTF_ALL_EFFECT);
5125 BlockRange().InsertBefore(insertionPoint, arrMDIdx);
5127 GenTree* offsArrObjNode = comp->gtClone(arrObjNode);
5128 BlockRange().InsertBefore(insertionPoint, offsArrObjNode);
5130 GenTreeArrOffs* arrOffs =
5131 new (comp, GT_ARR_OFFSET) GenTreeArrOffs(TYP_I_IMPL, prevArrOffs, arrMDIdx, offsArrObjNode, dim, rank,
5132 arrElem->gtArrElem.gtArrElemType);
5133 arrOffs->gtFlags |= ((prevArrOffs->gtFlags | arrMDIdx->gtFlags | offsArrObjNode->gtFlags) & GTF_ALL_EFFECT);
5134 BlockRange().InsertBefore(insertionPoint, arrOffs);
5136 prevArrOffs = arrOffs;
5139 // Generate the LEA and make it reverse evaluation, because we want to evaluate the index expression before the base.
5141 unsigned scale = arrElem->gtArrElem.gtArrElemSize;
5142 unsigned offset = comp->eeGetMDArrayDataOffset(arrElem->gtArrElem.gtArrElemType, arrElem->gtArrElem.gtArrRank);
5144 GenTree* leaIndexNode = prevArrOffs;
5145 if (!jitIsScaleIndexMul(scale))
5147 // We do the address arithmetic in TYP_I_IMPL, though note that the lower bounds and lengths in memory are TYP_INT.
5149 GenTree* scaleNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, scale);
5150 GenTree* mulNode = new (comp, GT_MUL) GenTreeOp(GT_MUL, TYP_I_IMPL, leaIndexNode, scaleNode);
5151 BlockRange().InsertBefore(insertionPoint, scaleNode, mulNode);
5152 leaIndexNode = mulNode;
5156 GenTree* leaBase = comp->gtClone(arrObjNode);
5157 BlockRange().InsertBefore(insertionPoint, leaBase);
5159 GenTree* leaNode = new (comp, GT_LEA) GenTreeAddrMode(arrElem->TypeGet(), leaBase, leaIndexNode, scale, offset);
5161 BlockRange().InsertBefore(insertionPoint, leaNode);
5163 LIR::Use arrElemUse;
5164 if (BlockRange().TryGetUse(arrElem, &arrElemUse))
5166 arrElemUse.ReplaceWith(comp, leaNode);
5170 leaNode->SetUnusedValue();
5173 BlockRange().Remove(arrElem);
5175 JITDUMP("Results of lowering ArrElem:\n");
5176 DISPTREERANGE(BlockRange(), leaNode);
5182 void Lowering::DoPhase()
5184 // If we have any PInvoke calls, insert the one-time prolog code. We'll insert the epilog code in the
5185 // appropriate spots later. NOTE: there is a minor optimization opportunity here, as we still create p/invoke
5186 // data structures and setup/teardown even if we've eliminated all p/invoke calls due to dead code elimination.
5187 if (comp->info.compCallUnmanaged)
5189 InsertPInvokeMethodProlog();
5192 #if !defined(_TARGET_64BIT_)
5193 DecomposeLongs decomp(comp); // Initialize the long decomposition class.
5194 if (comp->compLongUsed)
5196 decomp.PrepareForDecomposition();
5198 #endif // !defined(_TARGET_64BIT_)
5200 for (BasicBlock* block = comp->fgFirstBB; block; block = block->bbNext)
5202 /* Make the block publicly available */
5203 comp->compCurBB = block;
5205 #if !defined(_TARGET_64BIT_)
5206 if (comp->compLongUsed)
5208 decomp.DecomposeBlock(block);
5210 #endif //!_TARGET_64BIT_
5216 JITDUMP("Lower has completed modifying nodes.\n");
5219 comp->fgDispBasicBlocks(true);
5223 // Recompute local var ref counts before potentially sorting for liveness.
5224 // Note this does minimal work in cases where we are not going to sort.
5225 const bool isRecompute = true;
5226 const bool setSlotNumbers = false;
5227 comp->lvaComputeRefCounts(isRecompute, setSlotNumbers);
5229 comp->fgLocalVarLiveness();
5230 // local var liveness can delete code, which may create empty blocks
5231 if (comp->opts.OptimizationEnabled())
5233 comp->optLoopsMarked = false;
5234 bool modified = comp->fgUpdateFlowGraph();
5237 JITDUMP("had to run another liveness pass:\n");
5238 comp->fgLocalVarLiveness();
5242 // Recompute local var ref counts again after liveness to reflect
5243 // impact of any dead code removal. Note this may leave us with
5244 // tracked vars that have zero refs.
5245 comp->lvaComputeRefCounts(isRecompute, setSlotNumbers);
5248 JITDUMP("Liveness pass finished after lowering, IR:\n");
5251 comp->fgDispBasicBlocks(true);
5254 for (BasicBlock* block = comp->fgFirstBB; block; block = block->bbNext)
5256 assert(LIR::AsRange(block).CheckLIR(comp, true));
5263 //------------------------------------------------------------------------
5264 // Lowering::CheckCallArg: check that a call argument is in an expected
5265 // form after lowering.
5268 // arg - the argument to check.
5270 void Lowering::CheckCallArg(GenTree* arg)
5272 if (!arg->IsValue() && !arg->OperIsPutArgStk())
5274 assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() ||
5275 arg->OperIsCopyBlkOp());
5279 switch (arg->OperGet())
5283 GenTreeFieldList* list = arg->AsFieldList();
5284 assert(list->isContained());
5285 assert(list->IsFieldListHead());
5287 for (; list != nullptr; list = list->Rest())
5289 assert(list->Current()->OperIsPutArg());
5295 assert(arg->OperIsPutArg());
5300 //------------------------------------------------------------------------
5301 // Lowering::CheckCall: check that a call is in an expected form after
5302 // lowering. Currently this amounts to checking its
5303 // arguments, but could be expanded to verify more
5304 // properties in the future.
5307 // call - the call to check.
5309 void Lowering::CheckCall(GenTreeCall* call)
5311 if (call->gtCallObjp != nullptr)
5313 CheckCallArg(call->gtCallObjp);
5316 for (GenTreeArgList* args = call->gtCallArgs; args != nullptr; args = args->Rest())
5318 CheckCallArg(args->Current());
5321 for (GenTreeArgList* args = call->gtCallLateArgs; args != nullptr; args = args->Rest())
5323 CheckCallArg(args->Current());
5327 //------------------------------------------------------------------------
5328 // Lowering::CheckNode: check that an LIR node is in an expected form
5332 // compiler - the compiler context.
5333 // node - the node to check.
void Lowering::CheckNode(Compiler* compiler, GenTree* node)
{
    switch (node->OperGet())
    {
        case GT_CALL:
            CheckCall(node->AsCall());
            break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            assert(node->TypeGet() != TYP_SIMD12);
            break;
#ifdef _TARGET_64BIT_
        case GT_LCL_VAR:
        case GT_STORE_LCL_VAR:
        {
            unsigned   lclNum = node->AsLclVarCommon()->GetLclNum();
            LclVarDsc* lclVar = &compiler->lvaTable[lclNum];
            assert(node->TypeGet() != TYP_SIMD12 || compiler->lvaIsFieldOfDependentlyPromotedStruct(lclVar));
        }
        break;
#endif // _TARGET_64BIT_
#endif // FEATURE_SIMD

        default:
            break;
    }
}
//------------------------------------------------------------------------
// Lowering::CheckBlock: check that the contents of an LIR block are in an
//                       expected form after lowering.
//
// Arguments:
//    compiler - the compiler context.
//    block    - the block to check.
//
bool Lowering::CheckBlock(Compiler* compiler, BasicBlock* block)
{
    assert(block->isEmpty() || block->IsLIR());

    LIR::Range& blockRange = LIR::AsRange(block);
    for (GenTree* node : blockRange)
    {
        CheckNode(compiler, node);
    }

    assert(blockRange.CheckLIR(compiler, true));
    return true;
}

#endif // DEBUG
void Lowering::LowerBlock(BasicBlock* block)
{
    assert(block == comp->compCurBB); // compCurBB must already be set.
    assert(block->isEmpty() || block->IsLIR());

    m_block = block;

    // NOTE: some of the lowering methods insert calls before the node being
    // lowered (See e.g. InsertPInvoke{Method,Call}{Prolog,Epilog}). In
    // general, any code that is inserted before the current node should be
    // "pre-lowered", as it will not be subject to further processing.
    // Lowering::CheckBlock() runs some extra checks on call arguments in
    // order to help catch unlowered nodes.

    GenTree* node = BlockRange().FirstNode();
    while (node != nullptr)
    {
        node = LowerNode(node);
    }

    assert(CheckBlock(comp, block));
}
/** Verifies if both of these trees represent the same indirection.
 * Used by Lower to annotate whether CodeGen can generate an instruction of the
 * form *addrMode BinOp= expr
 *
 * Preconditions: both trees are children of GT_INDs and their underlying children
 * have the same gtOper.
 *
 * This is a first iteration to actually recognize trees that can be code-generated
 * as a single read-modify-write instruction on AMD64/x86. For now
 * this method only supports the recognition of simple addressing modes (through GT_LEA)
 * or local var indirections. Local fields, array access and other more complex nodes are
 * not yet supported.
 *
 * TODO-CQ: Perform tree recognition by using the Value Numbering Package, so that we can
 * recognize arbitrarily complex trees and support many more addressing patterns.
 */
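// Illustrative sketch (the IR shapes below are examples only, not the full set handled): given
//     t1 = IND(LEA(base, index, 4, 16))
//     t2 = ADD(t1, 3)
//          STOREIND(LEA(base, index, 4, 16), t2)
// the load and the store address the same location, so codegen may emit a single
// read-modify-write instruction such as
//     add dword ptr [base + 4*index + 16], 3
// instead of a separate load, add, and store.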
bool Lowering::IndirsAreEquivalent(GenTree* candidate, GenTree* storeInd)
{
    assert(candidate->OperGet() == GT_IND);
    assert(storeInd->OperGet() == GT_STOREIND);

    // We should check the size of the indirections. If they are
    // different, say because of a cast, then we can't call them equivalent. Doing so could cause us
    // to drop a cast.
    // Signed-ness difference is okay and expected since a store indirection must always
    // be signed based on the CIL spec, but a load could be unsigned.
    if (genTypeSize(candidate->gtType) != genTypeSize(storeInd->gtType))
    {
        return false;
    }

    GenTree* pTreeA = candidate->gtGetOp1();
    GenTree* pTreeB = storeInd->gtGetOp1();

    // This method will be called by codegen (as well as during lowering).
    // After register allocation, the sources may have been spilled and reloaded
    // to a different register, indicated by an inserted GT_RELOAD node.
    pTreeA = pTreeA->gtSkipReloadOrCopy();
    pTreeB = pTreeB->gtSkipReloadOrCopy();

    genTreeOps oper;

    if (pTreeA->OperGet() != pTreeB->OperGet())
    {
        return false;
    }

    oper = pTreeA->OperGet();
    switch (oper)
    {
        case GT_LCL_VAR:
        case GT_LCL_VAR_ADDR:
        case GT_CLS_VAR_ADDR:
        case GT_CNS_INT:
            return NodesAreEquivalentLeaves(pTreeA, pTreeB);

        case GT_LEA:
        {
            GenTreeAddrMode* gtAddr1 = pTreeA->AsAddrMode();
            GenTreeAddrMode* gtAddr2 = pTreeB->AsAddrMode();
            return NodesAreEquivalentLeaves(gtAddr1->Base(), gtAddr2->Base()) &&
                   NodesAreEquivalentLeaves(gtAddr1->Index(), gtAddr2->Index()) &&
                   (gtAddr1->gtScale == gtAddr2->gtScale) && (gtAddr1->Offset() == gtAddr2->Offset());
        }
        default:
            // We don't handle anything that is not either a constant,
            // a local var or LEA.
            return false;
    }
}
/** Test whether the two given nodes are the same leaves.
 *  Right now, only constant integers and local variables are supported.
 */
bool Lowering::NodesAreEquivalentLeaves(GenTree* tree1, GenTree* tree2)
{
    if (tree1 == nullptr && tree2 == nullptr)
    {
        return true;
    }

    // If both are null they are equivalent (handled above); if only one is null they are not.
    if (tree1 == nullptr || tree2 == nullptr)
    {
        return false;
    }

    tree1 = tree1->gtSkipReloadOrCopy();
    tree2 = tree2->gtSkipReloadOrCopy();

    if (tree1->TypeGet() != tree2->TypeGet())
    {
        return false;
    }

    if (tree1->OperGet() != tree2->OperGet())
    {
        return false;
    }

    if (!tree1->OperIsLeaf() || !tree2->OperIsLeaf())
    {
        return false;
    }

    switch (tree1->OperGet())
    {
        case GT_CNS_INT:
            return tree1->gtIntCon.gtIconVal == tree2->gtIntCon.gtIconVal &&
                   tree1->IsIconHandle() == tree2->IsIconHandle();
        case GT_LCL_VAR:
        case GT_LCL_VAR_ADDR:
            return tree1->gtLclVarCommon.gtLclNum == tree2->gtLclVarCommon.gtLclNum;
        case GT_CLS_VAR_ADDR:
            return tree1->gtClsVar.gtClsVarHnd == tree2->gtClsVar.gtClsVarHnd;
        default:
            return false;
    }
}
//------------------------------------------------------------------------
// Containment Analysis
//------------------------------------------------------------------------
void Lowering::ContainCheckNode(GenTree* node)
{
    switch (node->gtOper)
    {
        case GT_STORE_LCL_VAR:
        case GT_STORE_LCL_FLD:
            ContainCheckStoreLoc(node->AsLclVarCommon());
            break;

        case GT_EQ:
        case GT_NE:
        case GT_LT:
        case GT_LE:
        case GT_GE:
        case GT_GT:
        case GT_TEST_EQ:
        case GT_TEST_NE:
        case GT_CMP:
            ContainCheckCompare(node->AsOp());
            break;

        case GT_JTRUE:
            ContainCheckJTrue(node->AsOp());
            break;

        case GT_ADD:
        case GT_SUB:
#if !defined(_TARGET_64BIT_)
        case GT_ADD_LO:
        case GT_ADD_HI:
        case GT_SUB_LO:
        case GT_SUB_HI:
#endif
        case GT_AND:
        case GT_OR:
        case GT_XOR:
            ContainCheckBinary(node->AsOp());
            break;

#if defined(_TARGET_X86_)
        case GT_MUL_LONG:
#endif
        case GT_MUL:
        case GT_MULHI:
            ContainCheckMul(node->AsOp());
            break;

        case GT_DIV:
        case GT_MOD:
        case GT_UDIV:
        case GT_UMOD:
            ContainCheckDivOrMod(node->AsOp());
            break;

        case GT_LSH:
        case GT_RSH:
        case GT_RSZ:
        case GT_ROL:
        case GT_ROR:
#ifndef _TARGET_64BIT_
        case GT_LSH_HI:
        case GT_RSH_LO:
#endif
            ContainCheckShiftRotate(node->AsOp());
            break;

        case GT_ARR_OFFSET:
            ContainCheckArrOffset(node->AsArrOffs());
            break;

        case GT_LCLHEAP:
            ContainCheckLclHeap(node->AsOp());
            break;

        case GT_RETURN:
            ContainCheckRet(node->AsOp());
            break;

        case GT_RETURNTRAP:
            ContainCheckReturnTrap(node->AsOp());
            break;

        case GT_STOREIND:
            ContainCheckStoreIndir(node->AsIndir());
            break;

        case GT_IND:
            ContainCheckIndir(node->AsIndir());
            break;

        case GT_PUTARG_REG:
        case GT_PUTARG_STK:
#if FEATURE_ARG_SPLIT
        case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
            // The regNum must have been set by the lowering of the call.
            assert(node->gtRegNum != REG_NA);
            break;

#ifdef _TARGET_XARCH_
        case GT_INTRINSIC:
            ContainCheckIntrinsic(node->AsOp());
            break;
#endif // _TARGET_XARCH_

#ifdef FEATURE_SIMD
        case GT_SIMD:
            ContainCheckSIMD(node->AsSIMD());
            break;
#endif // FEATURE_SIMD

#ifdef FEATURE_HW_INTRINSICS
        case GT_HWIntrinsic:
            ContainCheckHWIntrinsic(node->AsHWIntrinsic());
            break;
#endif // FEATURE_HW_INTRINSICS

        default:
            break;
    }
}
//------------------------------------------------------------------------
// ContainCheckReturnTrap: determine whether the source of a RETURNTRAP should be contained.
//
// Arguments:
//    node - pointer to the GT_RETURNTRAP node
//
void Lowering::ContainCheckReturnTrap(GenTreeOp* node)
{
#ifdef _TARGET_XARCH_
    assert(node->OperIs(GT_RETURNTRAP));
    // This just turns into a compare of its child with an int + a conditional call
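    // Roughly (an illustrative sketch; the exact instructions and helper are chosen by codegen):
    //     cmp dword ptr [<address of the trap flag>], 0
    //     jne <call the stop-for-GC helper>
    // Containing an indirection child lets the compare read memory directly rather than
    // loading the flag into a register first.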
    if (node->gtOp1->isIndir())
    {
        MakeSrcContained(node, node->gtOp1);
    }
#endif // _TARGET_XARCH_
}
//------------------------------------------------------------------------
// ContainCheckArrOffset: determine whether the source of an ARR_OFFSET should be contained.
//
// Arguments:
//    node - pointer to the GT_ARR_OFFSET node
//
void Lowering::ContainCheckArrOffset(GenTreeArrOffs* node)
{
    assert(node->OperIs(GT_ARR_OFFSET));
    // we don't want to generate code for this
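    // (For example, the accumulated offset for the outermost dimension of a multi-dimensional
    // array access is the constant 0; once that constant is contained, codegen need not emit a
    // scale-and-add for this node and can use the current index directly. This is a sketch of
    // the intent only; the exact sequence is decided by codegen.)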
    if (node->gtOffset->IsIntegralConst(0))
    {
        MakeSrcContained(node, node->gtArrOffs.gtOffset);
    }
}
//------------------------------------------------------------------------
// ContainCheckLclHeap: determine whether the source of a GT_LCLHEAP node should be contained.
//
// Arguments:
//    node - pointer to the node
//
void Lowering::ContainCheckLclHeap(GenTreeOp* node)
{
    assert(node->OperIs(GT_LCLHEAP));
    GenTree* size = node->gtOp.gtOp1;
    if (size->IsCnsIntOrI())
    {
        MakeSrcContained(node, size);
    }
}
//------------------------------------------------------------------------
// ContainCheckRet: determine whether the source of a GT_RETURN node should be contained.
//
// Arguments:
//    ret - pointer to the GT_RETURN node
//
void Lowering::ContainCheckRet(GenTreeOp* ret)
{
    assert(ret->OperIs(GT_RETURN));

#if !defined(_TARGET_64BIT_)
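    // On 32-bit targets a TYP_LONG return value is a GT_LONG pair. Containing the GT_LONG means
    // it produces no register of its own; its two halves are simply sourced to the return
    // register pair (e.g. EDX:EAX on x86). This is a descriptive note only -- the actual
    // register assignment is made later by the register allocator.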
    if (ret->TypeGet() == TYP_LONG)
    {
        GenTree* op1 = ret->gtGetOp1();
        noway_assert(op1->OperGet() == GT_LONG);
        MakeSrcContained(ret, op1);
    }
#endif // !defined(_TARGET_64BIT_)
#if FEATURE_MULTIREG_RET
    if (varTypeIsStruct(ret))
    {
        GenTree* op1 = ret->gtGetOp1();
        // op1 must be either a lclvar or a multi-reg returning call
        if (op1->OperGet() == GT_LCL_VAR)
        {
            GenTreeLclVarCommon* lclVarCommon = op1->AsLclVarCommon();
            LclVarDsc*           varDsc       = &(comp->lvaTable[lclVarCommon->gtLclNum]);
            assert(varDsc->lvIsMultiRegRet);

            // Mark var as contained if not enregistrable.
            if (!varTypeIsEnregisterableStruct(op1))
            {
                MakeSrcContained(ret, op1);
            }
        }
    }
#endif // FEATURE_MULTIREG_RET
}
//------------------------------------------------------------------------
// ContainCheckJTrue: determine whether the source of a JTRUE should be contained.
//
// Arguments:
//    node - pointer to the node
//
void Lowering::ContainCheckJTrue(GenTreeOp* node)
{
    // The compare does not need to be generated into a register.
    GenTree* cmp = node->gtGetOp1();
    cmp->gtType  = TYP_VOID;
    cmp->gtFlags |= GTF_SET_FLAGS;
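    // In effect (an illustrative sketch, not the literal emitted sequence): the relop feeding
    // the JTRUE sets the condition flags and the JTRUE becomes a conditional jump that consumes
    // them, e.g.
    //     cmp  eax, edx
    //     jl   <taken block>
    // rather than materializing a 0/1 value for the compare and testing it again.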