1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
12 XX Postconditions (for the nodes currently handled): XX
13 XX - All operands requiring a register are explicit in the graph XX
15 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
16 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
26 #if !defined(_TARGET_64BIT_)
27 #include "decomposelongs.h"
28 #endif // !defined(_TARGET_64BIT_)
30 //------------------------------------------------------------------------
31 // MakeSrcContained: Make "childNode" a contained node
34 // parentNode - is a non-leaf node that can contain its 'childNode'
35 // childNode - is an op that will now be contained by its parent.
38 // If 'childNode' has any existing sources, they will now be sources for the parent.
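//
// For example (an illustrative xarch case): containing an indirection under an add, as in
// ADD(reg, IND(addr)), lets codegen fold the load into the add as a memory operand instead
// of first materializing the loaded value into a register of its own.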
40 void Lowering::MakeSrcContained(GenTree* parentNode, GenTree* childNode)
42 assert(!parentNode->OperIsLeaf());
43 assert(childNode->canBeContained());
44 childNode->SetContained();
45 assert(childNode->isContained());
48 //------------------------------------------------------------------------
49 // CheckImmedAndMakeContained: Checks if the 'childNode' is a containable immediate
50 // and, if so, makes it contained.
53 // parentNode - is any non-leaf node
54 // childNode - is a child op of 'parentNode'
57 // true if we are able to make childNode a contained immediate
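//
// For example (illustrative): for a compare such as CMP(x, 42) on xarch, the constant 42
// typically fits in the instruction's immediate field, so it is contained and no register
// has to be allocated to hold it.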
59 bool Lowering::CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode)
61 assert(!parentNode->OperIsLeaf());
62 // If childNode is a containable immediate
63 if (IsContainableImmed(parentNode, childNode))
65 // then make it contained within the parentNode
66 MakeSrcContained(parentNode, childNode);
72 //------------------------------------------------------------------------
73 // IsSafeToContainMem: Checks for conflicts between childNode and parentNode,
74 // and returns 'true' iff memory operand childNode can be contained in parentNode.
77 // parentNode - any non-leaf node
78 // childNode - some node that is an input to `parentNode`
81 // true if it is safe to make childNode a contained memory operand.
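//
// For example (illustrative): an IND of a local's address cannot be contained by its user if
// a store to that local appears between the IND and the user in execution order; containing
// the IND would delay the load past the store and read the wrong value.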
83 bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode)
85 m_scratchSideEffects.Clear();
86 m_scratchSideEffects.AddNode(comp, childNode);
88 for (GenTree* node = childNode->gtNext; node != parentNode; node = node->gtNext)
90 const bool strict = true;
91 if (m_scratchSideEffects.InterferesWith(comp, node, strict))
100 //------------------------------------------------------------------------
102 // This is the main entry point for Lowering.
103 GenTree* Lowering::LowerNode(GenTree* node)
105 assert(node != nullptr);
106 switch (node->gtOper)
109 TryCreateAddrMode(LIR::Use(BlockRange(), &node->gtOp.gtOp1, node), true);
110 ContainCheckIndir(node->AsIndir());
114 TryCreateAddrMode(LIR::Use(BlockRange(), &node->gtOp.gtOp1, node), true);
115 if (!comp->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(node))
117 LowerStoreIndir(node->AsIndir());
123 GenTree* afterTransform = LowerAdd(node);
124 if (afterTransform != nullptr)
126 return afterTransform;
131 #if !defined(_TARGET_64BIT_)
141 ContainCheckBinary(node->AsOp());
146 #if defined(_TARGET_X86_)
149 ContainCheckMul(node->AsOp());
154 if (!LowerUnsignedDivOrMod(node->AsOp()))
156 ContainCheckDivOrMod(node->AsOp());
162 return LowerSignedDivOrMod(node);
165 return LowerSwitch(node);
180 return LowerCompare(node);
183 return LowerJTrue(node->AsOp());
186 LowerJmpMethod(node);
194 ContainCheckReturnTrap(node->AsOp());
201 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
202 case GT_ARR_BOUNDS_CHECK:
205 #endif // FEATURE_SIMD
206 #ifdef FEATURE_HW_INTRINSICS
207 case GT_HW_INTRINSIC_CHK:
208 #endif // FEATURE_HW_INTRINSICS
209 ContainCheckBoundsChk(node->AsBoundsChk());
211 #endif // _TARGET_XARCH_ || _TARGET_ARM64_
213 return LowerArrElem(node);
216 ContainCheckArrOffset(node->AsArrOffs());
224 #ifndef _TARGET_64BIT_
227 ContainCheckShiftRotate(node->AsOp());
229 #endif // !_TARGET_64BIT_
234 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
235 LowerShift(node->AsOp());
237 ContainCheckShiftRotate(node->AsOp());
243 case GT_STORE_DYN_BLK:
245 GenTreeBlk* blkNode = node->AsBlk();
246 TryCreateAddrMode(LIR::Use(BlockRange(), &blkNode->Addr(), blkNode), false);
247 LowerBlockStore(blkNode);
252 ContainCheckLclHeap(node->AsOp());
255 #ifdef _TARGET_XARCH_
257 ContainCheckIntrinsic(node->AsOp());
259 #endif // _TARGET_XARCH_
263 LowerSIMD(node->AsSIMD());
265 #endif // FEATURE_SIMD
267 #ifdef FEATURE_HW_INTRINSICS
269 LowerHWIntrinsic(node->AsHWIntrinsic());
271 #endif // FEATURE_HW_INTRINSICS
275 // We should only encounter this for lclVars that are lvDoNotEnregister.
276 verifyLclFldDoNotEnregister(node->AsLclVarCommon()->gtLclNum);
281 WidenSIMD12IfNecessary(node->AsLclVarCommon());
284 case GT_STORE_LCL_VAR:
285 WidenSIMD12IfNecessary(node->AsLclVarCommon());
288 case GT_STORE_LCL_FLD:
290 #if defined(_TARGET_AMD64_) && defined(FEATURE_SIMD)
291 GenTreeLclVarCommon* const store = node->AsLclVarCommon();
292 if ((store->TypeGet() == TYP_SIMD8) != (store->gtOp1->TypeGet() == TYP_SIMD8))
294 GenTreeUnOp* bitcast =
295 new (comp, GT_BITCAST) GenTreeOp(GT_BITCAST, store->TypeGet(), store->gtOp1, nullptr);
296 store->gtOp1 = bitcast;
297 BlockRange().InsertBefore(store, bitcast);
299 #endif // _TARGET_AMD64_ && FEATURE_SIMD
300 // TODO-1stClassStructs: Once we remove the requirement that all struct stores
301 // are block stores (GT_STORE_BLK or GT_STORE_OBJ), here is where we would put the local
302 // store under a block store if codegen will require it.
303 if ((node->TypeGet() == TYP_STRUCT) && (node->gtGetOp1()->OperGet() != GT_PHI))
305 #if FEATURE_MULTIREG_RET
306 GenTree* src = node->gtGetOp1();
307 assert((src->OperGet() == GT_CALL) && src->AsCall()->HasMultiRegRetVal());
308 #else // !FEATURE_MULTIREG_RET
309 assert(!"Unexpected struct local store in Lowering");
310 #endif // !FEATURE_MULTIREG_RET
312 LowerStoreLoc(node->AsLclVarCommon());
316 #if defined(_TARGET_ARM64_)
318 CheckImmedAndMakeContained(node, node->AsCmpXchg()->gtOpComparand);
322 CheckImmedAndMakeContained(node, node->gtOp.gtOp2);
324 #elif defined(_TARGET_XARCH_)
326 if (node->IsUnusedValue())
328 node->ClearUnusedValue();
329 // Make sure the types are identical, since the node type is changed to VOID
330 // CodeGen relies on op2's type to determine the instruction size.
331 // Note that the node type cannot be a small int but the data operand can.
332 assert(genActualType(node->gtGetOp2()->TypeGet()) == node->TypeGet());
333 node->SetOper(GT_LOCKADD);
334 node->gtType = TYP_VOID;
335 CheckImmedAndMakeContained(node, node->gtGetOp2());
340 #ifndef _TARGET_ARMARCH_
341 // TODO-ARMARCH-CQ: We should contain this as long as the offset fits.
343 if (node->AsObj()->Addr()->OperIsLocalAddr())
345 node->AsObj()->Addr()->SetContained();
348 #endif // !_TARGET_ARMARCH_
357 /** -- Switch Lowering --
358 * The main idea of switch lowering is to make the register requirements of this node transparent
359 * to LSRA downstream. Although the switch instruction is inherently a control statement that the JIT
360 * represents as a simple tree node, at the time we actually generate code for it we end up
361 * generating instructions that modify the flow of execution and that impose complicated
362 * register requirements and lifetimes.
364 * So, for the purpose of LSRA, we want to have a more detailed specification of what a switch node actually
365 * means and, more importantly, which registers we need and when we need them for each instruction we want
366 * to issue, so that they can be allocated correctly downstream.
368 * For this purpose, this procedure performs switch lowering in two different ways:
370 * a) Represent the switch statement as a zero-index jump table construct. This means that for every destination
371 * of the switch, we will store this destination in an array of addresses and the code generator will issue
372 * a data section where this array will live and will emit code that, based on the switch index, will indirect and
373 * jump to the destination specified in the jump table.
375 * For this transformation we introduce a new GT node called GT_SWITCH_TABLE that is a specialization of the switch
376 * node for jump table based switches.
377 * The overall structure of a GT_SWITCH_TABLE is:
380 * |_________ localVar (a temporary local that holds the switch index)
381 * |_________ jumpTable (this is a special node that holds the address of the jump table array)
383 * Now, the way we morph a GT_SWITCH node into this lowered switch table node form is the following:
385 * Input: GT_SWITCH (inside a basic block whose Branch Type is BBJ_SWITCH)
386 * |_____ expr (an arbitrarily complex GT_NODE that represents the switch index)
388 * This gets transformed into the following statements inside a BBJ_COND basic block (the target would be
389 * the default case of the switch in case the conditional is evaluated to true).
391 * ----- original block, transformed
392 * GT_STORE_LCL_VAR tempLocal (a new temporary local variable used to store the switch index)
393 * |_____ expr (the index expression)
398 * |___ Int_Constant (This constant is the index of the default case
399 * that happens to be the highest index in the jump table).
400 * |___ tempLocal (The local variable where we stored the index expression).
402 * ----- new basic block
405 * |_____ jumpTable (a new jump table node that now LSRA can allocate registers for explicitly
406 * and LinearCodeGen will be responsible to generate downstream).
408 * This way there are no implicit temporaries.
410 * b) For small-sized switches, we will actually morph them into a series of conditionals of the form
411 * if (case falls into the default){ goto jumpTable[size]; // last entry in the jump table is the default case }
412 * (For the default case conditional, we'll be constructing the exact same code as the jump table case one).
413 * else if (case == firstCase){ goto jumpTable[1]; }
414 * else if (case == secondCase) { goto jumptable[2]; } and so on.
416 * This transformation is of course made in JIT-IR, not downstream at the CodeGen level, so we no longer
417 * require internal temporaries to maintain the index we're evaluating, and we reuse existing code from
418 * LinearCodeGen to implement this instead of implementing all the control flow constructs using InstrDscs and
419 * InstrGroups downstream.
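*
* As an illustrative sketch (not an actual dump), a switch with two cases plus a default lowers
* under the compare/branch expansion to roughly:
*
* if ((unsigned)tempLocal > 1) goto defaultCase; // BBJ_COND block (the shared default check)
* if (tempLocal == 0) goto case0; // BBJ_COND block
* goto case1; // BBJ_ALWAYS block (the last case needs no compare)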
422 GenTree* Lowering::LowerSwitch(GenTree* node)
426 BasicBlock** jumpTab;
428 assert(node->gtOper == GT_SWITCH);
430 // The first step is to build the default case conditional construct that is
431 // shared between both kinds of expansion of the switch node.
433 // To avoid confusion, we'll alias m_block to originalSwitchBB
434 // that represents the node we're morphing.
435 BasicBlock* originalSwitchBB = m_block;
436 LIR::Range& switchBBRange = LIR::AsRange(originalSwitchBB);
438 // jumpCnt is the number of elements in the jump table array.
439 // jumpTab is the actual pointer to the jump table array.
440 // targetCnt is the number of unique targets in the jump table array.
441 jumpCnt = originalSwitchBB->bbJumpSwt->bbsCount;
442 jumpTab = originalSwitchBB->bbJumpSwt->bbsDstTab;
443 targetCnt = originalSwitchBB->NumSucc(comp);
445 // GT_SWITCH must be a top-level node with no use.
449 assert(!switchBBRange.TryGetUse(node, &use));
453 JITDUMP("Lowering switch " FMT_BB ", %d cases\n", originalSwitchBB->bbNum, jumpCnt);
455 // Handle a degenerate case: if the switch has only a default case, just convert it
456 // to an unconditional branch. This should only happen in minopts or with debuggable code.
460 JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum);
461 noway_assert(comp->opts.OptimizationDisabled());
462 if (originalSwitchBB->bbNext == jumpTab[0])
464 originalSwitchBB->bbJumpKind = BBJ_NONE;
465 originalSwitchBB->bbJumpDest = nullptr;
469 originalSwitchBB->bbJumpKind = BBJ_ALWAYS;
470 originalSwitchBB->bbJumpDest = jumpTab[0];
472 // Remove extra predecessor links if there was more than one case.
473 for (unsigned i = 1; i < jumpCnt; ++i)
475 (void)comp->fgRemoveRefPred(jumpTab[i], originalSwitchBB);
478 // We have to get rid of the GT_SWITCH node but a child might have side effects so just assign
479 // the result of the child subtree to a temp.
480 GenTree* rhs = node->gtOp.gtOp1;
482 unsigned lclNum = comp->lvaGrabTemp(true DEBUGARG("Lowering is creating a new local variable"));
483 comp->lvaTable[lclNum].lvType = rhs->TypeGet();
485 GenTreeLclVar* store = new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, rhs->TypeGet(), lclNum);
487 store->gtFlags = (rhs->gtFlags & GTF_COMMON_MASK);
488 store->gtFlags |= GTF_VAR_DEF;
490 switchBBRange.InsertAfter(node, store);
491 switchBBRange.Remove(node);
496 noway_assert(jumpCnt >= 2);
498 // Spill the argument to the switch node into a local so that it can be used later.
499 LIR::Use use(switchBBRange, &(node->gtOp.gtOp1), node);
500 ReplaceWithLclVar(use);
502 // GT_SWITCH(indexExpression) is now two statements:
503 // 1. a statement containing 'asg' (for temp = indexExpression)
504 // 2. and a statement with GT_SWITCH(temp)
506 assert(node->gtOper == GT_SWITCH);
507 GenTree* temp = node->gtOp.gtOp1;
508 assert(temp->gtOper == GT_LCL_VAR);
509 unsigned tempLclNum = temp->gtLclVarCommon.gtLclNum;
510 var_types tempLclType = temp->TypeGet();
512 BasicBlock* defaultBB = jumpTab[jumpCnt - 1];
513 BasicBlock* followingBB = originalSwitchBB->bbNext;
515 /* Is the number of cases right for a test and jump switch? */
516 const bool fFirstCaseFollows = (followingBB == jumpTab[0]);
517 const bool fDefaultFollows = (followingBB == defaultBB);
519 unsigned minSwitchTabJumpCnt = 2; // table is better than just 2 cmp/jcc
521 // This means really just a single cmp/jcc (aka a simple if/else)
522 if (fFirstCaseFollows || fDefaultFollows)
524 minSwitchTabJumpCnt++;
527 #if defined(_TARGET_ARM_)
528 // On ARM for small switch tables we will
529 // generate a sequence of compare and branch instructions
530 // because the code to load the base of the switch
531 // table is huge and hideous due to the relocation... :(
532 minSwitchTabJumpCnt += 2;
533 #endif // _TARGET_ARM_
535 // Once we have the temporary variable, we construct the conditional branch for
536 // the default case. As stated above, this conditional is being shared between
537 // both GT_SWITCH lowering code paths.
538 // This condition is of the form: if (temp > jumpTableLength - 2){ goto jumpTable[jumpTableLength - 1]; }
539 GenTree* gtDefaultCaseCond = comp->gtNewOperNode(GT_GT, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
540 comp->gtNewIconNode(jumpCnt - 2, genActualType(tempLclType)));
542 // Make sure we perform an unsigned comparison, just in case the switch index in 'temp'
543 // is now less than zero (that would also hit the default case).
544 gtDefaultCaseCond->gtFlags |= GTF_UNSIGNED;
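// For example (illustrative): with four non-default cases plus the default, jumpCnt is 5 and the
// condition is "if ((unsigned)temp > 3) goto default", so any out-of-range index, including a
// negative index reinterpreted as a large unsigned value, takes the default case.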
546 GenTree* gtDefaultCaseJump = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtDefaultCaseCond);
547 gtDefaultCaseJump->gtFlags = node->gtFlags;
549 LIR::Range condRange = LIR::SeqTree(comp, gtDefaultCaseJump);
550 switchBBRange.InsertAtEnd(std::move(condRange));
552 BasicBlock* afterDefaultCondBlock = comp->fgSplitBlockAfterNode(originalSwitchBB, condRange.LastNode());
554 // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor.
555 // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock
556 // representing the fall-through flow from originalSwitchBB.
557 assert(originalSwitchBB->bbJumpKind == BBJ_NONE);
558 assert(originalSwitchBB->bbNext == afterDefaultCondBlock);
559 assert(afterDefaultCondBlock->bbJumpKind == BBJ_SWITCH);
560 assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault);
561 assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet.
563 // The GT_SWITCH code is still in originalSwitchBB (it will be removed later).
565 // Turn originalSwitchBB into a BBJ_COND.
566 originalSwitchBB->bbJumpKind = BBJ_COND;
567 originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1];
569 // Fix the pred for the default case: the default block target still has originalSwitchBB
570 // as a predecessor, but fgSplitBlockAfterNode() moved all predecessors to point
571 // to afterDefaultCondBlock.
572 flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[jumpCnt - 1], afterDefaultCondBlock);
573 comp->fgAddRefPred(jumpTab[jumpCnt - 1], originalSwitchBB, oldEdge);
575 bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt;
577 #if defined(_TARGET_UNIX_) && defined(_TARGET_ARM_)
578 // Force using an inlined jump sequence instead of switch table generation.
579 // Switch jump table is generated with incorrect values in CoreRT case,
580 // so any large switch will crash after loading to PC any such value.
581 // I think this is due to the fact that we use absolute addressing
582 // instead of relative, whereas CoreRT as a rule uses relative
583 // addressing when we generate an executable.
584 // See also https://github.com/dotnet/coreclr/issues/13194
585 // Also https://github.com/dotnet/coreclr/pull/13197
586 useJumpSequence = useJumpSequence || comp->IsTargetAbi(CORINFO_CORERT_ABI);
587 #endif // defined(_TARGET_UNIX_) && defined(_TARGET_ARM_)
589 // If we originally had 2 unique successors, check to see whether there is a unique
590 // non-default case, in which case we can eliminate the switch altogether.
591 // Note that the single unique successor case is handled above.
592 BasicBlock* uniqueSucc = nullptr;
595 uniqueSucc = jumpTab[0];
596 noway_assert(jumpCnt >= 2);
597 for (unsigned i = 1; i < jumpCnt - 1; i++)
599 if (jumpTab[i] != uniqueSucc)
601 uniqueSucc = nullptr;
606 if (uniqueSucc != nullptr)
608 // If the unique successor immediately follows this block, we have nothing to do -
609 // it will simply fall through after we remove the switch, below.
610 // Otherwise, make this a BBJ_ALWAYS.
611 // Now, fixup the predecessor links to uniqueSucc. In the original jumpTab:
612 // jumpTab[jumpCnt - 1] was the default target, which we handled above,
613 // jumpTab[0] is the first target, and we'll leave that predecessor link.
614 // Remove any additional predecessor links to uniqueSucc.
615 for (unsigned i = 1; i < jumpCnt - 1; ++i)
617 assert(jumpTab[i] == uniqueSucc);
618 (void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock);
620 if (afterDefaultCondBlock->bbNext == uniqueSucc)
622 afterDefaultCondBlock->bbJumpKind = BBJ_NONE;
623 afterDefaultCondBlock->bbJumpDest = nullptr;
627 afterDefaultCondBlock->bbJumpKind = BBJ_ALWAYS;
628 afterDefaultCondBlock->bbJumpDest = uniqueSucc;
631 // If the number of possible destinations is small enough, we proceed to expand the switch
632 // into a series of conditional branches, otherwise we follow the jump table based switch
634 else if (useJumpSequence || comp->compStressCompile(Compiler::STRESS_SWITCH_CMP_BR_EXPANSION, 50))
636 // Lower the switch into a series of compare and branch IR trees.
638 // In this case we will morph the node in the following way:
639 // 1. Generate a JTRUE statement to evaluate the default case. (This happens above.)
640 // 2. Start splitting the switch basic block into subsequent basic blocks, each of which will contain
641 // a statement that is responsible for performing a comparison of the table index and conditional
644 JITDUMP("Lowering switch " FMT_BB ": using compare/branch expansion\n", originalSwitchBB->bbNum);
646 // We'll use 'afterDefaultCondBlock' for the first conditional. After that, we'll add new
647 // blocks. If we end up not needing it at all (say, if all the non-default cases just fall through),
649 bool fUsedAfterDefaultCondBlock = false;
650 BasicBlock* currentBlock = afterDefaultCondBlock;
651 LIR::Range* currentBBRange = &LIR::AsRange(currentBlock);
653 // Walk entries 0 to jumpCnt - 2 (the non-default cases). If a case target follows, ignore it and let it fall through.
654 // If no case target follows, the last one doesn't need to be a compare/branch: it can be an
655 // unconditional branch.
656 bool fAnyTargetFollows = false;
657 for (unsigned i = 0; i < jumpCnt - 1; ++i)
659 assert(currentBlock != nullptr);
661 // Remove the switch from the predecessor list of this case target's block.
662 // We'll add the proper new predecessor edge later.
663 flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[i], afterDefaultCondBlock);
665 if (jumpTab[i] == followingBB)
667 // This case label follows the switch; let it fall through.
668 fAnyTargetFollows = true;
672 // We need a block to put in the new compare and/or branch.
673 // If we haven't used the afterDefaultCondBlock yet, then use that.
674 if (fUsedAfterDefaultCondBlock)
676 BasicBlock* newBlock = comp->fgNewBBafter(BBJ_NONE, currentBlock, true);
677 comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor.
678 currentBlock = newBlock;
679 currentBBRange = &LIR::AsRange(currentBlock);
683 assert(currentBlock == afterDefaultCondBlock);
684 fUsedAfterDefaultCondBlock = true;
687 // We're going to have a branch, either a conditional or unconditional,
688 // to the target. Set the target.
689 currentBlock->bbJumpDest = jumpTab[i];
691 // Wire up the predecessor list for the "branch" case.
692 comp->fgAddRefPred(jumpTab[i], currentBlock, oldEdge);
694 if (!fAnyTargetFollows && (i == jumpCnt - 2))
696 // We're processing the last one, and there is no fall through from any case
697 // to the following block, so we can use an unconditional branch to the final
698 // case: there is no need to compare against the case index, since it's
699 // guaranteed to be taken (since the default case was handled first, above).
701 currentBlock->bbJumpKind = BBJ_ALWAYS;
705 // Otherwise, it's a conditional branch. Set the branch kind, then add the
706 // condition statement.
707 currentBlock->bbJumpKind = BBJ_COND;
709 // Now, build the conditional statement for the current case that is
714 // |____ (switchIndex) (The temp variable)
715 // |____ (ICon) (The actual case constant)
716 GenTree* gtCaseCond = comp->gtNewOperNode(GT_EQ, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
717 comp->gtNewIconNode(i, tempLclType));
718 GenTree* gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond);
719 LIR::Range caseRange = LIR::SeqTree(comp, gtCaseBranch);
720 currentBBRange->InsertAtEnd(std::move(caseRange));
724 if (fAnyTargetFollows)
726 // There is a fall-through to the following block. In the loop
727 // above, we deleted all the predecessor edges from the switch.
728 // In this case, we need to add one back.
729 comp->fgAddRefPred(currentBlock->bbNext, currentBlock);
732 if (!fUsedAfterDefaultCondBlock)
734 // All the cases were fall-through! We don't need this block.
735 // Convert it from BBJ_SWITCH to BBJ_NONE and unset the BBF_DONT_REMOVE flag
736 // so fgRemoveBlock() doesn't complain.
737 JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum);
738 assert(currentBlock == afterDefaultCondBlock);
739 assert(currentBlock->bbJumpKind == BBJ_SWITCH);
740 currentBlock->bbJumpKind = BBJ_NONE;
741 currentBlock->bbFlags &= ~BBF_DONT_REMOVE;
742 comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block.
747 // At this point the default case has already been handled and we need to generate a jump
748 // table based switch or a bit test based switch at the end of afterDefaultCondBlock. Both
749 // switch variants need the switch value so create the necessary LclVar node here.
750 GenTree* switchValue = comp->gtNewLclvNode(tempLclNum, tempLclType);
751 LIR::Range& switchBlockRange = LIR::AsRange(afterDefaultCondBlock);
752 switchBlockRange.InsertAtEnd(switchValue);
754 // Try generating a bit test based switch first,
755 // if that's not possible a jump table based switch will be generated.
756 if (!TryLowerSwitchToBitTest(jumpTab, jumpCnt, targetCnt, afterDefaultCondBlock, switchValue))
758 JITDUMP("Lowering switch " FMT_BB ": using jump table expansion\n", originalSwitchBB->bbNum);
760 #ifdef _TARGET_64BIT_
761 if (tempLclType != TYP_I_IMPL)
763 // SWITCH_TABLE expects the switch value (the index into the jump table) to be TYP_I_IMPL.
764 // Note that the switch value is unsigned so the cast should be unsigned as well.
765 switchValue = comp->gtNewCastNode(TYP_I_IMPL, switchValue, true, TYP_U_IMPL);
766 switchBlockRange.InsertAtEnd(switchValue);
770 GenTree* switchTable = comp->gtNewJmpTableNode();
771 GenTree* switchJump = comp->gtNewOperNode(GT_SWITCH_TABLE, TYP_VOID, switchValue, switchTable);
772 switchBlockRange.InsertAfter(switchValue, switchTable, switchJump);
774 // this block no longer branches to the default block
775 afterDefaultCondBlock->bbJumpSwt->removeDefault();
778 comp->fgInvalidateSwitchDescMapEntry(afterDefaultCondBlock);
781 GenTree* next = node->gtNext;
783 // Get rid of the GT_SWITCH(temp).
784 switchBBRange.Remove(node->gtOp.gtOp1);
785 switchBBRange.Remove(node);
790 //------------------------------------------------------------------------
791 // TryLowerSwitchToBitTest: Attempts to transform a jump table switch into a bit test.
794 // jumpTable - The jump table
795 // jumpCount - The number of blocks in the jump table
796 // targetCount - The number of distinct blocks in the jump table
797 // bbSwitch - The switch block
798 // switchValue - A LclVar node that provides the switch value
801 // true if the switch has been lowered to a bit test
804 // If the jump table contains less than 32 (64 on 64 bit targets) entries and there
805 // are at most 2 distinct jump targets then the jump table can be converted to a word
806 // of bits where a 0 bit corresponds to one jump target and a 1 bit corresponds to the
807 // other jump target. Instead of the indirect jump a BT-JCC sequence is used to jump
808 // to the appropriate target:
809 // mov eax, 245 ; jump table converted to a "bit table"
810 // bt eax, ebx ; ebx is supposed to contain the switch value
815 // Such code is both shorter and faster (in part due to the removal of a memory load)
816 // than the traditional jump table based code. And of course, it also avoids the need
817 // to emit the jump table itself that can reach up to 256 bytes (for 64 entries).
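//
// As a worked example, the 245 above is 0b11110101: case indices 0, 2, 4, 5, 6 and 7 share one
// target (their bits are 1) while indices 1 and 3 share the other (their bits are 0), so a single
// BT plus a conditional jump selects the correct successor for any in-range index.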
819 bool Lowering::TryLowerSwitchToBitTest(
820 BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue)
822 #ifndef _TARGET_XARCH_
823 // Other architectures may use this if they substitute GT_BT with equivalent code.
826 assert(jumpCount >= 2);
827 assert(targetCount >= 2);
828 assert(bbSwitch->bbJumpKind == BBJ_SWITCH);
829 assert(switchValue->OperIs(GT_LCL_VAR));
832 // Quick check to see if it's worth going through the jump table. The bit test switch supports
833 // up to 2 targets but targetCount also includes the default block so we need to allow 3 targets.
834 // We'll ensure that there are only 2 targets when building the bit table.
843 // The number of bits in the bit table is the same as the number of jump table entries. But the
844 // jump table also includes the default target (at the end) so we need to ignore it. The default
845 // has already been handled by a JTRUE(GT(switchValue, jumpCount - 2)) that LowerSwitch generates.
848 const unsigned bitCount = jumpCount - 1;
850 if (bitCount > (genTypeSize(TYP_I_IMPL) * 8))
856 // Build a bit table where a bit set to 0 corresponds to bbCase0 and a bit set to 1 corresponds to
857 // bbCase1. Simply use the first block in the jump table as bbCase1, later we can invert the bit
858 // table and/or swap the blocks if it's beneficial.
861 BasicBlock* bbCase0 = nullptr;
862 BasicBlock* bbCase1 = jumpTable[0];
865 for (unsigned bitIndex = 1; bitIndex < bitCount; bitIndex++)
867 if (jumpTable[bitIndex] == bbCase1)
869 bitTable |= (size_t(1) << bitIndex);
871 else if (bbCase0 == nullptr)
873 bbCase0 = jumpTable[bitIndex];
875 else if (jumpTable[bitIndex] != bbCase0)
877 // If it's neither bbCase0 nor bbCase1 then it means we have 3 targets. There can't be more
878 // than 3 because of the check at the start of the function.
879 assert(targetCount == 3);
885 // One of the case blocks has to follow the switch block. This requirement could be avoided
886 // by adding a BBJ_ALWAYS block after the switch block but doing that sometimes negatively
887 // impacts register allocation.
890 if ((bbSwitch->bbNext != bbCase0) && (bbSwitch->bbNext != bbCase1))
895 #ifdef _TARGET_64BIT_
897 // See if we can avoid an 8 byte immediate on 64 bit targets. If all upper 32 bits are 1
898 // then inverting the bit table will make them 0 so that the table now fits in 32 bits.
899 // Note that this does not change the number of bits in the bit table, it just takes
900 // advantage of the fact that loading a 32 bit immediate into a 64 bit register zero
901 // extends the immediate value to 64 bits.
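// For example (illustrative values): bitTable = 0xFFFFFFFF00000005 would need an 8 byte immediate,
// but ~bitTable = 0x00000000FFFFFFFA fits in 32 bits, so we invert the table and swap
// bbCase0/bbCase1 below to keep the meaning of each bit unchanged.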
904 if (~bitTable <= UINT32_MAX)
906 bitTable = ~bitTable;
907 std::swap(bbCase0, bbCase1);
912 // Rewire the blocks as needed and figure out the condition to use for JCC.
915 GenCondition bbSwitchCondition;
916 bbSwitch->bbJumpKind = BBJ_COND;
918 comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);
919 comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);
921 if (bbSwitch->bbNext == bbCase0)
923 // GenCondition::C generates JC so we jump to bbCase1 when the bit is set
924 bbSwitchCondition = GenCondition::C;
925 bbSwitch->bbJumpDest = bbCase1;
927 comp->fgAddRefPred(bbCase0, bbSwitch);
928 comp->fgAddRefPred(bbCase1, bbSwitch);
932 assert(bbSwitch->bbNext == bbCase1);
934 // GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set
935 bbSwitchCondition = GenCondition::NC;
936 bbSwitch->bbJumpDest = bbCase0;
938 comp->fgAddRefPred(bbCase0, bbSwitch);
939 comp->fgAddRefPred(bbCase1, bbSwitch);
943 // Append BT(bitTable, switchValue) and JCC(condition) to the switch block.
946 var_types bitTableType = (bitCount <= (genTypeSize(TYP_INT) * 8)) ? TYP_INT : TYP_LONG;
947 GenTree* bitTableIcon = comp->gtNewIconNode(bitTable, bitTableType);
948 GenTree* bitTest = comp->gtNewOperNode(GT_BT, TYP_VOID, bitTableIcon, switchValue);
949 bitTest->gtFlags |= GTF_SET_FLAGS;
950 GenTreeCC* jcc = new (comp, GT_JCC) GenTreeCC(GT_JCC, bbSwitchCondition);
951 jcc->gtFlags |= GTF_USE_FLAGS;
953 LIR::AsRange(bbSwitch).InsertAfter(switchValue, bitTableIcon, bitTest, jcc);
956 #endif // _TARGET_XARCH_
959 // NOTE: this method deliberately does not update the call arg table. It must only
960 // be used by NewPutArg and LowerArg; these functions are responsible for updating
961 // the call arg table as necessary.
962 void Lowering::ReplaceArgWithPutArgOrBitcast(GenTree** argSlot, GenTree* putArgOrBitcast)
964 assert(argSlot != nullptr);
965 assert(*argSlot != nullptr);
966 assert(putArgOrBitcast->OperIsPutArg() || putArgOrBitcast->OperIs(GT_BITCAST));
968 GenTree* arg = *argSlot;
970 // Replace the argument with the putarg/copy
971 *argSlot = putArgOrBitcast;
972 putArgOrBitcast->gtOp.gtOp1 = arg;
974 // Insert the putarg/copy into the block
975 BlockRange().InsertAfter(arg, putArgOrBitcast);
978 //------------------------------------------------------------------------
979 // NewPutArg: rewrites the tree to put an arg in a register or on the stack.
982 // call - the call whose arg is being rewritten.
983 // arg - the arg being rewritten.
984 // info - the fgArgTabEntry information for the argument.
985 // type - the type of the argument.
988 // The new tree that was created to put the arg in the right place
989 // or the incoming arg if the arg tree was not rewritten.
992 // call, arg, and info must be non-null.
995 // For System V systems with native struct passing (i.e. UNIX_AMD64_ABI defined)
996 // this method allocates a single GT_PUTARG_REG for one-eightbyte structs and a GT_FIELD_LIST of two GT_PUTARG_REGs
997 // for two-eightbyte structs.
999 // For STK passed structs the method generates GT_PUTARG_STK tree. For System V systems with native struct passing
1000 // (i.e. UNIX_AMD64_ABI defined) this method also sets the GC pointers count and the pointers
1001 // layout object, so the codegen of the GT_PUTARG_STK could use this for optimizing copying to the stack by value.
1002 // (using block copy primitives for non GC pointers and a single TARGET_POINTER_SIZE copy with recording GC info.)
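//
// For example (an illustrative shape, not an actual dump), a two-eightbyte struct passed in
// registers on a UNIX_AMD64_ABI target is lowered to:
// GT_FIELD_LIST (contained)
// |__ GT_PUTARG_REG (first eightbyte)
// |__ GT_PUTARG_REG (second eightbyte)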
1004 GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type)
1006 assert(call != nullptr);
1007 assert(arg != nullptr);
1008 assert(info != nullptr);
1010 GenTree* putArg = nullptr;
1011 bool updateArgTable = true;
1013 bool isOnStack = (info->regNum == REG_STK);
1016 #ifdef _TARGET_ARMARCH_
1017 // Mark the arg contained when we pass a struct.
1018 // GT_FIELD_LIST is always marked contained when it is generated.
1019 if (type == TYP_STRUCT)
1021 arg->SetContained();
1022 if ((arg->OperGet() == GT_OBJ) && (arg->AsObj()->Addr()->OperGet() == GT_LCL_VAR_ADDR))
1024 MakeSrcContained(arg, arg->AsObj()->Addr());
1029 #if FEATURE_ARG_SPLIT
1030 // A struct can be split between register(s) and the stack on ARM.
1033 assert(arg->OperGet() == GT_OBJ || arg->OperGet() == GT_FIELD_LIST);
1034 // TODO: Need to check correctness for FastTailCall
1035 if (call->IsFastTailCall())
1038 NYI_ARM("lower: struct argument by fast tail call");
1039 #endif // _TARGET_ARM_
1042 putArg = new (comp, GT_PUTARG_SPLIT)
1043 GenTreePutArgSplit(arg, info->slotNum PUT_STRUCT_ARG_STK_ONLY_ARG(info->numSlots), info->numRegs,
1044 call->IsFastTailCall(), call);
1046 // If the struct argument is morphed to GT_FIELD_LIST node(s),
1047 // the GC info is known from the type of each GT_FIELD_LIST node,
1048 // so we skip setting GC pointer info.
1050 GenTreePutArgSplit* argSplit = putArg->AsPutArgSplit();
1051 for (unsigned regIndex = 0; regIndex < info->numRegs; regIndex++)
1053 argSplit->SetRegNumByIdx(info->getRegNum(regIndex), regIndex);
1056 if (arg->OperGet() == GT_OBJ)
1058 BYTE* gcLayout = nullptr;
1059 unsigned numRefs = 0;
1060 GenTreeObj* argObj = arg->AsObj();
1062 if (argObj->IsGCInfoInitialized())
1064 gcLayout = argObj->gtGcPtrs;
1065 numRefs = argObj->GetGcPtrCount();
1069 // Set GC Pointer info
1070 gcLayout = new (comp, CMK_Codegen) BYTE[info->numSlots + info->numRegs];
1071 numRefs = comp->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout);
1072 argSplit->setGcPointers(numRefs, gcLayout);
1075 // Set type of registers
1076 for (unsigned index = 0; index < info->numRegs; index++)
1078 var_types regType = comp->getJitGCType(gcLayout[index]);
1079 // Account for the possibility that float fields may be passed in integer registers.
1080 if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(index)))
1082 regType = (regType == TYP_FLOAT) ? TYP_INT : TYP_LONG;
1084 argSplit->m_regType[index] = regType;
1089 GenTreeFieldList* fieldListPtr = arg->AsFieldList();
1090 for (unsigned index = 0; index < info->numRegs; fieldListPtr = fieldListPtr->Rest(), index++)
1092 var_types regType = fieldListPtr->gtGetOp1()->TypeGet();
1093 // Account for the possibility that float fields may be passed in integer registers.
1094 if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(index)))
1096 regType = (regType == TYP_FLOAT) ? TYP_INT : TYP_LONG;
1098 argSplit->m_regType[index] = regType;
1100 // Clear the register assignments on the fieldList nodes, as these are contained.
1101 fieldListPtr->gtRegNum = REG_NA;
1106 #endif // FEATURE_ARG_SPLIT
1110 #if FEATURE_MULTIREG_ARGS
1111 if ((info->numRegs > 1) && (arg->OperGet() == GT_FIELD_LIST))
1113 assert(arg->OperGet() == GT_FIELD_LIST);
1115 assert(arg->AsFieldList()->IsFieldListHead());
1116 unsigned int regIndex = 0;
1117 for (GenTreeFieldList* fieldListPtr = arg->AsFieldList(); fieldListPtr != nullptr;
1118 fieldListPtr = fieldListPtr->Rest())
1120 regNumber argReg = info->getRegNum(regIndex);
1121 GenTree* curOp = fieldListPtr->gtOp.gtOp1;
1122 var_types curTyp = curOp->TypeGet();
1124 // Create a new GT_PUTARG_REG node with op1
1125 GenTree* newOper = comp->gtNewPutArgReg(curTyp, curOp, argReg);
1127 // Splice in the new GT_PUTARG_REG node in the GT_FIELD_LIST
1128 ReplaceArgWithPutArgOrBitcast(&fieldListPtr->gtOp.gtOp1, newOper);
1131 // Initialize all the gtRegNum's since the list won't be traversed in an LIR traversal.
1132 fieldListPtr->gtRegNum = REG_NA;
1135 // Just return arg. The GT_FIELD_LIST is not replaced.
1136 // Nothing more to do.
1140 #endif // FEATURE_MULTIREG_ARGS
1142 putArg = comp->gtNewPutArgReg(type, arg, info->regNum);
1147 // Mark this one as tail call arg if it is a fast tail call.
1148 // This provides the info to put this argument in the incoming arg area slot
1149 // instead of in the outgoing arg area slot.
1151 // Make sure state is correct. The PUTARG_STK has TYP_VOID, as it doesn't produce
1152 // a result. So the type of its operand must be the correct type to push on the stack.
1153 // For a FIELD_LIST, this will be the type of the field (not the type of the arg),
1154 // but otherwise it is generally the type of the operand.
1155 info->checkIsStruct();
1156 if ((arg->OperGet() != GT_FIELD_LIST))
1158 #if defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
1159 if (type == TYP_SIMD12)
1161 assert(info->numSlots == 3);
1164 #endif // defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
1166 assert(genActualType(arg->TypeGet()) == type);
1171 new (comp, GT_PUTARG_STK) GenTreePutArgStk(GT_PUTARG_STK, TYP_VOID, arg,
1172 info->slotNum PUT_STRUCT_ARG_STK_ONLY_ARG(info->numSlots),
1173 call->IsFastTailCall(), call);
1175 #ifdef FEATURE_PUT_STRUCT_ARG_STK
1176 // If the ArgTabEntry indicates that this arg is a struct,
1177 // get and store the number of slots that are references.
1178 // This is later used by the codegen of the PUT_ARG_STK implementation
1179 // for structs to decide whether, and for how many slots, single eight-byte copies
1180 // should be done (only for reference slots), so gc info is emitted.
1181 // For non-reference slots faster/smaller size instructions are used -
1182 // pair copying using XMM registers or rep mov instructions.
1185 // We use GT_OBJ only for non-lclVar, non-SIMD, non-FIELD_LIST struct arguments.
1186 if (arg->OperIsLocal())
1188 // This must have a type with a known size (SIMD or has been morphed to a primitive type).
1189 assert(arg->TypeGet() != TYP_STRUCT);
1191 else if (arg->OperIs(GT_OBJ))
1193 unsigned numRefs = 0;
1194 BYTE* gcLayout = new (comp, CMK_Codegen) BYTE[info->numSlots];
1195 assert(!varTypeIsSIMD(arg));
1196 numRefs = comp->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout);
1197 putArg->AsPutArgStk()->setGcPointers(numRefs, gcLayout);
1200 // On x86, the VM lies about the type of a struct containing a pointer sized
1201 // integer field by returning the type of its field as the type of the struct.
1202 // Such a struct can be passed in a register depending on its position in the
1203 // parameter list. The VM does this unwrapping only one level deep, and therefore
1204 // a type like Struct Foo { Struct Bar { int f } } always needs to be
1205 // passed on the stack. Also, the VM doesn't lie about the type of such a struct
1206 // when it is a field of another struct; that is, the VM doesn't lie about
1207 // the type of Foo.Bar.
1209 // We now support the promotion of fields that are of type struct.
1210 // However we only support a limited case where the struct field has a
1211 // single field and that single field must be a scalar type. Say Foo.Bar
1212 // field is getting passed as a parameter to a call. Since it is a TYP_STRUCT,
1213 // as per x86 ABI it should always be passed on stack. Therefore GenTree
1214 // node under a PUTARG_STK could be GT_OBJ(GT_LCL_VAR_ADDR(v1)), where
1215 // local v1 could be a promoted field standing for Foo.Bar. Note that
1216 // the type of v1 will be the type of field of Foo.Bar.f when Foo is
1217 // promoted. That is v1 will be a scalar type. In this case we need to
1218 // pass v1 on stack instead of in a register.
1220 // TODO-PERF: replace GT_OBJ(GT_LCL_VAR_ADDR(v1)) with v1 if v1 is
1221 // a scalar type and the width of GT_OBJ matches the type size of v1.
1222 // Note that this cannot be done till call node arguments are morphed
1223 // because we should not lose the fact that the type of argument is
1224 // a struct so that the arg gets correctly marked to be passed on stack.
1225 GenTree* objOp1 = arg->gtGetOp1();
1226 if (objOp1->OperGet() == GT_LCL_VAR_ADDR)
1228 unsigned lclNum = objOp1->AsLclVarCommon()->GetLclNum();
1229 if (comp->lvaTable[lclNum].lvType != TYP_STRUCT)
1231 comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(Compiler::DNER_VMNeedsStackAddr));
1234 #endif // _TARGET_X86_
1236 else if (!arg->OperIs(GT_FIELD_LIST))
1238 assert(varTypeIsSIMD(arg) || (info->numSlots == 1));
1241 #endif // FEATURE_PUT_STRUCT_ARG_STK
1245 JITDUMP("new node is : ");
1249 if (arg->gtFlags & GTF_LATE_ARG)
1251 putArg->gtFlags |= GTF_LATE_ARG;
1253 else if (updateArgTable)
1255 info->node = putArg;
1260 //------------------------------------------------------------------------
1261 // LowerArg: Lower one argument of a call. This entails splicing a "putarg" node between
1262 // the argument evaluation and the call. This is the point at which the source is
1263 // consumed and the value transitions from control of the register allocator to the calling
1267 // call - The call node
1268 // ppArg - Pointer to the call argument pointer. We might replace the call argument by
1274 void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg)
1276 GenTree* arg = *ppArg;
1278 JITDUMP("lowering arg : ");
1281 // No assignments should remain by Lowering.
1282 assert(!arg->OperIs(GT_ASG));
1283 assert(!arg->OperIsPutArgStk());
1285 // Assignments/stores at this level are not really placing an argument.
1286 // They are setting up temporary locals that will later be placed into
1287 // outgoing regs or stack.
1288 // Note that atomic ops may be stores and still produce a value.
1289 if (!arg->IsValue())
1291 assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() ||
1292 arg->OperIsCopyBlkOp());
1296 fgArgTabEntry* info = comp->gtArgEntryByNode(call, arg);
1297 assert(info->node == arg);
1298 var_types type = arg->TypeGet();
1300 if (varTypeIsSmall(type))
1302 // Normalize 'type', it represents the item that we will be storing in the Outgoing Args
1306 #if defined(FEATURE_SIMD)
1307 #if defined(_TARGET_X86_)
1308 // Non-param TYP_SIMD12 local var nodes are massaged in Lower to TYP_SIMD16 to match their
1309 // allocated size (see lvSize()). However, when passing the variables as arguments, and
1310 // storing the variables to the outgoing argument area on the stack, we must use their
1311 // actual TYP_SIMD12 type, so exactly 12 bytes is allocated and written.
1312 if (type == TYP_SIMD16)
1314 if ((arg->OperGet() == GT_LCL_VAR) || (arg->OperGet() == GT_STORE_LCL_VAR))
1316 unsigned varNum = arg->AsLclVarCommon()->GetLclNum();
1317 LclVarDsc* varDsc = &comp->lvaTable[varNum];
1318 type = varDsc->lvType;
1320 else if (arg->OperGet() == GT_SIMD)
1322 assert((arg->AsSIMD()->gtSIMDSize == 16) || (arg->AsSIMD()->gtSIMDSize == 12));
1324 if (arg->AsSIMD()->gtSIMDSize == 12)
1330 #elif defined(_TARGET_AMD64_)
1331 // TYP_SIMD8 parameters that are passed as longs
1332 if (type == TYP_SIMD8 && genIsValidIntReg(info->regNum))
1334 GenTreeUnOp* bitcast = new (comp, GT_BITCAST) GenTreeOp(GT_BITCAST, TYP_LONG, arg, nullptr);
1335 BlockRange().InsertAfter(arg, bitcast);
1337 info->node = *ppArg = arg = bitcast;
1340 #endif // defined(_TARGET_X86_)
1341 #endif // defined(FEATURE_SIMD)
1343 // If we hit this we are probably double-lowering.
1344 assert(!arg->OperIsPutArg());
1346 #if !defined(_TARGET_64BIT_)
1347 if (varTypeIsLong(type))
1349 bool isReg = (info->regNum != REG_STK);
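// A TYP_LONG arg arrives here as GT_LONG(loHalf, hiHalf); in both the register and stack cases it
// is replaced by a GT_FIELD_LIST holding the lo half at offset 0 and the hi half at offset 4,
// which NewPutArg then wraps in the appropriate PUTARG node(s).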
1352 noway_assert(arg->OperGet() == GT_LONG);
1353 assert(info->numRegs == 2);
1355 GenTree* argLo = arg->gtGetOp1();
1356 GenTree* argHi = arg->gtGetOp2();
1358 GenTreeFieldList* fieldList = new (comp, GT_FIELD_LIST) GenTreeFieldList(argLo, 0, TYP_INT, nullptr);
1359 // Only the first fieldList node (GTF_FIELD_LIST_HEAD) is in the instruction sequence.
1360 (void)new (comp, GT_FIELD_LIST) GenTreeFieldList(argHi, 4, TYP_INT, fieldList);
1361 GenTree* putArg = NewPutArg(call, fieldList, info, type);
1363 BlockRange().InsertBefore(arg, putArg);
1364 BlockRange().Remove(arg);
1366 info->node = fieldList;
1370 assert(arg->OperGet() == GT_LONG);
1371 // For longs, we will replace the GT_LONG with a GT_FIELD_LIST, and put that under a PUTARG_STK.
1372 // Although the hi argument needs to be pushed first, that will be handled by the general case,
1373 // in which the fields will be reversed.
1374 assert(info->numSlots == 2);
1375 GenTree* argLo = arg->gtGetOp1();
1376 GenTree* argHi = arg->gtGetOp2();
1377 GenTreeFieldList* fieldList = new (comp, GT_FIELD_LIST) GenTreeFieldList(argLo, 0, TYP_INT, nullptr);
1378 // Only the first fieldList node (GTF_FIELD_LIST_HEAD) is in the instruction sequence.
1379 (void)new (comp, GT_FIELD_LIST) GenTreeFieldList(argHi, 4, TYP_INT, fieldList);
1380 GenTree* putArg = NewPutArg(call, fieldList, info, type);
1381 putArg->gtRegNum = info->regNum;
1383 // We can't call ReplaceArgWithPutArgOrBitcast here because it presumes that we are keeping the original
1385 BlockRange().InsertBefore(arg, fieldList, putArg);
1386 BlockRange().Remove(arg);
1391 #endif // !defined(_TARGET_64BIT_)
1394 #ifdef _TARGET_ARMARCH_
1395 if (call->IsVarargs() || comp->opts.compUseSoftFP)
1397 // For a vararg call or on armel, all reg args should be integer.
1398 // Insert copies as needed to move float value to integer register.
1399 GenTree* newNode = LowerFloatArg(ppArg, info);
1400 if (newNode != nullptr)
1402 type = newNode->TypeGet();
1405 #endif // _TARGET_ARMARCH_
1407 GenTree* putArg = NewPutArg(call, arg, info, type);
1409 // In the case of a register passable struct (in one or two registers)
1410 // NewPutArg returns a new node (GT_PUTARG_REG or a GT_FIELD_LIST with two GT_PUTARG_REGs).
1411 // If an extra node is returned, splice it in the right place in the tree.
1414 ReplaceArgWithPutArgOrBitcast(ppArg, putArg);
1419 #ifdef _TARGET_ARMARCH_
1420 //------------------------------------------------------------------------
1421 // LowerFloatArg: Lower float call arguments on the arm platform.
1424 // arg - The arg node
1425 // info - call argument info
1428 // Return nullptr if no transformation was done;
1429 // return arg if there was an in-place transformation;
1430 // return a new tree if the root was changed.
1433 // This must handle scalar float arguments as well as GT_FIELD_LISTs
1434 // with floating point fields.
1436 GenTree* Lowering::LowerFloatArg(GenTree** pArg, fgArgTabEntry* info)
1438 GenTree* arg = *pArg;
1439 if (info->regNum != REG_STK)
1441 if (arg->OperIsFieldList())
1443 GenTreeFieldList* currListNode = arg->AsFieldList();
1444 regNumber currRegNumber = info->regNum;
1446 // Transform fields that are passed as registers in place.
1447 unsigned fieldRegCount;
1448 for (unsigned i = 0; i < info->numRegs; i += fieldRegCount)
1450 assert(currListNode != nullptr);
1451 GenTree* node = currListNode->Current();
1452 if (varTypeIsFloating(node))
1454 GenTree* intNode = LowerFloatArgReg(node, currRegNumber);
1455 assert(intNode != nullptr);
1457 ReplaceArgWithPutArgOrBitcast(currListNode->pCurrent(), intNode);
1458 currListNode->ChangeType(intNode->TypeGet());
1461 if (node->TypeGet() == TYP_DOUBLE)
1463 currRegNumber = REG_NEXT(REG_NEXT(currRegNumber));
1468 currRegNumber = REG_NEXT(currRegNumber);
1471 currListNode = currListNode->Rest();
1473 // List fields were replaced in place.
1476 else if (varTypeIsFloating(arg))
1478 GenTree* intNode = LowerFloatArgReg(arg, info->regNum);
1479 assert(intNode != nullptr);
1480 ReplaceArgWithPutArgOrBitcast(pArg, intNode);
1487 //------------------------------------------------------------------------
1488 // LowerFloatArgReg: Lower the float call argument node that is passed via register.
1491 // arg - The arg node
1492 // regNum - register number
1495 // Return a new bitcast node that moves the float value to an int register.
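//
// For example (illustrative): on ARM, a TYP_DOUBLE argument assigned to r0/r1 becomes
// BITCAST<long>(doubleArg) with gtRegNum set to r0 and gtOtherReg set to r1, moving the value
// out of the floating point register file before the call.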
1497 GenTree* Lowering::LowerFloatArgReg(GenTree* arg, regNumber regNum)
1499 var_types floatType = arg->TypeGet();
1500 assert(varTypeIsFloating(floatType));
1501 var_types intType = (floatType == TYP_DOUBLE) ? TYP_LONG : TYP_INT;
1502 GenTree* intArg = comp->gtNewBitCastNode(intType, arg);
1503 intArg->gtRegNum = regNum;
1505 if (floatType == TYP_DOUBLE)
1507 regNumber nextReg = REG_NEXT(regNum);
1508 intArg->AsMultiRegOp()->gtOtherReg = nextReg;
1515 // do lowering steps for each arg of a call
1516 void Lowering::LowerArgsForCall(GenTreeCall* call)
1518 JITDUMP("objp:\n======\n");
1519 if (call->gtCallObjp)
1521 LowerArg(call, &call->gtCallObjp);
1524 GenTreeArgList* args = call->gtCallArgs;
1526 JITDUMP("\nargs:\n======\n");
1527 for (; args; args = args->Rest())
1529 LowerArg(call, &args->Current());
1532 JITDUMP("\nlate:\n======\n");
1533 for (args = call->gtCallLateArgs; args; args = args->Rest())
1535 LowerArg(call, &args->Current());
1539 // helper that create a node representing a relocatable physical address computation
1540 GenTree* Lowering::AddrGen(ssize_t addr)
1542 // this should end up in codegen as : instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg, addr)
1543 GenTree* result = comp->gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
1547 // variant that takes a void*
1548 GenTree* Lowering::AddrGen(void* addr)
1550 return AddrGen((ssize_t)addr);
1553 // do lowering steps for a call
1555 // - adding the placement nodes (either stack or register variety) for arguments
1556 // - lowering the expression that calculates the target address
1557 // - adding nodes for other operations that occur after the call sequence starts and before
1558 // control transfer occurs (profiling and tail call helpers, pinvoke incantations)
1560 void Lowering::LowerCall(GenTree* node)
1562 GenTreeCall* call = node->AsCall();
1564 JITDUMP("lowering call (before):\n");
1565 DISPTREERANGE(BlockRange(), call);
1568 call->ClearOtherRegs();
1569 LowerArgsForCall(call);
1571 // note that everything generated from this point on runs AFTER the outgoing args are placed
1572 GenTree* controlExpr = nullptr;
1574 // for x86, this is where we record ESP for checking later to make sure stack is balanced
1576 // Check for Delegate.Invoke(). If so, we inline it. We get the
1577 // target-object and target-function from the delegate-object, and do
1578 // an indirect call.
1579 if (call->IsDelegateInvoke())
1581 controlExpr = LowerDelegateInvoke(call);
1585 // Virtual and interface calls
1586 switch (call->gtFlags & GTF_CALL_VIRT_KIND_MASK)
1588 case GTF_CALL_VIRT_STUB:
1589 controlExpr = LowerVirtualStubCall(call);
1592 case GTF_CALL_VIRT_VTABLE:
1593 // stub dispatching is off or this is not a virtual call (could be a tailcall)
1594 controlExpr = LowerVirtualVtableCall(call);
1597 case GTF_CALL_NONVIRT:
1598 if (call->IsUnmanaged())
1600 controlExpr = LowerNonvirtPinvokeCall(call);
1602 else if (call->gtCallType == CT_INDIRECT)
1604 controlExpr = LowerIndirectNonvirtCall(call);
1608 controlExpr = LowerDirectCall(call);
1613 noway_assert(!"strange call type");
1618 if (call->IsTailCallViaHelper())
1620 // Either controlExpr or gtCallAddr must contain real call target.
1621 if (controlExpr == nullptr)
1623 assert(call->gtCallType == CT_INDIRECT);
1624 assert(call->gtCallAddr != nullptr);
1625 controlExpr = call->gtCallAddr;
1628 controlExpr = LowerTailCallViaHelper(call, controlExpr);
1631 if (controlExpr != nullptr)
1633 LIR::Range controlExprRange = LIR::SeqTree(comp, controlExpr);
1635 JITDUMP("results of lowering call:\n");
1636 DISPRANGE(controlExprRange);
1638 GenTree* insertionPoint = call;
1639 if (!call->IsTailCallViaHelper())
1641 // The controlExpr should go before the gtCallCookie and the gtCallAddr, if they exist
1643 // TODO-LIR: find out what's really required here, as this is currently a tree order
1645 if (call->gtCallType == CT_INDIRECT)
1647 bool isClosed = false;
1648 if (call->gtCallCookie != nullptr)
1651 GenTree* firstCallAddrNode = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode();
1653 assert(call->gtCallCookie->Precedes(firstCallAddrNode));
1656 insertionPoint = BlockRange().GetTreeRange(call->gtCallCookie, &isClosed).FirstNode();
1659 else if (call->gtCallAddr != nullptr)
1661 insertionPoint = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode();
1667 ContainCheckRange(controlExprRange);
1668 BlockRange().InsertBefore(insertionPoint, std::move(controlExprRange));
1670 call->gtControlExpr = controlExpr;
1672 if (call->IsFastTailCall())
1674 // Lowering a fast tail call can introduce new temps to set up args correctly for the callee.
1675 // This involves patching LCL_VAR and LCL_VAR_ADDR nodes holding Caller stack args
1676 // and replacing them with a new temp. Control expr also can contain nodes that need
1678 // Therefore lower fast tail call must be done after controlExpr is inserted into LIR.
1679 // There is one side effect which is flipping the order of PME and control expression
1680 // since LowerFastTailCall calls InsertPInvokeMethodEpilog.
1681 LowerFastTailCall(call);
1684 if (comp->opts.IsJit64Compat())
1686 CheckVSQuirkStackPaddingNeeded(call);
1689 ContainCheckCallOperands(call);
1690 JITDUMP("lowering call (after):\n");
1691 DISPTREERANGE(BlockRange(), call);
1695 // Though the below described issue gets fixed in the intellitrace dll of VS2015 (a.k.a. Dev14),
1696 // we still need this quirk for desktop so that older versions of VS (e.g. VS2010/2012)
1697 // continue to work.
1698 // This quirk is excluded from other targets that have no back compat burden.
1700 // Quirk for VS debug-launch scenario to work:
1701 // See if this is a PInvoke call with exactly one param that is the address of a struct local.
1702 // In such a case indicate to frame-layout logic to add 16-bytes of padding
1703 // between save-reg area and locals. This is to protect against the buffer
1704 // overrun bug in microsoft.intellitrace.11.0.0.dll!ProfilerInterop.InitInterop().
1706 // A work-around to this bug is to disable IntelliTrace debugging
1707 // (VS->Tools->Options->IntelliTrace->Enable IntelliTrace - uncheck this option).
1708 // The reason why this works on Jit64 is that at the point of AV the call stack is
1710 // GetSystemInfo() Native call
1711 // IL_Stub generated for PInvoke declaration.
1712 // ProfilerInterface::InitInterop()
1713 // ProfilerInterface.Cctor()
1716 // The cctor body has just the call to InitInterop(). The VM asm worker is holding
1717 // something in rbx that is used immediately after the Cctor call. The Jit64-generated
1718 // InitInterop() method pushes the registers in the following order
1728 // Due to the buffer overrun, rbx doesn't get impacted, whereas RyuJIT jitted code of
1729 // the same method pushes regs in the following order
1737 // Therefore as a fix, we add padding between save-reg area and locals to
1738 // make this scenario work against JB.
1740 // Note: If this quirk gets broken due to other JIT optimizations, we should consider
1741 // a more tolerant fix. One such fix is to pad the struct.
1742 void Lowering::CheckVSQuirkStackPaddingNeeded(GenTreeCall* call)
1744 assert(comp->opts.IsJit64Compat());
1746 #ifdef _TARGET_AMD64_
1747 // Confine this to IL stub calls which aren't marked as unmanaged.
1748 if (call->IsPInvoke() && !call->IsUnmanaged())
1750 bool paddingNeeded = false;
1751 GenTree* firstPutArgReg = nullptr;
1752 for (GenTreeArgList* args = call->gtCallLateArgs; args; args = args->Rest())
1754 GenTree* tmp = args->Current();
1755 if (tmp->OperGet() == GT_PUTARG_REG)
1757 if (firstPutArgReg == nullptr)
1759 firstPutArgReg = tmp;
1760 GenTree* op1 = firstPutArgReg->gtOp.gtOp1;
1762 if (op1->OperGet() == GT_LCL_VAR_ADDR)
1764 unsigned lclNum = op1->AsLclVarCommon()->GetLclNum();
1765 // TODO-1stClassStructs: This is here to duplicate previous behavior,
1766 // but is not needed because the scenario being quirked did not involve
1767 // a SIMD or enregisterable struct.
1768 // if(comp->lvaTable[lclNum].TypeGet() == TYP_STRUCT)
1769 if (varTypeIsStruct(comp->lvaTable[lclNum].TypeGet()))
1771 // First arg is addr of a struct local.
1772 paddingNeeded = true;
1776 // Not a struct local.
1777 assert(paddingNeeded == false);
1783 // First arg is not a local var addr.
1784 assert(paddingNeeded == false);
1790 // Has more than one arg.
1791 paddingNeeded = false;
1799 comp->compVSQuirkStackPaddingNeeded = VSQUIRK_STACK_PAD;
1802 #endif // _TARGET_AMD64_
1805 // Inserts profiler hook, GT_PROF_HOOK for a tail call node.
1808 // We need to insert this after all nested calls, but before all the arguments to this call have been set up.
1809 // To do this, we look for the first GT_PUTARG_STK or GT_PUTARG_REG, and insert the hook immediately before
1810 // that. If there are no args, then it should be inserted before the call node.
1813 // * stmtExpr void (top level) (IL 0x000...0x010)
1814 // arg0 SETUP | /--* argPlace ref REG NA $c5
1815 // this in rcx | | /--* argPlace ref REG NA $c1
1816 // | | | /--* call ref System.Globalization.CultureInfo.get_InvariantCulture $c2
1817 // arg1 SETUP | | +--* st.lclVar ref V02 tmp1 REG NA $c2
1818 // | | | /--* lclVar ref V02 tmp1 u : 2 (last use) REG NA $c2
1819 // arg1 in rdx | | +--* putarg_reg ref REG NA
1820 // | | | /--* lclVar ref V00 arg0 u : 2 (last use) REG NA $80
1821 // this in rcx | | +--* putarg_reg ref REG NA
1822 // | | /--* call nullcheck ref System.String.ToLower $c5
1823 // | | { * stmtExpr void (embedded)(IL 0x000... ? ? ? )
1824 // | | { \--* prof_hook void REG NA
1825 // arg0 in rcx | +--* putarg_reg ref REG NA
1826 // control expr | +--* const(h) long 0x7ffe8e910e98 ftn REG NA
1827 // \--* call void System.Runtime.Remoting.Identity.RemoveAppNameOrAppGuidIfNecessary $VN.Void
1829 // In this case, the GT_PUTARG_REG src is a nested call. We need to put the instructions after that call
1830 // (as shown). We assume that of all the GT_PUTARG_*, only the first one can have a nested call.
1833 // Insert the profiler hook immediately before the call. The profiler hook will preserve
1834 // all argument registers (ECX, EDX), but nothing else.
1837 // callNode - tail call node
1838 // insertionPoint - if non-null, insert the profiler hook before this point.
1839 // If null, insert the profiler hook before args are setup
1840 // but after all arg side effects are computed.
1842 void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree* insertionPoint)
1844 assert(call->IsTailCall());
1845 assert(comp->compIsProfilerHookNeeded());
1847 #if defined(_TARGET_X86_)
1849 if (insertionPoint == nullptr)
1851 insertionPoint = call;
1854 #else // !defined(_TARGET_X86_)
1856 if (insertionPoint == nullptr)
1858 GenTree* tmp = nullptr;
1859 for (GenTreeArgList* args = call->gtCallArgs; args; args = args->Rest())
1861 tmp = args->Current();
1862 assert(tmp->OperGet() != GT_PUTARG_REG); // We don't expect to see these in gtCallArgs
1863 if (tmp->OperGet() == GT_PUTARG_STK)
1866 insertionPoint = tmp;
1871 if (insertionPoint == nullptr)
1873 for (GenTreeArgList* args = call->gtCallLateArgs; args; args = args->Rest())
1875 tmp = args->Current();
1876 if ((tmp->OperGet() == GT_PUTARG_REG) || (tmp->OperGet() == GT_PUTARG_STK))
1879 insertionPoint = tmp;
1884 // If there are no args, insert before the call node
1885 if (insertionPoint == nullptr)
1887 insertionPoint = call;
1892 #endif // !defined(_TARGET_X86_)
1894 assert(insertionPoint != nullptr);
1895 GenTree* profHookNode = new (comp, GT_PROF_HOOK) GenTree(GT_PROF_HOOK, TYP_VOID);
1896 BlockRange().InsertBefore(insertionPoint, profHookNode);
1899 // Lower fast tail call implemented as epilog+jmp.
1900 // Also inserts PInvoke method epilog if required.
1901 void Lowering::LowerFastTailCall(GenTreeCall* call)
1903 #if FEATURE_FASTTAILCALL
1904 // Tail call restrictions i.e. conditions under which tail prefix is ignored.
1905 // Most of these checks are already done by importer or fgMorphTailCall().
1906 // This serves as a double sanity check.
1907 assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
1908 assert(!comp->opts.compNeedSecurityCheck); // tail call from methods that need security check
1909 assert(!call->IsUnmanaged()); // tail calls to unmanaged methods
1910 assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
1912 #ifdef _TARGET_AMD64_
1913 assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
1914 #endif // _TARGET_AMD64_
1916 // We expect to see a call that meets the following conditions
1917 assert(call->IsFastTailCall());
1919 // VM cannot use return address hijacking when A() and B() tail call each
1920 // other in mutual recursion. Therefore, this block is reachable through
1921 // a GC-safe point or the whole method is marked as fully interruptible.
1924 // optReachWithoutCall() depends on the fact that loop header blocks
1925 // will have a block number > fgLastBB. These loop headers get added
1926 // after dominator computation and get skipped by optReachWithoutCall().
1927 // The below condition cannot be asserted in lower because fgSimpleLowering()
1928 // can add a new basic block for range check failure which becomes
1929 // fgLastBB with block number > loop header block number.
1930 // assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
1931 // !comp->optReachWithoutCall(comp->fgFirstBB, comp->compCurBB) || comp->genInterruptible);
1933 // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
1934 // a method returns. This covers the case where the caller method has both PInvokes and tail calls.
1935 if (comp->info.compCallUnmanaged)
1937 InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
1940 // Args for the tail call are set up in the incoming arg area. The gc-ness of args of
1941 // caller and callee (which is being tail called) may not match. Therefore, everything
1942 // from arg setup until the epilog needs to be non-interruptible by GC. This is
1943 // achieved by inserting GT_START_NONGC before the very first GT_PUTARG_STK node
1944 // of the call. Note that once a stack arg is set up, no nested calls may follow it
1945 // in execution order to set up other args, because such a nested
1946 // call could over-write the stack arg that was set up earlier.
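// For illustration (the exact trees are hypothetical), the required execution order is roughly:
//     <arg side-effect computation> ..., GT_START_NONGC, GT_PUTARG_STK ..., GT_PUTARG_REG ..., GT_CALL
// with no nested calls between the first GT_PUTARG_STK and the call itself.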
1947 GenTree* firstPutArgStk = nullptr;
1948 GenTreeArgList* args;
1949 ArrayStack<GenTree*> putargs(comp->getAllocator(CMK_ArrayStack));
1951 for (args = call->gtCallArgs; args; args = args->Rest())
1953 GenTree* tmp = args->Current();
1954 if (tmp->OperGet() == GT_PUTARG_STK)
1960 for (args = call->gtCallLateArgs; args; args = args->Rest())
1962 GenTree* tmp = args->Current();
1963 if (tmp->OperGet() == GT_PUTARG_STK)
1969 if (!putargs.Empty())
1971 firstPutArgStk = putargs.Bottom();
1974 // If we have a putarg_stk node, also count the number of non-standard args the
1975 // call node has. Note that while determining whether a tail call can be fast
1976 // tail called, we don't count non-standard args (passed in R10 or R11) since they
1977 // don't contribute to outgoing arg space. These non-standard args are not
1978 // accounted in caller's arg count but accounted in callee's arg count after
1979 // fgMorphArgs(). Therefore, exclude callee's non-standard args while mapping
1980 // callee's stack arg num to corresponding caller's stack arg num.
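// Illustrative example (arg layout assumed for the sake of the numbers): if fgMorphArgs added one
// non-standard arg to the callee (say an indirection cell in R11), a callee PUTARG_STK whose
// argNum is 3 maps to caller parameter number 3 - 1 == 2 in the subtraction below.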
1981 unsigned calleeNonStandardArgCount = call->GetNonStandardAddedArgCount(comp);
1983 // Say Caller(a, b, c, d, e) fast tail calls Callee(e, d, c, b, a)
1984 // i.e. passes its arguments in reverse to Callee. During call site
1985 // setup, after computing argument side effects, stack args are setup
1986 // first and reg args next. In the above example, both Callers and
1987 // Callee stack args (e and a respectively) share the same stack slot
1988 // and are alive at the same time. The act of setting up Callee's
1989 // stack arg will over-write the stack arg of Caller and if there are
1990 // further uses of Caller stack arg we have to make sure that we move
1991 // it to a temp before over-writing its slot and use temp in place of
1992 // the corresponding Caller stack arg.
1994 // For the above example, conceptually this is what is done
1996 // Stack slot of e = a
1997 // R9 = b, R8 = c, RDx = d
2000 // The below logic is meant to detect cases like this and introduce
2001 // temps to set up args correctly for Callee.
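// Rough sketch of the rewrite performed below (names are illustrative only):
//     tmpN = callerArg                  // assignment inserted before the first PUTARG_STK
//     PUTARG_STK ...                    // may now safely overwrite callerArg's incoming slot
//     (later uses of callerArg, up to the call, are redirected to tmpN)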
2003 for (int i = 0; i < putargs.Height(); i++)
2005 GenTree* putArgStkNode = putargs.Bottom(i);
2007 assert(putArgStkNode->OperGet() == GT_PUTARG_STK);
2009 // Get the caller arg num corresponding to this callee arg.
2010 // Note that these two args share the same stack slot. Therefore,
2011 // if there are further uses of corresponding caller arg, we need
2012 // to move it to a temp and use the temp in this call tree.
2014 // Note that Caller is guaranteed to have a param corresponding to
2015 // this Callee's arg since fast tail call mechanism counts the
2016 // stack slots required for both Caller and Callee for passing params
2017 // and allow fast tail call only if stack slots required by Caller >= those required by Callee.
2019 fgArgTabEntry* argTabEntry = comp->gtArgEntryByNode(call, putArgStkNode);
2020 assert(argTabEntry);
2021 unsigned callerArgNum = argTabEntry->argNum - calleeNonStandardArgCount;
2022 noway_assert(callerArgNum < comp->info.compArgsCount);
2024 unsigned callerArgLclNum = callerArgNum;
2025 LclVarDsc* callerArgDsc = comp->lvaTable + callerArgLclNum;
2026 if (callerArgDsc->lvPromoted)
2029 callerArgLclNum = callerArgDsc->lvFieldLclStart; // update callerArgLclNum to the promoted struct field's lclNum
2030 callerArgDsc = comp->lvaTable + callerArgLclNum;
2032 noway_assert(callerArgDsc->lvIsParam);
2034 // Start searching in execution order list till we encounter call node
2035 unsigned tmpLclNum = BAD_VAR_NUM;
2036 var_types tmpType = TYP_UNDEF;
2037 for (GenTree* treeNode = putArgStkNode->gtNext; treeNode != call; treeNode = treeNode->gtNext)
2039 if (treeNode->OperIsLocal() || treeNode->OperIsLocalAddr())
2041 // This should not be a GT_PHI_ARG.
2042 assert(treeNode->OperGet() != GT_PHI_ARG);
2044 GenTreeLclVarCommon* lcl = treeNode->AsLclVarCommon();
2046 // Fast tail calling criteria permits passing of structs of size 1, 2, 4 and 8 as args.
2047 // It is possible that the callerArgLclNum corresponds to such a struct whose stack slot
2048 // is getting over-written by setting up of a stack arg and there are further uses of
2049 // any of its fields if such a struct is type-dependently promoted. In this case too
2050 // we need to introduce a temp.
2051 if ((lcl->gtLclNum == callerArgNum) || (lcl->gtLclNum == callerArgLclNum))
2053 // Create tmp and use it in place of callerArgDsc
2054 if (tmpLclNum == BAD_VAR_NUM)
2056 // Set tmpType first before calling lvaGrabTemp, as that call invalidates callerArgDsc
2057 tmpType = genActualType(callerArgDsc->lvaArgType());
2058 tmpLclNum = comp->lvaGrabTemp(
2059 true DEBUGARG("Fast tail call lowering is creating a new local variable"));
2061 comp->lvaTable[tmpLclNum].lvType = tmpType;
2062 comp->lvaTable[tmpLclNum].lvDoNotEnregister = comp->lvaTable[lcl->gtLclNum].lvDoNotEnregister;
2065 lcl->SetLclNum(tmpLclNum);
2070 // If we have created a temp, insert an assignment for it before
2071 // the first putArgStkNode, i.e.
2072 // tmpLcl = CallerArg
2073 if (tmpLclNum != BAD_VAR_NUM)
2075 assert(tmpType != TYP_UNDEF);
2076 GenTreeLclVar* local = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, tmpType, callerArgLclNum);
2077 GenTree* assignExpr = comp->gtNewTempAssign(tmpLclNum, local);
2078 ContainCheckRange(local, assignExpr);
2079 BlockRange().InsertBefore(firstPutArgStk, LIR::SeqTree(comp, assignExpr));
2083 // Insert GT_START_NONGC node before the first GT_PUTARG_STK node.
2084 // Note that if there are no args to be setup on stack, no need to
2085 // insert GT_START_NONGC node.
2086 GenTree* startNonGCNode = nullptr;
2087 if (firstPutArgStk != nullptr)
2089 startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID);
2090 BlockRange().InsertBefore(firstPutArgStk, startNonGCNode);
2092 // Gc-interruptability in the following case:
2093 // foo(a, b, c, d, e) { bar(a, b, c, d, e); }
2094 // bar(a, b, c, d, e) { foo(a, b, d, d, e); }
2096 // Since the instruction group starting from the instruction that sets up first
2097 // stack arg to the end of the tail call is marked as non-gc interruptible,
2098 // this will form a non-interruptible tight loop causing gc-starvation. To fix
2099 // this we insert GT_NO_OP as embedded stmt before GT_START_NONGC, if the method
2100 // has a single basic block and is not a GC-safe point. The presence of a single
2101 // nop outside non-gc interruptible region will prevent gc starvation.
2102 if ((comp->fgBBcount == 1) && !(comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT))
2104 assert(comp->fgFirstBB == comp->compCurBB);
2105 GenTree* noOp = new (comp, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
2106 BlockRange().InsertBefore(startNonGCNode, noOp);
2110 // Insert GT_PROF_HOOK node to emit profiler tail call hook. This should be
2111 // inserted before the args are setup but after the side effects of args are
2112 // computed. That is, GT_PROF_HOOK node needs to be inserted before GT_START_NONGC
2113 // node if one exists.
2114 if (comp->compIsProfilerHookNeeded())
2116 InsertProfTailCallHook(call, startNonGCNode);
2119 #else // !FEATURE_FASTTAILCALL
2121 // The platform chose not to implement the fast tail call mechanism.
2122 // In such a case we should never be reaching this method as
2123 // the expectation is that IsTailCallViaHelper() will always
2124 // be true on such a platform.
2129 //------------------------------------------------------------------------
2130 // LowerTailCallViaHelper: lower a call via the tailcall helper. Morph
2131 // has already inserted tailcall helper special arguments. This function
2132 // inserts actual data for some placeholders.
2134 // For ARM32, AMD64, lower
2135 // tail.call(void* copyRoutine, void* dummyArg, ...)
// as
2137 // Jit_TailCall(void* copyRoutine, void* callTarget, ...)
//
// For x86, lower
2140 // tail.call(<function args>, int numberOfOldStackArgs, int dummyNumberOfNewStackArgs, int flags, void* dummyArg)
// as
2142 // JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void* callTarget)
2144 // Note that the special arguments are on the stack, whereas the function arguments follow the normal convention.
2146 // Also inserts PInvoke method epilog if required.
2149 // call - The call node
2150 // callTarget - The real call target. This is used to replace the dummyArg during lowering.
2153 // Returns control expression tree for making a call to helper Jit_TailCall.
2155 GenTree* Lowering::LowerTailCallViaHelper(GenTreeCall* call, GenTree* callTarget)
2157 // Tail call restrictions i.e. conditions under which tail prefix is ignored.
2158 // Most of these checks are already done by importer or fgMorphTailCall().
2159 // This serves as a double sanity check.
2160 assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
2161 assert(!comp->opts.compNeedSecurityCheck); // tail call from methods that need security check
2162 assert(!call->IsUnmanaged()); // tail calls to unmanaged methods
2163 assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
2165 #ifdef _TARGET_AMD64_
2166 assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
2167 #endif // _TARGET_AMD64_
2169 // We expect to see a call that meets the following conditions
2170 assert(call->IsTailCallViaHelper());
2171 assert(callTarget != nullptr);
2173 // The TailCall helper call never returns to the caller and is not GC interruptible.
2174 // Therefore the block containing the tail call should be a GC safe point to avoid
2175 // GC starvation. It is legal for the block to be unmarked iff the entry block is a
2176 // GC safe point, as the entry block trivially dominates every reachable block.
2177 assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (comp->fgFirstBB->bbFlags & BBF_GC_SAFE_POINT));
2179 // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
3180 // a method returns. This covers the case where the caller method has both PInvokes and tail calls.
2181 if (comp->info.compCallUnmanaged)
2183 InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
2186 // Remove gtCallAddr from execution order if present.
2187 if (call->gtCallType == CT_INDIRECT)
2189 assert(call->gtCallAddr != nullptr);
2192 LIR::ReadOnlyRange callAddrRange = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed);
2195 BlockRange().Remove(std::move(callAddrRange));
2198 // The callTarget tree needs to be sequenced.
2199 LIR::Range callTargetRange = LIR::SeqTree(comp, callTarget);
2201 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
2203 // For ARM32 and AMD64, the first argument is CopyRoutine and the second argument is a placeholder node.
2204 fgArgTabEntry* argEntry;
2207 argEntry = comp->gtArgEntryByArgNum(call, 0);
2208 assert(argEntry != nullptr);
2209 assert(argEntry->node->gtOper == GT_PUTARG_REG);
2210 GenTree* firstArg = argEntry->node->gtOp.gtOp1;
2211 assert(firstArg->gtOper == GT_CNS_INT);
2214 // Replace second arg by callTarget.
2215 argEntry = comp->gtArgEntryByArgNum(call, 1);
2216 assert(argEntry != nullptr);
2217 assert(argEntry->node->gtOper == GT_PUTARG_REG);
2218 GenTree* secondArg = argEntry->node->gtOp.gtOp1;
2220 ContainCheckRange(callTargetRange);
2221 BlockRange().InsertAfter(secondArg, std::move(callTargetRange));
2224 LIR::ReadOnlyRange secondArgRange = BlockRange().GetTreeRange(secondArg, &isClosed);
2227 BlockRange().Remove(std::move(secondArgRange));
2229 argEntry->node->gtOp.gtOp1 = callTarget;
2231 #elif defined(_TARGET_X86_)
2233 // Verify the special args are what we expect, and replace the dummy args with real values.
2234 // We need to figure out the size of the outgoing stack arguments, not including the special args.
2235 // The number of 4-byte words is passed to the helper for the incoming and outgoing argument sizes.
2236 // This number is exactly the next slot number in the call's argument info struct.
2237 unsigned nNewStkArgsWords = call->fgArgInfo->GetNextSlotNum();
2238 assert(nNewStkArgsWords >= 4); // There must be at least the four special stack args.
2239 nNewStkArgsWords -= 4;
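// For example, a call site with two user stack arg slots plus the four special args would have
// GetNextSlotNum() == 6, leaving nNewStkArgsWords == 2.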
2241 unsigned numArgs = call->fgArgInfo->ArgCount();
2243 fgArgTabEntry* argEntry;
2245 // arg 0 == callTarget.
2246 argEntry = comp->gtArgEntryByArgNum(call, numArgs - 1);
2247 assert(argEntry != nullptr);
2248 assert(argEntry->node->gtOper == GT_PUTARG_STK);
2249 GenTree* arg0 = argEntry->node->gtOp.gtOp1;
2251 ContainCheckRange(callTargetRange);
2252 BlockRange().InsertAfter(arg0, std::move(callTargetRange));
2255 LIR::ReadOnlyRange secondArgRange = BlockRange().GetTreeRange(arg0, &isClosed);
2257 BlockRange().Remove(std::move(secondArgRange));
2259 argEntry->node->gtOp.gtOp1 = callTarget;
// arg 1 == flags
2262 argEntry = comp->gtArgEntryByArgNum(call, numArgs - 2);
2263 assert(argEntry != nullptr);
2264 assert(argEntry->node->gtOper == GT_PUTARG_STK);
2265 GenTree* arg1 = argEntry->node->gtOp.gtOp1;
2266 assert(arg1->gtOper == GT_CNS_INT);
2268 ssize_t tailCallHelperFlags = 1 | // always restore EDI,ESI,EBX
2269 (call->IsVirtualStub() ? 0x2 : 0x0); // Stub dispatch flag
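// i.e. the flags word is 0x1 for a normal tail call and 0x3 for a virtual stub dispatch tail call.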
2270 arg1->gtIntCon.gtIconVal = tailCallHelperFlags;
2272 // arg 2 == numberOfNewStackArgsWords
2273 argEntry = comp->gtArgEntryByArgNum(call, numArgs - 3);
2274 assert(argEntry != nullptr);
2275 assert(argEntry->node->gtOper == GT_PUTARG_STK);
2276 GenTree* arg2 = argEntry->node->gtOp.gtOp1;
2277 assert(arg2->gtOper == GT_CNS_INT);
2279 arg2->gtIntCon.gtIconVal = nNewStkArgsWords;
2282 // arg 3 == numberOfOldStackArgsWords
2283 argEntry = comp->gtArgEntryByArgNum(call, numArgs - 4);
2284 assert(argEntry != nullptr);
2285 assert(argEntry->node->gtOper == GT_PUTARG_STK);
2286 GenTree* arg3 = argEntry->node->gtOp.gtOp1;
2287 assert(arg3->gtOper == GT_CNS_INT);
2291 NYI("LowerTailCallViaHelper");
2294 // Transform this call node into a call to Jit tail call helper.
2295 call->gtCallType = CT_HELPER;
2296 call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_TAILCALL);
2297 call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK;
2299 // Lower this as if it were a pure helper call.
2300 call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_HELPER);
2301 GenTree* result = LowerDirectCall(call);
2303 // Now add back tail call flags for identifying this node as tail call dispatched via helper.
2304 call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_HELPER;
2306 #ifdef PROFILING_SUPPORTED
2307 // Insert profiler tail call hook if needed.
2308 // Since we don't know the insertion point, pass null for second param.
2309 if (comp->compIsProfilerHookNeeded())
2311 InsertProfTailCallHook(call, nullptr);
2313 #endif // PROFILING_SUPPORTED
2315 assert(call->IsTailCallViaHelper());
2320 #ifndef _TARGET_64BIT_
2321 //------------------------------------------------------------------------
2322 // Lowering::DecomposeLongCompare: Decomposes a TYP_LONG compare node.
2325 // cmp - the compare node
2328 // The next node to lower.
2331 // This is done during lowering because DecomposeLongs handles only nodes
2332 // that produce TYP_LONG values. Compare nodes may consume TYP_LONG values
2333 // but produce TYP_INT values.
2335 GenTree* Lowering::DecomposeLongCompare(GenTree* cmp)
2337 assert(cmp->gtGetOp1()->TypeGet() == TYP_LONG);
2339 GenTree* src1 = cmp->gtGetOp1();
2340 GenTree* src2 = cmp->gtGetOp2();
2341 assert(src1->OperIs(GT_LONG));
2342 assert(src2->OperIs(GT_LONG));
2343 GenTree* loSrc1 = src1->gtGetOp1();
2344 GenTree* hiSrc1 = src1->gtGetOp2();
2345 GenTree* loSrc2 = src2->gtGetOp1();
2346 GenTree* hiSrc2 = src2->gtGetOp2();
2347 BlockRange().Remove(src1);
2348 BlockRange().Remove(src2);
2350 genTreeOps condition = cmp->OperGet();
2354 if (cmp->OperIs(GT_EQ, GT_NE))
2357 // Transform (x EQ|NE y) into (((x.lo XOR y.lo) OR (x.hi XOR y.hi)) EQ|NE 0). If y is 0 then this can
2358 // be reduced to just ((x.lo OR x.hi) EQ|NE 0). The OR is expected to set the condition flags so we
2359 // don't need to generate a redundant compare against 0, we only generate a SETCC|JCC instruction.
2361 // XOR is used rather than SUB because it is commutative and thus allows swapping the operands when
2362 // the first happens to be a constant. Usually only the second compare operand is a constant but it's
2363 // still possible to have a constant on the left side. For example, when src1 is a uint->ulong cast
2364 // then hiSrc1 would be 0.
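// As a concrete illustration, "x EQ 0" for a TYP_LONG x reduces to:
//     t = GT_OR(x.lo, x.hi)          // the OR sets the zero flag
//     GT_SETCC(EQ) / GT_JCC(EQ)      // consumes the flags; no explicit compare is emitted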
2367 if (loSrc1->OperIs(GT_CNS_INT))
2369 std::swap(loSrc1, loSrc2);
2372 if (loSrc2->IsIntegralConst(0))
2374 BlockRange().Remove(loSrc2);
2379 loCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, loSrc1, loSrc2);
2380 BlockRange().InsertBefore(cmp, loCmp);
2381 ContainCheckBinary(loCmp->AsOp());
2384 if (hiSrc1->OperIs(GT_CNS_INT))
2386 std::swap(hiSrc1, hiSrc2);
2389 if (hiSrc2->IsIntegralConst(0))
2391 BlockRange().Remove(hiSrc2);
2396 hiCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, hiSrc1, hiSrc2);
2397 BlockRange().InsertBefore(cmp, hiCmp);
2398 ContainCheckBinary(hiCmp->AsOp());
2401 hiCmp = comp->gtNewOperNode(GT_OR, TYP_INT, loCmp, hiCmp);
2402 BlockRange().InsertBefore(cmp, hiCmp);
2403 ContainCheckBinary(hiCmp->AsOp());
2407 assert(cmp->OperIs(GT_LT, GT_LE, GT_GE, GT_GT));
2410 // If the compare is signed then (x LT|GE y) can be transformed into ((x SUB y) LT|GE 0).
2411 // If the compare is unsigned we can still use SUB but we need to check the Carry flag,
2412 // not the actual result. In both cases we can simply check the appropriate condition flags
2413 // and ignore the actual result:
2414 // SUB_LO loSrc1, loSrc2
2415 // SUB_HI hiSrc1, hiSrc2
2416 // SETCC|JCC (signed|unsigned LT|GE)
2417 // If loSrc2 happens to be 0 then the first SUB can be eliminated and the second one can
2418 // be turned into a CMP because the first SUB would have set carry to 0. This effectively
2419 // transforms a long compare against 0 into an int compare of the high part against 0.
2421 // (x LE|GT y) can be transformed into ((x SUB y) LE|GT 0) but checking that a long value
2422 // is greater than 0 is not so easy. We need to turn this into a positive/negative check
2423 // like the one we get for LT|GE compares, this can be achieved by swapping the compare:
2424 // (x LE|GT y) becomes (y GE|LT x)
2426 // Having to swap operands is problematic when the second operand is a constant. The constant
2427 // moves to the first operand where it cannot be contained and thus needs a register. This can
2428 // be avoided by changing the constant such that LE|GT becomes LT|GE:
2429 // (x LE|GT 41) becomes (x LT|GE 42)
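// The "value != maxValue" check below guards the one case where this adjustment would overflow
// (the constant is already UINT64_MAX or INT64_MAX); in that case mustSwap stays true and the
// operands are swapped instead.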
2432 if (cmp->OperIs(GT_LE, GT_GT))
2434 bool mustSwap = true;
2436 if (loSrc2->OperIs(GT_CNS_INT) && hiSrc2->OperIs(GT_CNS_INT))
2438 uint32_t loValue = static_cast<uint32_t>(loSrc2->AsIntCon()->IconValue());
2439 uint32_t hiValue = static_cast<uint32_t>(hiSrc2->AsIntCon()->IconValue());
2440 uint64_t value = static_cast<uint64_t>(loValue) | (static_cast<uint64_t>(hiValue) << 32);
2441 uint64_t maxValue = cmp->IsUnsigned() ? UINT64_MAX : INT64_MAX;
2443 if (value != maxValue)
2446 loValue = value & UINT32_MAX;
2447 hiValue = (value >> 32) & UINT32_MAX;
2448 loSrc2->AsIntCon()->SetIconValue(loValue);
2449 hiSrc2->AsIntCon()->SetIconValue(hiValue);
2451 condition = cmp->OperIs(GT_LE) ? GT_LT : GT_GE;
2458 std::swap(loSrc1, loSrc2);
2459 std::swap(hiSrc1, hiSrc2);
2460 condition = GenTree::SwapRelop(condition);
2464 assert((condition == GT_LT) || (condition == GT_GE));
2466 if (loSrc2->IsIntegralConst(0))
2468 BlockRange().Remove(loSrc2);
2470 // Very conservative dead code removal... but it helps.
2472 if (loSrc1->OperIs(GT_CNS_INT, GT_LCL_VAR, GT_LCL_FLD))
2474 BlockRange().Remove(loSrc1);
2478 loSrc1->SetUnusedValue();
2481 hiCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, hiSrc1, hiSrc2);
2482 BlockRange().InsertBefore(cmp, hiCmp);
2483 ContainCheckCompare(hiCmp->AsOp());
2487 loCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, loSrc1, loSrc2);
2488 hiCmp = comp->gtNewOperNode(GT_SUB_HI, TYP_INT, hiSrc1, hiSrc2);
2489 BlockRange().InsertBefore(cmp, loCmp, hiCmp);
2490 ContainCheckCompare(loCmp->AsOp());
2491 ContainCheckBinary(hiCmp->AsOp());
2494 // Try to move the first SUB_HI operands right in front of it; this allows using
2495 // a single temporary register instead of 2 (one for CMP and one for SUB_HI). Do
2496 // this only for locals as they won't change condition flags. Note that we could
2497 // move constants (except 0, which generates "XOR reg, reg") but it's extremely rare
2498 // to have a constant as the first operand.
2501 if (hiSrc1->OperIs(GT_LCL_VAR, GT_LCL_FLD))
2503 BlockRange().Remove(hiSrc1);
2504 BlockRange().InsertBefore(hiCmp, hiSrc1);
2509 hiCmp->gtFlags |= GTF_SET_FLAGS;
2510 if (hiCmp->IsValue())
2512 hiCmp->SetUnusedValue();
2516 if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE))
2518 BlockRange().Remove(cmp);
2520 GenTree* jcc = cmpUse.User();
2521 jcc->gtOp.gtOp1 = nullptr;
2522 jcc->ChangeOper(GT_JCC);
2523 jcc->gtFlags |= GTF_USE_FLAGS;
2524 jcc->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned());
2528 cmp->gtOp.gtOp1 = nullptr;
2529 cmp->gtOp.gtOp2 = nullptr;
2530 cmp->ChangeOper(GT_SETCC);
2531 cmp->gtFlags |= GTF_USE_FLAGS;
2532 cmp->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned());
2537 #endif // !_TARGET_64BIT_
2539 //------------------------------------------------------------------------
2540 // Lowering::OptimizeConstCompare: Performs various "compare with const" optimizations.
2543 // cmp - the compare node
2546 // The original compare node if lowering should proceed as usual or the next node
2547 // to lower if the compare node was changed in such a way that lowering is no longer needed.
2551 // - Narrow operands to enable memory operand containment (XARCH specific).
2552 // - Transform cmp(and(x, y), 0) into test(x, y) (XARCH/Arm64 specific but could
2553 // be used for ARM as well if support for GT_TEST_EQ/GT_TEST_NE is added).
2554 // - Transform TEST(x, LSH(1, y)) into BT(x, y) (XARCH specific)
2555 // - Transform RELOP(OP, 0) into SETCC(OP) or JCC(OP) if OP can set the
2556 // condition flags appropriately (XARCH/ARM64 specific but could be extended
2557 // to ARM32 as well if ARM32 codegen supports GTF_SET_FLAGS).
2559 GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
2561 assert(cmp->gtGetOp2()->IsIntegralConst());
2563 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
2564 GenTree* op1 = cmp->gtGetOp1();
2565 GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon();
2566 ssize_t op2Value = op2->IconValue();
2568 #ifdef _TARGET_XARCH_
2569 var_types op1Type = op1->TypeGet();
2570 if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && genSmallTypeCanRepresentValue(op1Type, op2Value))
2573 // If op1's type is small then try to narrow op2 so it has the same type as op1.
2574 // Small types are usually used by memory loads and if both compare operands have
2575 // the same type then the memory load can be contained. In certain situations
2576 // (e.g "cmp ubyte, 200") we also get a smaller instruction encoding.
2579 op2->gtType = op1Type;
2583 if (op1->OperIs(GT_CAST) && !op1->gtOverflow())
2585 GenTreeCast* cast = op1->AsCast();
2586 var_types castToType = cast->CastToType();
2587 GenTree* castOp = cast->gtGetOp1();
2589 if (((castToType == TYP_BOOL) || (castToType == TYP_UBYTE)) && FitsIn<UINT8>(op2Value))
2592 // Since we're going to remove the cast we need to be able to narrow the cast operand
2593 // to the cast type. This can be done safely only for certain opers (e.g AND, OR, XOR).
2594 // Some opers just can't be narrowed (e.g DIV, MUL) while other could be narrowed but
2595 // doing so would produce incorrect results (e.g. RSZ, RSH).
2597 // The below list of handled opers is conservative but enough to handle the most common
2598 // situations. In particular this includes CALL; sometimes the JIT unnecessarily widens
2599 // the result of bool returning calls.
2602 #ifdef _TARGET_ARM64_
2603 (op2Value == 0) && cmp->OperIs(GT_EQ, GT_NE, GT_GT) &&
2605 (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical()
2606 #ifdef _TARGET_XARCH_
2607 || IsContainableMemoryOp(castOp)
2613 assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation
2615 #ifdef _TARGET_ARM64_
2616 bool cmpEq = cmp->OperIs(GT_EQ);
2618 cmp->SetOperRaw(cmpEq ? GT_TEST_EQ : GT_TEST_NE);
2619 op2->SetIconValue(0xff);
2620 op2->gtType = castOp->gtType;
2622 castOp->gtType = castToType;
2623 op2->gtType = castToType;
2625 // If we have any contained memory ops on castOp, they must now not be contained.
2626 if (castOp->OperIsLogical())
2628 GenTree* op1 = castOp->gtGetOp1();
2629 if ((op1 != nullptr) && !op1->IsCnsIntOrI())
2631 op1->ClearContained();
2633 GenTree* op2 = castOp->gtGetOp2();
2634 if ((op2 != nullptr) && !op2->IsCnsIntOrI())
2636 op2->ClearContained();
2639 cmp->gtOp.gtOp1 = castOp;
2641 BlockRange().Remove(cast);
2645 else if (op1->OperIs(GT_AND) && cmp->OperIs(GT_EQ, GT_NE))
2648 // Transform ((x AND y) EQ|NE 0) into (x TEST_EQ|TEST_NE y) when possible.
2651 GenTree* andOp1 = op1->gtGetOp1();
2652 GenTree* andOp2 = op1->gtGetOp2();
2657 // If we don't have a 0 compare we can get one by transforming ((x AND mask) EQ|NE mask)
2658 // into ((x AND mask) NE|EQ 0) when mask is a single bit.
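// For example, ((x AND 8) EQ 8) is first rewritten here as ((x AND 8) NE 0), which the code
// below then turns into (x TEST_NE 8).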
2661 if (isPow2(static_cast<size_t>(op2Value)) && andOp2->IsIntegralConst(op2Value))
2664 op2->SetIconValue(0);
2665 cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet()));
2671 BlockRange().Remove(op1);
2672 BlockRange().Remove(op2);
2674 cmp->SetOperRaw(cmp->OperIs(GT_EQ) ? GT_TEST_EQ : GT_TEST_NE);
2675 cmp->gtOp.gtOp1 = andOp1;
2676 cmp->gtOp.gtOp2 = andOp2;
2677 // We will re-evaluate containment below
2678 andOp1->ClearContained();
2679 andOp2->ClearContained();
2681 #ifdef _TARGET_XARCH_
2682 if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
2685 // For "test" we only care about the bits that are set in the second operand (mask).
2686 // If the mask fits in a small type then we can narrow both operands to generate a "test"
2687 // instruction with a smaller encoding ("test" does not have a r/m32, imm8 form) and avoid
2688 // a widening load in some cases.
2690 // For 16 bit operands we narrow only if the memory operand is already 16 bit. This matches
2691 // the behavior of a previous implementation and avoids adding more cases where we generate
2692 // 16 bit instructions that require a length changing prefix (0x66). These suffer from
2693 // significant decoder stalls on Intel CPUs.
2695 // We could also do this for 64 bit masks that fit into 32 bit but it doesn't help.
2696 // In such cases morph narrows down the existing GT_AND by inserting a cast between it and
2697 // the memory operand so we'd need to add more code to recognize and eliminate that cast.
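// Illustrative example: TEST_NE(ind:int [mem], icon 0x20) is narrowed below to
// TEST_NE(ind:ubyte [mem], icon:ubyte 0x20), allowing "test byte ptr [mem], 0x20" instead of a
// 4 byte load and test.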
2700 size_t mask = static_cast<size_t>(andOp2->AsIntCon()->IconValue());
2702 if (FitsIn<UINT8>(mask))
2704 andOp1->gtType = TYP_UBYTE;
2705 andOp2->gtType = TYP_UBYTE;
2707 else if (FitsIn<UINT16>(mask) && genTypeSize(andOp1) == 2)
2709 andOp1->gtType = TYP_USHORT;
2710 andOp2->gtType = TYP_USHORT;
2717 if (cmp->OperIs(GT_TEST_EQ, GT_TEST_NE))
2719 #ifdef _TARGET_XARCH_
2721 // Transform TEST_EQ|NE(x, LSH(1, y)) into BT(x, y) when possible. Using BT
2722 // results in smaller and faster code. It also doesn't have special register
2723 // requirements, unlike LSH that requires the shift count to be in ECX.
2724 // Note that BT has the same behavior as LSH when the bit index exceeds the
2725 // operand bit size - it uses (bit_index MOD bit_size).
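// For example, TEST_NE(x, LSH(1, y)), i.e. "(x & (1 << y)) != 0", becomes BT(x, y) followed by a
// SETCC/JCC on the carry flag (GenCondition::C below).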
2728 GenTree* lsh = cmp->gtGetOp2();
2731 if (lsh->OperIs(GT_LSH) && varTypeIsIntOrI(lsh->TypeGet()) && lsh->gtGetOp1()->IsIntegralConst(1) &&
2732 BlockRange().TryGetUse(cmp, &cmpUse))
2734 GenCondition condition = cmp->OperIs(GT_TEST_NE) ? GenCondition::C : GenCondition::NC;
2736 cmp->SetOper(GT_BT);
2737 cmp->gtType = TYP_VOID;
2738 cmp->gtFlags |= GTF_SET_FLAGS;
2739 cmp->gtOp.gtOp2 = lsh->gtGetOp2();
2740 cmp->gtGetOp2()->ClearContained();
2742 BlockRange().Remove(lsh->gtGetOp1());
2743 BlockRange().Remove(lsh);
2747 if (cmpUse.User()->OperIs(GT_JTRUE))
2749 cmpUse.User()->ChangeOper(GT_JCC);
2750 cc = cmpUse.User()->AsCC();
2751 cc->gtCondition = condition;
2755 cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT);
2756 BlockRange().InsertAfter(cmp, cc);
2757 cmpUse.ReplaceWith(comp, cc);
2760 cc->gtFlags |= GTF_USE_FLAGS;
2764 #endif // _TARGET_XARCH_
2766 else if (cmp->OperIs(GT_EQ, GT_NE))
2768 GenTree* op1 = cmp->gtGetOp1();
2769 GenTree* op2 = cmp->gtGetOp2();
2771 // TODO-CQ: right now the below peep is inexpensive and gets the benefit in most
2772 // cases because op1, op2 and cmp are usually in that order in
2773 // execution. In general we should be able to check that all the nodes that come
2774 // after op1 do not modify the flags so that it is safe to avoid generating a
2775 // test instruction.
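// For example (sketch), GT_EQ(GT_ADD(a, b), 0) feeding a JTRUE becomes an ADD marked with
// GTF_SET_FLAGS plus a JCC consuming those flags; the constant 0 and the explicit compare are
// removed below.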
2777 if (op2->IsIntegralConst(0) && (op1->gtNext == op2) && (op2->gtNext == cmp) &&
2778 #ifdef _TARGET_XARCH_
2779 op1->OperIs(GT_AND, GT_OR, GT_XOR, GT_ADD, GT_SUB, GT_NEG))
2780 #else // _TARGET_ARM64_
2781 op1->OperIs(GT_AND, GT_ADD, GT_SUB))
2784 op1->gtFlags |= GTF_SET_FLAGS;
2785 op1->SetUnusedValue();
2787 BlockRange().Remove(op2);
2789 GenTree* next = cmp->gtNext;
2794 // Fast check for the common case - relop used by a JTRUE that immediately follows it.
2795 if ((next != nullptr) && next->OperIs(GT_JTRUE) && (next->gtGetOp1() == cmp))
2800 BlockRange().Remove(cmp);
2802 else if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE))
2807 BlockRange().Remove(cmp);
2809 else // The relop is not used by a JTRUE or it is not used at all.
2811 // Transform the relop node into a SETCC. If it's not used we could remove
2812 // it completely but that means doing more work to handle a rare case.
2817 GenCondition condition = GenCondition::FromIntegralRelop(cmp);
2818 cc->ChangeOper(ccOp);
2819 cc->AsCC()->gtCondition = condition;
2820 cc->gtFlags |= GTF_USE_FLAGS;
2825 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
2830 //------------------------------------------------------------------------
2831 // Lowering::LowerCompare: Lowers a compare node.
2834 // cmp - the compare node
2837 // The next node to lower.
2839 GenTree* Lowering::LowerCompare(GenTree* cmp)
2841 #ifndef _TARGET_64BIT_
2842 if (cmp->gtGetOp1()->TypeGet() == TYP_LONG)
2844 return DecomposeLongCompare(cmp);
2848 if (cmp->gtGetOp2()->IsIntegralConst() && !comp->opts.MinOpts())
2850 GenTree* next = OptimizeConstCompare(cmp);
2852 // If OptimizeConstCompare returns the compare node as "next" then we need to continue lowering.
2859 #ifdef _TARGET_XARCH_
2860 if (cmp->gtGetOp1()->TypeGet() == cmp->gtGetOp2()->TypeGet())
2862 if (varTypeIsSmall(cmp->gtGetOp1()->TypeGet()) && varTypeIsUnsigned(cmp->gtGetOp1()->TypeGet()))
2865 // If both operands have the same type then codegen will use the common operand type to
2866 // determine the instruction type. For small types this would result in performing a
2867 // signed comparison of two small unsigned values without zero extending them to TYP_INT
2868 // which is incorrect. Note that making the comparison unsigned doesn't imply that codegen
2869 // has to generate a small comparison, it can still correctly generate a TYP_INT comparison.
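// For example, comparing two TYP_UBYTE values 200 and 1 with a signed byte compare would treat
// 200 as -56 and produce the wrong result; marking the compare unsigned avoids that without
// forcing a small-sized comparison.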
2872 cmp->gtFlags |= GTF_UNSIGNED;
2875 #endif // _TARGET_XARCH_
2876 ContainCheckCompare(cmp->AsOp());
2880 //------------------------------------------------------------------------
2881 // Lowering::LowerJTrue: Lowers a JTRUE node.
2884 // jtrue - the JTRUE node
2887 // The next node to lower (usually nullptr).
2890 // On ARM64 this may remove the JTRUE node and transform its associated
2891 // relop into a JCMP node.
2893 GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue)
2895 #ifdef _TARGET_ARM64_
2896 GenTree* relop = jtrue->gtGetOp1();
2897 GenTree* relopOp2 = relop->gtOp.gtGetOp2();
2899 if ((relop->gtNext == jtrue) && relopOp2->IsCnsIntOrI())
2901 bool useJCMP = false;
2904 if (relop->OperIs(GT_EQ, GT_NE) && relopOp2->IsIntegralConst(0))
2906 // Codegen will use cbz or cbnz, which do not affect the flag register
2907 flags = relop->OperIs(GT_EQ) ? GTF_JCMP_EQ : 0;
2910 else if (relop->OperIs(GT_TEST_EQ, GT_TEST_NE) && isPow2(relopOp2->AsIntCon()->IconValue()))
2912 // Codegen will use tbz or tbnz, which do not affect the flag register
2913 flags = GTF_JCMP_TST | (relop->OperIs(GT_TEST_EQ) ? GTF_JCMP_EQ : 0);
2919 relop->SetOper(GT_JCMP);
2920 relop->gtFlags &= ~(GTF_JCMP_TST | GTF_JCMP_EQ);
2921 relop->gtFlags |= flags;
2922 relop->gtType = TYP_VOID;
2924 relopOp2->SetContained();
2926 BlockRange().Remove(jtrue);
2928 assert(relop->gtNext == nullptr);
2932 #endif // _TARGET_ARM64_
2934 ContainCheckJTrue(jtrue);
2936 assert(jtrue->gtNext == nullptr);
2940 // Lower "jmp <method>" tail call to insert PInvoke method epilog if required.
2941 void Lowering::LowerJmpMethod(GenTree* jmp)
2943 assert(jmp->OperGet() == GT_JMP);
2945 JITDUMP("lowering GT_JMP\n");
2947 JITDUMP("============");
2949 // If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
2950 // a method returns.
2951 if (comp->info.compCallUnmanaged)
2953 InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(jmp));
2957 // Lower GT_RETURN node to insert PInvoke method epilog if required.
2958 void Lowering::LowerRet(GenTree* ret)
2960 assert(ret->OperGet() == GT_RETURN);
2962 JITDUMP("lowering GT_RETURN\n");
2964 JITDUMP("============");
2966 #if defined(_TARGET_AMD64_) && defined(FEATURE_SIMD)
2967 GenTreeUnOp* const unOp = ret->AsUnOp();
2968 if ((unOp->TypeGet() == TYP_LONG) && (unOp->gtOp1->TypeGet() == TYP_SIMD8))
2970 GenTreeUnOp* bitcast = new (comp, GT_BITCAST) GenTreeOp(GT_BITCAST, TYP_LONG, unOp->gtOp1, nullptr);
2971 unOp->gtOp1 = bitcast;
2972 BlockRange().InsertBefore(unOp, bitcast);
2974 #endif // _TARGET_AMD64_
2976 // Method doing PInvokes has exactly one return block unless it has tail calls.
2977 if (comp->info.compCallUnmanaged && (comp->compCurBB == comp->genReturnBB))
2979 InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(ret));
2981 ContainCheckRet(ret->AsOp());
2984 GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
2986 noway_assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_HELPER);
2988 // Don't support tail calling helper methods.
2989 // But we might encounter tail calls dispatched via the JIT helper, which appear as a tail call to a helper.
2990 noway_assert(!call->IsTailCall() || call->IsTailCallViaHelper() || call->gtCallType == CT_USER_FUNC);
2992 // Non-virtual direct/indirect calls: Work out if the address of the
2993 // call is known at JIT time. If not it is either an indirect call
2994 // or the address must be accessed via a single/double indirection.
2997 InfoAccessType accessType;
2998 CorInfoHelpFunc helperNum = comp->eeGetHelperNum(call->gtCallMethHnd);
3000 #ifdef FEATURE_READYTORUN_COMPILER
3001 if (call->gtEntryPoint.addr != nullptr)
3003 accessType = call->gtEntryPoint.accessType;
3004 addr = call->gtEntryPoint.addr;
3008 if (call->gtCallType == CT_HELPER)
3010 noway_assert(helperNum != CORINFO_HELP_UNDEF);
3012 // the convention on getHelperFtn seems to be (it's not documented)
3013 // that it returns an address or, if it returns null, pAddr is set to
3014 // another address, which requires an indirection
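// In other words (as handled below): either addr != nullptr and pAddr stays null (IAT_VALUE), or
// addr == nullptr and pAddr holds the address of a cell to indirect through (IAT_PVALUE).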
3016 addr = comp->info.compCompHnd->getHelperFtn(helperNum, (void**)&pAddr);
3018 if (addr != nullptr)
3020 assert(pAddr == nullptr);
3021 accessType = IAT_VALUE;
3025 accessType = IAT_PVALUE;
3031 noway_assert(helperNum == CORINFO_HELP_UNDEF);
3033 CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
3035 if (call->IsSameThis())
3037 aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
3040 if (!call->NeedsNullCheck())
3042 aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
3045 CORINFO_CONST_LOOKUP addrInfo;
3046 comp->info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo, aflags);
3048 accessType = addrInfo.accessType;
3049 addr = addrInfo.addr;
3052 GenTree* result = nullptr;
3056 // Non-virtual direct call to known address
3057 if (!IsCallTargetInRange(addr) || call->IsTailCall())
3059 result = AddrGen(addr);
3063 // a direct call within range of hardware relative call instruction
3064 // stash the address for codegen
3065 call->gtDirectCallAddress = addr;
3071 // Non-virtual direct calls to addresses accessed by
3072 // a single indirection.
3073 GenTree* cellAddr = AddrGen(addr);
3074 GenTree* indir = Ind(cellAddr);
3080 // Non-virtual direct calls to addresses accessed by
3081 // a double indirection.
3083 // Double-indirection. Load the address into a register
3084 // and call indirectly through the register
3085 noway_assert(helperNum == CORINFO_HELP_UNDEF);
3086 result = AddrGen(addr);
3087 result = Ind(Ind(result));
3092 // Non-virtual direct calls to addresses accessed by
3093 // a single relative indirection.
3094 GenTree* cellAddr = AddrGen(addr);
3095 GenTree* indir = Ind(cellAddr);
3096 result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, indir, AddrGen(addr));
3101 noway_assert(!"Bad accessType");
3108 GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call)
3110 noway_assert(call->gtCallType == CT_USER_FUNC);
3112 assert((comp->info.compCompHnd->getMethodAttribs(call->gtCallMethHnd) &
3113 (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)) == (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL));
3115 GenTree* thisArgNode;
3116 if (call->IsTailCallViaHelper())
3118 #ifdef _TARGET_X86_ // x86 tailcall via helper follows normal calling convention, but with extra stack args.
3119 const unsigned argNum = 0;
3120 #else // !_TARGET_X86_
3121 // In case of helper dispatched tail calls, "thisptr" will be the third arg.
3122 // The first two args are: real call target and addr of args copy routine.
3123 const unsigned argNum = 2;
3124 #endif // !_TARGET_X86_
3126 fgArgTabEntry* thisArgTabEntry = comp->gtArgEntryByArgNum(call, argNum);
3127 thisArgNode = thisArgTabEntry->node;
3131 thisArgNode = comp->gtGetThisArg(call);
3134 assert(thisArgNode->gtOper == GT_PUTARG_REG);
3135 GenTree* originalThisExpr = thisArgNode->gtOp.gtOp1;
3136 GenTree* thisExpr = originalThisExpr;
3138 // We're going to use the 'this' expression multiple times, so make a local to copy it.
3143 if (call->IsTailCallViaHelper() && originalThisExpr->IsLocal())
3145 // For ordering purposes for the special tailcall arguments on x86, we forced the
3146 // 'this' pointer in this case to a local in Compiler::fgMorphTailCall().
3147 // We could possibly use this case to remove copies for all architectures and non-tailcall
3148 // calls by creating a new lcl var or lcl field reference, as is done in the
3149 // LowerVirtualVtableCall() code.
3150 assert(originalThisExpr->OperGet() == GT_LCL_VAR);
3151 lclNum = originalThisExpr->AsLclVarCommon()->GetLclNum();
3154 #endif // _TARGET_X86_
3156 unsigned delegateInvokeTmp = comp->lvaGrabTemp(true DEBUGARG("delegate invoke call"));
3158 LIR::Use thisExprUse(BlockRange(), &thisArgNode->gtOp.gtOp1, thisArgNode);
3159 ReplaceWithLclVar(thisExprUse, delegateInvokeTmp);
3161 thisExpr = thisExprUse.Def(); // it's changed; reload it.
3162 lclNum = delegateInvokeTmp;
3165 // replace original expression feeding into thisPtr with
3166 // [originalThis + offsetOfDelegateInstance]
3168 GenTree* newThisAddr = new (comp, GT_LEA)
3169 GenTreeAddrMode(TYP_BYREF, thisExpr, nullptr, 0, comp->eeGetEEInfo()->offsetOfDelegateInstance);
3171 GenTree* newThis = comp->gtNewOperNode(GT_IND, TYP_REF, newThisAddr);
3173 BlockRange().InsertAfter(thisExpr, newThisAddr, newThis);
3175 thisArgNode->gtOp.gtOp1 = newThis;
3176 ContainCheckIndir(newThis->AsIndir());
3178 // the control target is
3179 // [originalThis + firstTgtOffs]
3181 GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(originalThisExpr->TypeGet(), lclNum);
3183 unsigned targetOffs = comp->eeGetEEInfo()->offsetOfDelegateFirstTarget;
3184 GenTree* result = new (comp, GT_LEA) GenTreeAddrMode(TYP_REF, base, nullptr, 0, targetOffs);
3185 GenTree* callTarget = Ind(result);
3187 // don't need to sequence and insert this tree, caller will do it
3192 GenTree* Lowering::LowerIndirectNonvirtCall(GenTreeCall* call)
3195 if (call->gtCallCookie != nullptr)
3197 NYI_X86("Morphing indirect non-virtual call with non-standard args");
3201 // Indirect cookie calls get transformed by fgMorphArgs into an indirect call with non-standard args.
3202 // Hence we should never see this type of call in lower.
3204 noway_assert(call->gtCallCookie == nullptr);
3209 //------------------------------------------------------------------------
3210 // CreateReturnTrapSeq: Create a tree to perform a "return trap", used in PInvoke
3211 // epilogs to invoke a GC under a condition. The return trap checks some global
3212 // location (the runtime tells us where that is and how many indirections to make),
3213 // then, based on the result, conditionally calls a GC helper. We use a special node
3214 // for this because at this time (late in the compilation phases), introducing flow
3215 // is tedious/difficult.
3217 // This is used for PInvoke inlining.
3220 // Code tree to perform the action.
3222 GenTree* Lowering::CreateReturnTrapSeq()
3224 // The GT_RETURNTRAP node expands to this:
3225 // if (g_TrapReturningThreads)
3227 // RareDisablePreemptiveGC();
3230 // The only thing to do here is build up the expression that evaluates 'g_TrapReturningThreads'.
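// The runtime either returns the flag's address directly (one indirection to read it) or, failing
// that, the address of a pointer to the flag (two indirections); both cases are handled below.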
3232 void* pAddrOfCaptureThreadGlobal = nullptr;
3233 LONG* addrOfCaptureThreadGlobal = comp->info.compCompHnd->getAddrOfCaptureThreadGlobal(&pAddrOfCaptureThreadGlobal);
3236 if (addrOfCaptureThreadGlobal != nullptr)
3238 testTree = Ind(AddrGen(addrOfCaptureThreadGlobal));
3242 testTree = Ind(Ind(AddrGen(pAddrOfCaptureThreadGlobal)));
3244 return comp->gtNewOperNode(GT_RETURNTRAP, TYP_INT, testTree);
3247 //------------------------------------------------------------------------
3248 // SetGCState: Create a tree that stores the given constant (0 or 1) into the
3249 // thread's GC state field.
3251 // This is used for PInvoke inlining.
3254 // state - constant (0 or 1) to store into the thread's GC state field.
3257 // Code tree to perform the action.
3259 GenTree* Lowering::SetGCState(int state)
3261 // Thread.offsetOfGcState = 0/1
3263 assert(state == 0 || state == 1);
3265 const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
3267 GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(TYP_I_IMPL, comp->info.compLvFrameListRoot);
3269 GenTree* stateNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_BYTE, state);
3270 GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, base, nullptr, 1, pInfo->offsetOfGCState);
3271 GenTree* storeGcState = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_BYTE, addr, stateNode);
3272 return storeGcState;
3275 //------------------------------------------------------------------------
3276 // CreateFrameLinkUpdate: Create a tree that either links or unlinks the
3277 // locally-allocated InlinedCallFrame from the Frame list.
3279 // This is used for PInvoke inlining.
3282 // action - whether to link (push) or unlink (pop) the Frame
3285 // Code tree to perform the action.
3287 GenTree* Lowering::CreateFrameLinkUpdate(FrameLinkAction action)
3289 const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
3290 const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo;
3292 GenTree* TCB = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot);
3295 GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, TCB, nullptr, 1, pInfo->offsetOfThreadFrame);
3297 GenTree* data = nullptr;
3299 if (action == PushFrame)
3301 // Thread->m_pFrame = &inlinedCallFrame;
3302 data = new (comp, GT_LCL_FLD_ADDR)
3303 GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr);
3307 assert(action == PopFrame);
3308 // Thread->m_pFrame = inlinedCallFrame.m_pNext;
3310 data = new (comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar,
3311 pInfo->inlinedCallFrameInfo.offsetOfFrameLink);
3313 GenTree* storeInd = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_I_IMPL, addr, data);
3317 //------------------------------------------------------------------------
3318 // InsertPInvokeMethodProlog: Create the code that runs at the start of
3319 // every method that has PInvoke calls.
3321 // Initialize the TCB local and the InlinedCallFrame object. Then link ("push")
3322 // the InlinedCallFrame object on the Frame chain. The layout of InlinedCallFrame
3323 // is defined in vm/frames.h. See also vm/jitinterface.cpp for more information.
3324 // The offsets of these fields are returned by the VM in a call to ICorStaticInfo::getEEInfo().
3326 // The (current) layout is as follows:
3328 // 64-bit 32-bit CORINFO_EE_INFO
3329 // offset offset field name offset when set
3330 // -----------------------------------------------------------------------------------------
3331 // +00h +00h GS cookie offsetOfGSCookie
3332 // +08h +04h vptr for class InlinedCallFrame offsetOfFrameVptr method prolog
3333 // +10h +08h m_Next offsetOfFrameLink method prolog
3334 // +18h +0Ch m_Datum offsetOfCallTarget call site
3335 // +20h n/a m_StubSecretArg not set by JIT
3336 // +28h +10h m_pCallSiteSP offsetOfCallSiteSP x86: call site, and zeroed in method
3338 // non-x86: method prolog (SP remains
3339 // constant in function, after prolog: no
3340 // localloc and PInvoke in same function)
3341 // +30h +14h m_pCallerReturnAddress offsetOfReturnAddress call site
3342 // +38h +18h m_pCalleeSavedFP offsetOfCalleeSavedFP not set by JIT
3343 // +1Ch JIT retval spill area (int) before call_gc ???
3344 // +20h JIT retval spill area (long) before call_gc ???
3345 // +24h Saved value of EBP method prolog ???
3347 // Note that in the VM, InlinedCallFrame is a C++ class whose objects have a 'this' pointer that points
3348 // to the InlinedCallFrame vptr (the 2nd field listed above), and the GS cookie is stored *before*
3349 // the object. When we link the InlinedCallFrame onto the Frame chain, we must point at this location,
3350 // and not at the beginning of the InlinedCallFrame local, which is actually the GS cookie.
3355 void Lowering::InsertPInvokeMethodProlog()
3357 noway_assert(comp->info.compCallUnmanaged);
3358 noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
3360 if (comp->opts.ShouldUsePInvokeHelpers())
3365 JITDUMP("======= Inserting PInvoke method prolog\n");
3367 // The first BB must be a scratch BB in order for us to be able to safely insert the P/Invoke prolog.
3368 assert(comp->fgFirstBBisScratch());
3370 LIR::Range& firstBlockRange = LIR::AsRange(comp->fgFirstBB);
3372 const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
3373 const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo;
3375 // First arg: &compiler->lvaInlinedPInvokeFrameVar + callFrameInfo.offsetOfFrameVptr
3377 GenTree* frameAddr = new (comp, GT_LCL_FLD_ADDR)
3378 GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr);
3380 // Call runtime helper to fill in our InlinedCallFrame and push it on the Frame list:
3381 // TCB = CORINFO_HELP_INIT_PINVOKE_FRAME(&symFrameStart, secretArg);
3382 // for x86, don't pass the secretArg.
3383 CLANG_FORMAT_COMMENT_ANCHOR;
3385 #if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
3386 GenTreeArgList* argList = comp->gtNewArgList(frameAddr);
3388 GenTreeArgList* argList = comp->gtNewArgList(frameAddr, PhysReg(REG_SECRET_STUB_PARAM));
3391 GenTree* call = comp->gtNewHelperCallNode(CORINFO_HELP_INIT_PINVOKE_FRAME, TYP_I_IMPL, argList);
3393 // some sanity checks on the frame list root vardsc
3394 LclVarDsc* varDsc = &comp->lvaTable[comp->info.compLvFrameListRoot];
3395 noway_assert(!varDsc->lvIsParam);
3396 noway_assert(varDsc->lvType == TYP_I_IMPL);
3399 new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot);
3400 store->gtOp.gtOp1 = call;
3401 store->gtFlags |= GTF_VAR_DEF;
3403 GenTree* const insertionPoint = firstBlockRange.FirstNonPhiOrCatchArgNode();
3405 comp->fgMorphTree(store);
3406 firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, store));
3407 DISPTREERANGE(firstBlockRange, store);
3409 #if !defined(_TARGET_X86_) && !defined(_TARGET_ARM_)
3410 // For x86, this step is done at the call site (due to stack pointer not being static in the function).
3411 // For arm32, CallSiteSP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.
3413 // --------------------------------------------------------
3414 // InlinedCallFrame.m_pCallSiteSP = @RSP;
3416 GenTreeLclFld* storeSP = new (comp, GT_STORE_LCL_FLD)
3417 GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
3418 storeSP->gtOp1 = PhysReg(REG_SPBASE);
3419 storeSP->gtFlags |= GTF_VAR_DEF;
3421 firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeSP));
3422 DISPTREERANGE(firstBlockRange, storeSP);
3424 #endif // !defined(_TARGET_X86_) && !defined(_TARGET_ARM_)
3426 #if !defined(_TARGET_ARM_)
3427 // For arm32, CalleeSavedFP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.
3429 // --------------------------------------------------------
3430 // InlinedCallFrame.m_pCalleeSavedEBP = @RBP;
3432 GenTreeLclFld* storeFP =
3433 new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
3434 callFrameInfo.offsetOfCalleeSavedFP);
3435 storeFP->gtOp1 = PhysReg(REG_FPBASE);
3436 storeFP->gtFlags |= GTF_VAR_DEF;
3438 firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeFP));
3439 DISPTREERANGE(firstBlockRange, storeFP);
3440 #endif // !defined(_TARGET_ARM_)
3442 // --------------------------------------------------------
3443 // On 32-bit targets, CORINFO_HELP_INIT_PINVOKE_FRAME initializes the PInvoke frame and then pushes it onto
3444 // the current thread's Frame stack. On 64-bit targets, it only initializes the PInvoke frame.
3445 CLANG_FORMAT_COMMENT_ANCHOR;
3447 #ifdef _TARGET_64BIT_
3448 if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
3450 // Push a frame - if we are NOT in an IL stub, this is done right before the call
3451 // The init routine sets InlinedCallFrame's m_pNext, so we just set the thread's top-of-stack
3452 GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame);
3453 firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
3454 ContainCheckStoreIndir(frameUpd->AsIndir());
3455 DISPTREERANGE(firstBlockRange, frameUpd);
3457 #endif // _TARGET_64BIT_
3460 //------------------------------------------------------------------------
3461 // InsertPInvokeMethodEpilog: Code that needs to be run when exiting any method
3462 // that has PInvoke inlines. This needs to be inserted any place you can exit the
3463 // function: returns, tailcalls and jmps.
3466 // returnBB - basic block from which a method can return
3467 // lastExpr - GenTree of the last top-level statement of returnBB (debug only arg)
3470 // Code tree to perform the action.
3472 void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr))
3474 assert(returnBB != nullptr);
3475 assert(comp->info.compCallUnmanaged);
3477 if (comp->opts.ShouldUsePInvokeHelpers())
3482 JITDUMP("======= Inserting PInvoke method epilog\n");
3484 // Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls.
3485 assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) ||
3486 returnBB->endsWithTailCallOrJmp(comp));
3488 LIR::Range& returnBlockRange = LIR::AsRange(returnBB);
3490 GenTree* insertionPoint = returnBlockRange.LastNode();
3491 assert(insertionPoint == lastExpr);
3493 // Note: PInvoke Method Epilog (PME) needs to be inserted just before GT_RETURN, GT_JMP or GT_CALL node in execution
3494 // order so that it is guaranteed that there will be no further PInvokes after that point in the method.
3496 // Example1: GT_RETURN(op1) - say execution order is: Op1, GT_RETURN. After inserting PME, execution order would be
3497 // Op1, PME, GT_RETURN
3499 // Example2: GT_CALL(arg side effect computing nodes, Stk Args Setup, Reg Args setup). The execution order would be
3500 // arg side effect computing nodes, Stk Args setup, Reg Args setup, GT_CALL
3501 // After inserting PME execution order would be:
3502 // arg side effect computing nodes, Stk Args setup, Reg Args setup, PME, GT_CALL
3504 // Example3: GT_JMP. After inserting PME execution order would be: PME, GT_JMP
3505 // That is, after the PME, the args for the GT_JMP call will be set up.
3507 // TODO-Cleanup: setting GCState to 1 seems to be redundant as InsertPInvokeCallProlog will set it to zero before a
3508 // PInvoke call and InsertPInvokeCallEpilog() will set it back to 1 after the PInvoke. Though this is redundant, it is harmless.
3510 // Note that liveness is artificially extending the life of compLvFrameListRoot var if the method being compiled has
3511 // PInvokes. Deleting the store below would cause an assert in lsra.cpp::SetLastUses() since compLvFrameListRoot
3512 // will be live-in to a BBJ_RETURN block without any uses. Long term we need to fix liveness for x64 case to
3513 // properly extend the life of compLvFrameListRoot var.
3515 // Thread.offsetOfGcState = 0/1
3516 // That is [tcb + offsetOfGcState] = 1
3517 GenTree* storeGCState = SetGCState(1);
3518 returnBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeGCState));
3519 ContainCheckStoreIndir(storeGCState->AsIndir());
3521 // Pop the frame if necessary. This always happens in the epilog on 32-bit targets. For 64-bit targets, we only do
3522 // this in the epilog for IL stubs; for non-IL stubs the frame is popped after every PInvoke call.
3523 CLANG_FORMAT_COMMENT_ANCHOR;
3525 #ifdef _TARGET_64BIT_
3526 if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
3527 #endif // _TARGET_64BIT_
3529 GenTree* frameUpd = CreateFrameLinkUpdate(PopFrame);
3530 returnBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
3531 ContainCheckStoreIndir(frameUpd->AsIndir());
3535 //------------------------------------------------------------------------
3536 // InsertPInvokeCallProlog: Emit the call-site prolog for direct calls to unmanaged code.
3537 // It does all the necessary call-site setup of the InlinedCallFrame.
3540 // call - the call for which we are inserting the PInvoke prolog.
3545 void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
3547 JITDUMP("======= Inserting PInvoke call prolog\n");
3549 GenTree* insertBefore = call;
3550 if (call->gtCallType == CT_INDIRECT)
3553 insertBefore = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode();
3557 const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;
3559 gtCallTypes callType = (gtCallTypes)call->gtCallType;
3561 noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
3563 if (comp->opts.ShouldUsePInvokeHelpers())
3565 // First argument is the address of the frame variable.
3566 GenTree* frameAddr =
3567 new (comp, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar);
3569 // Insert call to CORINFO_HELP_JIT_PINVOKE_BEGIN
3570 GenTree* helperCall =
3571 comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_BEGIN, TYP_VOID, comp->gtNewArgList(frameAddr));
3573 comp->fgMorphTree(helperCall);
3574 BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, helperCall));
3575 LowerNode(helperCall); // helper call is inserted before current node and should be lowered here.
3579 // Emit the following sequence:
3581 // InlinedCallFrame.callTarget = methodHandle // stored in m_Datum
3582 // InlinedCallFrame.m_pCallSiteSP = SP // x86 only
3583 // InlinedCallFrame.m_pCallerReturnAddress = return address
3584 // GT_START_PREEMPTGC
3585 // Thread.gcState = 0
3586 // (non-stub) - update top Frame on TCB // 64-bit targets only
3588 // ----------------------------------------------------------------------------------
3589 // Setup InlinedCallFrame.callSiteTarget (which is how the JIT refers to it).
3590 // The actual field is InlinedCallFrame.m_Datum which has many different uses and meanings.
3592 GenTree* src = nullptr;
3594 if (callType == CT_INDIRECT)
3596 #if !defined(_TARGET_64BIT_)
3597 // On 32-bit targets, indirect calls need the size of the stack args in InlinedCallFrame.m_Datum.
3598 const unsigned numStkArgBytes = call->fgArgInfo->GetNextSlotNum() * TARGET_POINTER_SIZE;
3600 src = comp->gtNewIconNode(numStkArgBytes, TYP_INT);
3602 // On 64-bit targets, indirect calls may need the stub parameter value in InlinedCallFrame.m_Datum.
3603 // If the stub parameter value is not needed, m_Datum will be initialized by the VM.
3604 if (comp->info.compPublishStubParam)
3606 src = comp->gtNewLclvNode(comp->lvaStubArgumentVar, TYP_I_IMPL);
3608 #endif // !defined(_TARGET_64BIT_)
3612 assert(callType == CT_USER_FUNC);
3614 void* pEmbedMethodHandle = nullptr;
3615 CORINFO_METHOD_HANDLE embedMethodHandle =
3616 comp->info.compCompHnd->embedMethodHandle(call->gtCallMethHnd, &pEmbedMethodHandle);
3618 noway_assert((!embedMethodHandle) != (!pEmbedMethodHandle));
3620 if (embedMethodHandle != nullptr)
3622 // InlinedCallFrame.callSiteTarget = methodHandle
3623 src = AddrGen(embedMethodHandle);
3627 // InlinedCallFrame.callSiteTarget = *pEmbedMethodHandle
3628 src = Ind(AddrGen(pEmbedMethodHandle));
3634 // Store into InlinedCallFrame.m_Datum, the offset of which is given by offsetOfCallTarget.
3635 GenTreeLclFld* store =
3636 new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
3637 callFrameInfo.offsetOfCallTarget);
3639 store->gtFlags |= GTF_VAR_DEF;
3641 InsertTreeBeforeAndContainCheck(insertBefore, store);
3646 // ----------------------------------------------------------------------------------
3647 // InlinedCallFrame.m_pCallSiteSP = SP
3649 GenTreeLclFld* storeCallSiteSP = new (comp, GT_STORE_LCL_FLD)
3650 GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
3652 storeCallSiteSP->gtOp1 = PhysReg(REG_SPBASE);
3653 storeCallSiteSP->gtFlags |= GTF_VAR_DEF;
3655 InsertTreeBeforeAndContainCheck(insertBefore, storeCallSiteSP);
3659 // ----------------------------------------------------------------------------------
3660 // InlinedCallFrame.m_pCallerReturnAddress = &label (the address of the instruction immediately following the call)
3662 GenTreeLclFld* storeLab =
3663 new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
3664 callFrameInfo.offsetOfReturnAddress);
3666 storeLab->gtOp1 = new (comp, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL);
3667 storeLab->gtFlags |= GTF_VAR_DEF;
3669 InsertTreeBeforeAndContainCheck(insertBefore, storeLab);
3671 // Push the PInvoke frame if necessary. On 32-bit targets this only happens in the method prolog if a method
3672 // contains PInvokes; on 64-bit targets this is necessary in non-stubs.
3673 CLANG_FORMAT_COMMENT_ANCHOR;
3675 #ifdef _TARGET_64BIT_
3676 if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
3678 // Set the TCB's frame to be the one we just created.
3679 // Note the init routine for the InlinedCallFrame (CORINFO_HELP_INIT_PINVOKE_FRAME)
3680 // has prepended it to the linked list to maintain the stack of Frames.
3682 // Stubs do this once per stub, not once per call.
3683 GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame);
3684 BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, frameUpd));
3685 ContainCheckStoreIndir(frameUpd->AsIndir());
3687 #endif // _TARGET_64BIT_
3689 // IMPORTANT **** This instruction must be the last real instruction ****
3690 // It changes the thread's state to Preemptive mode
3691 // ----------------------------------------------------------------------------------
3692 // [tcb + offsetOfGcState] = 0
3693 GenTree* storeGCState = SetGCState(0);
3694 BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, storeGCState));
3695 ContainCheckStoreIndir(storeGCState->AsIndir());
3697 // Indicate that codegen has switched this thread to preemptive GC.
3698 // This tree node doesn't generate any code, but impacts LSRA and gc reporting.
3699 // This tree node is simple so doesn't require sequencing.
3700 GenTree* preemptiveGCNode = new (comp, GT_START_PREEMPTGC) GenTree(GT_START_PREEMPTGC, TYP_VOID);
3701 BlockRange().InsertBefore(insertBefore, preemptiveGCNode);
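// Illustrative sketch (not JIT code) of what the SetGCState(0)/SetGCState(1) stores emitted above
// and in InsertPInvokeCallEpilog model. The TCB field is referred to in these comments as
// m_fPreemptiveGCDisabled / gcState; the names below are assumptions for illustration only.
//
//     void SetGCState(uint8_t* tcb, size_t offsetOfGcState, uint8_t newState)
//     {
//         // 0 = preemptive mode (GC may run while the thread is in unmanaged code),
//         // 1 = cooperative mode (the thread must reach a safe point before a GC can proceed).
//         tcb[offsetOfGcState] = newState;
//     }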
3704 //------------------------------------------------------------------------
3705 // InsertPInvokeCallEpilog: Insert the code that goes after every inlined pinvoke call.
3708 // call - the call for which we are inserting the PInvoke epilog.
3713 void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call)
3715 JITDUMP("======= Inserting PInvoke call epilog\n");
3717 if (comp->opts.ShouldUsePInvokeHelpers())
3719 noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
3721 // First argument is the address of the frame variable.
3722 GenTree* frameAddr =
3723 new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar);
3724 frameAddr->SetOperRaw(GT_LCL_VAR_ADDR);
3726 // Insert call to CORINFO_HELP_JIT_PINVOKE_END
3727 GenTreeCall* helperCall =
3728 comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_END, TYP_VOID, comp->gtNewArgList(frameAddr));
3730 comp->fgMorphTree(helperCall);
3731 BlockRange().InsertAfter(call, LIR::SeqTree(comp, helperCall));
3732 ContainCheckCallOperands(helperCall);
3737 GenTree* insertionPoint = call->gtNext;
3739 GenTree* tree = SetGCState(1);
3740 BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
3741 ContainCheckStoreIndir(tree->AsIndir());
3743 tree = CreateReturnTrapSeq();
3744 BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
3745 ContainCheckReturnTrap(tree->AsOp());
3747 // Pop the frame if necessary. On 32-bit targets this only happens in the method epilog; on 64-bit targets this
3748 // happens after every PInvoke call in non-stubs. 32-bit targets instead mark the frame as inactive.
3749 CLANG_FORMAT_COMMENT_ANCHOR;
3751 #ifdef _TARGET_64BIT_
3752 if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
3754 tree = CreateFrameLinkUpdate(PopFrame);
3755 BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
3756 ContainCheckStoreIndir(tree->AsIndir());
3759 const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;
3761 // ----------------------------------------------------------------------------------
3762 // InlinedCallFrame.m_pCallerReturnAddress = nullptr
3764 GenTreeLclFld* const storeCallSiteTracker =
3765 new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
3766 callFrameInfo.offsetOfReturnAddress);
3768 GenTreeIntCon* const constantZero = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);
3770 storeCallSiteTracker->gtOp1 = constantZero;
3771 storeCallSiteTracker->gtFlags |= GTF_VAR_DEF;
3773 BlockRange().InsertBefore(insertionPoint, constantZero, storeCallSiteTracker);
3774 ContainCheckStoreLoc(storeCallSiteTracker);
3775 #endif // _TARGET_64BIT_
3778 //------------------------------------------------------------------------
3779 // LowerNonvirtPinvokeCall: Lower a non-virtual / indirect PInvoke call
3782 // call - The call to lower.
3785 // The lowered call tree.
3787 GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
3789 // PInvoke lowering varies depending on the flags passed in by the EE. By default,
3790 // GC transitions are generated inline; if CORJIT_FLAG_USE_PINVOKE_HELPERS is specified,
3791 // GC transitions are instead performed using helper calls. Examples of each case are given
3792 // below. Note that the data structure that is used to store information about a call frame
3793 // containing any P/Invoke calls is initialized in the method prolog (see
3794 // InsertPInvokeMethod{Prolog,Epilog} for details).
3796 // Inline transitions:
3797 // InlinedCallFrame inlinedCallFrame;
3801 // // Set up frame information
3802 // inlinedCallFrame.callTarget = methodHandle; // stored in m_Datum
3803 // inlinedCallFrame.m_pCallSiteSP = SP; // x86 only
3804 // inlinedCallFrame.m_pCallerReturnAddress = &label; (the address of the instruction immediately following the call)
3806 // Thread.m_pFrame = &inlinedCallFrame; (non-IL-stub only)
3808 // // Switch the thread's GC mode to preemptive mode
3809 // thread->m_fPreemptiveGCDisabled = 0;
3811 // // Call the unmanaged method
3814 // // Switch the thread's GC mode back to cooperative mode
3815 // thread->m_fPreemptiveGCDisabled = 1;
3817 // // Rendezvous with a running collection if necessary
3818 // if (g_TrapReturningThreads)
3819 // RareDisablePreemptiveGC();
3821 // Transitions using helpers:
3823 // OpaqueFrame opaqueFrame;
3827 // // Call the JIT_PINVOKE_BEGIN helper
3828 // JIT_PINVOKE_BEGIN(&opaqueFrame);
3830 // // Call the unmanaged method
3833 // // Call the JIT_PINVOKE_END helper
3834 // JIT_PINVOKE_END(&opaqueFrame);
3836 // Note that the JIT_PINVOKE_{BEGIN,END} helpers currently use the default calling convention for the target
3837 // platform. They may be changed in the future such that they preserve all register values.
3839 GenTree* result = nullptr;
3841 // assert we have seen one of these
3842 noway_assert(comp->info.compCallUnmanaged != 0);
3844 // All code generated by this function must not contain the randomly-inserted NOPs
3845 // that we insert to inhibit JIT spraying in partial trust scenarios.
3846 // The PINVOKE_PROLOG op signals this to the code generator/emitter.
3848 GenTree* prolog = new (comp, GT_NOP) GenTree(GT_PINVOKE_PROLOG, TYP_VOID);
3849 BlockRange().InsertBefore(call, prolog);
3851 InsertPInvokeCallProlog(call);
3853 if (call->gtCallType != CT_INDIRECT)
3855 noway_assert(call->gtCallType == CT_USER_FUNC);
3856 CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd;
3858 CORINFO_CONST_LOOKUP lookup;
3859 comp->info.compCompHnd->getAddressOfPInvokeTarget(methHnd, &lookup);
3861 void* addr = lookup.addr;
3862 switch (lookup.accessType)
3865 if (!IsCallTargetInRange(addr))
3867 result = AddrGen(addr);
3871 // a direct call within range of hardware relative call instruction
3872 // stash the address for codegen
3873 call->gtDirectCallAddress = addr;
3874 #ifdef FEATURE_READYTORUN_COMPILER
3875 call->gtEntryPoint.addr = nullptr;
3876 call->gtEntryPoint.accessType = IAT_VALUE;
3882 result = Ind(AddrGen(addr));
3886 result = Ind(Ind(AddrGen(addr)));
3894 InsertPInvokeCallEpilog(call);
3899 // Expand the code necessary to calculate the control target.
3900 // Returns: the expression needed to calculate the control target
3901 // May insert embedded statements
3902 GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
3904 noway_assert(call->gtCallType == CT_USER_FUNC);
3906 // If this is a tail call via helper, thisPtr will be the third argument.
3908 regNumber thisPtrArgReg;
3910 #ifndef _TARGET_X86_ // x86 tailcall via helper follows normal calling convention, but with extra stack args.
3911 if (call->IsTailCallViaHelper())
3914 thisPtrArgReg = REG_ARG_2;
3917 #endif // !_TARGET_X86_
3920 thisPtrArgReg = comp->codeGen->genGetThisArgReg(call);
3923 // get a reference to the thisPtr being passed
3924 fgArgTabEntry* argEntry = comp->gtArgEntryByArgNum(call, thisPtrArgNum);
3925 assert(argEntry->regNum == thisPtrArgReg);
3926 assert(argEntry->node->gtOper == GT_PUTARG_REG);
3927 GenTree* thisPtr = argEntry->node->gtOp.gtOp1;
3929 // If what we are passing as the thisptr is not already a local, make a new local to place it in
3930 // because we will be creating expressions based on it.
3932 if (thisPtr->IsLocal())
3934 lclNum = thisPtr->gtLclVarCommon.gtLclNum;
3938 // Split off the thisPtr and store to a temporary variable.
3939 if (vtableCallTemp == BAD_VAR_NUM)
3941 vtableCallTemp = comp->lvaGrabTemp(true DEBUGARG("virtual vtable call"));
3944 LIR::Use thisPtrUse(BlockRange(), &(argEntry->node->gtOp.gtOp1), argEntry->node);
3945 ReplaceWithLclVar(thisPtrUse, vtableCallTemp);
3947 lclNum = vtableCallTemp;
3950 // Get hold of the vtable offset (note: this might be expensive)
3951 unsigned vtabOffsOfIndirection;
3952 unsigned vtabOffsAfterIndirection;
3954 comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
3955 &vtabOffsAfterIndirection, &isRelative);
3957 // If the thisPtr is a local field, then construct a local field type node
3959 if (thisPtr->isLclField())
3961 local = new (comp, GT_LCL_FLD)
3962 GenTreeLclFld(GT_LCL_FLD, thisPtr->TypeGet(), lclNum, thisPtr->AsLclFld()->gtLclOffs);
3966 local = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, thisPtr->TypeGet(), lclNum);
3969 // pointer to virtual table = [REG_CALL_THIS + offs]
3970 GenTree* result = Ind(Offset(local, VPTR_OFFS));
3972 // Get the appropriate vtable chunk
3973 if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK)
3977 // MethodTable offset is a relative pointer.
3979 // Additional temporary variable is used to store virtual table pointer.
3980 // The address of the method is obtained by the following computations:
3982 // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of
3983 // vtable-1st-level-indirection):
3986 // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection):
3987 // result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]]
3990 // If relative pointers are also in second level indirection, additional temporary is used:
3992 // tmp2 = tmp1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp1 + vtabOffsOfIndirection]
3993 // result = tmp2 + [tmp2]
3995 unsigned lclNumTmp = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp"));
3996 unsigned lclNumTmp2 = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp2"));
3998 GenTree* lclvNodeStore = comp->gtNewTempAssign(lclNumTmp, result);
4000 GenTree* tmpTree = comp->gtNewLclvNode(lclNumTmp, result->TypeGet());
4001 tmpTree = Offset(tmpTree, vtabOffsOfIndirection);
4003 tmpTree = comp->gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree, false);
4004 GenTree* offs = comp->gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_INT);
4005 result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, comp->gtNewLclvNode(lclNumTmp, result->TypeGet()), offs);
4007 GenTree* base = OffsetByIndexWithScale(result, tmpTree, 1);
4008 GenTree* lclvNodeStore2 = comp->gtNewTempAssign(lclNumTmp2, base);
4010 LIR::Range range = LIR::SeqTree(comp, lclvNodeStore);
4011 JITDUMP("result of obtaining pointer to virtual table:\n");
4013 BlockRange().InsertBefore(call, std::move(range));
4015 LIR::Range range2 = LIR::SeqTree(comp, lclvNodeStore2);
4016 JITDUMP("result of obtaining pointer to virtual table 2nd level indirection:\n");
4018 BlockRange().InsertAfter(lclvNodeStore, std::move(range2));
4020 result = Ind(comp->gtNewLclvNode(lclNumTmp2, result->TypeGet()));
4022 result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, result, comp->gtNewLclvNode(lclNumTmp2, result->TypeGet()));
4026 // result = [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection]
4027 result = Ind(Offset(result, vtabOffsOfIndirection));
4032 assert(!isRelative);
4035 // Load the function address
4036 // result = [reg+vtabOffs]
4039 result = Ind(Offset(result, vtabOffsAfterIndirection));
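// Illustrative sketch (not JIT code) of the relative-pointer resolution described in the comments
// above, for the case where both vtable indirection levels store relative offsets. The offsets
// come from getMethodVTableOffset; the names below are for illustration only.
//
//     uintptr_t ResolveRelativeVtableSlot(uintptr_t vtab,              // [this + VPTR_OFFS]
//                                         unsigned  offsOfIndirection,
//                                         unsigned  offsAfterIndirection)
//     {
//         // First level: the slot at vtab + offsOfIndirection stores a relative offset to the chunk.
//         uintptr_t firstSlot  = vtab + offsOfIndirection;
//         uintptr_t secondSlot = firstSlot + offsAfterIndirection + *(intptr_t*)firstSlot;
//         // Second level: the chunk slot stores a relative offset to the method's code, so the
//         // final target is secondSlot + [secondSlot] (i.e. tmp2 + [tmp2] in the comments above).
//         return secondSlot + (uintptr_t)*(intptr_t*)secondSlot;
//     }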
4045 // Lower stub dispatched virtual calls.
4046 GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call)
4048 assert(call->IsVirtualStub());
4050 // An x86 JIT which uses full stub dispatch must generate only
4051 // the following stub dispatch calls:
4053 // (1) isCallRelativeIndirect:
4054 // call dword ptr [rel32] ; FF 15 ---rel32----
4055 // (2) isCallRelative:
4056 // call abc ; E8 ---rel32----
4057 // (3) isCallRegisterIndirect:
4059 // call dword ptr [eax] ; FF 10
4061 // THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN
4062 // vm\i386\cGenCpu.h, esp. isCallRegisterIndirect.
4064 GenTree* result = nullptr;
4066 #ifdef _TARGET_64BIT_
4067 // Non-tail calls: Jump Stubs are not taken into account by VM for mapping an AV into a NullRef
4068 // exception. Therefore, JIT needs to emit an explicit null check. Note that Jit64 too generates
4069 // an explicit null check.
4071 // Tail calls: fgMorphTailCall() materializes the null check explicitly, so there is no need to emit one here.
4074 // Non-64-bit: No need to null check the this pointer - the dispatch code will deal with this.
4075 // The VM considers exceptions that occur in stubs on 64-bit to be not managed exceptions and
4076 // it would be difficult to change this in a way so that it affects only the right stubs.
4078 if (!call->IsTailCallViaHelper())
4080 call->gtFlags |= GTF_CALL_NULLCHECK;
4084 // This is code to set up an indirect call to a stub address computed
4085 // via dictionary lookup.
4086 if (call->gtCallType == CT_INDIRECT)
4088 // The importer decided we needed a stub call via a computed
4089 // stub dispatch address, i.e. an address which came from a dictionary lookup.
4090 // - The dictionary lookup produces an indirected address, suitable for call
4091 // via "call [VirtualStubParam.reg]"
4093 // This combination will only be generated for shared generic code and when
4094 // stub dispatch is active.
4096 // fgMorphArgs will have created trees to pass the address in VirtualStubParam.reg.
4097 // All we have to do here is add an indirection to generate the actual call target.
4099 GenTree* ind = Ind(call->gtCallAddr);
4100 BlockRange().InsertAfter(call->gtCallAddr, ind);
4101 call->gtCallAddr = ind;
4103 ind->gtFlags |= GTF_IND_REQ_ADDR_IN_REG;
4105 ContainCheckIndir(ind->AsIndir());
4109 // Direct stub call.
4110 // Get stub addr. This will return NULL if virtual call stubs are not active
4111 void* stubAddr = call->gtStubCallStubAddr;
4112 noway_assert(stubAddr != nullptr);
4114 // If not CT_INDIRECT, then it should always be relative indir call.
4115 // This is ensured by VM.
4116 noway_assert(call->IsVirtualStubRelativeIndir());
4118 // Direct stub calls, though the stubAddr itself may still need to be
4119 // accessed via an indirection.
4120 GenTree* addr = AddrGen(stubAddr);
4123 // On x86, for tailcall via helper, the JIT_TailCall helper takes the stubAddr as
4124 // the target address, and we set a flag that it's a VSD call. The helper then
4125 // handles any necessary indirection.
4126 if (call->IsTailCallViaHelper())
4130 #endif // _TARGET_X86_
4132 if (result == nullptr)
4138 // TODO-Cleanup: start emitting random NOPS
4142 //------------------------------------------------------------------------
4143 // AddrModeCleanupHelper: Remove the nodes that are no longer used after an
4144 // addressing mode is constructed
4147 // addrMode - A pointer to a new GenTreeAddrMode
4148 // node - The node currently being considered for removal
4154 // 'addrMode' and 'node' must be contained in the current block
4156 void Lowering::AddrModeCleanupHelper(GenTreeAddrMode* addrMode, GenTree* node)
4158 if (node == addrMode->Base() || node == addrMode->Index())
4163 // TODO-LIR: change this to use the LIR mark bit and iterate instead of recursing
4164 node->VisitOperands([this, addrMode](GenTree* operand) -> GenTree::VisitResult {
4165 AddrModeCleanupHelper(addrMode, operand);
4166 return GenTree::VisitResult::Continue;
4169 BlockRange().Remove(node);
4172 //------------------------------------------------------------------------
4173 // Lowering::AreSourcesPossiblyModifiedLocals:
4174 // Given two nodes which will be used in an addressing mode (base,
4175 // index), check to see if they are lclVar reads, and if so, walk
4176 // backwards from the use until both reads have been visited to
4177 // determine if they are potentially modified in that range.
4180 // addr - the node that uses the base and index nodes
4181 // base - the base node
4182 // index - the index node
4184 // Returns: true if either the base or index may be modified between the lclVar reads and their use at 'addr'.
4187 bool Lowering::AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index)
4189 assert(addr != nullptr);
4191 SideEffectSet baseSideEffects;
4192 if (base != nullptr)
4194 if (base->OperIsLocalRead())
4196 baseSideEffects.AddNode(comp, base);
4204 SideEffectSet indexSideEffects;
4205 if (index != nullptr)
4207 if (index->OperIsLocalRead())
4209 indexSideEffects.AddNode(comp, index);
4217 for (GenTree* cursor = addr;; cursor = cursor->gtPrev)
4219 assert(cursor != nullptr);
4226 if (cursor == index)
4231 if ((base == nullptr) && (index == nullptr))
4236 m_scratchSideEffects.Clear();
4237 m_scratchSideEffects.AddNode(comp, cursor);
4238 if ((base != nullptr) && m_scratchSideEffects.InterferesWith(baseSideEffects, false))
4243 if ((index != nullptr) && m_scratchSideEffects.InterferesWith(indexSideEffects, false))
4250 //------------------------------------------------------------------------
4251 // TryCreateAddrMode: recognize trees which can be implemented using an
4252 // addressing mode and transform them to a GT_LEA
4255 // use: the use of the address we want to transform
4256 // isIndir: true if this addressing mode is the child of an indir
4259 // The created LEA node or the original address node if an LEA could not be created.
4262 GenTree* Lowering::TryCreateAddrMode(LIR::Use&& use, bool isIndir)
4264 GenTree* addr = use.Def();
4265 GenTree* base = nullptr;
4266 GenTree* index = nullptr;
4271 // TODO-1stClassStructs: This logic is here to preserve prior behavior. Note that previously
4272 // block ops were not considered for addressing modes, but an add under it may have been.
4273 // This should be replaced with logic that more carefully determines when an addressing mode
4274 // would be beneficial for a block op.
4277 GenTree* indir = use.User();
4278 if (indir->TypeGet() == TYP_STRUCT)
4282 else if (varTypeIsStruct(indir))
4284 // We can have an indirection on the rhs of a block copy (it is the source
4285 // object). This is not a "regular" indirection.
4286 // (Note that the user check could be costly.)
4288 if (BlockRange().TryGetUse(indir, &indirUse) && indirUse.User()->OperIsIndir())
4294 isIndir = !indir->OperIsBlk();
4299 // Find out if an addressing mode can be constructed
4300 bool doAddrMode = comp->codeGen->genCreateAddrMode(addr, // address
4302 &rev, // reverse ops
4304 &index, // index val
4305 #if SCALED_ADDR_MODES
4307 #endif // SCALED_ADDR_MODES
4308 &offset); // displacement
4317 // this is just a reg-const add
4318 if (index == nullptr)
4323 // this is just a reg-reg add
4324 if (scale == 1 && offset == 0)
4330 // make sure there are not any side effects between def of leaves and use
4331 if (!doAddrMode || AreSourcesPossiblyModifiedLocals(addr, base, index))
4333 JITDUMP("No addressing mode:\n ");
4338 JITDUMP("Addressing mode:\n");
4339 JITDUMP(" Base\n ");
4341 if (index != nullptr)
4343 JITDUMP(" + Index * %u + %d\n ", scale, offset);
4348 JITDUMP(" + %d\n", offset);
4351 var_types addrModeType = addr->TypeGet();
4352 if (addrModeType == TYP_REF)
4354 addrModeType = TYP_BYREF;
4357 GenTreeAddrMode* addrMode = new (comp, GT_LEA) GenTreeAddrMode(addrModeType, base, index, scale, offset);
4359 // Neither the base nor the index should now be contained.
4360 if (base != nullptr)
4362 base->ClearContained();
4364 if (index != nullptr)
4366 index->ClearContained();
4368 addrMode->gtFlags |= (addr->gtFlags & GTF_IND_FLAGS);
4369 addrMode->gtFlags &= ~GTF_ALL_EFFECT; // LEAs are side-effect-free.
4371 JITDUMP("New addressing mode node:\n");
4375 BlockRange().InsertAfter(addr, addrMode);
4377 // Now we need to remove all the nodes subsumed by the addrMode
4378 AddrModeCleanupHelper(addrMode, addr);
4380 // Replace the original address node with the addrMode.
4381 use.ReplaceWith(comp, addrMode);
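// Illustrative sketch (not JIT code) of the shape TryCreateAddrMode folds. An address computed as
// a tree of adds/shifts, e.g.
//
//     addr = base + (index << 2) + 16
//
// becomes a single GT_LEA(base, index, scale = 4, offset = 16), which the backend can then fold
// into the memory operand of the load/store. Roughly the source-level equivalent:
//
//     int Load(int* base, int index)
//     {
//         return base[index + 4];   // one addressing mode: [base + index*4 + 16]
//     }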
4386 //------------------------------------------------------------------------
4387 // LowerAdd: turn this add into a GT_LEA if that would be profitable
4390 // node - the node we care about
4393 // The next node to lower if we have transformed the ADD; nullptr otherwise.
4395 GenTree* Lowering::LowerAdd(GenTree* node)
4397 #ifndef _TARGET_ARMARCH_
4398 if (varTypeIsIntegralOrI(node))
4401 if (BlockRange().TryGetUse(node, &use))
4403 // If this is a child of an indir, let the parent handle it.
4404 // If there is a chain of adds, only look at the topmost one.
4405 GenTree* parent = use.User();
4406 if (!parent->OperIsIndir() && (parent->gtOper != GT_ADD))
4408 GenTree* addr = TryCreateAddrMode(std::move(use), false);
4411 return addr->gtNext;
4416 #endif // !_TARGET_ARMARCH_
4421 //------------------------------------------------------------------------
4422 // LowerUnsignedDivOrMod: Lowers a GT_UDIV/GT_UMOD node.
4425 // divMod - pointer to the GT_UDIV/GT_UMOD node to be lowered
4428 // Returns a boolean indicating whether the node was transformed.
4431 // - Transform UDIV/UMOD by power of 2 into RSZ/AND
4432 // - Transform UDIV by constant >= 2^(N-1) into GE
4433 // - Transform UDIV/UMOD by constant >= 3 into "magic division"
4436 bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod)
4438 assert(divMod->OperIs(GT_UDIV, GT_UMOD));
4440 #if defined(USE_HELPERS_FOR_INT_DIV)
4441 if (!varTypeIsIntegral(divMod->TypeGet()))
4443 assert(!"unreachable: integral GT_UDIV/GT_UMOD should get morphed into helper calls");
4445 assert(varTypeIsFloating(divMod->TypeGet()));
4446 #endif // USE_HELPERS_FOR_INT_DIV
4447 #if defined(_TARGET_ARM64_)
4448 assert(divMod->OperGet() != GT_UMOD);
4449 #endif // _TARGET_ARM64_
4451 GenTree* dividend = divMod->gtGetOp1();
4452 GenTree* divisor = divMod->gtGetOp2();
4454 #if !defined(_TARGET_64BIT_)
4455 if (dividend->OperIs(GT_LONG))
4461 if (!divisor->IsCnsIntOrI())
4466 if (dividend->IsCnsIntOrI())
4468 // We shouldn't see a divmod with constant operands here but if we do then it's likely
4469 // because optimizations are disabled or it's a case that's supposed to throw an exception.
4470 // Don't optimize this.
4474 const var_types type = divMod->TypeGet();
4475 assert((type == TYP_INT) || (type == TYP_I_IMPL));
4477 size_t divisorValue = static_cast<size_t>(divisor->AsIntCon()->IconValue());
4479 if (type == TYP_INT)
4481 // Clear up the upper 32 bits of the value, they may be set to 1 because constants
4482 // are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets.
4483 divisorValue &= UINT32_MAX;
4486 if (divisorValue == 0)
4491 const bool isDiv = divMod->OperIs(GT_UDIV);
4493 if (isPow2(divisorValue))
4500 divisorValue = genLog2(divisorValue);
4508 divMod->SetOper(newOper);
4509 divisor->gtIntCon.SetIconValue(divisorValue);
4510 ContainCheckNode(divMod);
4515 // If the divisor is greater than or equal to 2^(N - 1) then the result is 1
4516 // iff the dividend is greater than or equal to the divisor.
4517 if (((type == TYP_INT) && (divisorValue > (UINT32_MAX / 2))) ||
4518 ((type == TYP_LONG) && (divisorValue > (UINT64_MAX / 2))))
4520 divMod->SetOper(GT_GE);
4521 divMod->gtFlags |= GTF_UNSIGNED;
4522 ContainCheckNode(divMod);
4527 // TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32
4528 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
4529 if (!comp->opts.MinOpts() && (divisorValue >= 3))
4535 if (type == TYP_INT)
4537 magic = MagicDivide::GetUnsigned32Magic(static_cast<uint32_t>(divisorValue), &add, &shift);
4541 #ifdef _TARGET_64BIT_
4542 magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &add, &shift);
4548 // Depending on the "add" flag returned by GetUnsignedMagicNumberForDivide we need to generate:
4549 // add == false (when divisor == 3 for example):
4550 // div = (dividend MULHI magic) RSZ shift
4551 // add == true (when divisor == 7 for example):
4552 // mulhi = dividend MULHI magic
4553 // div = (((dividend SUB mulhi) RSZ 1) ADD mulhi)) RSZ (shift - 1)
4554 const bool requiresAdjustment = add;
4555 const bool requiresDividendMultiuse = requiresAdjustment || !isDiv;
4556 const unsigned curBBWeight = m_block->getBBWeight(comp);
4558 if (requiresDividendMultiuse)
4560 LIR::Use dividendUse(BlockRange(), &divMod->gtOp1, divMod);
4561 dividend = ReplaceWithLclVar(dividendUse);
4564 // Insert a new GT_MULHI node before the existing GT_UDIV/GT_UMOD node.
4565 // The existing node will later be transformed into a GT_RSZ/GT_SUB that
4566 // computes the final result. This way we don't need to find and change the use
4567 // of the existing node.
4568 GenTree* mulhi = comp->gtNewOperNode(GT_MULHI, type, dividend, divisor);
4569 mulhi->gtFlags |= GTF_UNSIGNED;
4570 divisor->AsIntCon()->SetIconValue(magic);
4571 BlockRange().InsertBefore(divMod, mulhi);
4572 GenTree* firstNode = mulhi;
4574 if (requiresAdjustment)
4576 dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
4577 GenTree* sub = comp->gtNewOperNode(GT_SUB, type, dividend, mulhi);
4578 BlockRange().InsertBefore(divMod, dividend, sub);
4580 GenTree* one = comp->gtNewIconNode(1, TYP_INT);
4581 GenTree* rsz = comp->gtNewOperNode(GT_RSZ, type, sub, one);
4582 BlockRange().InsertBefore(divMod, one, rsz);
4584 LIR::Use mulhiUse(BlockRange(), &sub->gtOp.gtOp2, sub);
4585 mulhi = ReplaceWithLclVar(mulhiUse);
4587 mulhi = comp->gtNewLclvNode(mulhi->AsLclVar()->GetLclNum(), mulhi->TypeGet());
4588 GenTree* add = comp->gtNewOperNode(GT_ADD, type, rsz, mulhi);
4589 BlockRange().InsertBefore(divMod, mulhi, add);
4595 GenTree* shiftBy = comp->gtNewIconNode(shift, TYP_INT);
4596 BlockRange().InsertBefore(divMod, shiftBy);
4600 divMod->SetOper(GT_RSZ);
4601 divMod->gtOp1 = mulhi;
4602 divMod->gtOp2 = shiftBy;
4606 GenTree* div = comp->gtNewOperNode(GT_RSZ, type, mulhi, shiftBy);
4608 // dividend UMOD divisor = dividend SUB (div MUL divisor)
4609 GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
4610 GenTree* mul = comp->gtNewOperNode(GT_MUL, type, div, divisor);
4611 dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
4613 divMod->SetOper(GT_SUB);
4614 divMod->gtOp1 = dividend;
4615 divMod->gtOp2 = mul;
4617 BlockRange().InsertBefore(divMod, div, divisor, mul, dividend);
4619 ContainCheckRange(firstNode, divMod);
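// Illustrative sketches (not JIT code) of the unsigned transforms performed above. The constants
// shown for the magic-division case are the well-known values for a divisor of 3 and merely stand
// in for whatever MagicDivide::GetUnsigned32Magic returns.
//
//     // Power of two: UDIV becomes a logical right shift, UMOD becomes a mask.
//     uint32_t UDiv8(uint32_t x) { return x >> 3; }   // x / 8
//     uint32_t UMod8(uint32_t x) { return x & 7; }    // x % 8
//
//     // Divisor >= 2^31: the quotient can only be 0 or 1, so UDIV becomes an unsigned compare.
//     uint32_t UDivBig(uint32_t x) { return (x >= 0xF0000000u) ? 1 : 0; }   // x / 0xF0000000
//
//     // "Magic division" without the add adjustment (e.g. divisor == 3): multiply by a
//     // precomputed reciprocal and keep the high bits (MULHI followed by RSZ).
//     uint32_t UDiv3(uint32_t x) { return (uint32_t)(((uint64_t)x * 0xAAAAAAABu) >> 33); }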
4627 // LowerConstIntDivOrMod: Transform integer GT_DIV/GT_MOD nodes with a constant divisor
4628 // (power of 2 or otherwise) into equivalent but faster sequences.
4631 // node - pointer to the DIV or MOD node
4634 // nullptr if no transformation is done, or the next node in the transformed node sequence that
4635 // needs to be lowered.
4637 GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node)
4639 assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));
4640 GenTree* divMod = node;
4641 GenTree* dividend = divMod->gtGetOp1();
4642 GenTree* divisor = divMod->gtGetOp2();
4644 const var_types type = divMod->TypeGet();
4645 assert((type == TYP_INT) || (type == TYP_LONG));
4647 #if defined(USE_HELPERS_FOR_INT_DIV)
4648 assert(!"unreachable: integral GT_DIV/GT_MOD should get morphed into helper calls");
4649 #endif // USE_HELPERS_FOR_INT_DIV
4650 #if defined(_TARGET_ARM64_)
4651 assert(node->OperGet() != GT_MOD);
4652 #endif // _TARGET_ARM64_
4654 if (!divisor->IsCnsIntOrI())
4656 return nullptr; // no transformations to make
4659 if (dividend->IsCnsIntOrI())
4661 // We shouldn't see a divmod with constant operands here but if we do then it's likely
4662 // because optimizations are disabled or it's a case that's supposed to throw an exception.
4663 // Don't optimize this.
4667 ssize_t divisorValue = divisor->gtIntCon.IconValue();
4669 if (divisorValue == -1 || divisorValue == 0)
4671 // x / 0 and x % 0 can't be optimized because they are required to throw an exception.
4673 // x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.
4675 // x % -1 is always 0 and the IL spec says that the rem instruction "can" throw an exception if x is
4676 // the minimum representable integer. However, the C# spec says that an exception "is" thrown in this
4677 // case so optimizing this case would break C# code.
4679 // A runtime check could be used to handle this case but it's probably too rare to matter.
4683 bool isDiv = divMod->OperGet() == GT_DIV;
4687 if ((type == TYP_INT && divisorValue == INT_MIN) || (type == TYP_LONG && divisorValue == INT64_MIN))
4689 // If the divisor is the minimum representable integer value then we can use a compare,
4690 // the result is 1 iff the dividend equals divisor.
4691 divMod->SetOper(GT_EQ);
4696 size_t absDivisorValue =
4697 (divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue));
4699 if (!isPow2(absDivisorValue))
4701 if (comp->opts.MinOpts())
4706 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
4710 if (type == TYP_INT)
4712 magic = MagicDivide::GetSigned32Magic(static_cast<int32_t>(divisorValue), &shift);
4716 #ifdef _TARGET_64BIT_
4717 magic = MagicDivide::GetSigned64Magic(static_cast<int64_t>(divisorValue), &shift);
4718 #else // !_TARGET_64BIT_
4720 #endif // !_TARGET_64BIT_
4723 divisor->gtIntConCommon.SetIconValue(magic);
4725 // Insert a new GT_MULHI node in front of the existing GT_DIV/GT_MOD node.
4726 // The existing node will later be transformed into a GT_ADD/GT_SUB that
4727 // computes the final result. This way we don't need to find and change the
4728 // use of the existing node.
4729 GenTree* mulhi = comp->gtNewOperNode(GT_MULHI, type, divisor, dividend);
4730 BlockRange().InsertBefore(divMod, mulhi);
4732 // mulhi was the easy part. Now we need to generate different code depending
4733 // on the divisor value:
4735 // div = signbit(mulhi) + mulhi
4737 // div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
4739 // mulhi += dividend ; requires add adjust
4740 // div = signbit(mulhi) + sar(mulhi, 2) ; requires shift adjust
4742 // mulhi -= dividend ; requires sub adjust
4743 // div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
4744 bool requiresAddSubAdjust = signum(divisorValue) != signum(magic);
4745 bool requiresShiftAdjust = shift != 0;
4746 bool requiresDividendMultiuse = requiresAddSubAdjust || !isDiv;
4747 unsigned curBBWeight = comp->compCurBB->getBBWeight(comp);
4749 if (requiresDividendMultiuse)
4751 LIR::Use dividendUse(BlockRange(), &mulhi->gtOp.gtOp2, mulhi);
4752 dividend = ReplaceWithLclVar(dividendUse);
4757 if (requiresAddSubAdjust)
4759 dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
4760 adjusted = comp->gtNewOperNode(divisorValue > 0 ? GT_ADD : GT_SUB, type, mulhi, dividend);
4761 BlockRange().InsertBefore(divMod, dividend, adjusted);
4768 GenTree* shiftBy = comp->gtNewIconNode(genTypeSize(type) * 8 - 1, type);
4769 GenTree* signBit = comp->gtNewOperNode(GT_RSZ, type, adjusted, shiftBy);
4770 BlockRange().InsertBefore(divMod, shiftBy, signBit);
4772 LIR::Use adjustedUse(BlockRange(), &signBit->gtOp.gtOp1, signBit);
4773 adjusted = ReplaceWithLclVar(adjustedUse);
4774 adjusted = comp->gtNewLclvNode(adjusted->AsLclVar()->GetLclNum(), adjusted->TypeGet());
4775 BlockRange().InsertBefore(divMod, adjusted);
4777 if (requiresShiftAdjust)
4779 shiftBy = comp->gtNewIconNode(shift, TYP_INT);
4780 adjusted = comp->gtNewOperNode(GT_RSH, type, adjusted, shiftBy);
4781 BlockRange().InsertBefore(divMod, shiftBy, adjusted);
4786 divMod->SetOperRaw(GT_ADD);
4787 divMod->gtOp.gtOp1 = adjusted;
4788 divMod->gtOp.gtOp2 = signBit;
4792 GenTree* div = comp->gtNewOperNode(GT_ADD, type, adjusted, signBit);
4794 dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
4796 // dividend % divisor = dividend - divisor x div
4797 GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
4798 GenTree* mul = comp->gtNewOperNode(GT_MUL, type, div, divisor);
4799 BlockRange().InsertBefore(divMod, dividend, div, divisor, mul);
4801 divMod->SetOperRaw(GT_SUB);
4802 divMod->gtOp.gtOp1 = dividend;
4803 divMod->gtOp.gtOp2 = mul;
4807 #elif defined(_TARGET_ARM_)
4808 // Currently there's no GT_MULHI for ARM32
4811 #error Unsupported or unset target architecture
4815 // We're committed to the conversion now. Go find the use if any.
4817 if (!BlockRange().TryGetUse(node, &use))
4822 // We need to use the dividend node multiple times so its value needs to be
4823 // computed once and stored in a temp variable.
4824 LIR::Use opDividend(BlockRange(), &divMod->gtOp.gtOp1, divMod);
4825 dividend = ReplaceWithLclVar(opDividend);
4827 GenTree* adjustment = comp->gtNewOperNode(GT_RSH, type, dividend, comp->gtNewIconNode(type == TYP_INT ? 31 : 63));
4829 if (absDivisorValue == 2)
4831 // If the divisor is +/-2 then we'd end up with a bitwise and between 0/-1 and 1.
4832 // We can get the same result by using GT_RSZ instead of GT_RSH.
4833 adjustment->SetOper(GT_RSZ);
4837 adjustment = comp->gtNewOperNode(GT_AND, type, adjustment, comp->gtNewIconNode(absDivisorValue - 1, type));
4840 GenTree* adjustedDividend =
4841 comp->gtNewOperNode(GT_ADD, type, adjustment,
4842 comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()));
4848 // perform the division by right shifting the adjusted dividend
4849 divisor->gtIntCon.SetIconValue(genLog2(absDivisorValue));
4851 newDivMod = comp->gtNewOperNode(GT_RSH, type, adjustedDividend, divisor);
4852 ContainCheckShiftRotate(newDivMod->AsOp());
4854 if (divisorValue < 0)
4856 // negate the result if the divisor is negative
4857 newDivMod = comp->gtNewOperNode(GT_NEG, type, newDivMod);
4858 ContainCheckNode(newDivMod);
4863 // dividend % divisor = dividend - divisor x (dividend / divisor)
4864 // divisor x (dividend / divisor) translates to (dividend >> log2(divisor)) << log2(divisor),
4865 // which simply discards the low log2(divisor) bits; that's just dividend & ~(divisor - 1)
4866 divisor->gtIntCon.SetIconValue(~(absDivisorValue - 1));
4868 newDivMod = comp->gtNewOperNode(GT_SUB, type,
4869 comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()),
4870 comp->gtNewOperNode(GT_AND, type, adjustedDividend, divisor));
4873 // Remove the divisor and dividend nodes from the linear order,
4874 // since we have reused them and will resequence the tree
4875 BlockRange().Remove(divisor);
4876 BlockRange().Remove(dividend);
4878 // linearize and insert the new tree before the original divMod node
4879 InsertTreeBeforeAndContainCheck(divMod, newDivMod);
4880 BlockRange().Remove(divMod);
4882 // replace the original divmod node with the new divmod tree
4883 use.ReplaceWith(comp, newDivMod);
4885 return newDivMod->gtNext;
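// Illustrative sketches (not JIT code) of the signed power-of-2 transform performed above,
// assuming an arithmetic right shift for signed values (true on the targets the JIT supports).
// Signed division truncates toward zero, so negative dividends are biased by (absDivisor - 1)
// before the shift; that bias is exactly the adjustment computed from the sign bit.
//
//     int32_t SDiv8(int32_t x)
//     {
//         int32_t adjust = (x >> 31) & 7;    // 7 for negative x, 0 otherwise
//         return (x + adjust) >> 3;          // x / 8, truncated toward zero
//     }
//
//     int32_t SMod8(int32_t x)
//     {
//         int32_t adjust = (x >> 31) & 7;
//         return x - ((x + adjust) & ~7);    // x % 8, with the sign of x
//     }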
4887 //------------------------------------------------------------------------
4888 // LowerSignedDivOrMod: transform integer GT_DIV/GT_MOD nodes with a constant divisor
4889 // into equivalent but faster sequences.
4892 // node - the DIV or MOD node
4895 // The next node to lower.
4897 GenTree* Lowering::LowerSignedDivOrMod(GenTree* node)
4899 assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));
4900 GenTree* next = node->gtNext;
4902 if (varTypeIsIntegral(node->TypeGet()))
4904 // LowerConstIntDivOrMod will return nullptr if it doesn't transform the node.
4905 GenTree* newNode = LowerConstIntDivOrMod(node);
4906 if (newNode != nullptr)
4911 ContainCheckDivOrMod(node->AsOp());
4916 //------------------------------------------------------------------------
4917 // LowerShift: Lower shift nodes
4920 // shift - the shift node (GT_LSH, GT_RSH or GT_RSZ)
4923 // Remove unnecessary shift count masking; xarch shift instructions
4924 // mask the shift count to 5 bits (or 6 bits for 64-bit operations).
4926 void Lowering::LowerShift(GenTreeOp* shift)
4928 assert(shift->OperIs(GT_LSH, GT_RSH, GT_RSZ));
4931 #ifdef _TARGET_64BIT_
4932 if (varTypeIsLong(shift->TypeGet()))
4937 assert(!varTypeIsLong(shift->TypeGet()));
4940 for (GenTree* andOp = shift->gtGetOp2(); andOp->OperIs(GT_AND); andOp = andOp->gtGetOp1())
4942 GenTree* maskOp = andOp->gtGetOp2();
4944 if (!maskOp->IsCnsIntOrI())
4949 if ((static_cast<size_t>(maskOp->AsIntCon()->IconValue()) & mask) != mask)
4954 shift->gtOp2 = andOp->gtGetOp1();
4955 BlockRange().Remove(andOp);
4956 BlockRange().Remove(maskOp);
4957 // The parent was replaced, clear contain and regOpt flag.
4958 shift->gtOp2->ClearContained();
4960 ContainCheckShiftRotate(shift);
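// Illustrative sketch (not JIT code) of the masking removed above. On xarch the shift instructions
// themselves only consume the low 5 bits of the count (6 bits for 64-bit operands), so the
// explicit AND in
//
//     uint32_t ShiftLeft(uint32_t x, unsigned count)
//     {
//         return x << (count & 31);    // the "& 31" adds nothing the hardware doesn't already do
//     }
//
// is redundant; LowerShift strips the GT_AND so codegen sees a plain shift with the unmasked count.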
4963 void Lowering::WidenSIMD12IfNecessary(GenTreeLclVarCommon* node)
4966 if (node->TypeGet() == TYP_SIMD12)
4969 // RyuJit backend depends on the assumption that on 64-Bit targets Vector3 size is rounded off
4970 // to TARGET_POINTER_SIZE and hence Vector3 locals on stack can be treated as TYP_SIMD16 for
4971 // reading and writing purposes.
4974 // RyuJit backend makes another implicit assumption: when Vector3 type args are passed in
4975 // registers or on the stack, the uppermost 4 bytes will be zero.
4977 // For P/Invoke return and Reverse P/Invoke argument passing, the native compiler doesn't guarantee
4978 // that the upper 4 bytes of a Vector3 type struct are zero initialized, and hence assumption 2 is invalid.
4981 // RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12
4982 // bytes. In case of Vector3 returns, Caller allocates a zero initialized Vector3 local and
4983 // passes it as the retBuf arg, and the callee method writes only 12 bytes to retBuf. For this reason,
4984 // there is no need to clear upper 4-bytes of Vector3 type args.
4986 // RyuJIT x64 Unix: arguments are treated as passed by value and read/written as if TYP_SIMD16.
4987 // Vector3 return values are returned in two return registers and the caller assembles them into a
4988 // single xmm reg. Hence RyuJIT explicitly generates code to clear the upper 4 bytes of Vector3
4989 // type args in the prolog and of the Vector3 type return value of a call.
4991 // RyuJIT x86 Windows: all non-param Vector3 local vars are allocated as 16 bytes. Vector3 arguments
4992 // are pushed as 12 bytes. For return values, a 16-byte local is allocated and the address passed
4993 // as a return buffer pointer. The callee doesn't write the high 4 bytes, and we don't need to clear it either.
4996 unsigned varNum = node->AsLclVarCommon()->GetLclNum();
4997 LclVarDsc* varDsc = &comp->lvaTable[varNum];
4999 if (comp->lvaMapSimd12ToSimd16(varDsc))
5001 JITDUMP("Mapping TYP_SIMD12 lclvar node to TYP_SIMD16:\n");
5003 JITDUMP("============");
5005 node->gtType = TYP_SIMD16;
5008 #endif // FEATURE_SIMD
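// Illustrative sketch (not JIT code) of why retyping TYP_SIMD12 as TYP_SIMD16 is safe under the
// assumptions spelled out above: a Vector3 carries 12 bytes of payload, but its stack slot is
// rounded up to 16 bytes, so a 16-byte read or write only touches slot padding. The names below
// are for illustration only.
//
//     struct Vector3     { float x, y, z; };         // 12 bytes of payload
//     struct Vector3Slot { float x, y, z, pad; };    // the widened 16-byte frame slot
//     static_assert(sizeof(Vector3Slot) == 16, "slot is widened to 16 bytes");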
5011 //------------------------------------------------------------------------
5012 // LowerArrElem: Lower a GT_ARR_ELEM node
5015 // node - the GT_ARR_ELEM node to lower.
5018 // The next node to lower.
5021 // 'node' must point to a GT_ARR_ELEM node.
5024 // This performs the following lowering, starting from a GT_ARR_ELEM node whose operands are the array object <arrObj> and one index expression per dimension.
5030 // First, we create temps for arrObj if it is not already a lclVar, and for any of the index
5031 // expressions that have side-effects.
5032 // We then transform the tree into a chain of per-dimension nodes feeding a GT_LEA:
5033 //   <offset is null - no accumulated offset for the first index>
5036 //   ArrIndex[i, ]  - effective (bounds-checked) index for the first dimension
5038 //   arrOffs[i, ]   - accumulated offset after the first dimension
5041 //   ArrIndex[*,j]  - effective index for the second dimension
5043 //   arrOffs[*,j]   - accumulated offset after the second dimension
5044 //   lclVar NewTemp - the array object
5045 //   lea (base = NewTemp, index = final arrOffs, scale = element size, offset = offset of first element)
5047 // The new stmtExpr may be omitted if the <arrObj> is a lclVar.
5048 // The new stmtExpr may be embedded if the <arrObj> is not the first tree in linear order for
5049 // the statement containing the original arrMD.
5050 // Note that the arrMDOffs is the INDEX of the lea, but is evaluated before the BASE (which is the second
5051 // reference to NewTemp), because that provides more accurate lifetimes.
5052 // There may be 1, 2 or 3 dimensions, with 1, 2 or 3 arrMDIdx nodes, respectively.
5054 GenTree* Lowering::LowerArrElem(GenTree* node)
5056 // This will assert if we don't have an ArrElem node
5057 GenTreeArrElem* arrElem = node->AsArrElem();
5058 const unsigned char rank = arrElem->gtArrElem.gtArrRank;
5060 JITDUMP("Lowering ArrElem\n");
5061 JITDUMP("============\n");
5062 DISPTREERANGE(BlockRange(), arrElem);
5065 assert(arrElem->gtArrObj->TypeGet() == TYP_REF);
5067 // We need to have the array object in a lclVar.
5068 if (!arrElem->gtArrObj->IsLocal())
5070 LIR::Use arrObjUse(BlockRange(), &arrElem->gtArrObj, arrElem);
5071 ReplaceWithLclVar(arrObjUse);
5074 GenTree* arrObjNode = arrElem->gtArrObj;
5075 assert(arrObjNode->IsLocal());
5077 GenTree* insertionPoint = arrElem;
5079 // The first ArrOffs node will have 0 for the offset of the previous dimension.
5080 GenTree* prevArrOffs = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);
5081 BlockRange().InsertBefore(insertionPoint, prevArrOffs);
5082 GenTree* nextToLower = prevArrOffs;
5084 for (unsigned char dim = 0; dim < rank; dim++)
5086 GenTree* indexNode = arrElem->gtArrElem.gtArrInds[dim];
5088 // Use the original arrObjNode on the 0th ArrIndex node, and clone it for subsequent ones.
5089 GenTree* idxArrObjNode;
5092 idxArrObjNode = arrObjNode;
5096 idxArrObjNode = comp->gtClone(arrObjNode);
5097 BlockRange().InsertBefore(insertionPoint, idxArrObjNode);
5100 // Next comes the GT_ARR_INDEX node.
5101 GenTreeArrIndex* arrMDIdx = new (comp, GT_ARR_INDEX)
5102 GenTreeArrIndex(TYP_INT, idxArrObjNode, indexNode, dim, rank, arrElem->gtArrElem.gtArrElemType);
5103 arrMDIdx->gtFlags |= ((idxArrObjNode->gtFlags | indexNode->gtFlags) & GTF_ALL_EFFECT);
5104 BlockRange().InsertBefore(insertionPoint, arrMDIdx);
5106 GenTree* offsArrObjNode = comp->gtClone(arrObjNode);
5107 BlockRange().InsertBefore(insertionPoint, offsArrObjNode);
5109 GenTreeArrOffs* arrOffs =
5110 new (comp, GT_ARR_OFFSET) GenTreeArrOffs(TYP_I_IMPL, prevArrOffs, arrMDIdx, offsArrObjNode, dim, rank,
5111 arrElem->gtArrElem.gtArrElemType);
5112 arrOffs->gtFlags |= ((prevArrOffs->gtFlags | arrMDIdx->gtFlags | offsArrObjNode->gtFlags) & GTF_ALL_EFFECT);
5113 BlockRange().InsertBefore(insertionPoint, arrOffs);
5115 prevArrOffs = arrOffs;
5118 // Generate the LEA and make it reverse evaluation, because we want to evaluate the index expression before the base.
5120 unsigned scale = arrElem->gtArrElem.gtArrElemSize;
5121 unsigned offset = comp->eeGetMDArrayDataOffset(arrElem->gtArrElem.gtArrElemType, arrElem->gtArrElem.gtArrRank);
5123 GenTree* leaIndexNode = prevArrOffs;
5124 if (!jitIsScaleIndexMul(scale))
5126 // We do the address arithmetic in TYP_I_IMPL, though note that the lower bounds and lengths in memory are TYP_INT.
5128 GenTree* scaleNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, scale);
5129 GenTree* mulNode = new (comp, GT_MUL) GenTreeOp(GT_MUL, TYP_I_IMPL, leaIndexNode, scaleNode);
5130 BlockRange().InsertBefore(insertionPoint, scaleNode, mulNode);
5131 leaIndexNode = mulNode;
5135 GenTree* leaBase = comp->gtClone(arrObjNode);
5136 BlockRange().InsertBefore(insertionPoint, leaBase);
5138 GenTree* leaNode = new (comp, GT_LEA) GenTreeAddrMode(arrElem->TypeGet(), leaBase, leaIndexNode, scale, offset);
5140 BlockRange().InsertBefore(insertionPoint, leaNode);
5142 LIR::Use arrElemUse;
5143 if (BlockRange().TryGetUse(arrElem, &arrElemUse))
5145 arrElemUse.ReplaceWith(comp, leaNode);
5149 leaNode->SetUnusedValue();
5152 BlockRange().Remove(arrElem);
5154 JITDUMP("Results of lowering ArrElem:\n");
5155 DISPTREERANGE(BlockRange(), leaNode);
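// Illustrative sketch (not JIT code) of the address computation the lowered ArrIndex/ArrOffset/LEA
// chain performs for a rank-2 array, assuming the usual row-major MD-array layout and ignoring the
// bounds checks that GT_ARR_INDEX also provides. The names below are for illustration only.
//
//     uint8_t* ElemAddr(uint8_t* arrObj, size_t i, size_t j,
//                       size_t lenDim1,      // length of the second dimension
//                       size_t elemSize,     // gtArrElemSize
//                       size_t dataOffset)   // eeGetMDArrayDataOffset
//     {
//         size_t offs = i;                   // arrOffs[i, ] - first dimension, no previous offset
//         offs = offs * lenDim1 + j;         // arrOffs[*,j] - fold in the second dimension
//         return arrObj + offs * elemSize + dataOffset;   // the final GT_LEA
//     }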
5161 void Lowering::DoPhase()
5163 // If we have any PInvoke calls, insert the one-time prolog code. We'll insert the epilog code in the
5164 // appropriate spots later. NOTE: there is a minor optimization opportunity here, as we still create p/invoke
5165 // data structures and setup/teardown even if we've eliminated all p/invoke calls due to dead code elimination.
5166 if (comp->info.compCallUnmanaged)
5168 InsertPInvokeMethodProlog();
5171 #if !defined(_TARGET_64BIT_)
5172 DecomposeLongs decomp(comp); // Initialize the long decomposition class.
5173 if (comp->compLongUsed)
5175 decomp.PrepareForDecomposition();
5177 #endif // !defined(_TARGET_64BIT_)
5179 for (BasicBlock* block = comp->fgFirstBB; block; block = block->bbNext)
5181 /* Make the block publicly available */
5182 comp->compCurBB = block;
5184 #if !defined(_TARGET_64BIT_)
5185 if (comp->compLongUsed)
5187 decomp.DecomposeBlock(block);
5189 #endif //!_TARGET_64BIT_
5195 JITDUMP("Lower has completed modifying nodes.\n");
5198 comp->fgDispBasicBlocks(true);
5202 // Recompute local var ref counts before potentially sorting for liveness.
5203 // Note this does minimal work in cases where we are not going to sort.
5204 const bool isRecompute = true;
5205 const bool setSlotNumbers = false;
5206 comp->lvaComputeRefCounts(isRecompute, setSlotNumbers);
5208 comp->fgLocalVarLiveness();
5209 // local var liveness can delete code, which may create empty blocks
5210 if (comp->opts.OptimizationEnabled())
5212 comp->optLoopsMarked = false;
5213 bool modified = comp->fgUpdateFlowGraph();
5216 JITDUMP("had to run another liveness pass:\n");
5217 comp->fgLocalVarLiveness();
5221 // Recompute local var ref counts again after liveness to reflect
5222 // impact of any dead code removal. Note this may leave us with
5223 // tracked vars that have zero refs.
5224 comp->lvaComputeRefCounts(isRecompute, setSlotNumbers);
5227 JITDUMP("Liveness pass finished after lowering, IR:\n");
5230 comp->fgDispBasicBlocks(true);
5233 for (BasicBlock* block = comp->fgFirstBB; block; block = block->bbNext)
5235 assert(LIR::AsRange(block).CheckLIR(comp, true));
5242 //------------------------------------------------------------------------
5243 // Lowering::CheckCallArg: check that a call argument is in an expected
5244 // form after lowering.
5247 // arg - the argument to check.
5249 void Lowering::CheckCallArg(GenTree* arg)
5251 if (!arg->IsValue() && !arg->OperIsPutArgStk())
5253 assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() ||
5254 arg->OperIsCopyBlkOp());
5258 switch (arg->OperGet())
5262 GenTreeFieldList* list = arg->AsFieldList();
5263 assert(list->isContained());
5264 assert(list->IsFieldListHead());
5266 for (; list != nullptr; list = list->Rest())
5268 assert(list->Current()->OperIsPutArg());
5274 assert(arg->OperIsPutArg());
5279 //------------------------------------------------------------------------
5280 // Lowering::CheckCall: check that a call is in an expected form after
5281 // lowering. Currently this amounts to checking its
5282 // arguments, but could be expanded to verify more
5283 // properties in the future.
5286 // call - the call to check.
5288 void Lowering::CheckCall(GenTreeCall* call)
5290 if (call->gtCallObjp != nullptr)
5292 CheckCallArg(call->gtCallObjp);
5295 for (GenTreeArgList* args = call->gtCallArgs; args != nullptr; args = args->Rest())
5297 CheckCallArg(args->Current());
5300 for (GenTreeArgList* args = call->gtCallLateArgs; args != nullptr; args = args->Rest())
5302 CheckCallArg(args->Current());
5306 //------------------------------------------------------------------------
5307 // Lowering::CheckNode: check that an LIR node is in an expected form
5311 // compiler - the compiler context.
5312 // node - the node to check.
5314 void Lowering::CheckNode(Compiler* compiler, GenTree* node)
5316 switch (node->OperGet())
5319 CheckCall(node->AsCall());
5324 assert(node->TypeGet() != TYP_SIMD12);
5326 #ifdef _TARGET_64BIT_
5328 case GT_STORE_LCL_VAR:
5330 unsigned lclNum = node->AsLclVarCommon()->GetLclNum();
5331 LclVarDsc* lclVar = &compiler->lvaTable[lclNum];
5332 assert(node->TypeGet() != TYP_SIMD12 || compiler->lvaIsFieldOfDependentlyPromotedStruct(lclVar));
5335 #endif // _TARGET_64BIT_
//------------------------------------------------------------------------
// Lowering::CheckBlock: check that the contents of an LIR block are in an
//                       expected form after lowering.
//
// Arguments:
//    compiler - the compiler context.
//    block    - the block to check.
//
bool Lowering::CheckBlock(Compiler* compiler, BasicBlock* block)
{
    assert(block->isEmpty() || block->IsLIR());

    LIR::Range& blockRange = LIR::AsRange(block);
    for (GenTree* node : blockRange)
    {
        CheckNode(compiler, node);
    }

    assert(blockRange.CheckLIR(compiler, true));
    return true;
}
void Lowering::LowerBlock(BasicBlock* block)
{
    assert(block == comp->compCurBB); // compCurBB must already be set.
    assert(block->isEmpty() || block->IsLIR());

    // NOTE: some of the lowering methods insert calls before the node being
    // lowered (See e.g. InsertPInvoke{Method,Call}{Prolog,Epilog}). In
    // general, any code that is inserted before the current node should be
    // "pre-lowered", as it will not be subject to further processing.
    // Lowering::CheckBlock() runs some extra checks on call arguments in
    // order to help catch unlowered nodes.

    GenTree* node = BlockRange().FirstNode();
    while (node != nullptr)
    {
        node = LowerNode(node);
    }

    assert(CheckBlock(comp, block));
}
/** Verifies whether both of these trees represent the same indirection.
 * Used by Lower to annotate whether CodeGen can generate an instruction of the
 * form *addrMode BinOp= expr.
 *
 * Preconditions: both trees are children of GT_INDs and their underlying children
 * have the same gtOper.
 *
 * This is a first iteration to actually recognize trees that can be code-generated
 * as a single read-modify-write instruction on AMD64/x86. For now
 * this method only supports the recognition of simple addressing modes (through GT_LEA)
 * or local var indirections. Local fields, array access and other more complex nodes are
 * not yet supported.
 *
 * TODO-CQ: Perform tree recognition by using the Value Numbering Package, that way we can recognize
 * arbitrarily complex trees and support many more addressing patterns.
 */
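// For example (roughly), a tree of the shape
//     STOREIND(LEA(base, index, scale, offset), ADD(IND(LEA(base, index, scale, offset)), CNS_INT 1))
// in which the two indirections are equivalent can be emitted as a single read-modify-write
// instruction such as "add dword ptr [base + index*scale + offset], 1" instead of a separate
// load, add, and store.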
bool Lowering::IndirsAreEquivalent(GenTree* candidate, GenTree* storeInd)
{
    assert(candidate->OperGet() == GT_IND);
    assert(storeInd->OperGet() == GT_STOREIND);

    // We should check the size of the indirections. If they are
    // different, say because of a cast, then we can't call them equivalent. Doing so could cause us
    // to generate a read-modify-write with the wrong operand size.
    // Signed-ness difference is okay and expected since a store indirection must always
    // be signed based on the CIL spec, but a load could be unsigned.
    if (genTypeSize(candidate->gtType) != genTypeSize(storeInd->gtType))
    {
        return false;
    }

    GenTree* pTreeA = candidate->gtGetOp1();
    GenTree* pTreeB = storeInd->gtGetOp1();

    // This method will be called by codegen (as well as during lowering).
    // After register allocation, the sources may have been spilled and reloaded
    // to a different register, indicated by an inserted GT_RELOAD node.
    pTreeA = pTreeA->gtSkipReloadOrCopy();
    pTreeB = pTreeB->gtSkipReloadOrCopy();

    genTreeOps oper;

    if (pTreeA->OperGet() != pTreeB->OperGet())
    {
        return false;
    }

    oper = pTreeA->OperGet();
    switch (oper)
    {
        case GT_LCL_VAR:
        case GT_LCL_VAR_ADDR:
        case GT_CLS_VAR_ADDR:
        case GT_CNS_INT:
            return NodesAreEquivalentLeaves(pTreeA, pTreeB);

        case GT_LEA:
        {
            GenTreeAddrMode* gtAddr1 = pTreeA->AsAddrMode();
            GenTreeAddrMode* gtAddr2 = pTreeB->AsAddrMode();
            return NodesAreEquivalentLeaves(gtAddr1->Base(), gtAddr2->Base()) &&
                   NodesAreEquivalentLeaves(gtAddr1->Index(), gtAddr2->Index()) &&
                   (gtAddr1->gtScale == gtAddr2->gtScale) && (gtAddr1->Offset() == gtAddr2->Offset());
        }

        default:
            // We don't handle anything that is not either a constant,
            // a local var or LEA.
            return false;
    }
}
/** Test whether the two given nodes are the same leaves.
 * Right now, only constant integers and local variables are supported.
 */
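// For example, two GT_LCL_VAR nodes naming the same local number are considered equivalent here
// even if one of them sits under a GT_RELOAD or GT_COPY, since both sides are unwrapped via
// gtSkipReloadOrCopy() before comparing.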
bool Lowering::NodesAreEquivalentLeaves(GenTree* tree1, GenTree* tree2)
{
    // If both are null they are equivalent; if only one is null they are not.
    if (tree1 == nullptr && tree2 == nullptr)
    {
        return true;
    }
    if (tree1 == nullptr || tree2 == nullptr)
    {
        return false;
    }

    tree1 = tree1->gtSkipReloadOrCopy();
    tree2 = tree2->gtSkipReloadOrCopy();

    if (tree1->TypeGet() != tree2->TypeGet())
    {
        return false;
    }
    if (tree1->OperGet() != tree2->OperGet())
    {
        return false;
    }
    if (!tree1->OperIsLeaf() || !tree2->OperIsLeaf())
    {
        return false;
    }

    switch (tree1->OperGet())
    {
        case GT_CNS_INT:
            return tree1->gtIntCon.gtIconVal == tree2->gtIntCon.gtIconVal &&
                   tree1->IsIconHandle() == tree2->IsIconHandle();
        case GT_LCL_VAR:
        case GT_LCL_VAR_ADDR:
            return tree1->gtLclVarCommon.gtLclNum == tree2->gtLclVarCommon.gtLclNum;
        case GT_CLS_VAR_ADDR:
            return tree1->gtClsVar.gtClsVarHnd == tree2->gtClsVar.gtClsVarHnd;
        default:
            return false;
    }
}
//------------------------------------------------------------------------
// Containment Analysis
//------------------------------------------------------------------------
void Lowering::ContainCheckNode(GenTree* node)
{
    switch (node->gtOper)
    {
        case GT_STORE_LCL_VAR:
        case GT_STORE_LCL_FLD:
            ContainCheckStoreLoc(node->AsLclVarCommon());
            break;

        case GT_EQ: case GT_NE: case GT_LT: case GT_LE: case GT_GE: case GT_GT:
        case GT_TEST_EQ: case GT_TEST_NE: case GT_CMP:
            ContainCheckCompare(node->AsOp());
            break;

        case GT_JTRUE:
            ContainCheckJTrue(node->AsOp());
            break;

        case GT_ADD: case GT_SUB:
#if !defined(_TARGET_64BIT_)
        case GT_ADD_LO: case GT_ADD_HI: case GT_SUB_LO: case GT_SUB_HI:
#endif // !defined(_TARGET_64BIT_)
        case GT_AND: case GT_OR: case GT_XOR:
            ContainCheckBinary(node->AsOp());
            break;

#if defined(_TARGET_X86_)
        case GT_MUL_LONG:
#endif
        case GT_MUL: case GT_MULHI:
            ContainCheckMul(node->AsOp());
            break;

        case GT_DIV: case GT_MOD: case GT_UDIV: case GT_UMOD:
            ContainCheckDivOrMod(node->AsOp());
            break;

        case GT_LSH: case GT_RSH: case GT_RSZ: case GT_ROL: case GT_ROR:
#ifndef _TARGET_64BIT_
        case GT_LSH_HI: case GT_RSH_LO:
#endif
            ContainCheckShiftRotate(node->AsOp());
            break;

        case GT_ARR_OFFSET:
            ContainCheckArrOffset(node->AsArrOffs());
            break;

        case GT_LCLHEAP:
            ContainCheckLclHeap(node->AsOp());
            break;

        case GT_RETURN:
            ContainCheckRet(node->AsOp());
            break;

        case GT_RETURNTRAP:
            ContainCheckReturnTrap(node->AsOp());
            break;

        case GT_STOREIND:
            ContainCheckStoreIndir(node->AsIndir());
            break;

        case GT_IND:
            ContainCheckIndir(node->AsIndir());
            break;

        case GT_PUTARG_REG:
        case GT_PUTARG_STK:
#if FEATURE_ARG_SPLIT
        case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
            // The regNum must have been set by the lowering of the call.
            assert(node->gtRegNum != REG_NA);
            break;

#ifdef _TARGET_XARCH_
        case GT_INTRINSIC:
            ContainCheckIntrinsic(node->AsOp());
            break;
#endif // _TARGET_XARCH_
#ifdef FEATURE_SIMD
        case GT_SIMD:
            ContainCheckSIMD(node->AsSIMD());
            break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
        case GT_HWIntrinsic:
            ContainCheckHWIntrinsic(node->AsHWIntrinsic());
            break;
#endif // FEATURE_HW_INTRINSICS

        default:
            break;
    }
}
//------------------------------------------------------------------------
// ContainCheckReturnTrap: determine whether the source of a RETURNTRAP should be contained.
//
// Arguments:
//    node - pointer to the GT_RETURNTRAP node
//
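// Note: on xarch, containing an indirect source lets codegen fold the load into the compare
// itself (e.g. "cmp dword ptr [mem], imm" followed by the conditional helper call) rather than
// first loading the value into a register.
//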
void Lowering::ContainCheckReturnTrap(GenTreeOp* node)
{
#ifdef _TARGET_XARCH_
    assert(node->OperIs(GT_RETURNTRAP));
    // This just turns into a compare of its child with an int + a conditional call.
    if (node->gtOp1->isIndir())
    {
        MakeSrcContained(node, node->gtOp1);
    }
#endif // _TARGET_XARCH_
}
//------------------------------------------------------------------------
// ContainCheckArrOffset: determine whether the source of an ARR_OFFSET should be contained.
//
// Arguments:
//    node - pointer to the GT_ARR_OFFSET node
//
void Lowering::ContainCheckArrOffset(GenTreeArrOffs* node)
{
    assert(node->OperIs(GT_ARR_OFFSET));
    // If the offset is a zero constant, we don't need to generate any code for it.
    if (node->gtOffset->IsIntegralConst(0))
    {
        MakeSrcContained(node, node->gtArrOffs.gtOffset);
    }
}
//------------------------------------------------------------------------
// ContainCheckLclHeap: determine whether the source of a GT_LCLHEAP node should be contained.
//
// Arguments:
//    node - pointer to the GT_LCLHEAP node
//
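// Note: containing a constant size means no register has to be allocated for it; the stack
// allocation sequence can use the amount directly as an immediate.
//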
void Lowering::ContainCheckLclHeap(GenTreeOp* node)
{
    assert(node->OperIs(GT_LCLHEAP));
    GenTree* size = node->gtOp.gtOp1;
    if (size->IsCnsIntOrI())
    {
        MakeSrcContained(node, size);
    }
}
//------------------------------------------------------------------------
// ContainCheckRet: determine whether the source of a GT_RETURN node should be contained.
//
// Arguments:
//    ret - pointer to the GT_RETURN node
//
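// Note: for a multi-reg struct return, containing a non-enregisterable local lets codegen
// populate the return registers directly from the local's stack home rather than first
// producing the struct value in registers.
//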
void Lowering::ContainCheckRet(GenTreeOp* ret)
{
    assert(ret->OperIs(GT_RETURN));

#if !defined(_TARGET_64BIT_)
    if (ret->TypeGet() == TYP_LONG)
    {
        GenTree* op1 = ret->gtGetOp1();
        noway_assert(op1->OperGet() == GT_LONG);
        MakeSrcContained(ret, op1);
    }
#endif // !defined(_TARGET_64BIT_)
#if FEATURE_MULTIREG_RET
    if (varTypeIsStruct(ret))
    {
        GenTree* op1 = ret->gtGetOp1();
        // op1 must be either a lclvar or a multi-reg returning call.
        if (op1->OperGet() == GT_LCL_VAR)
        {
            GenTreeLclVarCommon* lclVarCommon = op1->AsLclVarCommon();
            LclVarDsc*           varDsc       = &(comp->lvaTable[lclVarCommon->gtLclNum]);
            assert(varDsc->lvIsMultiRegRet);

            // Mark var as contained if not enregistrable.
            if (!varTypeIsEnregisterableStruct(op1))
            {
                MakeSrcContained(ret, op1);
            }
        }
    }
#endif // FEATURE_MULTIREG_RET
}
//------------------------------------------------------------------------
// ContainCheckJTrue: determine whether the source of a JTRUE should be contained.
//
// Arguments:
//    node - pointer to the GT_JTRUE node
//
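// Note: the compare below is retyped to TYP_VOID and marked with GTF_SET_FLAGS, so it only needs
// to set the condition flags; the JTRUE then consumes those flags as a conditional branch
// (e.g. cmp/jcc on xarch) without the compare result ever being materialized in a register.
//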
void Lowering::ContainCheckJTrue(GenTreeOp* node)
{
    // The compare does not need to be generated into a register.
    GenTree* cmp = node->gtGetOp1();
    cmp->gtType  = TYP_VOID;
    cmp->gtFlags |= GTF_SET_FLAGS;