1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
8 XX Lowering for ARM and ARM64 common code XX
10 XX This encapsulates common logic for lowering trees for the ARM and ARM64 XX
11 XX architectures. For a more detailed view of what is lowering, please XX
12 XX take a look at Lower.cpp XX
14 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
15 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
23 #ifndef LEGACY_BACKEND // This file is ONLY used for the RyuJIT backend that uses the linear scan register allocator
25 #ifdef _TARGET_ARMARCH_ // This file is ONLY used for ARM and ARM64 architectures
28 #include "sideeffects.h"
32 #ifdef FEATURE_HW_INTRINSICS
33 #include "hwintrinsicArm64.h"
36 //------------------------------------------------------------------------
37 // IsCallTargetInRange: Can a call target address be encoded in-place?
//
// Arguments:
//    addr - the absolute call target address to test
//
// Return Value:
40 //    True if the addr fits into the range.
//
// NOTE(review): this view of the function is missing interior lines (the
// opening brace and the _TARGET_ARM64_ arm of the #if are elided); only the
// ARM32 path, which delegates to the codegen BL-immediate check, is visible.
42 bool Lowering::IsCallTargetInRange(void* addr)
45 // TODO-ARM64-CQ: This is a workaround to unblock the JIT from getting calls working.
46 // Currently, we'll be generating calls using blr and manually loading an absolute
47 // call target in a register using a sequence of load immediate instructions.
49 // As you can expect, this is inefficient and it's not the recommended way as per the
50 // ARM64 ABI Manual but will get us getting things done for now.
51 // The work to get this right would be to implement PC-relative calls, the bl instruction
52 // can only address things -128 + 128MB away, so this will require getting some additional
53 // code to get jump thunks working.
55 #elif defined(_TARGET_ARM_)
// ARM32: a call target is in range iff it can be encoded as a BL immediate.
56 return comp->codeGen->validImmForBL((ssize_t)addr);
60 //------------------------------------------------------------------------
61 // IsContainableImmed: Is an immediate encodable in-place?
//
// Arguments:
//    parentNode - the node that would consume the immediate operand
//    childNode  - the candidate immediate operand
//
// Return Value:
64 //    True if the immediate can be folded into an instruction,
65 //    for example small enough and non-relocatable.
//
// NOTE(review): interior lines (braces, several case labels, early returns)
// are elided in this view; comments below describe only the visible code.
66 bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode)
68 if (varTypeIsFloating(parentNode->TypeGet()))
70 // We can contain a floating point 0.0 constant in a compare instruction
71 switch (parentNode->OperGet())
82 if (childNode->IsIntegralConst(0))
84 // TODO-ARM-Cleanup: not tested yet.
85 NYI_ARM("ARM IsContainableImmed for floating point type");
94 // Make sure we have an actual immediate
95 if (!childNode->IsCnsIntOrI())
// A handle constant under relocation cannot be baked into the instruction;
// it must be materialized in a register.
97 if (childNode->IsIconHandle() && comp->opts.compReloc)
100 ssize_t immVal = childNode->gtIntCon.gtIconVal;
101 emitAttr attr = emitActualTypeSize(childNode->TypeGet());
102 emitAttr size = EA_SIZE(attr);
// ARM32: whether an add immediate is encodable can depend on whether the
// instruction form sets flags.
104 insFlags flags = parentNode->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
// Dispatch on the consuming operator: each instruction class has its own
// immediate-encoding rules, checked via the emitter's emitIns_valid_imm_for_*
// helpers.
107 switch (parentNode->OperGet())
114 #ifdef _TARGET_ARM64_
118 return emitter::emitIns_valid_imm_for_add(immVal, size);
119 #elif defined(_TARGET_ARM_)
120 return emitter::emitIns_valid_imm_for_add(immVal, flags);
124 #ifdef _TARGET_ARM64_
131 return emitter::emitIns_valid_imm_for_cmp(immVal, size);
138 return emitter::emitIns_valid_imm_for_alu(immVal, size);
// For a JCMP: a TST-style compare requires a power-of-two mask; otherwise the
// immediate must be zero (cbz/cbnz-style compare against zero).
141 assert(((parentNode->gtFlags & GTF_JCMP_TST) == 0) ? (immVal == 0) : isPow2(immVal));
144 #elif defined(_TARGET_ARM_)
155 return emitter::emitIns_valid_imm_for_alu(immVal);
157 #endif // _TARGET_ARM_
159 #ifdef _TARGET_ARM64_
160 case GT_STORE_LCL_VAR:
171 //------------------------------------------------------------------------
172 // LowerStoreLoc: Lower a store of a lclVar
//
// Arguments:
175 //    storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
//
// Notes:
//    Performs (among other things):
179 //    - Widening operations of unsigneds.
//
// NOTE(review): interior lines (braces, some else branches) are elided in
// this view; comments describe only the visible code.
181 void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
183 // Try to widen the ops if they are going into a local var.
184 GenTree* op1 = storeLoc->gtGetOp1();
// Only GT_STORE_LCL_VAR of an integer constant is considered for widening.
185 if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (op1->gtOper == GT_CNS_INT))
187 GenTreeIntCon* con = op1->AsIntCon();
188 ssize_t ival = con->gtIconVal;
189 unsigned varNum = storeLoc->gtLclNum;
190 LclVarDsc* varDsc = comp->lvaTable + varNum;
192 if (varDsc->lvIsSIMDType())
194 noway_assert(storeLoc->gtType != TYP_STRUCT);
196 unsigned size = genTypeSize(storeLoc);
197 // If we are storing a constant into a local variable
198 // we extend the size of the store here
199 if ((size < 4) && !varTypeIsStruct(varDsc))
// Signed small types: if the constant does not fit the positive range of the
// small type, replicate the sign bits into the upper bits before widening.
201 if (!varTypeIsUnsigned(varDsc))
203 if (genTypeSize(storeLoc) == 1)
205 if ((ival & 0x7f) != ival)
207 ival = ival | 0xffffff00;
212 assert(genTypeSize(storeLoc) == 2);
213 if ((ival & 0x7fff) != ival)
215 ival = ival | 0xffff0000;
220 // A local stack slot is at least 4 bytes in size, regardless of
221 // what the local var is typed as, so auto-promote it here
222 // unless it is a field of a promoted struct
223 // TODO-CQ: if the field is promoted shouldn't we also be able to do this?
224 if (!varDsc->lvIsStructField)
// Widen the store to a full TYP_INT store of the adjusted constant.
226 storeLoc->gtType = TYP_INT;
227 con->SetIconValue(ival);
// Finally, run containment analysis on the (possibly widened) store.
231 ContainCheckStoreLoc(storeLoc);
234 //------------------------------------------------------------------------
235 // LowerStoreIndir: Determine addressing mode for an indirection, and whether operands are contained.
//
// Arguments:
238 //    node - The indirect store node (GT_STORE_IND) of interest
//
// Notes:
//    In this visible code, all the work is delegated to containment analysis;
//    no addressing-mode rewriting is performed here.
243 void Lowering::LowerStoreIndir(GenTreeIndir* node)
245 ContainCheckStoreIndir(node);
248 //------------------------------------------------------------------------
249 // LowerBlockStore: Set block store type
//
// Arguments:
252 //    blkNode - The block store node of interest
//
// Notes:
//    Classifies the block store (init vs copy, obj vs blk) and picks a
//    gtBlkOpKind (unroll vs helper call), containing operands where possible.
//
// NOTE(review): interior lines (braces, if/else connectives, some branches)
// are elided in this view; comments describe only the visible code.
257 void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
259 GenTree* dstAddr = blkNode->Addr();
260 unsigned size = blkNode->gtBlkSize;
261 GenTree* source = blkNode->Data();
262 Compiler* compiler = comp;
264 // Sources are dest address and initVal or source.
265 GenTree* srcAddrOrFill = nullptr;
266 bool isInitBlk = blkNode->OperIsInitBlkOp();
270 // CopyObj or CopyBlk
// A STORE_OBJ with no GC pointers (or marked GC-unsafe) can be treated as a
// plain STORE_BLK.
271 if ((blkNode->OperGet() == GT_STORE_OBJ) && ((blkNode->AsObj()->gtGcPtrCount == 0) || blkNode->gtBlkOpGcUnsafe))
273 blkNode->SetOper(GT_STORE_BLK);
275 if (source->gtOper == GT_IND)
277 srcAddrOrFill = blkNode->Data()->gtGetOp1();
// InitBlk path: unwrap a GT_INIT_VAL wrapper around the fill value.
283 GenTree* initVal = source;
284 if (initVal->OperIsInitVal())
286 initVal->SetContained();
287 initVal = initVal->gtGetOp1();
289 srcAddrOrFill = initVal;
291 #ifdef _TARGET_ARM64_
// Small, constant-fill init blocks can be unrolled on ARM64.
292 if ((size != 0) && (size <= INITBLK_UNROLL_LIMIT) && initVal->IsCnsIntOrI())
294 // TODO-ARM-CQ: Currently we generate a helper call for every
295 // initblk we encounter. Later on we should implement loop unrolling
296 // code sequences to improve CQ.
297 // For reference see the code in LowerXArch.cpp.
298 NYI_ARM("initblk loop unrolling is currently not implemented.");
300 // The fill value of an initblk is interpreted to hold a
301 // value of (unsigned int8) however a constant of any size
302 // may practically reside on the evaluation stack. So extract
303 // the lower byte out of the initVal constant and replicate
304 // it to a larger constant whose size is sufficient to support
305 // the largest width store of the desired inline expansion.
307 ssize_t fill = initVal->gtIntCon.gtIconVal & 0xFF;
310 MakeSrcContained(blkNode, source);
// Replicate the byte across a 32-bit or 64-bit constant depending on size.
312 else if (size < REGSIZE_BYTES)
314 initVal->gtIntCon.gtIconVal = 0x01010101 * fill;
318 initVal->gtIntCon.gtIconVal = 0x0101010101010101LL * fill;
319 initVal->gtType = TYP_LONG;
321 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
324 #endif // _TARGET_ARM64_
// Fallback for init blocks that cannot be unrolled: call a helper.
326 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
331 // CopyObj or CopyBlk
332 // Sources are src and dest and size if not constant.
334 if (blkNode->OperGet() == GT_STORE_OBJ)
337 GenTreeObj* objNode = blkNode->AsObj();
339 unsigned slots = objNode->gtSlots;
342 // CpObj must always have at least one GC-Pointer as a member.
343 assert(objNode->gtGcPtrCount > 0);
345 assert(dstAddr->gtType == TYP_BYREF || dstAddr->gtType == TYP_I_IMPL);
347 CORINFO_CLASS_HANDLE clsHnd = objNode->gtClass;
348 size_t classSize = compiler->info.compCompHnd->getClassSize(clsHnd);
349 size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE);
351 // Currently, the EE always round up a class data structure so
352 // we are not handling the case where we have a non multiple of pointer sized
353 // struct. This behavior may change in the future so in order to keeps things correct
354 // let's assert it just to be safe. Going forward we should simply
356 assert(classSize == blkSize);
357 assert((blkSize / TARGET_POINTER_SIZE) == slots);
358 assert(objNode->HasGCPtr());
// CpObj is always expanded inline (unrolled) in the visible code.
361 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
365 // In case of a CpBlk with a constant size and less than CPBLK_UNROLL_LIMIT size
366 // we should unroll the loop to improve CQ.
367 // For reference see the code in lowerxarch.cpp.
369 if ((size != 0) && (size <= CPBLK_UNROLL_LIMIT))
371 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
375 // In case we have a constant integer this means we went beyond
376 // CPBLK_UNROLL_LIMIT bytes of size, still we should never have the case of
377 // any GC-Pointers in the src struct.
378 blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
381 // CopyObj or CopyBlk
// Contain the source: either the IND wrapper, or a local (multi-reg call and
// SIMD sources are excluded).
382 if (source->gtOper == GT_IND)
384 MakeSrcContained(blkNode, source);
386 else if (!source->IsMultiRegCall() && !source->OperIsSIMD())
388 assert(source->IsLocal());
389 MakeSrcContained(blkNode, source);
394 //------------------------------------------------------------------------
395 // LowerCast: Lower GT_CAST(srcType, DstType) nodes.
//
// Arguments:
398 //    tree - GT_CAST node to be lowered
//
// Notes:
404 //    Casts from float/double to a smaller int type are transformed as follows:
405 //    GT_CAST(float/double, byte)   = GT_CAST(GT_CAST(float/double, int32), byte)
406 //    GT_CAST(float/double, sbyte)  = GT_CAST(GT_CAST(float/double, int32), sbyte)
407 //    GT_CAST(float/double, int16)  = GT_CAST(GT_CAST(double/double, int32), int16)
408 //    GT_CAST(float/double, uint16) = GT_CAST(GT_CAST(double/double, int32), uint16)
410 //    Note that for the overflow conversions we still depend on helper calls and
411 //    don't expect to see them here.
412 //    i) GT_CAST(float/double, int type with overflow detection)
//
// NOTE(review): interior lines (braces, the assignment of tmpType) are elided
// in this view; comments describe only the visible code.
414 void Lowering::LowerCast(GenTree* tree)
416 assert(tree->OperGet() == GT_CAST);
418 JITDUMP("LowerCast for: ");
422 GenTree* op1 = tree->gtOp.gtOp1;
423 var_types dstType = tree->CastToType();
424 var_types srcType = genActualType(op1->TypeGet());
425 var_types tmpType = TYP_UNDEF;
// Overflow-checked casts from floating point are handled by helpers, not here.
427 if (varTypeIsFloating(srcType))
429 noway_assert(!tree->gtOverflow());
432 assert(!varTypeIsSmall(srcType));
434 // case of src is a floating point type and dst is a small type.
435 if (varTypeIsFloating(srcType) && varTypeIsSmall(dstType))
437 NYI_ARM("Lowering for cast from float to small type"); // Not tested yet.
// If an intermediate type was chosen, split the cast in two: insert an inner
// cast to tmpType, and retype the original node to cast from tmpType.
441 if (tmpType != TYP_UNDEF)
443 GenTree* tmp = comp->gtNewCastNode(tmpType, op1, tmpType);
444 tmp->gtFlags |= (tree->gtFlags & (GTF_UNSIGNED | GTF_OVERFLOW | GTF_EXCEPT));
446 tree->gtFlags &= ~GTF_UNSIGNED;
447 tree->gtOp.gtOp1 = tmp;
448 BlockRange().InsertAfter(op1, tmp);
451 // Now determine if we have operands that should be contained.
452 ContainCheckCast(tree->AsCast());
455 //------------------------------------------------------------------------
456 // LowerRotate: Lower GT_ROL and GT_ROR nodes.
//
// Arguments:
459 //    tree - the node to lower
//
// NOTE(review): interior lines (braces, the else connective) are elided in
// this view; comments describe only the visible code.
464 void Lowering::LowerRotate(GenTree* tree)
466 if (tree->OperGet() == GT_ROL)
468 // There is no ROL instruction on ARM. Convert ROL into ROR.
469 GenTree* rotatedValue = tree->gtOp.gtOp1;
470 unsigned rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
471 GenTree* rotateLeftIndexNode = tree->gtOp.gtOp2;
// Constant rotate amount: rewrite in place as (bitsize - amount).
473 if (rotateLeftIndexNode->IsCnsIntOrI())
475 ssize_t rotateLeftIndex = rotateLeftIndexNode->gtIntCon.gtIconVal;
476 ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
477 rotateLeftIndexNode->gtIntCon.gtIconVal = rotateRightIndex;
// Variable rotate amount: negate it (rotate right by -n == rotate left by n
// modulo the bit size).
481 GenTree* tmp = comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType), rotateLeftIndexNode);
482 BlockRange().InsertAfter(rotateLeftIndexNode, tmp);
483 tree->gtOp.gtOp2 = tmp;
485 tree->ChangeOper(GT_ROR);
487 ContainCheckShiftRotate(tree->AsOp());
491 //----------------------------------------------------------------------------------------------
492 // Lowering::LowerSIMD: Perform containment analysis for a SIMD intrinsic node.
//
// Arguments:
495 //    simdNode - The SIMD intrinsic node.
//
497 void Lowering::LowerSIMD(GenTreeSIMD* simdNode)
// TYP_SIMD32 is not supported on this target.
499 assert(simdNode->gtType != TYP_SIMD32);
501 if (simdNode->TypeGet() == TYP_SIMD12)
503 // GT_SIMD node requiring to produce TYP_SIMD12 in fact
504 // produces a TYP_SIMD16 result
505 simdNode->gtType = TYP_SIMD16;
508 ContainCheckSIMD(simdNode);
510 #endif // FEATURE_SIMD
512 #ifdef FEATURE_HW_INTRINSICS
513 //----------------------------------------------------------------------------------------------
514 // Lowering::LowerHWIntrinsic: Perform containment analysis for a hardware intrinsic node.
//
// Arguments:
517 //    node - The hardware intrinsic node.
//
// Notes:
//    For intrinsics flagged LowerCmpUZero with an unsigned base type, the
//    unsigned compare-with-zero is rewritten into an equivalent form that the
//    target can encode (all-ones / all-zeros vector, EQ_ZERO, or its negation).
//
// NOTE(review): interior lines (braces, the switch header, break statements)
// are elided in this view; comments describe only the visible code.
519 void Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
521 auto intrinsicID = node->gtHWIntrinsicId;
522 auto intrinsicInfo = comp->getHWIntrinsicInfo(node->gtHWIntrinsicId);
524 if ((intrinsicInfo.flags & HWIntrinsicInfo::LowerCmpUZero) && varTypeIsUnsigned(node->gtSIMDBaseType))
// Pick the 64- or 128-bit SetAllVector variant based on the SIMD size.
526 auto setAllVector = node->gtSIMDSize > 8 ? NI_ARM64_SIMD_SetAllVector128 : NI_ARM64_SIMD_SetAllVector64;
528 auto origOp1 = node->gtOp.gtOp1;
// unsigned x >= 0 is always true: produce an all-ones vector.
532 case NI_ARM64_SIMD_GE_ZERO:
534 node->gtHWIntrinsicId = setAllVector;
535 node->gtOp.gtOp1 = comp->gtNewLconNode(~0ULL);
536 BlockRange().InsertBefore(node, node->gtOp.gtOp1);
537 BlockRange().Remove(origOp1);
// unsigned x > 0 is equivalent to !(x == 0): EQ_ZERO followed by BitwiseNot.
539 case NI_ARM64_SIMD_GT_ZERO:
542 comp->gtNewSimdHWIntrinsicNode(node->TypeGet(), node->gtOp.gtOp1, NI_ARM64_SIMD_EQ_ZERO,
543 node->gtSIMDBaseType, node->gtSIMDSize);
544 node->gtHWIntrinsicId = NI_ARM64_SIMD_BitwiseNot;
545 BlockRange().InsertBefore(node, node->gtOp.gtOp1);
// unsigned x <= 0 is equivalent to x == 0.
547 case NI_ARM64_SIMD_LE_ZERO:
549 node->gtHWIntrinsicId = NI_ARM64_SIMD_EQ_ZERO;
// unsigned x < 0 is always false: produce an all-zeros vector.
551 case NI_ARM64_SIMD_LT_ZERO:
553 node->gtHWIntrinsicId = setAllVector;
554 node->gtOp.gtOp1 = comp->gtNewIconNode(0);
555 BlockRange().InsertBefore(node, node->gtOp.gtOp1);
556 BlockRange().Remove(origOp1);
559 assert(!"Unhandled LowerCmpUZero case");
563 ContainCheckHWIntrinsic(node);
565 #endif // FEATURE_HW_INTRINSICS
567 //------------------------------------------------------------------------
568 // Containment analysis
569 //------------------------------------------------------------------------
571 //------------------------------------------------------------------------
572 // ContainCheckCallOperands: Determine whether operands of a call should be contained.
//
// Arguments:
575 //    call - The call node of interest
//
// Notes:
//    Intentionally a no-op on this target (see comment in body).
580 void Lowering::ContainCheckCallOperands(GenTreeCall* call)
582 // There are no contained operands for arm.
585 //------------------------------------------------------------------------
586 // ContainCheckStoreIndir: determine whether the sources of a STOREIND node should be contained.
//
// Arguments:
589 //    node - pointer to the node
//
591 void Lowering::ContainCheckStoreIndir(GenTreeIndir* node)
593 #ifdef _TARGET_ARM64_
594 GenTree* src = node->gtOp.gtOp2;
// ARM64 can store the zero register directly, so a zero source needs no
// register of its own.
595 if (!varTypeIsFloating(src->TypeGet()) && src->IsIntegralConst(0))
597 // an integer zero for 'src' can be contained.
598 MakeSrcContained(node, src);
600 #endif // _TARGET_ARM64_
// Common address-containment analysis for the indirection itself.
601 ContainCheckIndir(node);
604 //------------------------------------------------------------------------
605 // ContainCheckIndir: Determine whether operands of an indir should be contained.
//
// Arguments:
608 //    indirNode - The indirection node of interest
//
// Notes:
611 //    This is called for both store and load indirections.
//
// NOTE(review): interior lines (braces, early returns, #ifdef FEATURE_SIMD /
// _TARGET_ARM_ guards) are elided in this view; comments describe only the
// visible code.
616 void Lowering::ContainCheckIndir(GenTreeIndir* indirNode)
618 // If this is the rhs of a block copy it will be handled when we handle the store.
619 if (indirNode->TypeGet() == TYP_STRUCT)
625 // If indirTree is of TYP_SIMD12, don't mark addr as contained
626 // so that it always get computed to a register. This would
627 // mean codegen side logic doesn't need to handle all possible
628 // addr expressions that could be contained.
630 // TODO-ARM64-CQ: handle other addr mode expressions that could be marked
632 if (indirNode->TypeGet() == TYP_SIMD12)
636 #endif // FEATURE_SIMD
638 GenTree* addr = indirNode->Addr();
639 bool makeContained = true;
// A GT_LEA address can be folded into the load/store addressing mode, subject
// to the floating-point restrictions checked below.
640 if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirNode, addr))
642 GenTreeAddrMode* lea = addr->AsAddrMode();
643 GenTree* base = lea->Base();
644 GenTree* index = lea->Index();
645 int cns = lea->Offset();
648 // ARM floating-point load/store doesn't support a form similar to integer
649 // ldr Rdst, [Rbase + Roffset] with offset in a register. The only supported
650 // form is vldr Rdst, [Rbase + imm] with a more limited constraint on the imm.
651 if (lea->HasIndex() || !emitter::emitIns_valid_imm_for_vldst_offset(cns))
653 if (indirNode->OperGet() == GT_STOREIND)
655 if (varTypeIsFloating(indirNode->AsStoreInd()->Data()))
657 makeContained = false;
660 else if (indirNode->OperGet() == GT_IND)
662 if (varTypeIsFloating(indirNode))
664 makeContained = false;
671 MakeSrcContained(indirNode, addr);
676 //------------------------------------------------------------------------
677 // ContainCheckBinary: Determine whether a binary op's operands should be contained.
//
// Arguments:
680 //    node - the node we care about
//
682 void Lowering::ContainCheckBinary(GenTreeOp* node)
684 // Check and make op2 contained (if it is a containable immediate)
685 CheckImmedAndMakeContained(node, node->gtOp2);
688 //------------------------------------------------------------------------
689 // ContainCheckMul: Determine whether a mul op's operands should be contained.
//
// Arguments:
692 //    node - the node we care about
//
// Notes:
//    Multiplies use the same immediate-containment rules as other binary ops.
694 void Lowering::ContainCheckMul(GenTreeOp* node)
696 ContainCheckBinary(node);
699 //------------------------------------------------------------------------
700 // ContainCheckShiftRotate: Determine whether a mul op's operands should be contained.
//
// Arguments:
703 //    node - the node we care about
//
// NOTE(review): the #ifdef _TARGET_ARM_ opening of the guarded region below
// is elided in this view (only the matching #else/#endif are visible).
705 void Lowering::ContainCheckShiftRotate(GenTreeOp* node)
707 GenTree* shiftBy = node->gtOp2;
// ARM32 long shifts (GT_LSH_HI / GT_RSH_LO) take a GT_LONG source, which is
// always contained.
710 GenTree* source = node->gtOp1;
711 if (node->OperIs(GT_LSH_HI, GT_RSH_LO))
713 assert(source->OperGet() == GT_LONG);
714 MakeSrcContained(node, source);
716 #else // !_TARGET_ARM_
717 assert(node->OperIsShiftOrRotate());
718 #endif // !_TARGET_ARM_
// A constant shift amount can always be encoded in the instruction.
720 if (shiftBy->IsCnsIntOrI())
722 MakeSrcContained(node, shiftBy);
726 //------------------------------------------------------------------------
727 // ContainCheckStoreLoc: determine whether the source of a STORE_LCL* should be contained.
//
// Arguments:
730 //    node - pointer to the node
//
// NOTE(review): interior lines (braces, early returns, the #ifdef
// FEATURE_SIMD / _TARGET_ARM_ openings) are elided in this view.
732 void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc)
734 assert(storeLoc->OperIsLocalStore());
735 GenTree* op1 = storeLoc->gtGetOp1();
// SIMD local store of zero: treat like an InitBlk and contain the zero.
738 if (varTypeIsSIMD(storeLoc))
740 if (op1->IsIntegralConst(0))
742 // For an InitBlk we want op1 to be contained
743 MakeSrcContained(storeLoc, op1);
747 #endif // FEATURE_SIMD
749 // If the source is a containable immediate, make it contained, unless it is
750 // an int-size or larger store of zero to memory, because we can generate smaller code
751 // by zeroing a register and then storing it.
752 if (IsContainableImmed(storeLoc, op1) && (!op1->IsIntegralConst(0) || varTypeIsSmall(storeLoc)))
754 MakeSrcContained(storeLoc, op1);
// ARM32: a GT_LONG source is decomposed elsewhere, so contain it here.
757 else if (op1->OperGet() == GT_LONG)
759 MakeSrcContained(storeLoc, op1);
761 #endif // _TARGET_ARM_
764 //------------------------------------------------------------------------
765 // ContainCheckCast: determine whether the source of a CAST node should be contained.
//
// Arguments:
768 //    node - pointer to the node
//
// NOTE(review): the #ifdef _TARGET_ARM_ opening of the guarded region below
// is elided in this view (only the matching #endif is visible).
770 void Lowering::ContainCheckCast(GenTreeCast* node)
773 GenTree* castOp = node->CastOp();
774 var_types castToType = node->CastToType();
775 var_types srcType = castOp->TypeGet();
// ARM32: a long source is represented as GT_LONG and is always contained,
// since the cast consumes its two halves directly.
777 if (varTypeIsLong(castOp))
779 assert(castOp->OperGet() == GT_LONG);
780 MakeSrcContained(node, castOp);
782 #endif // _TARGET_ARM_
785 //------------------------------------------------------------------------
786 // ContainCheckCompare: determine whether the sources of a compare node should be contained.
//
// Arguments:
789 //    node - pointer to the node
//
791 void Lowering::ContainCheckCompare(GenTreeOp* cmp)
// Contain op2 when it is an immediate encodable in the compare instruction.
793 CheckImmedAndMakeContained(cmp, cmp->gtOp2);
796 //------------------------------------------------------------------------
797 // ContainCheckBoundsChk: determine whether any source of a bounds check node should be contained.
//
// Arguments:
800 //    node - pointer to the node
//
802 void Lowering::ContainCheckBoundsChk(GenTreeBoundsChk* node)
804 assert(node->OperIsBoundsCheck());
// At most one of the two operands can be a contained immediate: try the index
// first, then the array length.
806 if (!CheckImmedAndMakeContained(node, node->gtIndex))
808 CheckImmedAndMakeContained(node, node->gtArrLen);
813 //----------------------------------------------------------------------------------------------
814 // ContainCheckSIMD: Perform containment analysis for a SIMD intrinsic node.
//
// Arguments:
817 //    simdNode - The SIMD intrinsic node.
//
// NOTE(review): interior lines (braces, break statements, the declarations of
// op1/op2, the default case) are elided in this view; comments describe only
// the visible code.
819 void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode)
821 switch (simdNode->gtSIMDIntrinsicID)
// Init from a zero constant: the zero can be contained.
826 case SIMDIntrinsicInit:
827 op1 = simdNode->gtOp.gtOp1;
828 if (op1->IsIntegralConst(0))
830 MakeSrcContained(simdNode, op1);
834 case SIMDIntrinsicInitArray:
835 // We have an array and an index, which may be contained.
836 CheckImmedAndMakeContained(simdNode, simdNode->gtGetOp2());
839 case SIMDIntrinsicOpEquality:
840 case SIMDIntrinsicOpInEquality:
841 // TODO-ARM64-CQ Support containing 0
844 case SIMDIntrinsicGetItem:
846 // This implements get_Item method. The sources are:
847 // - the source SIMD struct
848 // - index (which element to get)
849 // The result is baseType of SIMD struct.
850 op1 = simdNode->gtOp.gtOp1;
851 op2 = simdNode->gtOp.gtOp2;
853 // If the index is a constant, mark it as contained.
854 if (op2->IsCnsIntOrI())
856 MakeSrcContained(simdNode, op2);
// A memory operand source can be folded; if it is an IND, its address must
// then be evaluated into a register, so clear any containment on the address.
859 if (IsContainableMemoryOp(op1))
861 MakeSrcContained(simdNode, op1);
862 if (op1->OperGet() == GT_IND)
864 op1->AsIndir()->Addr()->ClearContained();
874 #endif // FEATURE_SIMD
876 #ifdef FEATURE_HW_INTRINSICS
877 //----------------------------------------------------------------------------------------------
878 // ContainCheckHWIntrinsic: Perform containment analysis for a hardware intrinsic node.
//
// Arguments:
881 //    node - The hardware intrinsic node.
//
// NOTE(review): interior lines (braces, break statements, the default case)
// are elided in this view; comments describe only the visible code.
883 void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
885 NamedIntrinsic intrinsicID = node->gtHWIntrinsicId;
886 GenTree* op1 = node->gtOp.gtOp1;
887 GenTree* op2 = node->gtOp.gtOp2;
// Dispatch on the intrinsic's form; only SimdExtractOp is handled in the
// visible code.
889 switch (comp->getHWIntrinsicInfo(node->gtHWIntrinsicId).form)
891 case HWIntrinsicInfo::SimdExtractOp:
// A constant lane index can be encoded directly in the extract instruction.
892 if (op2->IsCnsIntOrI())
894 MakeSrcContained(node, op2);
902 #endif // FEATURE_HW_INTRINSICS
904 #endif // _TARGET_ARMARCH_
906 #endif // !LEGACY_BACKEND