/// MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's
/// index parameter when calculating addresses.
///
-/// SIGNED_SCALED Addr = Base + ((signed)Index * sizeof(element))
-/// SIGNED_UNSCALED Addr = Base + (signed)Index
-/// UNSIGNED_SCALED Addr = Base + ((unsigned)Index * sizeof(element))
-/// UNSIGNED_UNSCALED Addr = Base + (unsigned)Index
-enum MemIndexType {
- SIGNED_SCALED = 0,
- SIGNED_UNSCALED,
- UNSIGNED_SCALED,
- UNSIGNED_UNSCALED
-};
+/// SIGNED_SCALED Addr = Base + ((signed)Index * Scale)
+/// UNSIGNED_SCALED Addr = Base + ((unsigned)Index * Scale)
+///
+/// NOTE: The value of Scale is typically only known to the node owning the
+/// IndexType, with a Scale of 1 being equivalent to an unscaled index.
+enum MemIndexType { SIGNED_SCALED = 0, UNSIGNED_SCALED };
-static const int LAST_MEM_INDEX_TYPE = UNSIGNED_UNSCALED + 1;
-
-inline bool isIndexTypeScaled(MemIndexType IndexType) {
- return IndexType == SIGNED_SCALED || IndexType == UNSIGNED_SCALED;
-}
+static const int LAST_MEM_INDEX_TYPE = UNSIGNED_SCALED + 1;
inline bool isIndexTypeSigned(MemIndexType IndexType) {
- return IndexType == SIGNED_SCALED || IndexType == SIGNED_UNSCALED;
-}
-
-inline MemIndexType getSignedIndexType(MemIndexType IndexType) {
- return isIndexTypeScaled(IndexType) ? SIGNED_SCALED : SIGNED_UNSCALED;
-}
-
-inline MemIndexType getUnsignedIndexType(MemIndexType IndexType) {
- return isIndexTypeScaled(IndexType) ? UNSIGNED_SCALED : UNSIGNED_UNSCALED;
-}
-
-inline MemIndexType getUnscaledIndexType(MemIndexType IndexType) {
- return isIndexTypeSigned(IndexType) ? SIGNED_UNSCALED : UNSIGNED_UNSCALED;
+ return IndexType == SIGNED_SCALED;
}
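
With the *_UNSCALED variants retired, unscaled addressing is expressed as
Scale == 1. A minimal standalone sketch of the addressing rule the comment
above describes (plain C++, not the LLVM API; computeAddress and its integer
widths are illustrative assumptions):

#include <cstdint>

enum MemIndexType { SIGNED_SCALED = 0, UNSIGNED_SCALED };

// Addr = Base + ((signed|unsigned)Index * Scale); Scale == 1 reproduces
// the behaviour of the removed *_UNSCALED variants.
uint64_t computeAddress(uint64_t Base, uint32_t Index, uint64_t Scale,
                        MemIndexType Ty) {
  int64_t Extended = (Ty == SIGNED_SCALED)
                         ? static_cast<int64_t>(static_cast<int32_t>(Index))
                         : static_cast<int64_t>(Index);
  return Base + static_cast<uint64_t>(Extended) * Scale;
}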
//===--------------------------------------------------------------------===//
ISD::MemIndexType getIndexType() const {
return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
}
- bool isIndexScaled() const { return isIndexTypeScaled(getIndexType()); }
+ bool isIndexScaled() const {
+ return !cast<ConstantSDNode>(getScale())->isOne();
+ }
bool isIndexSigned() const { return isIndexTypeSigned(getIndexType()); }
  // In both nodes, the address is Op1 and the mask is Op2:
ISD::MemIndexType getIndexType() const {
return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
}
- bool isIndexScaled() const { return isIndexTypeScaled(getIndexType()); }
+ bool isIndexScaled() const {
+ return !cast<ConstantSDNode>(getScale())->isOne();
+ }
bool isIndexSigned() const { return isIndexTypeSigned(getIndexType()); }
  // In both nodes, the address is Op1 and the mask is Op2:
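
After this change, whether a gather/scatter is scaled is no longer encoded in
the enum; it is read from the node's Scale operand. A hedged sketch of the new
check, with a plain integer standing in for
cast<ConstantSDNode>(getScale())->getZExtValue():

#include <cstdint>

// ScaleValue stands in for the node's constant Scale operand.
bool isIndexScaled(uint64_t ScaleValue) {
  return ScaleValue != 1; // Scale == 1 is the unscaled encoding.
}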
// combiner can fold the new nodes.
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
- /// Give targets the chance to reduce the number of distinct addresing modes.
- ISD::MemIndexType getCanonicalIndexType(ISD::MemIndexType IndexType,
- EVT MemVT, SDValue Offsets) const;
-
private:
SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
const SDLoc &DL, DAGCombinerInfo &DCI) const;
if (Index.getOpcode() == ISD::ZERO_EXTEND) {
SDValue Op = Index.getOperand(0);
if (TLI.shouldRemoveExtendFromGSIndex(Op.getValueType())) {
- IndexType = ISD::getUnsignedIndexType(IndexType);
+ IndexType = ISD::UNSIGNED_SCALED;
Index = Op;
return true;
}
if (ISD::isIndexTypeSigned(IndexType)) {
- IndexType = ISD::getUnsignedIndexType(IndexType);
+ IndexType = ISD::UNSIGNED_SCALED;
return true;
}
}
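
The signed-to-unsigned flip above is sound because a zero-extended index can
never be negative, so its signed and unsigned interpretations agree. A small
self-contained check of that fact (the concrete bit widths are illustrative):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t Narrow = 0xFFFFFFFFu;                 // all-ones 32-bit index
  uint64_t ZExt = static_cast<uint64_t>(Narrow); // zero extend to 64 bits
  // The extended value is non-negative, so reading it as signed or
  // unsigned yields the same index, and UNSIGNED_SCALED is safe.
  assert(static_cast<int64_t>(ZExt) >= 0);
  assert(static_cast<int64_t>(ZExt) == INT64_C(0xFFFFFFFF));
  return 0;
}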
return SDValue(E, 0);
}
- IndexType = TLI->getCanonicalIndexType(IndexType, MemVT, Ops[4]);
auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
VTs, MemVT, MMO, IndexType, ExtTy);
createOperands(N, Ops);
return SDValue(E, 0);
}
- IndexType = TLI->getCanonicalIndexType(IndexType, MemVT, Ops[4]);
auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
VTs, MemVT, MMO, IndexType, IsTrunc);
createOperands(N, Ops);
if (!UniformBase) {
Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(Ptr);
- IndexType = ISD::SIGNED_UNSCALED;
+ IndexType = ISD::SIGNED_SCALED;
Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
}
if (!UniformBase) {
Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(Ptr);
- IndexType = ISD::SIGNED_UNSCALED;
+ IndexType = ISD::SIGNED_SCALED;
Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
}
if (!UniformBase) {
Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(PtrOperand);
- IndexType = ISD::SIGNED_UNSCALED;
+ IndexType = ISD::SIGNED_SCALED;
Scale =
DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
}
if (!UniformBase) {
Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(PtrOperand);
- IndexType = ISD::SIGNED_UNSCALED;
+ IndexType = ISD::SIGNED_SCALED;
Scale =
DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
}
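
In all four lowerings above, when the pointers share no uniform base the whole
pointer vector becomes the index, with Base = 0 and Scale = 1, so the address
formula degenerates to the pointer itself; SIGNED_SCALED with Scale == 1 now
plays the role of the removed SIGNED_UNSCALED. A minimal numeric illustration
(the values are made up):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t Base = 0, Scale = 1;
  uint64_t PtrLane = 0x1000;              // one lane of the pointer vector
  uint64_t Addr = Base + PtrLane * Scale; // SIGNED_SCALED, Scale == 1
  assert(Addr == PtrLane);                // address is just the pointer
  return 0;
}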
return SDValue();
}
-// Convert redundant addressing modes (e.g. scaling is redundant
-// when accessing bytes).
-ISD::MemIndexType
-TargetLowering::getCanonicalIndexType(ISD::MemIndexType IndexType, EVT MemVT,
- SDValue Offsets) const {
- // Scaling is unimportant for bytes, canonicalize to unscaled.
- if (ISD::isIndexTypeScaled(IndexType) && MemVT.getScalarType() == MVT::i8)
- return ISD::getUnscaledIndexType(IndexType);
-
- return IndexType;
-}
-
SDValue TargetLowering::expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const {
SDValue Op0 = Node->getOperand(0);
SDValue Op1 = Node->getOperand(1);
Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
SDValue Ops[] = {Chain, PassThru, Mask, BasePtr, Index, Scale};
- IndexType = getUnscaledIndexType(IndexType);
return DAG.getMaskedGather(MGT->getVTList(), MemVT, DL, Ops,
MGT->getMemOperand(), IndexType, ExtType);
}
Scale = DAG.getTargetConstant(1, DL, Scale.getValueType());
SDValue Ops[] = {Chain, StoreVal, Mask, BasePtr, Index, Scale};
- IndexType = getUnscaledIndexType(IndexType);
return DAG.getMaskedScatter(MSC->getVTList(), MemVT, DL, Ops,
MSC->getMemOperand(), IndexType,
MSC->isTruncatingStore());
DL, IndexVT, Index);
}
- unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
- if (IsIndexScaled && Scale != 1) {
- // Manually scale the indices by the element size.
+ if (IsIndexScaled) {
+ // Manually scale the indices.
// TODO: Sanitize the scale operand here?
// TODO: For VP nodes, should we use VP_SHL here?
+ unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
+ ScaleOp = DAG.getTargetConstant(1, DL, ScaleOp.getValueType());
}
- ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
+ ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_SCALED;
if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
{VPGN->getChain(), VPGN->getBasePtr(), Index,
- VPGN->getScale(), VPGN->getMask(),
+ ScaleOp, VPGN->getMask(),
VPGN->getVectorLength()},
VPGN->getMemOperand(), NewIndexTy);
if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
{VPSN->getChain(), VPSN->getValue(),
- VPSN->getBasePtr(), Index, VPSN->getScale(),
+ VPSN->getBasePtr(), Index, ScaleOp,
VPSN->getMask(), VPSN->getVectorLength()},
VPSN->getMemOperand(), NewIndexTy);
if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
return DAG.getMaskedGather(
N->getVTList(), MGN->getMemoryVT(), DL,
{MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
- MGN->getBasePtr(), Index, MGN->getScale()},
+ MGN->getBasePtr(), Index, ScaleOp},
MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
const auto *MSN = cast<MaskedScatterSDNode>(N);
return DAG.getMaskedScatter(
N->getVTList(), MSN->getMemoryVT(), DL,
{MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
- Index, MSN->getScale()},
+ Index, ScaleOp},
MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
}
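
The RISC-V lowering above folds the scale into the indices themselves: for a
power-of-two Scale, shifting left by Log2(Scale) equals multiplying by Scale,
after which the node's Scale operand can be reset to 1. A standalone sketch of
that equivalence (log2u is a stand-in for llvm::Log2_32):

#include <cassert>
#include <cstdint>

static unsigned log2u(uint64_t X) { // stand-in for llvm::Log2_32
  unsigned L = 0;
  while (X >>= 1)
    ++L;
  return L;
}

int main() {
  uint64_t Index = 7, Scale = 8; // e.g. gathering i64 elements
  assert((Scale & (Scale - 1)) == 0 && "expected a power-of-two scale");
  // Shifting left by Log2(Scale) is the same as multiplying by Scale,
  // so the scaled node can be rewritten with Scale == 1.
  assert((Index << log2u(Scale)) == Index * Scale);
  return 0;
}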
case RISCVISD::SRA_VL: