For the cover letter of this patch-set, please checkout D146872.
Depends on D146873.
This is the 3rd patch of the patch-set. This patch originates from
D99593.
Note: This patch is a proof-of-concept and will be extended to full
coverage in the future. Currently, the old non-tuple unit-stride
segment load is not removed, and only signed integer unit-strided
segment load of NF=2, EEW=32 is defined here.
When the old intrinsics are replaced, the extra `IsTuple` parameter in
various places will become redundant and be removed.
Authored-by: eop Chen <eop.chen@sifive.com>
Co-Authored-by: Hsiangkai Wang <kai.wang@sifive.com>
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D147731
/// Return the unique reference to a scalable vector type of the specified
/// element type and scalable number of elements.
+ /// For RISC-V, the number of fields is also provided when fetching a
+ /// tuple type.
///
/// \pre \p EltTy must be a built-in type.
- QualType getScalableVectorType(QualType EltTy, unsigned NumElts) const;
+ QualType getScalableVectorType(QualType EltTy, unsigned NumElts,
+ unsigned NumFields = 1) const;
/// Return a WebAssembly externref type.
QualType getWebAssemblyExternrefType() const;
defm : RVVIndexedSegLoad<"vluxseg">;
defm : RVVIndexedSegLoad<"vloxseg">;
}
+
+// Builds tuple-type unit-stride segment load builtins (e.g. vlseg2e32_v_tuple).
+// NOTE(review): proof-of-concept — only signed integer (type "i"), EEW=32,
+// NF=2 loads are generated for now; the foreach ranges below are to be
+// extended for full coverage.
+multiclass RVVUnitStridedSegLoadTuple<string op> {
+ foreach type = ["i"] in {
+ defvar eew = !cond(!eq(type, "i") : "32");
+ foreach nf = [2] in {
+ let Name = op # nf # "e" # eew # "_v_tuple",
+ OverloadedName = op # nf # "e" # eew # "_tuple",
+ IRName = op # nf,
+ MaskedIRName = op # nf # "_mask",
+ NF = nf,
+ ManualCodegen = [{
+ {
+ // Only the default tail-agnostic/mask-agnostic (TAMA) policy is
+ // handled so far; anything else is a caller bug.
+ assert(((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && (PolicyAttrs & RVV_VTA))) &&
+ "FIXME: Only handling default policy (TAMA) for now");
+
+ // The result is an IR struct of NF vectors; all members share the same
+ // type, so the first element type parameterizes the intrinsic.
+ llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
+ IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ SmallVector<llvm::Value*, 12> Operands;
+
+ // Passthru (merge) operands are poison since only TAMA is supported.
+ Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+
+ unsigned Offset = IsMasked ? 1 : 0;
+ Operands.push_back(Ops[Offset]); // Ptr
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // Mask
+ Operands.push_back(Ops[Offset + 1]); // VL
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs)); // Policy
+
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+
+ llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+ // Return the aggregate directly, or store it into the provided slot.
+ if (ReturnValue.isNull())
+ return LoadValue;
+ else
+ return Builder.CreateStore(LoadValue, ReturnValue.getValue());
+ }
+ }] in {
+ // Prototype: return is an NF-element tuple of vectors ("(Tuple:nf)v");
+ // operands are a const element pointer ("PCe") plus the implicit VL.
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<"v", T # "vPCe", type>;
+ }
+ }
+ }
+}
+// TODO: Extend for policy — tuple segment loads currently support only the
+// default TAMA policy, so both schemes stay NonePolicy for now.
+let UnMaskedPolicyScheme = NonePolicy,
+ MaskedPolicyScheme = NonePolicy,
+ IsTuple = true in {
+defm : RVVUnitStridedSegLoadTuple<"vlseg">;
+}
+
let UnMaskedPolicyScheme = NonePolicy,
MaskedPolicyScheme = NonePolicy in {
defm : RVVUnitStridedSegStore<"vsseg">;
// Number of fields for Load/Store Segment instructions.
int NF = 1;
+
+ // Set to true if the builtin is associated with tuple types.
+ bit IsTuple = false;
}
// This is the code emitted in the header.
SFixedLog2LMUL1,
SFixedLog2LMUL2,
SFixedLog2LMUL3,
+ Tuple2,
};
// Similar to basic type but used to describe what's kind of type related to
unsigned ElementBitwidth = 0;
VScaleVal Scale = 0;
bool Valid;
+ bool IsTuple = false;
+ unsigned NF = 0;
std::string BuiltinStr;
std::string ClangBuiltinStr;
}
bool isConstant() const { return IsConstant; }
bool isPointer() const { return IsPointer; }
+ bool isTuple() const { return IsTuple; }
unsigned getElementBitwidth() const { return ElementBitwidth; }
ScalarTypeKind getScalarType() const { return ScalarType; }
VScaleVal getScale() const { return Scale; }
+ // Returns the number of fields of a tuple type. Legal segment counts are
+ // 2 through 8 (vlseg2..vlseg8), matching the validity check in verifyType
+ // (NF == 1 || NF > 8 is rejected); the previous `NF < 8` bound wrongly
+ // excluded the legal NF == 8 case.
+ unsigned getNF() const {
+ assert(NF >= 2 && NF <= 8 && "Only legal NF should be fetched");
+ return NF;
+ }
private:
// Verify RVV vector type and set Valid.
std::vector<int64_t> IntrinsicTypes;
unsigned NF = 1;
Policy PolicyAttrs;
+ bool IsTuple = false;
public:
RVVIntrinsic(llvm::StringRef Name, llvm::StringRef Suffix,
const RVVTypes &Types,
const std::vector<int64_t> &IntrinsicTypes,
const std::vector<llvm::StringRef> &RequiredFeatures,
- unsigned NF, Policy PolicyAttrs);
+ unsigned NF, Policy PolicyAttrs, bool IsTuple);
~RVVIntrinsic() = default;
RVVTypePtr getOutputType() const { return OutputType; }
computeBuiltinTypes(llvm::ArrayRef<PrototypeDescriptor> Prototype,
bool IsMasked, bool HasMaskedOffOperand, bool HasVL,
unsigned NF, PolicyScheme DefaultScheme,
- Policy PolicyAttrs);
+ Policy PolicyAttrs, bool IsTuple);
static llvm::SmallVector<Policy> getSupportedUnMaskedPolicies();
static llvm::SmallVector<Policy>
bool HasMaskedOffOperand : 1;
bool HasTailPolicy : 1;
bool HasMaskPolicy : 1;
+ bool IsTuple : 1;
uint8_t UnMaskedPolicyScheme : 2;
uint8_t MaskedPolicyScheme : 2;
};
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
-QualType ASTContext::getScalableVectorType(QualType EltTy,
- unsigned NumElts) const {
+QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
+ unsigned NumFields) const {
if (Target->hasAArch64SVETypes()) {
uint64_t EltTySize = getTypeSize(EltTy);
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
uint64_t EltTySize = getTypeSize(EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
IsFP) \
- if (!EltTy->isBooleanType() && \
- ((EltTy->hasIntegerRepresentation() && \
- EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
- (EltTy->hasFloatingRepresentation() && IsFP)) && \
- EltTySize == ElBits && NumElts == NumEls) \
- return SingletonId;
+ if (!EltTy->isBooleanType() && \
+ ((EltTy->hasIntegerRepresentation() && \
+ EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
+ (EltTy->hasFloatingRepresentation() && IsFP)) && \
+ EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \
+ return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
- if (EltTy->isBooleanType() && NumElts == NumEls) \
- return SingletonId;
+ if (EltTy->isBooleanType() && NumElts == NumEls) \
+ return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
}
return QualType();
case Invalid:
llvm_unreachable("Unhandled type.");
}
- if (Type->isVector())
- QT = Context.getScalableVectorType(QT, *Type->getScale());
+ if (Type->isVector()) {
+ if (Type->isTuple())
+ QT = Context.getScalableVectorType(QT, *Type->getScale(), Type->getNF());
+ else
+ QT = Context.getScalableVectorType(QT, *Type->getScale());
+ }
if (Type->isConstant())
QT = Context.getConstType(QT);
const Policy DefaultPolicy;
llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
- RVVIntrinsic::computeBuiltinTypes(BasicProtoSeq, /*IsMasked=*/false,
- /*HasMaskedOffOperand=*/false,
- Record.HasVL, Record.NF,
- UnMaskedPolicyScheme, DefaultPolicy);
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/false,
+ /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
+ UnMaskedPolicyScheme, DefaultPolicy, Record.IsTuple);
llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq =
RVVIntrinsic::computeBuiltinTypes(
BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
- Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy);
+ Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy,
+ Record.IsTuple);
bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
RVVIntrinsic::computeBuiltinTypes(
BasicProtoSeq, /*IsMasked=*/false,
/*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
- UnMaskedPolicyScheme, P);
+ UnMaskedPolicyScheme, P, Record.IsTuple);
std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
BaseType, Log2LMUL, Record.NF, PolicyPrototype);
InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
for (auto P : SupportedMaskedPolicies) {
llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
RVVIntrinsic::computeBuiltinTypes(
- BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
- Record.HasVL, Record.NF, MaskedPolicyScheme, P);
+ BasicProtoSeq, /*IsMasked=*/true,
+ Record.HasMaskedOffOperand, Record.HasVL, Record.NF,
+ MaskedPolicyScheme, P, Record.IsTuple);
std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
BaseType, Log2LMUL, Record.NF, PolicyPrototype);
InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
return false;
if (isFloat() && ElementBitwidth == 8)
return false;
+ if (IsTuple && (NF == 1 || NF > 8))
+ return false;
unsigned V = *Scale;
switch (ElementBitwidth) {
case 1:
// vector values.
if (IsPointer)
BuiltinStr += "*";
+
+ if (IsTuple)
+ BuiltinStr = "T" + utostr(NF) + BuiltinStr;
}
void RVVType::initClangBuiltinStr() {
default:
llvm_unreachable("ScalarTypeKind is invalid");
}
- ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() + "_t";
+ ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() +
+ (IsTuple ? "x" + utostr(NF) : "") + "_t";
}
void RVVType::initTypeStr() {
auto getTypeString = [&](StringRef TypeStr) {
if (isScalar())
return Twine(TypeStr + Twine(ElementBitwidth) + "_t").str();
- return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() + "_t")
+ return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() +
+ (IsTuple ? "x" + utostr(NF) : "") + "_t")
.str();
};
return std::nullopt;
}
+ } else if (ComplexTT.first == "Tuple") {
+ unsigned NF = 0;
+ if (ComplexTT.second.getAsInteger(10, NF)) {
+ llvm_unreachable("Invalid NF value!");
+ return std::nullopt;
+ }
+ switch (NF) {
+ case 2:
+ VTM = VectorTypeModifier::Tuple2;
+ break;
+ default:
+ llvm_unreachable("Unhandled NF");
+ }
} else {
llvm_unreachable("Illegal complex type transformers!");
}
case VectorTypeModifier::SFixedLog2LMUL3:
applyFixedLog2LMUL(3, FixedLMULType::SmallerThan);
break;
+ case VectorTypeModifier::Tuple2: {
+ IsTuple = true;
+ NF = 2;
+ break;
+ }
case VectorTypeModifier::NoModifier:
break;
}
const RVVTypes &OutInTypes,
const std::vector<int64_t> &NewIntrinsicTypes,
const std::vector<StringRef> &RequiredFeatures,
- unsigned NF, Policy NewPolicyAttrs)
+ unsigned NF, Policy NewPolicyAttrs, bool IsTuple)
: IRName(IRName), IsMasked(IsMasked),
HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
- ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs) {
+ ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs),
+ IsTuple(IsTuple) {
// Init BuiltinName, Name and OverloadedName
BuiltinName = NewName.str();
llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
llvm::ArrayRef<PrototypeDescriptor> Prototype, bool IsMasked,
bool HasMaskedOffOperand, bool HasVL, unsigned NF,
- PolicyScheme DefaultScheme, Policy PolicyAttrs) {
+ PolicyScheme DefaultScheme, Policy PolicyAttrs, bool IsTuple) {
SmallVector<PrototypeDescriptor> NewPrototype(Prototype.begin(),
Prototype.end());
bool HasPassthruOp = DefaultScheme == PolicyScheme::HasPassthruOperand;
// to
// (void, op0 address, op1 address, ..., mask, maskedoff0, maskedoff1,
// ...)
- NewPrototype.insert(NewPrototype.begin() + NF + 1,
- PrototypeDescriptor::Mask);
+ if (IsTuple)
+ NewPrototype.insert(NewPrototype.begin() + 1,
+ PrototypeDescriptor::Mask);
+ else
+ NewPrototype.insert(NewPrototype.begin() + NF + 1,
+ PrototypeDescriptor::Mask);
} else {
// If IsMasked, insert PrototypeDescriptor:Mask as first input operand.
NewPrototype.insert(NewPrototype.begin() + 1, PrototypeDescriptor::Mask);
// If HasVL, append PrototypeDescriptor:VL to last operand
if (HasVL)
NewPrototype.push_back(PrototypeDescriptor::VL);
+ if (IsTuple)
+ NewPrototype[0].VTM = static_cast<uint8_t>(VectorTypeModifier::Tuple2);
return NewPrototype;
}
OS << (int)Record.HasMaskedOffOperand << ",";
OS << (int)Record.HasTailPolicy << ",";
OS << (int)Record.HasMaskPolicy << ",";
+ OS << (int)Record.IsTuple << ",";
OS << (int)Record.UnMaskedPolicyScheme << ",";
OS << (int)Record.MaskedPolicyScheme << ",";
OS << "},\n";
--- /dev/null
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+#include <riscv_vector.h>
+
+// Unmasked tuple segment load: NF=2 passthru operands are poison (TAMA only).
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlseg2e32_v_tuple_i32m1
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg2.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
+//
+vint32m1x2_t test_vlseg2e32_v_tuple_i32m1(const int32_t *base, size_t vl) {
+ return __riscv_vlseg2e32_v_tuple_i32m1(base, vl);
+}
+
+// Masked tuple segment load: the mask leads the operand list and the trailing
+// policy operand is 3 (TAMA).
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlseg2e32_v_tuple_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg2.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
+//
+vint32m1x2_t test_vlseg2e32_v_tuple_i32m1_m(vbool32_t mask, const int32_t *base, size_t vl) {
+ return __riscv_vlseg2e32_v_tuple_i32m1_m(mask, base, vl);
+}
bool HasMaskedOffOperand :1;
bool HasTailPolicy : 1;
bool HasMaskPolicy : 1;
+ bool IsTuple : 1;
uint8_t UnMaskedPolicyScheme : 2;
uint8_t MaskedPolicyScheme : 2;
};
TypeModifier::UnsignedInteger));
printType(*UT);
}
+ // FIXME: Expand more type declaration
+ if (I == 'i' && Log2LMUL == 0) { // vint32m1x2_t
+ auto TupleT = TypeCache.computeType(
+ BT, Log2LMUL,
+ PrototypeDescriptor(BaseTypeModifier::Vector,
+ VectorTypeModifier::Tuple2,
+ TypeModifier::SignedInteger));
+ if (TupleT)
+ printType(*TupleT);
+ }
}
}
StringRef IRName = R->getValueAsString("IRName");
StringRef MaskedIRName = R->getValueAsString("MaskedIRName");
unsigned NF = R->getValueAsInt("NF");
+ bool IsTuple = R->getValueAsBit("IsTuple");
const Policy DefaultPolicy;
SmallVector<Policy> SupportedUnMaskedPolicies =
auto Prototype = RVVIntrinsic::computeBuiltinTypes(
BasicPrototype, /*IsMasked=*/false,
/*HasMaskedOffOperand=*/false, HasVL, NF, UnMaskedPolicyScheme,
- DefaultPolicy);
+ DefaultPolicy, IsTuple);
auto MaskedPrototype = RVVIntrinsic::computeBuiltinTypes(
BasicPrototype, /*IsMasked=*/true, HasMaskedOffOperand, HasVL, NF,
- MaskedPolicyScheme, DefaultPolicy);
+ MaskedPolicyScheme, DefaultPolicy, IsTuple);
// Create Intrinsics for each type and LMUL.
for (char I : TypeRange) {
/*IsMasked=*/false, /*HasMaskedOffOperand=*/false, HasVL,
UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
ManualCodegen, *Types, IntrinsicTypes, RequiredFeatures, NF,
- DefaultPolicy));
+ DefaultPolicy, IsTuple));
if (UnMaskedPolicyScheme != PolicyScheme::SchemeNone)
for (auto P : SupportedUnMaskedPolicies) {
SmallVector<PrototypeDescriptor> PolicyPrototype =
RVVIntrinsic::computeBuiltinTypes(
BasicPrototype, /*IsMasked=*/false,
/*HasMaskedOffOperand=*/false, HasVL, NF,
- UnMaskedPolicyScheme, P);
+ UnMaskedPolicyScheme, P, IsTuple);
std::optional<RVVTypes> PolicyTypes =
TypeCache.computeTypes(BT, Log2LMUL, NF, PolicyPrototype);
Out.push_back(std::make_unique<RVVIntrinsic>(
/*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures,
- NF, P));
+ NF, P, IsTuple));
}
if (!HasMasked)
continue;
Name, SuffixStr, OverloadedName, OverloadedSuffixStr, MaskedIRName,
/*IsMasked=*/true, HasMaskedOffOperand, HasVL, MaskedPolicyScheme,
SupportOverloading, HasBuiltinAlias, ManualCodegen, *MaskTypes,
- IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy));
+ IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy, IsTuple));
if (MaskedPolicyScheme == PolicyScheme::SchemeNone)
continue;
for (auto P : SupportedMaskedPolicies) {
SmallVector<PrototypeDescriptor> PolicyPrototype =
RVVIntrinsic::computeBuiltinTypes(
BasicPrototype, /*IsMasked=*/true, HasMaskedOffOperand, HasVL,
- NF, MaskedPolicyScheme, P);
+ NF, MaskedPolicyScheme, P, IsTuple);
std::optional<RVVTypes> PolicyTypes =
TypeCache.computeTypes(BT, Log2LMUL, NF, PolicyPrototype);
Out.push_back(std::make_unique<RVVIntrinsic>(
MaskedIRName, /*IsMasked=*/true, HasMaskedOffOperand, HasVL,
MaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures, NF,
- P));
+ P, IsTuple));
}
} // End for Log2LMULList
} // End for TypeRange
SR.Prototype = std::move(BasicPrototype);
SR.Suffix = parsePrototypes(SuffixProto);
SR.OverloadedSuffix = parsePrototypes(OverloadedSuffixProto);
+ SR.IsTuple = IsTuple;
SemaRecords->push_back(SR);
}
R.HasMaskPolicy = SR.HasMaskPolicy;
R.UnMaskedPolicyScheme = SR.UnMaskedPolicyScheme;
R.MaskedPolicyScheme = SR.MaskedPolicyScheme;
+ R.IsTuple = SR.IsTuple;
assert(R.PrototypeIndex !=
static_cast<uint16_t>(SemaSignatureTable::INVALID_INDEX));