/// may cause data movement and invalidate the underlying memory address.
TensorExp &exp(unsigned e) { return tensorExps[e]; }
LatPoint &lat(unsigned l) { return latPoints[l]; }
- SmallVector<unsigned, 16> &set(unsigned s) { return latSets[s]; }
+ SmallVector<unsigned> &set(unsigned s) { return latSets[s]; }
#ifndef NDEBUG
/// Print methods (for debugging).
std::vector<std::vector<DimLevelType>> dimTypes;
// Map that converts pair<tensor id, loop id> to the corresponding dimension.
std::vector<std::vector<Optional<unsigned>>> loopIdxToDim;
- llvm::SmallVector<TensorExp, 32> tensorExps;
- llvm::SmallVector<LatPoint, 16> latPoints;
- llvm::SmallVector<SmallVector<unsigned, 16>, 8> latSets;
+ llvm::SmallVector<TensorExp> tensorExps;
+ llvm::SmallVector<LatPoint> latPoints;
+ llvm::SmallVector<SmallVector<unsigned>> latSets;
};
} // namespace sparse_tensor
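Note: these members now rely on SmallVector's default inline element count, which LLVM derives from sizeof(T) (targeting an overall object size of about 64 bytes) rather than a hand-picked constant. A standalone sketch of that behavior, not part of the patch:

#include "llvm/ADT/SmallVector.h"

// With no explicit count, the inline capacity is chosen from sizeof(T) so the
// whole SmallVector object stays near 64 bytes.
llvm::SmallVector<unsigned> ids;    // small elements: several inline slots
struct Big { char payload[128]; };
llvm::SmallVector<Big> bigs;        // large elements: typically a single inline slot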
if (failed(parser.parseGreater()))
return {};
// Process the data from the parsed dictionary value into struct-like data.
- SmallVector<DimLevelType, 4> dlt;
+ SmallVector<DimLevelType> dlt;
AffineMap dimOrd = {};
AffineMap higherOrd = {};
unsigned ptr = 0;
auto rtp = tensor.getType().cast<RankedTensorType>();
int64_t rank = rtp.getRank();
- SmallVector<Type, 4> blockArgTypes;
+ SmallVector<Type> blockArgTypes;
// Starts with n indices.
std::fill_n(std::back_inserter(blockArgTypes), rank, builder.getIndexType());
// Followed by one value.
// Followed by reduction variable.
blockArgTypes.append(initArgs.getTypes().begin(), initArgs.getTypes().end());
- SmallVector<Location, 4> blockArgLocs;
+ SmallVector<Location> blockArgLocs;
std::fill_n(std::back_inserter(blockArgLocs), blockArgTypes.size(),
tensor.getLoc());
ArrayRef<size_t> dims, bool needsUniv, MutableArrayRef<Value> reduc,
ArrayRef<size_t> extraTids, ArrayRef<size_t> extraDims) {
assert(tids.size() == dims.size());
- SmallVector<Type, 4> types;
- SmallVector<Value, 4> operands;
+ SmallVector<Type> types;
+ SmallVector<Value> operands;
// Construct the while-loop with a parameter for each index.
Type indexType = builder.getIndexType();
for (auto [tid, dim] : llvm::zip(tids, dims)) {
// instructions during code generation. Moreover, performing the induction
// after the if-statements more closely resembles code generated by TACO.
unsigned o = 0;
- SmallVector<Value, 4> operands;
+ SmallVector<Value> operands;
Value one = constantIndex(builder, loc, 1);
for (auto [tid, dim] : llvm::zip(tids, dims)) {
if (isCompressedDLT(dimTypes[tid][dim]) ||
// earlier stage (instead of silently using a wrong value).
LoopLevelInfo &loopInfo = loopStack.back();
assert(loopInfo.tids.size() == loopInfo.dims.size());
- SmallVector<Value, 2> red;
+ SmallVector<Value> red;
if (llvm::isa<scf::WhileOp>(loopInfo.loop)) {
exitCoIterationLoop(rewriter, loc, reduc);
} else {
}
void mlir::sparse_tensor::genReshapeDstShape(
- Location loc, PatternRewriter &rewriter, SmallVector<Value, 4> &dstShape,
+ Location loc, PatternRewriter &rewriter, SmallVectorImpl<Value> &dstShape,
ArrayRef<Value> srcShape, ArrayRef<int64_t> staticDstShape,
ArrayRef<ReassociationIndices> reassociation) {
// Collapse shape.
void mlir::sparse_tensor::genDenseTensorOrSparseConstantIterLoop(
OpBuilder &builder, Location loc, Value src, unsigned rank,
function_ref<void(OpBuilder &, Location, Value, ValueRange)> bodyBuilder) {
- SmallVector<Value, 4> indicesArray;
+ SmallVector<Value> indicesArray;
SmallVector<Value> lo;
SmallVector<Value> hi;
SmallVector<Value> st;
}
void mlir::sparse_tensor::sizesFromSrc(OpBuilder &builder,
- SmallVector<Value, 4> &sizes,
+ SmallVectorImpl<Value> &sizes,
Location loc, Value src) {
unsigned rank = src.getType().cast<ShapedType>().getRank();
for (unsigned i = 0; i < rank; i++)
/// used when operands have dynamic shape. The shape of the destination is
/// stored into dstShape.
void genReshapeDstShape(Location loc, PatternRewriter &rewriter,
- SmallVector<Value, 4> &dstShape,
+ SmallVectorImpl<Value> &dstShape,
ArrayRef<Value> srcShape,
ArrayRef<int64_t> staticDstShape,
ArrayRef<ReassociationIndices> reassociation);
function_ref<void(OpBuilder &, Location, Value, ValueRange)> bodyBuilder);
/// Populates given sizes array from dense tensor or sparse tensor constant.
-void sizesFromSrc(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+void sizesFromSrc(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
Location loc, Value src);
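Note: the declarations above also change the out-parameters from SmallVector<Value, 4> & to SmallVectorImpl<Value> &, so the callee no longer constrains the caller's inline size. A minimal, hypothetical sketch of the pattern (appendZeros is an illustrative name, not from the patch):

#include "llvm/ADT/SmallVector.h"

// Taking SmallVectorImpl<T> & keeps the callee independent of the caller's
// inline capacity.
static void appendZeros(llvm::SmallVectorImpl<int> &out, unsigned n) {
  for (unsigned i = 0; i < n; i++)
    out.push_back(0);
}

void demo() {
  llvm::SmallVector<int> a;    // default inline size
  llvm::SmallVector<int, 8> b; // explicit inline size
  appendZeros(a, 4);           // both bind to SmallVectorImpl<int> &
  appendZeros(b, 4);
}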
/// Scans to top of generated loop.
: tids(tids), dims(dims), loop(loop), iv(iv) {}
// TODO: maybe use a vector<pair> for tid and dim?
// The set of tensors that the loop is operating on
- const llvm::SmallVector<size_t, 4> tids;
+ const llvm::SmallVector<size_t> tids;
// The corresponding dims for the tensors
- const llvm::SmallVector<size_t, 4> dims;
+ const llvm::SmallVector<size_t> dims;
const Operation *loop; // the loop operation
const Value iv; // the induction variable for the loop
};
Location loc = func.getLoc();
ValueRange args = entryBlock->getArguments();
Value p = args[hiIdx];
- SmallVector<Type, 2> types(2, p.getType());
+ SmallVector<Type, 2> types(2, p.getType()); // exactly two types
scf::WhileOp whileOp = builder.create<scf::WhileOp>(
loc, types, SmallVector<Value, 2>{args[loIdx], args[hiIdx]});
Value midp1 = builder.create<arith::AddIOp>(loc, mid, c1);
// Compare xs[p] < xs[mid].
- SmallVector<Value, 6> compareOperands{p, mid};
+ SmallVector<Value> compareOperands{p, mid};
uint64_t numXBuffers = isCoo ? 1 : nx;
compareOperands.append(args.begin() + xStartIdx,
args.begin() + xStartIdx + numXBuffers);
Block *before =
builder.createBlock(&whileOp.getBefore(), {}, {i.getType()}, {loc});
builder.setInsertionPointToEnd(before);
- SmallVector<Value, 6> compareOperands;
+ SmallVector<Value> compareOperands;
if (step > 0) {
compareOperands.push_back(before->getArgument(0));
compareOperands.push_back(p);
Value i = lo;
Value j = builder.create<arith::SubIOp>(loc, hi, c1);
- SmallVector<Value, 4> operands{i, j, p};
- SmallVector<Type, 4> types{i.getType(), j.getType(), p.getType()};
+ SmallVector<Value> operands{i, j, p};
+ SmallVector<Type> types{i.getType(), j.getType(), p.getType()};
scf::WhileOp whileOp = builder.create<scf::WhileOp>(loc, types, operands);
// The before-region of the WhileOp.
cond = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult, i, j);
scf::IfOp ifOp = builder.create<scf::IfOp>(loc, types, cond, /*else=*/true);
builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
- SmallVector<Value, 6> swapOperands{i, j};
+ SmallVector<Value> swapOperands{i, j};
swapOperands.append(args.begin() + xStartIdx, args.end());
createSwap(builder, loc, swapOperands, nx, ny, isCoo);
// If the pivot is moved, update p with the new pivot.
auto p = builder.create<func::CallOp>(
loc, partitionFunc, TypeRange{IndexType::get(context)}, ValueRange(args));
- SmallVector<Value, 6> lowOperands{lo, p.getResult(0)};
+ SmallVector<Value> lowOperands{lo, p.getResult(0)};
lowOperands.append(args.begin() + xStartIdx, args.end());
builder.create<func::CallOp>(loc, func, lowOperands);
- SmallVector<Value, 6> highOperands{
+ SmallVector<Value> highOperands{
builder.create<arith::AddIOp>(loc, p.getResult(0),
constantIndex(builder, loc, 1)),
hi};
Value i = forOpI.getInductionVar();
// Binary search to find the insertion point p.
- SmallVector<Value, 6> operands{lo, i};
+ SmallVector<Value> operands{lo, i};
operands.append(args.begin() + xStartIdx, args.end());
FlatSymbolRefAttr searchFunc = getMangledSortHelperFunc(
builder, func, {IndexType::get(context)}, kBinarySearchFuncNamePrefix, nx,
// Move the value at data[i] to a temporary location.
operands[0] = operands[1] = i;
- SmallVector<Value, 6> d;
+ SmallVector<Value> d;
forEachIJPairInAllBuffers(
builder, loc, operands, nx, ny, isCoo,
[&](uint64_t unused, Value i, Value unused2, Value buffer) {
uint64_t ny, bool isCoo,
PatternRewriter &rewriter) {
Location loc = op.getLoc();
- SmallVector<Value, 6> operands{constantIndex(rewriter, loc, 0), op.getN()};
+ SmallVector<Value> operands{constantIndex(rewriter, loc, 0), op.getN()};
// Convert `values` to have dynamic shape and append them to `operands`.
for (Value v : xys) {
LogicalResult matchAndRewrite(SortOp op,
PatternRewriter &rewriter) const override {
- SmallVector<Value, 6> xys(op.getXs());
+ SmallVector<Value> xys(op.getXs());
xys.append(op.getYs().begin(), op.getYs().end());
return matchAndRewriteSortOp(op, xys, op.getXs().size(), /*ny=*/0,
/*isCoo=*/false, rewriter);
LogicalResult matchAndRewrite(SortCooOp op,
PatternRewriter &rewriter) const override {
- SmallVector<Value, 6> xys;
+ SmallVector<Value> xys;
xys.push_back(op.getXy());
xys.append(op.getYs().begin(), op.getYs().end());
uint64_t nx = 1;
unsigned rank = shape.size();
Value heuristic = constantIndex(builder, loc, 16);
// Build original sizes.
- SmallVector<Value, 8> sizes;
+ SmallVector<Value> sizes;
for (unsigned r = 0, o = 0; r < rank; r++) {
if (ShapedType::isDynamic(shape[r]))
sizes.push_back(dynSizes[o++]);
SmallVectorImpl<Value> &indices, Value value,
Value pos, unsigned field, unsigned d) {
unsigned rank = rtp.getShape().size();
- SmallVector<Type, 4> types;
+ SmallVector<Type> types;
Type indexType = builder.getIndexType();
Type boolType = builder.getIntegerType(1);
Value one = constantIndex(builder, loc, 1);
Value hi = genLoad(builder, loc, fields[memSizesIdx], mz);
Value zero = constantIndex(builder, loc, 0);
Value one = constantIndex(builder, loc, 1);
- SmallVector<Value, 1> inits;
+ SmallVector<Value, 1> inits; // exactly one value
inits.push_back(genLoad(builder, loc, fields[field], zero));
scf::ForOp loop = createFor(builder, loc, hi, inits, one);
Value i = loop.getInductionVar();
LogicalResult
matchAndRewrite(func::ReturnOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
- SmallVector<Value, 8> flattened;
+ SmallVector<Value> flattened;
flattenOperands(adaptor.getOperands(), flattened);
// Create a return with the flattened value extracted from sparse tensors.
rewriter.replaceOpWithNewOp<func::ReturnOp>(op, flattened);
// ==>
// memref..., f, memref = call @foo(...) replace with
// cast(memref...)->sparse_tensor, f, cast(memref...)->sparse_tensor
- SmallVector<Type, 8> finalRetTy;
+ SmallVector<Type> finalRetTy;
if (failed(typeConverter->convertTypes(op.getResultTypes(), finalRetTy)))
return failure();
// (1) Generates a new call with flattened return values.
- SmallVector<Value, 8> flattened;
+ SmallVector<Value> flattened;
flattenOperands(adaptor.getOperands(), flattened);
auto newCall = rewriter.create<func::CallOp>(loc, op.getCallee(),
finalRetTy, flattened);
// (2) Create cast operation for sparse tensor returns.
- SmallVector<Value, 4> castedRet;
+ SmallVector<Value> castedRet;
// Tracks the offset of the current return value (of the original call)
// relative to the new call (after sparse tensor flattening).
unsigned retOffset = 0;
// Temporary buffer to hold the flattened list of types for
// a sparse tensor.
- SmallVector<Type, 8> sparseFlat;
+ SmallVector<Type> sparseFlat;
for (auto ret : op.getResults()) {
assert(retOffset < newCall.getNumResults());
auto retType = ret.getType();
// Construct allocation for each field.
Location loc = op.getLoc();
- SmallVector<Value, 8> fields;
+ SmallVector<Value> fields;
createAllocFields(rewriter, loc, resType, adaptor.getOperands(),
enableBufferInitialization, fields);
// Replace operation with resulting memrefs.
op.getTensor().getType().cast<RankedTensorType>();
auto tuple = getTuple(adaptor.getTensor());
// Prepare fields.
- SmallVector<Value, 8> fields(tuple.getInputs());
+ SmallVector<Value> fields(tuple.getInputs());
// Generate optional insertion finalization code.
if (op.getHasInserts())
genEndInsert(rewriter, op.getLoc(), srcType, fields);
Value added = adaptor.getAdded();
Value count = adaptor.getCount();
// Prepare fields and indices.
- SmallVector<Value, 8> fields(tuple.getInputs());
- SmallVector<Value, 8> indices(adaptor.getIndices());
+ SmallVector<Value> fields(tuple.getInputs());
+ SmallVector<Value> indices(adaptor.getIndices());
// If the innermost dimension is ordered, we need to sort the indices
// in the "added" array prior to applying the compression.
unsigned rank = dstType.getShape().size();
op.getTensor().getType().cast<RankedTensorType>();
auto tuple = getTuple(adaptor.getTensor());
// Prepare fields and indices.
- SmallVector<Value, 8> fields(tuple.getInputs());
- SmallVector<Value, 8> indices(adaptor.getIndices());
+ SmallVector<Value> fields(tuple.getInputs());
+ SmallVector<Value> indices(adaptor.getIndices());
// Generate insertion.
Value value = adaptor.getValue();
genInsert(rewriter, op->getLoc(), dstType, fields, indices, value);
uint64_t lvl) {
// Generate the call.
StringRef name = "sparseLvlSize";
- SmallVector<Value, 2> params{
+ SmallVector<Value, 2> params{ // exactly two values
src, constantIndex(builder, loc, toStoredDim(enc, lvl))};
Type iTp = builder.getIndexType();
return createFuncCall(builder, loc, name, iTp, params, EmitCInterface::Off)
/// Populates given sizes array from type (for static sizes) and from
/// an already-converted opaque pointer source (for dynamic sizes).
-static void sizesFromPtr(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+static void sizesFromPtr(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
Location loc, SparseTensorEncodingAttr &enc,
ShapedType stp, Value src) {
for (unsigned i = 0, rank = stp.getRank(); i < rank; i++)
}
/// Populates given sizes array from type.
-static void sizesFromType(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+static void sizesFromType(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
Location loc, ShapedType stp) {
auto shape = stp.getShape();
for (unsigned i = 0, rank = stp.getRank(); i < rank; i++) {
/// sizes) and from an already-converted opaque pointer source (for dynamic
/// sizes).
static void concatSizesFromInputs(OpBuilder &builder,
- SmallVector<Value, 4> &sizes, Location loc,
+ SmallVectorImpl<Value> &sizes, Location loc,
ShapedType dstTp, ValueRange srcs,
unsigned dim) {
auto dstShape = dstTp.getShape();
const unsigned lvlRank = enc.getDimLevelType().size();
const unsigned dimRank = stp.getRank();
// Sparsity annotations.
- SmallVector<Value, 4> lvlTypes;
+ SmallVector<Value> lvlTypes;
for (auto dlt : enc.getDimLevelType())
lvlTypes.push_back(constantDimLevelTypeEncoding(builder, loc, dlt));
assert(lvlTypes.size() == lvlRank && "Level-rank mismatch");
// For now however, since we're still assuming permutations, we will
// initialize this parameter alongside the `dim2lvl` and `lvl2dim`
// parameters below. We preinitialize `lvlSizes` for code symmetry.
- SmallVector<Value, 4> lvlSizes(lvlRank);
+ SmallVector<Value> lvlSizes(lvlRank);
// The dimension-to-level mapping and its inverse. We must preinitialize
// `dim2lvl` so that the true branch below can perform random-access
// `operator[]` assignment. We preinitialize `lvl2dim` for code symmetry.
- SmallVector<Value, 4> dim2lvl(dimRank);
- SmallVector<Value, 4> lvl2dim(lvlRank);
+ SmallVector<Value> dim2lvl(dimRank);
+ SmallVector<Value> lvl2dim(lvlRank);
auto dimOrder = enc.getDimOrdering();
if (dimOrder) {
assert(dimOrder.isPermutation());
/// Converts a pointer to COO (from calls to iter->next()) into a vector of
/// indices, applying an (optional) `offset` on `offsetDim`.
-static SmallVector<Value, 4> loadIndices(OpBuilder &builder, Location loc,
- unsigned rank, Value ind,
- unsigned offsetDim = 0,
- Value offset = Value()) {
- SmallVector<Value, 4> ivs;
+static SmallVector<Value> loadIndices(OpBuilder &builder, Location loc,
+ unsigned rank, Value ind,
+ unsigned offsetDim = 0,
+ Value offset = Value()) {
+ SmallVector<Value> ivs;
ivs.reserve(rank);
for (unsigned i = 0; i < rank; i++) {
Value idx = constantIndex(builder, loc, i);
unsigned dstRank = dstTp.getRank();
unsigned srcRank = srcTp.getRank();
- SmallVector<Value, 4> srcIndices;
+ SmallVector<Value> srcIndices;
for (unsigned i = 0; i < srcRank; i++) {
Value idx = rewriter.create<memref::LoadOp>(
loc, srcIdx, constantIndex(rewriter, loc, i));
srcIndices.push_back(idx);
}
- SmallVector<Value, 4> dstIndices;
+ SmallVector<Value> dstIndices;
translateIndicesArray(rewriter, loc, reassociation, srcIndices, srcShape,
dstShape, dstIndices);
auto noPerm = SparseTensorEncodingAttr::get(
op->getContext(), encSrc.getDimLevelType(), AffineMap(), AffineMap(),
encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
- SmallVector<Value, 4> srcSizes;
+ SmallVector<Value> srcSizes;
sizesFromPtr(rewriter, srcSizes, loc, encSrc, srcTp, adaptor.getSrc());
NewCallParams params(rewriter, loc);
Value iter = params.genBuffers(noPerm, srcSizes, srcTp)
.genNewCall(Action::kToIterator, adaptor.getSrc());
// Start a new COO for the destination tensor.
- SmallVector<Value, 4> dstSizes;
+ SmallVector<Value> dstSizes;
if (dstTp.hasStaticShape()) {
sizesFromType(rewriter, dstSizes, loc, dstTp);
} else {
auto noPerm = SparseTensorEncodingAttr::get(
rewriter.getContext(), enc.getDimLevelType(), AffineMap(), AffineMap(),
enc.getPointerBitWidth(), enc.getIndexBitWidth());
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesFromPtr(rewriter, sizes, loc, noPerm, tensorTp, t);
Value iter = NewCallParams(rewriter, loc)
.genBuffers(noPerm, sizes, tensorTp)
return failure();
// Generate the call to construct tensor from ptr. The sizes are
// inferred from the result type of the new operator.
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
ShapedType stp = resType.cast<ShapedType>();
sizesFromType(rewriter, sizes, loc, stp);
Value ptr = adaptor.getOperands()[0];
rewriter.replaceOp(op, adaptor.getOperands()); // hidden nop cast
return success();
}
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
NewCallParams params(rewriter, loc);
ShapedType stp = srcType.cast<ShapedType>();
sizesFromPtr(rewriter, sizes, loc, encSrc, stp, src);
op->getContext(),
SmallVector<DimLevelType>(rank, DimLevelType::Dense), AffineMap(),
AffineMap(), encSrc.getPointerBitWidth(), encSrc.getIndexBitWidth());
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesFromPtr(rewriter, sizes, loc, encSrc, srcTensorTp, src);
Value iter = NewCallParams(rewriter, loc)
.genBuffers(encDst, sizes, dstTensorTp)
rewriter.create<scf::ConditionOp>(loc, cond, before->getArguments());
Block *after = rewriter.createBlock(&whileOp.getAfter(), {}, noTypes);
rewriter.setInsertionPointToStart(after);
- SmallVector<Value, 4> ivs = loadIndices(rewriter, loc, rank, ind);
+ SmallVector<Value> ivs = loadIndices(rewriter, loc, rank, ind);
insertScalarIntoDenseTensor(rewriter, loc, elemPtr, dst, ivs);
rewriter.create<scf::YieldOp>(loc);
rewriter.setInsertionPointAfter(whileOp);
// loop is generated by genAddElt().
ShapedType stp = resType.cast<ShapedType>();
unsigned rank = stp.getRank();
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesFromSrc(rewriter, sizes, loc, src);
NewCallParams params(rewriter, loc);
Value coo =
// The offset applied to the dimension to be concatenated (starting from 0).
Value offset = constantIndex(rewriter, loc, 0);
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
NewCallParams params(rewriter, loc);
concatSizesFromInputs(rewriter, sizes, loc, dstTp, op.getInputs(),
concatDim);
} else {
// Case: dense => dense
Value val = genValueForDense(builder, loc, adaptedOp, idx);
- SmallVector<Value, 4> indVec(idx);
+ SmallVector<Value> indVec(idx);
// Apply offset.
indVec[concatDim] = builder.create<arith::AddIOp>(
loc, indVec[concatDim], offset);
// Convert to default permuted COO.
Value src = adaptor.getOperands()[0];
auto encSrc = getSparseTensorEncoding(srcType);
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesFromPtr(rewriter, sizes, loc, encSrc, srcType, src);
auto enc = SparseTensorEncodingAttr::get(
op->getContext(), encSrc.getDimLevelType(), AffineMap(), AffineMap(),
/// Populates given sizes array from type (for static sizes) and from
/// the tensor (for dynamic sizes).
-static void sizesForTensor(OpBuilder &builder, SmallVector<Value, 4> &sizes,
+static void sizesForTensor(OpBuilder &builder, SmallVectorImpl<Value> &sizes,
Location loc, ShapedType stp, Value tensor) {
for (const auto &d : enumerate(stp.getShape())) {
Value dim;
static RankedTensorType getUnorderedCOOFromType(RankedTensorType src) {
auto *ctx = src.getContext();
auto rank = src.getRank();
- SmallVector<DimLevelType, 4> dims;
+ SmallVector<DimLevelType> dims;
// An unordered and non-unique compressed dim at the beginning.
dims.push_back(DimLevelType::CompressedNuNo);
// Generate code to represent the static dimension constants or compute
// the dynamic dimension values.
- SmallVector<Value, 4> srcSizes;
+ SmallVector<Value> srcSizes;
sizesForTensor(rewriter, srcSizes, loc, srcTp, srcTensor);
- SmallVector<Value, 4> dstSizes;
- SmallVector<Value, 4> dstDynSizes;
+ SmallVector<Value> dstSizes;
+ SmallVector<Value> dstDynSizes;
if (dstTp.hasStaticShape()) {
for (auto d : dstTp.getShape())
dstSizes.push_back(constantIndex(rewriter, loc, d));
loc, srcTensor, cooBuffer,
[&](OpBuilder &builder, Location loc, ValueRange args, Value v,
ValueRange reduc) {
- SmallVector<Value, 4> srcIndices;
- SmallVector<Value, 4> dstIndices;
+ SmallVector<Value> srcIndices;
+ SmallVector<Value> dstIndices;
for (int64_t i = 0, e = srcTp.getRank(); i < e; i++) {
uint64_t dim = toStoredDim(encSrc, i);
srcIndices.push_back(args[dim]);
loc, input, cooBuffer,
[&](OpBuilder &builder, Location loc, ValueRange args, Value v,
ValueRange reduc) {
- SmallVector<Value, 4> indices;
+ SmallVector<Value> indices;
for (int64_t i = 0; i < rank; i++) {
Value idx = args[i];
if (i == static_cast<int64_t>(conDim))
Location loc = op.getLoc();
Value src = op.getSource();
RankedTensorType dstTp = op.getType().cast<RankedTensorType>();
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesFromSrc(rewriter, sizes, loc, src);
- SmallVector<Value, 4> dynSizes;
+ SmallVector<Value> dynSizes;
getDynamicSizes(dstTp, sizes, dynSizes);
bool fromSparseConst = false;
Value src = op.getSource();
RankedTensorType srcTp = src.getType().cast<RankedTensorType>();
- SmallVector<Value, 4> sizes;
+ SmallVector<Value> sizes;
sizesForTensor(rewriter, sizes, loc, srcTp, src);
Value dst = allocDenseTensor(rewriter, loc, dstTp, sizes);
RankedTensorType dstTp = op.getType().cast<RankedTensorType>();
SparseTensorEncodingAttr encDst = getSparseTensorEncoding(dstTp);
- SmallVector<Value, 4> srcSizes;
+ SmallVector<Value> srcSizes;
sizesForTensor(rewriter, srcSizes, loc, srcTp, src);
Value tmpCoo = Value();
if (!isUniqueCOOType(srcTp)) {
// TODO: there may be cases that can be handled more efficiently without
// going through an intermediate COO, such as cases that only change
// the overhead types.
- SmallVector<Value, 4> dynSrcSizes;
+ SmallVector<Value> dynSrcSizes;
getDynamicSizes(srcTp, srcSizes, dynSrcSizes);
srcTp = getUnorderedCOOFromType(srcTp);
tmpCoo =
MemRefType::get(dynShape, getIndexOverheadType(rewriter, encSrc));
uint64_t rank = dstTp.getRank();
// Gather the indices-arrays in the dst tensor storage order.
- SmallVector<Value, 4> xs(rank, Value());
+ SmallVector<Value> xs(rank, Value());
for (uint64_t i = 0; i < rank; i++) {
uint64_t orgDim = toOrigDim(encSrc, i);
xs[toStoredDim(encDst, orgDim)] = rewriter.create<ToIndicesOp>(
rewriter.create<SortOp>(loc, nnz, xs, ValueRange{y});
// For each element in the COO tensor, insert the element into the dst tensor.
- SmallVector<Value, 4> dynDstSizes;
+ SmallVector<Value> dynDstSizes;
getDynamicSizes(dstTp, srcSizes, dynDstSizes);
Value dst =
rewriter.create<AllocTensorOp>(loc, dstTp, dynDstSizes).getResult();
- SmallVector<Value, 4> indices(srcTp.getRank(), Value());
+ SmallVector<Value> indices(srcTp.getRank(), Value());
auto foreachOp = rewriter.create<ForeachOp>(
loc, src, dst,
[&](OpBuilder &builder, Location loc, ValueRange args, Value v,
loopEmitter.enterLoopOverTensorAtDim(rewriter, loc, 0, i, reduc);
}
- SmallVector<Value, 4> coords;
+ SmallVector<Value> coords;
coords.reserve(rank);
loopEmitter.getCoordinateArray(coords);
// 2. Inline the block in the foreach operator.
Block *srcBlock = op.getBody();
- SmallVector<Value, 4> args;
// Remap coordinates.
+ SmallVector<Value> args;
for (int64_t i = 0; i < rank; i++) {
Value actual = coords[toStoredDim(enc, i)];
args.push_back(actual);
// If the result tensor has dynamic dimensions, get the dynamic sizes from
// the sparse tensor reader.
- SmallVector<Value, 4> dynSizesArray;
+ SmallVector<Value> dynSizesArray;
if (!dstTp.hasStaticShape()) {
createFuncCall(rewriter, loc, "getSparseTensorReaderDimSizes", {},
{reader, dimSizes}, EmitCInterface::On)
createFuncCall(rewriter, loc, getNextFuncName, {eltTp},
{reader, indices, value}, EmitCInterface::On)
.getResult(0);
- SmallVector<Value, 4> indicesArray;
+ SmallVector<Value> indicesArray;
for (uint64_t i = 0; i < rank; i++) {
indicesArray.push_back(rewriter.create<memref::LoadOp>(
loc, indices, constantIndex(rewriter, loc, i)));
// Generate code to calculate dimension size values and store the values to
// the buffer.
- SmallVector<Value, 4> dims;
+ SmallVector<Value> dims;
sizesForTensor(rewriter, dims, loc, srcTp, src);
for (uint64_t i = 0; i < rank; i++) {
rewriter.create<memref::StoreOp>(loc, dims[i], dimSizes,
constantIndex(builder, loc, i));
}
rewriter.create<memref::StoreOp>(loc, v, value);
- SmallVector<Value, 4> operands{writer, rankValue, indices, value};
+ SmallVector<Value> operands{writer, rankValue, indices, value};
FlatSymbolRefAttr fn = getFunc(module, outNextFuncName, {}, operands,
EmitCInterface::On);
builder.create<func::CallOp>(loc, TypeRange(), fn, operands);
assert(m.getNumResults() == sz && "TopoSort/AffineMap size mismatch");
// Construct the inverse of `m`, to avoid the asymptotic complexity
// of calling `m.getPermutedPosition` repeatedly.
- SmallVector<unsigned, 4> inv(sz);
+ SmallVector<unsigned> inv(sz);
for (unsigned i = 0; i < sz; i++)
inv[i] = m.getDimPosition(i);
// Construct the permutation.
- SmallVector<unsigned, 4> perm(sz);
+ SmallVector<unsigned> perm(sz);
for (unsigned i = 0; i < sz; i++)
perm[i] = inv[topSort[i]];
return AffineMap::getPermutationMap(perm, context);
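For illustration of the inverse-then-permute step (made-up values, not from the patch): if m maps (d0, d1, d2) -> (d2, d0, d1), then inv = [2, 0, 1]; with topSort = [1, 2, 0], perm = [inv[1], inv[2], inv[0]] = [0, 1, 2], which is then handed to AffineMap::getPermutationMap.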
CodeGen &codegen, Merger &merger,
function_ref<Optional<Operation *>(MutableArrayRef<Value> reduc)>
callback) {
- SmallVector<Value, 4> reduc;
+ SmallVector<Value> reduc;
if (codegen.redVal)
reduc.push_back(codegen.redVal);
if (codegen.expValues)
/// Generates subscript for load/store on a dense or sparse tensor.
static Value genSubscript(CodeGen &codegen, OpBuilder &builder,
linalg::GenericOp op, OpOperand *t,
- SmallVector<Value, 4> &args) {
+ SmallVectorImpl<Value> &args) {
unsigned tensor = t->getOperandNumber();
auto map = op.getMatchingIndexingMap(t);
auto enc = getSparseTensorEncoding(t->get().getType());
// Direct insertion in lexicographic index order.
if (!codegen.expValues) {
unsigned rank = op.getRank(t);
- SmallVector<Value, 4> indices;
+ SmallVector<Value> indices;
for (unsigned i = 0; i < rank; i++) {
assert(codegen.loopEmitter.getLoopIV(i));
indices.push_back(codegen.loopEmitter.getLoopIV(i));
return genInsertionLoad(codegen, builder, op, &t);
}
// Actual load.
- SmallVector<Value, 4> args;
+ SmallVector<Value> args;
Value ptr = genSubscript(codegen, builder, op, &t, args);
return builder.create<memref::LoadOp>(op.getLoc(), ptr, args);
}
// Select operation insertion.
Value insChain = codegen.insChain;
assert(insChain);
- SmallVector<Type, 1> types;
- types.push_back(codegen.insChain.getType());
- scf::IfOp ifOp =
- builder.create<scf::IfOp>(loc, types, rhs, /*else=*/true);
+ scf::IfOp ifOp = builder.create<scf::IfOp>(
+ loc, insChain.getType(), rhs, /*else=*/true);
builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
// Existing value was preserved to be used here.
assert(merger.exp(exp).val);
return;
}
// Actual store.
- SmallVector<Value, 4> args;
+ SmallVector<Value> args;
Value ptr = genSubscript(codegen, builder, op, t, args);
builder.create<memref::StoreOp>(loc, rhs, ptr, args);
}
codegen.expCount = res.getResult(3);
} else {
assert(codegen.expValues);
- SmallVector<Value, 4> indices;
+ SmallVector<Value> indices;
for (unsigned i = 0; i < at; i++) {
assert(codegen.loopEmitter.getLoopIV(i));
indices.push_back(codegen.loopEmitter.getLoopIV(i));
while (auto ifOp = dyn_cast_or_null<scf::IfOp>(
builder.getInsertionBlock()->getParentOp())) {
unsigned y = 0;
- SmallVector<Value, 4> yields;
+ SmallVector<Value> yields;
if (codegen.redVal) {
yields.push_back(codegen.redVal);
updateReduc(merger, codegen, ifOp.getResult(y++));
linalg::GenericOp op, unsigned idx,
BitVector &conditions) {
Location loc = op.getLoc();
- SmallVector<Type, 4> types;
+ SmallVector<Type> types;
Value cond;
for (unsigned b = 0, be = conditions.size(); b < be; b++) {
if (!conditions[b])
static void endIf(Merger &merger, CodeGen &codegen, OpBuilder &builder,
linalg::GenericOp op, scf::IfOp ifOp, Operation *loop,
Value redInput, Value cntInput, Value insInput) {
- SmallVector<Value, 4> operands;
+ SmallVector<Value> operands;
if (codegen.redVal) {
operands.push_back(codegen.redVal);
updateReduc(merger, codegen, redInput);
OpBuilder &builder, linalg::GenericOp op,
unsigned at, unsigned li, bool needsUniv) {
// The set of tensors + dims to generate loops on
- SmallVector<size_t, 4> condTids, condDims;
+ SmallVector<size_t> condTids, condDims;
// The set of (dense) tensors that are optimized out of the loop condition, yet
// still need extra locals to iterate on them.
- SmallVector<size_t, 4> extraTids, extraDims;
+ SmallVector<size_t> extraTids, extraDims;
translateBitsToTidDimPairs(merger, codegen, li, codegen.topSort[at], condTids,
condDims, extraTids, extraDims);
merger.setHasSparseOut(sparseOut != nullptr);
- SmallVector<Value, 4> tensors;
+ SmallVector<Value> tensors;
for (OpOperand &t : op->getOpOperands())
tensors.push_back(t.get());
unsigned Merger::addSet() {
unsigned s = latSets.size();
- latSets.emplace_back(SmallVector<unsigned, 16>());
+ latSets.emplace_back();
return s;
}
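With the inline size left implicit on the nested lattice sets, addSet reduces to a default-constructing emplace_back. A condensed restatement of the pieces touched above, for reference only:

llvm::SmallVector<llvm::SmallVector<unsigned>> latSets;

unsigned addSet() {
  unsigned s = latSets.size();
  latSets.emplace_back(); // default-construct the new lattice set in place
  return s;
}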