From 7e1eac511658bde4b83c7655f788d4c1e2d58cc5 Mon Sep 17 00:00:00 2001
From: Peiming Liu
Date: Wed, 21 Dec 2022 01:11:14 +0000
Subject: [PATCH] [mlir][sparse] add initialize() API to LoopEmitter to support
 post-constructor initialization

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D140444
---
 .../SparseTensor/Transforms/CodegenUtils.cpp        | 30 +++++++++++++++++-----
 .../Dialect/SparseTensor/Transforms/CodegenUtils.h  | 24 ++++++++++-------
 2 files changed, 38 insertions(+), 16 deletions(-)

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
index b5c82bd..d26e364 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.cpp
@@ -208,13 +208,28 @@ SparseTensorLoopEmitter::SparseTensorLoopEmitter(ValueRange tensors,
                                                  StringAttr loopTag,
                                                  bool hasOutput,
                                                  bool isSparseOut,
-                                                 ArrayRef<unsigned> topSort)
-    : loopTag(loopTag), hasOutput(hasOutput), isSparseOut(isSparseOut),
-      tensors(tensors.begin(), tensors.end()), dimTypes(tensors.size()),
-      pidxs(tensors.size()), coord(tensors.size()), highs(tensors.size()),
-      ptrBuffer(tensors.size()), idxBuffer(tensors.size()),
-      valBuffer(tensors.size()), loopStack(),
-      sparsiferLoopLvlMap(topSort.size(), 0) {
+                                                 ArrayRef<unsigned> topSort) {
+  initialize(tensors, loopTag, hasOutput, isSparseOut, topSort);
+}
+
+void SparseTensorLoopEmitter::initialize(ValueRange tensors, StringAttr loopTag,
+                                         bool hasOutput, bool isSparseOut,
+                                         ArrayRef<unsigned> topSort) {
+  // First initializes fields.
+  this->loopTag = loopTag;
+  this->hasOutput = hasOutput;
+  this->isSparseOut = isSparseOut;
+  this->tensors.assign(tensors.begin(), tensors.end());
+  this->dimTypes.assign(tensors.size(), std::vector<DimLevelType>());
+  this->pidxs.assign(tensors.size(), std::vector<Value>());
+  this->coord.assign(tensors.size(), std::vector<Value>());
+  this->highs.assign(tensors.size(), std::vector<Value>());
+  this->ptrBuffer.assign(tensors.size(), std::vector<Value>());
+  this->idxBuffer.assign(tensors.size(), std::vector<Value>());
+  this->valBuffer.assign(tensors.size(), nullptr);
+  this->loopStack.reserve(topSort.size());
+  this->sparsiferLoopLvlMap.assign(topSort.size(), 0);
+
   for (size_t tid = 0, e = tensors.size(); tid < e; tid++) {
     auto t = tensors[tid];
     // a scalar or 0-dimension tensors
@@ -239,6 +254,7 @@ SparseTensorLoopEmitter::SparseTensorLoopEmitter(ValueRange tensors,
     idxBuffer[tid].assign(rank, Value());
   }
 
+  // FIXME: This map should be maintained outside loop emitter.
   for (unsigned i = 0, e = topSort.size(); i < e; i++) {
     // This is an inverse map of the topologically sorted loop index from
     // sparsifier. This is needed to map the AffineDimExpr back to the loopStack
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
index a121522..7fd126f 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/CodegenUtils.h
@@ -562,15 +562,21 @@ public:
   using OutputUpdater = function_ref<Value(OpBuilder &builder, Location loc,
                                            Value memref, Value tensor)>;
 
-  /// Constructor: take an array of tensors inputs, on which the generated
-  /// loops will iterate on. The index of the tensor in the array is also the
-  /// tensor id (tid) used in related functions.
-  /// If isSparseOut is set, loop emitter assume that the sparse output tensor
-  /// is empty, and will always generate loops on it based on the dim sizes.
-  /// An optional array could be provided (by sparsification) to indicate the
-  /// loop id sequence that will be generated. It is used to establish the
-  /// mapping between affineDimExpr to the corresponding loop index in the
-  /// loop stack that are maintained by the loop emitter.
+  SparseTensorLoopEmitter() = default;
+
+  /// Takes an array of input tensors, on which the generated loops will
+  /// iterate. The index of a tensor in the array is also the tensor id (tid)
+  /// used in related functions. If isSparseOut is set, the loop emitter
+  /// assumes that the sparse output tensor is empty and will always generate
+  /// loops on it based on the dim sizes. An optional array can be provided
+  /// (by sparsification) to indicate the loop id sequence that will be
+  /// generated; it is used to establish the mapping from an affineDimExpr to
+  /// the corresponding loop index in the loop stack maintained by the loop
+  /// emitter.
+  void initialize(ValueRange tensors, StringAttr loopTag = nullptr,
+                  bool hasOutput = false, bool isSparseOut = false,
+                  ArrayRef<unsigned> topSort = {});
+
   explicit SparseTensorLoopEmitter(ValueRange tensors,
                                    StringAttr loopTag = nullptr,
                                    bool hasOutput = false,
-- 
2.7.4
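Not part of the patch itself: below is a minimal usage sketch of the new two-phase construction. The client class SparsifierDriver and its setup() hook are invented for illustration; only SparseTensorLoopEmitter, initialize(), and its parameter list come from the patch. The point of the default constructor plus initialize() is that an emitter held by value can be constructed before its input tensors and topological loop order are known, then initialized later with the same arguments the one-step constructor takes.

  #include "CodegenUtils.h"  // in-tree header declaring SparseTensorLoopEmitter

  using namespace mlir;
  using namespace mlir::sparse_tensor;

  // Hypothetical client that owns the emitter by value, so it needs the
  // default constructor added by this patch.
  class SparsifierDriver {
    SparseTensorLoopEmitter emitter;  // default-constructed, not yet usable

  public:
    // Called once the input tensors and the topologically sorted loop order
    // are available; forwards the same arguments the one-step constructor
    // would take.
    void setup(ValueRange tensors, ArrayRef<unsigned> topSort) {
      emitter.initialize(tensors, /*loopTag=*/nullptr,
                         /*hasOutput=*/true, /*isSparseOut=*/false, topSort);
    }
  };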