Motivated by a refactoring in the new sparse code (yet to be merged), this avoids some lengthy code duplication when extracting dimension positions from an AffineMap's results.
Reviewed By: mehdi_amini
Differential Revision: https://reviews.llvm.org/D91465
ArrayRef<AffineExpr> getResults() const;
AffineExpr getResult(unsigned idx) const;
+ /// Extracts the position of the dimensional expression at the given result,
+ /// when the caller knows it is safe to do so.
+ unsigned getDimPosition(unsigned idx) const;
+
/// Walk all of the AffineExpr's in this mapping. Each node in an expression
/// tree is visited in postorder.
void walkExprs(std::function<void(AffineExpr)> callback) const;
unsigned pos = resultExpr.value().cast<AffineDimExpr>().getPosition();
AffineMap foldedDims = reassociationMaps[resultExpr.index()];
numFoldedDims[pos] = foldedDims.getNumResults();
- ArrayRef<int64_t> shape = expandedShape.slice(
- foldedDims.getResult(0).cast<AffineDimExpr>().getPosition(),
- numFoldedDims[pos]);
+ ArrayRef<int64_t> shape =
+ expandedShape.slice(foldedDims.getDimPosition(0), numFoldedDims[pos]);
expandedDimsShape[pos].assign(shape.begin(), shape.end());
}
VectorType v = pair.first;
auto map = pair.second;
for (unsigned idx = 0, e = v.getRank(); idx < e; ++idx) {
- unsigned pos = map.getResult(idx).cast<AffineDimExpr>().getPosition();
+ unsigned pos = map.getDimPosition(idx);
if (!extents[pos])
extents[pos] = getAffineConstantExpr(v.getShape()[idx], ctx);
}
if (insertedPos.size() == extractedPos.size()) {
bool fold = true;
for (unsigned idx = 0, sz = extractedPos.size(); idx < sz; ++idx) {
- auto pos =
- permutationMap.getResult(idx).cast<AffineDimExpr>().getPosition();
+ auto pos = permutationMap.getDimPosition(idx);
if (pos >= sz || insertedPos[pos] != extractedPos[idx]) {
fold = false;
break;
// Helper to find an index in an affine map.
static Optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
- int64_t idx = map.getResult(i).cast<AffineDimExpr>().getPosition();
+ int64_t idx = map.getDimPosition(i);
if (idx == index)
return i;
}
auto *ctx = rewriter.getContext();
SmallVector<AffineExpr, 4> results;
for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
- int64_t idx = map.getResult(i).cast<AffineDimExpr>().getPosition();
+ int64_t idx = map.getDimPosition(i);
if (idx == index)
continue;
// Re-insert remaining indices, but renamed when occurring
int64_t iterIndex = -1;
int64_t dimSize = -1;
if (lhsIndex >= 0) {
- iterIndex = iMap[0].getResult(lhsIndex).cast<AffineDimExpr>().getPosition();
- assert(
- (rhsIndex < 0 ||
- iterIndex ==
- iMap[1].getResult(rhsIndex).cast<AffineDimExpr>().getPosition()) &&
- "parallel index should be free in LHS or batch in LHS/RHS");
+ iterIndex = iMap[0].getDimPosition(lhsIndex);
+ assert((rhsIndex < 0 || iterIndex == iMap[1].getDimPosition(rhsIndex)) &&
+ "parallel index should be free in LHS or batch in LHS/RHS");
dimSize = lhsType.getDimSize(lhsIndex);
} else {
assert(rhsIndex >= 0 && "missing parallel index");
- iterIndex = iMap[1].getResult(rhsIndex).cast<AffineDimExpr>().getPosition();
+ iterIndex = iMap[1].getDimPosition(rhsIndex);
dimSize = rhsType.getDimSize(rhsIndex);
}
assert(iterIndex >= 0 && "parallel index not listed in operand mapping");
return map->results[idx];
}
+/// Extracts the position of the dimensional expression at the given result,
+/// when the caller knows it is safe to do so (i.e. the result at `idx` is an
+/// AffineDimExpr; otherwise the cast<> fails).
+unsigned AffineMap::getDimPosition(unsigned idx) const {
+  return getResult(idx).cast<AffineDimExpr>().getPosition();
+}
+
/// Folds the results of the application of an affine map on the provided
/// operands to a constant if possible. Returns false if the folding happens,
/// true otherwise.