The helper `isLastMemrefDimUnitStride` is duplicated (with slightly diverging semantics) across several dialects; consolidate it into a single definition in BuiltinTypes and reuse that everywhere.
Differential Revision: https://reviews.llvm.org/D155462
Value getVectorReductionOp(arith::AtomicRMWKind op, OpBuilder &builder,
Location loc, Value vector);
-/// Return true if the last dimension of the MemRefType has unit stride. Also
-/// return true for memrefs with no strides.
-bool isLastMemrefDimUnitStride(MemRefType type);
-
/// Build the default minor identity map suitable for a vector transfer. This
/// also handles the case memref<... x vector<...>> -> vector<...> in which the
/// rank of the identity map must take the vector element type into account.
AffineExpr makeCanonicalStridedLayoutExpr(ArrayRef<int64_t> sizes,
MLIRContext *context);
-/// Return true if the layout for `t` is compatible with strided semantics.
+/// Return "true" if the layout for `t` is compatible with strided semantics.
bool isStrided(MemRefType t);
+/// Return "true" if the last dimension of the given type has a static unit
+/// stride. Also return "true" for types with no strides.
+bool isLastMemrefDimUnitStride(MemRefType type);
+
} // namespace mlir
#endif // MLIR_IR_BUILTINTYPES_H
// Returns failure if the last dimension's stride is non-unit or the memory
// space is not zero; returns success otherwise.
static LogicalResult isMemRefTypeSupported(MemRefType memRefType,
LLVMTypeConverter &converter) {
- int64_t offset;
- SmallVector<int64_t, 4> strides;
- auto successStrides = getStridesAndOffset(memRefType, strides, offset);
+ if (!isLastMemrefDimUnitStride(memRefType))
+ return failure();
FailureOr<unsigned> addressSpace =
converter.getMemRefAddressSpace(memRefType);
- if (failed(successStrides) || strides.back() != 1 || failed(addressSpace) ||
- *addressSpace != 0)
+ if (failed(addressSpace) || *addressSpace != 0)
return failure();
return success();
}
}
};
-/// Return true if the last dimension of the MemRefType has unit stride.
-static bool isLastMemrefDimUnitStride(MemRefType type) {
- int64_t offset;
- SmallVector<int64_t, 4> strides;
- auto successStrides = getStridesAndOffset(type, strides, offset);
- return succeeded(successStrides) && (strides.empty() || strides.back() == 1);
-}
-
/// Lower a 1D vector transfer op to SCF using scalar loads/stores. This is
/// necessary in cases where a 1D vector transfer op cannot be lowered into
/// vector load/stores due to non-unit strides or broadcasts:
// GPU_SubgroupMmaLoadMatrixOp
//===----------------------------------------------------------------------===//
-/// Return true if the last dimension of the MemRefType has unit stride. Also
-/// return true for memrefs with no strides.
-static bool isLastMemrefDimUnitStride(MemRefType type) {
- int64_t offset;
- SmallVector<int64_t> strides;
- if (failed(getStridesAndOffset(type, strides, offset))) {
- return false;
- }
- return strides.back() == 1;
-}
-
LogicalResult SubgroupMmaLoadMatrixOp::verify() {
auto srcType = getSrcMemref().getType();
auto resType = getRes().getType();
// NVGPU_DeviceAsyncCopyOp
//===----------------------------------------------------------------------===//
-/// Return true if the last dimension of the MemRefType has unit stride. Also
-/// return true for memrefs with no strides.
-static bool isLastMemrefDimUnitStride(MemRefType type) {
- int64_t offset;
- SmallVector<int64_t> strides;
- if (failed(getStridesAndOffset(type, strides, offset))) {
- return false;
- }
- return strides.back() == 1;
-}
-
LogicalResult DeviceAsyncCopyOp::verify() {
auto srcMemref = llvm::cast<MemRefType>(getSrc().getType());
auto dstMemref = llvm::cast<MemRefType>(getDst().getType());
return false;
}
-/// Return true if the last dimension of the MemRefType has unit stride. Also
-/// return true for memrefs with no strides.
-bool mlir::vector::isLastMemrefDimUnitStride(MemRefType type) {
- int64_t offset;
- SmallVector<int64_t> strides;
- auto successStrides = getStridesAndOffset(type, strides, offset);
- return succeeded(successStrides) && (strides.empty() || strides.back() == 1);
-}
-
AffineMap mlir::vector::getTransferMinorIdentityMap(ShapedType shapedType,
VectorType vectorType) {
int64_t elementVectorRank = 0;
return rewriter.notifyMatchFailure(read, "not a memref source");
// Non-unit strides are handled by VectorToSCF.
- if (!vector::isLastMemrefDimUnitStride(memRefType))
+ if (!isLastMemrefDimUnitStride(memRefType))
return rewriter.notifyMatchFailure(read, "!= 1 stride needs VectorToSCF");
// If there is broadcasting involved then we first load the unbroadcasted
});
// Non-unit strides are handled by VectorToSCF.
- if (!vector::isLastMemrefDimUnitStride(memRefType))
+ if (!isLastMemrefDimUnitStride(memRefType))
return rewriter.notifyMatchFailure(write.getLoc(), [=](Diagnostic &diag) {
diag << "most minor stride is not 1: " << write;
});
return makeCanonicalStridedLayoutExpr(sizes, exprs, context);
}
-/// Return true if the layout for `t` is compatible with strided semantics.
bool mlir::isStrided(MemRefType t) {
int64_t offset;
SmallVector<int64_t, 4> strides;
auto res = getStridesAndOffset(t, strides, offset);
return succeeded(res);
}
+
+bool mlir::isLastMemrefDimUnitStride(MemRefType type) {
+ int64_t offset;
+ SmallVector<int64_t> strides;
+ auto successStrides = getStridesAndOffset(type, strides, offset);
+ return succeeded(successStrides) && (strides.empty() || strides.back() == 1);
+}