SmallVector<int64_t> newShape;
operandsAndShape(resultType, dynamicExtents, newOperands, newShape);
+ for (int64_t newdim : newShape) {
+ // This check also occurs in the verifier, but we need it here too
+ // since intermediate passes may have replaced some dynamic dimensions
+ // with constants.
+ if (newdim < 0 && !ShapedType::isDynamic(newdim))
+ return failure();
+ }
+
if (newOperands.size() == tensorFromElements.getDynamicExtents().size())
return failure();
}
OpFoldResult ExtractSliceOp::fold(FoldAdaptor adaptor) {
- if (auto splat = llvm::dyn_cast_if_present<SplatElementsAttr>(adaptor.getSource())) {
+ if (auto splat =
+ llvm::dyn_cast_if_present<SplatElementsAttr>(adaptor.getSource())) {
auto resultType = llvm::cast<ShapedType>(getResult().getType());
if (resultType.hasStaticShape())
return splat.resizeSplat(resultType);
--- /dev/null
+// RUN: mlir-opt <%s -split-input-file -verify-diagnostics -canonicalize
+
+// -----
+
+func.func @indirectly_generate_negative_size() -> tensor<?x8xi32> {
+ %fill = arith.constant 0 : i32
+ %zero_idx = arith.constant 0 : index
+ %bad_size = affine.max affine_map<(d0) -> (d0 mod 64 - 8)>(%zero_idx) // folds to -8 for d0 = 0
+ // expected-error@+1 {{tensor dimensions must be non-negative}}
+ %gen = tensor.generate %bad_size {
+ ^bb0(%row: index, %col: index):
+ tensor.yield %fill : i32
+ } : tensor<?x8xi32>
+ return %gen : tensor<?x8xi32>
+}