}
for (const auto &[idx, inputDimIdx] : llvm::enumerate(reverseDimMap)) {
- if (inputDimIdx == kUnmappedDim) {
- // This dimensions is being added. Should be statically known.
- if (ShapedType::isDynamic(initShape[idx]))
- return emitOpError()
- << "init dim " << idx
- << " can't be dynamic, because it's not matched to input";
- } else {
+ if (inputDimIdx != kUnmappedDim) {
        // This dimension is mapped from the input. Init and input dims should
// match.
if (inputShape[inputDimIdx] != initShape[idx])
// -----
-func.func @broadcast_added_dynamic_mismatch(
- %input: tensor<4x16xf32>, %init: tensor<4x?x16xf32>)
- -> tensor<4x?x16xf32> {
- // expected-error @+1 {{'linalg.broadcast' op init dim 1 can't be dynamic, because it's not matched to input}}
- %bcast = linalg.broadcast
- ins(%input:tensor<4x16xf32>)
- outs(%init:tensor<4x?x16xf32>)
- dimensions = [0, 2]
- func.return %bcast : tensor<4x?x16xf32>
-}
-
-// -----
-
func.func @broadcast_size_1_extension_not_supported(
%input: tensor<1x16xf32>, %init: tensor<4x?x16xf32>)
-> tensor<4x?x16xf32> {