From 3e41bec03ebdde08b6da7733b5c8340993258cb0 Mon Sep 17 00:00:00 2001
From: natashaknk
Date: Wed, 21 Sep 2022 09:29:17 -0700
Subject: [PATCH] [mlir][tosa] Revert added support for dynamic height/weight
 for pooling in tosa-to-linalg

Partial rollback of D133389

Reviewed By: jpienaar

Differential Revision: https://reviews.llvm.org/D134370
---
 .../Conversion/TosaToLinalg/TosaToLinalgNamed.cpp | 108 ++++++++-------
 .../TosaToLinalg/tosa-to-linalg-named.mlir        |  39 ++------
 2 files changed, 49 insertions(+), 98 deletions(-)

diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
index 250d452..4a31bd6 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
@@ -73,13 +73,12 @@ static mlir::Value reifyConstantDim(Attribute attr,
 // Calculating the output width/height using the formula:
 // H = ((IH+pad_top+pad_bottom-(dilation_y*(KH-1)+1))/stride_y)+1
 // W = ((IW+pad_left+pad_right-(dilation_x*(KW-1)+1))/stride_x)+1
-static mlir::Value getConvOrPoolOutputDim(Location loc, Value inputDim,
-                                          Attribute padBeforeAttr,
-                                          Attribute padAfterAttr,
-                                          Value kernelDim, Attribute strideAttr,
-                                          Attribute dilationAttr, Type inputETy,
-                                          ImplicitLocOpBuilder &builder) {
-  auto one = builder.create<arith::ConstantOp>(
+static mlir::Value
+getConvOutputDim(Location loc, Value inputDim, Attribute padBeforeAttr,
+                 Attribute padAfterAttr, Value kernelDim, Attribute strideAttr,
+                 Attribute dilationAttr, Type inputETy, OpBuilder &rewriter) {
+  ImplicitLocOpBuilder builder(loc, rewriter);
+  auto one = rewriter.create<arith::ConstantOp>(
       loc, IntegerAttr::get(inputDim.getType(), 1));
   Value padBefore = reifyConstantDim(padBeforeAttr, builder);
   Value paddedBefore = builder.create<arith::AddIOp>(inputDim, padBefore);
@@ -97,27 +96,11 @@ static mlir::Value getConvOrPoolOutputDim(Location loc, Value inputDim,
   return builder.create<arith::AddIOp>(divide, one);
 }
 
-// For convolution, the kernel is a value.
-Value getKernelDim(Location loc, Value kernel, uint64_t dim,
-                   ImplicitLocOpBuilder &builder) {
-  return builder.create<tensor::DimOp>(loc, kernel, dim).getResult();
-}
-
-// For pooling, the kernel is an attribute.
-Value getKernelDim(Location loc, ArrayAttr kernel, uint64_t dim,
-                   ImplicitLocOpBuilder &builder) {
-  auto kernelArr = kernel.getValue();
-  if (dim >= kernelArr.size()) return nullptr;
-  Attribute kernelDimAttr = kernelArr[dim];
-  return reifyConstantDim(kernelDimAttr, builder);
-}
-
-// Creates a vector of the dynamic output dims for convolution and pooling ops.
-template <typename T>
-static SmallVector<Value> inferDynamicDimsForConvOrPool(
-    Location loc, Value input, T weight, ShapedType resultTy, ArrayAttr padAttr,
-    ArrayAttr strideAttr, ArrayAttr dilationAttr, int64_t weightHDim,
-    int64_t weightWDim, OpBuilder &rewriter) {
+// Creates a vector of the dynamic output dims for Conv2D and Depthwise_Conv2D
+static SmallVector<Value> inferDynamicDimsForConv(
+    Location loc, Value input, Value weight, ShapedType resultTy,
+    ArrayAttr padAttr, ArrayAttr strideAttr, ArrayAttr dilationAttr,
+    int64_t weightHDim, int64_t weightWDim, OpBuilder &rewriter) {
   ShapedType inputTy = input.getType().cast<ShapedType>();
   Type inputETy = inputTy.getElementType();
   int64_t inputRank = inputTy.getRank();
@@ -131,29 +114,30 @@ static SmallVector<Value> inferDynamicDimsForConvOrPool(
       dynDims[i] = rewriter.create<tensor::DimOp>(loc, input, i);
   }
 
-  ImplicitLocOpBuilder builder(loc, rewriter);
   // Dynamic input height
   if (inputTy.isDynamicDim(heightDim)) {
-    Value inputHDim =
-        builder.create<tensor::DimOp>(loc, input, heightDim).getResult();
-    Value kernelHDim = getKernelDim(loc, weight, weightHDim, builder);
+    Value initHDim =
+        rewriter.create<tensor::DimOp>(loc, input, heightDim).getResult();
+    Value kernelHDim =
+        rewriter.create<tensor::DimOp>(loc, weight, weightHDim).getResult();
     // H = F(IH, pad_top, pad_bottom, dilation_y, KH, stride_y)
-    dynDims[heightDim] = getConvOrPoolOutputDim(
-        loc, inputHDim, padAttr.getValue()[0], padAttr.getValue()[1],
-        kernelHDim, strideAttr.getValue()[0], dilationAttr.getValue()[0],
-        inputETy, builder);
+    dynDims[heightDim] = getConvOutputDim(
+        loc, initHDim, padAttr.getValue()[0], padAttr.getValue()[1], kernelHDim,
+        strideAttr.getValue()[0], dilationAttr.getValue()[0], inputETy,
+        rewriter);
   }
 
   // Dynamic input weight
   if (inputTy.isDynamicDim(weightDim)) {
-    Value inputWDim =
-        builder.create<tensor::DimOp>(loc, input, weightDim).getResult();
-    Value kernelWDim = getKernelDim(loc, weight, weightWDim, builder);
+    Value initWDim =
+        rewriter.create<tensor::DimOp>(loc, input, weightDim).getResult();
+    Value kernelWDim =
+        rewriter.create<tensor::DimOp>(loc, weight, weightWDim).getResult();
     // W = F(IW, pad_left, pad_right, dilation_x, KW, stride_x)
-    dynDims[weightDim] = getConvOrPoolOutputDim(
-        loc, inputWDim, padAttr.getValue()[2], padAttr.getValue()[3],
-        kernelWDim, strideAttr.getValue()[1], dilationAttr.getValue()[1],
-        inputETy, builder);
+    dynDims[weightDim] = getConvOutputDim(
+        loc, initWDim, padAttr.getValue()[2], padAttr.getValue()[3], kernelWDim,
+        strideAttr.getValue()[1], dilationAttr.getValue()[1], inputETy,
+        rewriter);
   }
 
   SmallVector<Value> filteredDims = condenseValues(dynDims);
@@ -207,7 +191,7 @@ public:
       return rewriter.notifyMatchFailure(
           op, "tosa.conv ops does not support unsigned integer input");
 
-    SmallVector<Value> filteredDims = inferDynamicDimsForConvOrPool(
+    SmallVector<Value> filteredDims = inferDynamicDimsForConv(
         loc, input, weight, resultTy, padAttr, strideTosaAttr, dilationTosaAttr,
         /*weightHDim=*/1, /*weightWDim=*/2, rewriter);
 
@@ -372,7 +356,7 @@ public:
           op, "tosa.depthwise_conv ops require static shapes");
 
     // Compute output dynamic dims
-    SmallVector<Value> filteredDims = inferDynamicDimsForConvOrPool(
+    SmallVector<Value> filteredDims = inferDynamicDimsForConv(
         loc, input, weight, resultTy, padAttr, strideTosaAttr, dilationTosaAttr,
         0, 1, rewriter);
 
@@ -708,15 +692,11 @@ public:
     ShapedType resultTy = op.getType().template cast<ShapedType>();
     Type resultETy = inputTy.getElementType();
 
-    auto kernelAttr = op.getKernel().cast<ArrayAttr>();
-    auto padAttr = op.getPad().cast<ArrayAttr>();
-    auto strideTosaAttr = op.getStride().cast<ArrayAttr>();
-    ArrayAttr dilationTosaAttr = rewriter.getI64ArrayAttr({1, 1});
-
-    SmallVector<Value> filteredDims = inferDynamicDimsForConvOrPool(
-        loc, input, kernelAttr, resultTy, padAttr, strideTosaAttr,
-        dilationTosaAttr,
-        /*weightHDim=*/0, /*weightWDim=*/1, rewriter);
+    auto dynamicDimsOr =
+        checkHasDynamicBatchDims(rewriter, op, {input, op.getOutput()});
+    if (!dynamicDimsOr.has_value())
+      return failure();
+    SmallVector<Value> dynamicDims = dynamicDimsOr.value();
 
     // Determine what the initial value needs to be for the max pool op.
     Attribute initialAttr;
@@ -753,7 +733,7 @@ public:
 
     // Create the linalg op that performs pooling.
     Value initTensor = rewriter.create<linalg::InitTensorOp>(
-        loc, filteredDims, resultTy.getShape(), resultTy.getElementType());
+        loc, dynamicDims, resultTy.getShape(), resultTy.getElementType());
 
     Value filledInitTensor =
         rewriter
@@ -789,15 +769,11 @@ public:
         inElementTy.isa<IntegerType>() ? rewriter.getI32Type() : inElementTy;
     ShapedType accTy = resultTy.clone(accETy);
 
-    auto kernelAttr = op.getKernel().cast<ArrayAttr>();
-    auto padArrayAttr = op.getPad().cast<ArrayAttr>();
-    auto strideTosaAttr = op.getStride().cast<ArrayAttr>();
-    ArrayAttr dilationTosaAttr = rewriter.getI64ArrayAttr({1, 1});
-
-    SmallVector<Value> filteredDims = inferDynamicDimsForConvOrPool(
-        loc, input, kernelAttr, resultTy, padArrayAttr, strideTosaAttr,
-        dilationTosaAttr,
-        /*weightHDim=*/0, /*weightWDim=*/1, rewriter);
+    auto dynamicDimsOr =
+        checkHasDynamicBatchDims(rewriter, op, {input, op.getOutput()});
+    if (!dynamicDimsOr.has_value())
+      return failure();
+    SmallVector<Value> dynamicDims = dynamicDimsOr.value();
 
     // Apply padding as necessary.
     llvm::SmallVector<int64_t> pad;
@@ -819,7 +795,7 @@ public:
 
     // Create the linalg op that performs pooling.
    Value poolInitTensor = rewriter.create<linalg::InitTensorOp>(
-        loc, filteredDims, accTy.getShape(), accETy);
+        loc, dynamicDims, accTy.getShape(), accETy);
 
     Value filledInitTensor =
         rewriter
@@ -844,7 +820,7 @@ public:
 
     auto affineMap = rewriter.getMultiDimIdentityMap(resultTy.getRank());
     Value genericInitTensor = rewriter.create<linalg::InitTensorOp>(
-        loc, filteredDims, resultTy.getShape(), resultETy);
+        loc, dynamicDims, resultTy.getShape(), resultETy);
 
     auto genericOp = rewriter.create<linalg::GenericOp>(
         loc, ArrayRef<Type>({resultTy}), ValueRange{poolingOp},
diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
index 1240070..d956e82 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
@@ -165,21 +165,15 @@ func.func @max_pool_padded(%arg0: tensor<1x6x34x62xf32>) -> () {
 }
 
 // CHECK-LABEL: @max_pool_dyn
-func.func @max_pool_dyn(%arg0: tensor<?x?x?x62xf32>) -> () {
-  // CHECK: %[[C0:.+]] = arith.constant 0 : index
-  // CHECK: %[[DIM0:.+]] = tensor.dim %arg0, %[[C0]] : tensor<?x?x?x62xf32>
-  // CHECK: %[[C1:.+]] = arith.constant 1 : index
-  // CHECK: %[[DIM1:.+]] = tensor.dim %arg0, %[[C1]] : tensor<?x?x?x62xf32>
-  // CHECK: arith.constant 2 : index
-  // CHECK: %[[C2:.+]] = arith.constant 2 : index
-  // CHECK: %[[DIM2:.+]] = tensor.dim %arg0, %[[C2]] : tensor<?x?x?x62xf32>
-  // CHECK: %[[PAD:.+]] = tensor.pad %arg0
+func.func @max_pool_dyn(%arg0: tensor<?x6x34x62xf32>) -> () {
+  // CHECK: %[[C0:.+]] = arith.constant 0
+  // CHECK: %[[BATCH:.+]] = tensor.dim %arg0, %[[C0]]
   // CHECK: %[[CONST:.+]] = arith.constant -3.40282347E+38
-  // CHECK: %[[INIT:.+]] = linalg.init_tensor
-  // CHECK: %[[FILL:.+]] = linalg.fill ins(%cst_18 : f32) outs(%20 : tensor<?x?x?x62xf32>) -> tensor<?x?x?x62xf32>
+  // CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[BATCH]], 4, 32, 62]
+  // CHECK: %[[FILL:.+]] = linalg.fill ins(%[[CONST]]{{.*}}outs(%[[INIT]]
   // CHECK: %[[KERNEL:.+]] = linalg.init_tensor [3, 3]
-  // CHECK: linalg.pooling_nhwc_max {dilations = dense<1> : vector<2xi64>, strides = dense<2> : vector<2xi64>} ins(%[[PAD]], %[[KERNEL]] : tensor<?x?x?x62xf32>, tensor<3x3xf32>) outs(%[[FILL]] : tensor<?x?x?x62xf32>) -> tensor<?x?x?x62xf32>
-  %0 = "tosa.max_pool2d"(%arg0) {kernel = [3, 3], pad = [1, 1, 1, 1], stride = [2, 2]} : (tensor<?x?x?x62xf32>) -> (tensor<?x?x?x62xf32>)
+  // CHECK: linalg.pooling_nhwc_max {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%arg0, %[[KERNEL]] : tensor<?x6x34x62xf32>, tensor<3x3xf32>) outs(%[[FILL]] : tensor<?x4x32x62xf32>)
+  %0 = "tosa.max_pool2d"(%arg0) {pad = [0, 0, 0, 0], kernel = [3, 3], stride = [1, 1]} : (tensor<?x6x34x62xf32>) -> (tensor<?x4x32x62xf32>)
   return
 }
 
@@ -285,25 +279,6 @@ func.func @avg_pool_dyn(%arg0: tensor<?x6x34x62xf32>) -> (tensor<?x5x33x62xf32>)
   return %0 : tensor<?x5x33x62xf32>
 }
 
-// CHECK-LABEL: @avg_pool_dyn_h
-func.func @avg_pool_dyn_h(%arg0: tensor<2x?x34x62xf32>) -> (tensor<2x?x33x62xf32>) {
-  // CHECK: %[[C1:.+]] = arith.constant 1
-  // CHECK: %[[DIM1:.+]] = tensor.dim %arg0, %[[C1]]
-  // CHECK: arith.addi
-  // CHECK: arith.addi
-  // CHECK: arith.addi
-  // CHECK: %[[RESULT:.+]] = arith.addi
-  // CHECK: %[[PAD:.+]] = tensor.pad %arg0 low[0, 1, 1, 0] high[0, 1, 1, 0]
-  // CHECK: %[[POOLINIT:.+]] = linalg.init_tensor [2, %[[RESULT]], 33, 62]
-  // CHECK: %[[FILL:.+]] = linalg.fill
-  // CHECK: %[[KERNEL:.+]] = linalg.init_tensor [4, 4]
-  // CHECK: %[[POOL:.+]] = linalg.pooling_nhwc_sum {dilations = dense<1> : vector<2xi64>, strides = dense<1> : vector<2xi64>} ins(%[[PAD]], %[[KERNEL]] : tensor<2x?x36x62xf32>, tensor<4x4xf32>) outs(%[[FILL]] : tensor<2x?x33x62xf32>)
-  // CHECK: %[[INIT:.+]] = linalg.init_tensor [2, %[[RESULT]], 33, 62]
-  // CHECK: %[[GENERIC:.+]] = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%[[POOL]] : tensor<2x?x33x62xf32>) outs(%[[INIT]] : tensor<2x?x33x62xf32>)
-  %0 = "tosa.avg_pool2d"(%arg0) {pad = [1, 1, 1, 1], kernel = [4, 4], stride = [1, 1]} : (tensor<2x?x34x62xf32>) -> (tensor<2x?x33x62xf32>)
-  return %0 : tensor<2x?x33x62xf32>
-}
-
 // -----
 
 // CHECK-LABEL: @avg_pool_i8
-- 
2.7.4
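
For reference, the output-dimension formula quoted in the comment above getConvOutputDim can be sanity-checked with plain integer arithmetic against the shapes in the @max_pool_dyn and @avg_pool_dyn tests above. The following is a minimal standalone C++ sketch, not part of the patch; outputDim is a hypothetical helper written here only to illustrate the formula.

#include <cassert>
#include <cstdint>

// One spatial axis of the formula from the getConvOutputDim comment:
//   OH = ((IH + pad_before + pad_after - (dilation * (KH - 1) + 1)) / stride) + 1
static int64_t outputDim(int64_t in, int64_t padBefore, int64_t padAfter,
                         int64_t kernel, int64_t stride, int64_t dilation) {
  return (in + padBefore + padAfter - (dilation * (kernel - 1) + 1)) / stride + 1;
}

int main() {
  // @max_pool_dyn: 6x34 spatial input, 3x3 kernel, stride 1, no padding
  // -> 4x32, matching the linalg.init_tensor [%[[BATCH]], 4, 32, 62] check.
  assert(outputDim(/*in=*/6, 0, 0, /*kernel=*/3, /*stride=*/1, /*dilation=*/1) == 4);
  assert(outputDim(34, 0, 0, 3, 1, 1) == 32);
  // @avg_pool_dyn: 6x34 spatial input, 4x4 kernel, stride 1, pad 1 per side
  // -> 5x33, matching the tensor<?x5x33x62xf32> result type.
  assert(outputDim(6, 1, 1, 4, 1, 1) == 5);
  assert(outputDim(34, 1, 1, 4, 1, 1) == 33);
  return 0;
}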