From afb0582325f4165c9aea8e3ed3ed451e577616e4 Mon Sep 17 00:00:00 2001
From: Mehdi Amini
Date: Sun, 5 Dec 2021 19:16:54 +0000
Subject: [PATCH] Fix TOSA verifier to emit verbose errors

Also add a test for invalid ops, which was missing.
---
 mlir/lib/Dialect/Tosa/IR/TosaOps.cpp | 19 +++++++++++++++---
 mlir/test/Dialect/Tosa/invalid.mlir  | 39 ++++++++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+), 3 deletions(-)
 create mode 100644 mlir/test/Dialect/Tosa/invalid.mlir

diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 2a9d5d4..78a6b1b 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -527,8 +527,14 @@ template <typename T> static LogicalResult verifyConvOp(T op) {
   auto weightType = op.weight().getType().template dyn_cast<RankedTensorType>();
 
   // Must be ranked tensor types
-  if (!inputType || !weightType)
+  if (!inputType) {
+    op.emitOpError("expect a ranked tensor for input, got ") << op.input();
     return failure();
+  }
+  if (!weightType) {
+    op.emitOpError("expect a ranked tensor for weight, got ") << op.weight();
+    return failure();
+  }
 
   auto inputEType = inputType.getElementType();
   auto weightEType = weightType.getElementType();
@@ -537,14 +543,21 @@ template <typename T> static LogicalResult verifyConvOp(T op) {
   bool weightIsQuant = !weightEType.template isa<FloatType>();
 
   // Either both must be quantized or both unquantized.
-  if (inputIsQuant != weightIsQuant)
+  if (inputIsQuant != weightIsQuant) {
+    op.emitOpError(
+        "expect both input and weight to be float or not together, got ")
+        << inputEType << " and " << weightEType;
     return failure();
+  }
 
   // Quantized type must have constructed the quantizationattr, and unquantized
   // types should not have a quantizationattr.
   if ((inputIsQuant && !op.quantization_info()) ||
-      (!inputIsQuant && op.quantization_info()))
+      (!inputIsQuant && op.quantization_info())) {
+    op.emitOpError("quantizationattr is required for quantized type, and not "
+                   "allowed for float type");
     return failure();
+  }
 
   return success();
 }
diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir
new file mode 100644
index 0000000..b5be2f3
--- /dev/null
+++ b/mlir/test/Dialect/Tosa/invalid.mlir
@@ -0,0 +1,39 @@
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics
+
+
+func @test_conv2d(%arg0: tensor<1x29x29x4xf32>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
+  // expected-error@+1 {{expect both input and weight to be float or not together, got 'f32' and 'i8'}}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+           : (tensor<1x29x29x4xf32>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
+  return %0 : tensor<1x27x27x16xi8>
+}
+
+// -----
+
+func @test_conv2d(%arg0: tensor<*xi8>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
+  // expected-error@+1 {{expect a ranked tensor for input, got <block argument> of type 'tensor<*xi8>' at index: 0}}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+           : (tensor<*xi8>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
+  return %0 : tensor<1x27x27x16xi8>
+}
+
+// -----
+
+func @test_conv2d(%arg0: tensor<1x29x29x4xi8>, %arg1: tensor<*xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
+  // expected-error@+1 {{expect a ranked tensor for weight, got <block argument> of type 'tensor<*xi8>' at index: 1}}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+           : (tensor<1x29x29x4xi8>, tensor<*xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
+  return %0 : tensor<1x27x27x16xi8>
+}
+
+
+// -----
+
+func @test_conv2d(%arg0: tensor<1x29x29x4xi8>, %arg1: tensor<16x3x3x4xi8>, %arg2: tensor<16xi8>) -> tensor<1x27x27x16xi8> {
+  // expected-error@+1 {{'tosa.conv2d' op quantizationattr is required for quantized type, and not allowed for float type}}
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {dilation = [1, 1], pad = [0, 0, 0, 0], stride = [1, 1]}
+           : (tensor<1x29x29x4xi8>, tensor<16x3x3x4xi8>, tensor<16xi8>) -> tensor<1x27x27x16xi8>
+  return %0 : tensor<1x27x27x16xi8>
+}
+
+
-- 
2.7.4