From 42b3d7da750ab6ad39ea228985f422685f89eb45 Mon Sep 17 00:00:00 2001
From: Darshan Patel
Date: Mon, 25 May 2020 22:30:07 +0530
Subject: [PATCH] Add DIV support to TFLite parser

* Added unit tests
* Updated Documentation

Signed-off-by: Darshan Patel
Change-Id: Iadb5c76139d74d755d7f4be4b023b4417efe1e92
---
 CMakeLists.txt                                 |   1 +
 docs/01_parsers.dox                            |   1 +
 src/armnnTfLiteParser/TensorFlowLiteSupport.md |   2 +
 src/armnnTfLiteParser/TfLiteParser.cpp         |  40 ++++++++-
 src/armnnTfLiteParser/TfLiteParser.hpp         |   1 +
 src/armnnTfLiteParser/test/Div.cpp             | 115 +++++++++++++++++++++++++
 6 files changed, 159 insertions(+), 1 deletion(-)
 create mode 100644 src/armnnTfLiteParser/test/Div.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 692bc04..c2c4cc6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -769,6 +769,7 @@ if(BUILD_UNIT_TESTS)
         src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
         src/armnnTfLiteParser/test/Dequantize.cpp
         src/armnnTfLiteParser/test/DetectionPostProcess.cpp
+        src/armnnTfLiteParser/test/Div.cpp
         src/armnnTfLiteParser/test/Exp.cpp
         src/armnnTfLiteParser/test/FullyConnected.cpp
         src/armnnTfLiteParser/test/L2Normalization.cpp
diff --git a/docs/01_parsers.dox b/docs/01_parsers.dox
index 4d3189a..2856411 100644
--- a/docs/01_parsers.dox
+++ b/docs/01_parsers.dox
@@ -157,6 +157,7 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
 - CONCATENATION, Supported Fused Activation: RELU , RELU6 , TANH, NONE
 - CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
 - DEPTHWISE_CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
+- DIV
 - EXP
 - FULLY_CONNECTED, Supported Fused Activation: RELU , RELU6 , TANH, NONE
 - LOGISTIC
diff --git a/src/armnnTfLiteParser/TensorFlowLiteSupport.md b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
index f782862..b69da0a 100644
--- a/src/armnnTfLiteParser/TensorFlowLiteSupport.md
+++ b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
@@ -20,6 +20,8 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
 
 * DEQUANTIZE
 
+* DIV
+
 * EXP
 
 * FULLY_CONNECTED, Supported Fused Activation: RELU , RELU6 , TANH, NONE
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 1ae2de0..dcdae34 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -529,7 +529,7 @@ TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& o
     m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
     m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
     m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
-
+    m_ParserFunctions[tflite::BuiltinOperator_DIV] = &TfLiteParser::ParseDiv;
     // register supported custom operators
     m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
 }
@@ -1668,6 +1668,44 @@ void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParser::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    const auto * options = operatorPtr->builtin_options.AsDivOptions();
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
+
+    auto layerName = boost::str(boost::format("Div:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
+
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
+    {
+        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
+    }
+    else
+    {
+        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
+    }
+
+    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
 void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index c72f7ad..6ed6d83 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -129,6 +129,7 @@ private:
     void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
     void ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex);
     void ParseSub(size_t subgraphIndex, size_t operatorIndex);
+    void ParseDiv(size_t subgraphIndex, size_t operatorIndex);
     void ParseTanH(size_t subgraphIndex, size_t operatorIndex);
     void ParseTranspose(size_t subgraphIndex, size_t operatorIndex);
     void ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/Div.cpp b/src/armnnTfLiteParser/test/Div.cpp
new file mode 100644
index 0000000..f83e455
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Div.cpp
@@ -0,0 +1,115 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct DivFixture : public ParserFlatbuffersFixture
+{
+    explicit DivFixture(const std::string & inputShape1,
+                        const std::string & inputShape2,
+                        const std::string & outputShape,
+                        const std::string & activation="NONE")
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": "DIV" } ],
+                "subgraphs": [ {
+                    "tensors": [
+                        {
+                            "shape": )" + inputShape1 + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 0,
+                            "name": "inputTensor1",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + inputShape2 + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 1,
+                            "name": "inputTensor2",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + outputShape + R"( ,
+                            "type": "FLOAT32",
+                            "buffer": 2,
+                            "name": "outputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        }
+                    ],
+                    "inputs": [ 0, 1 ],
+                    "outputs": [ 2 ],
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": [ 0, 1 ],
+                            "outputs": [ 2 ],
+                            "builtin_options_type": "DivOptions",
+                            "builtin_options": {
+                                "fused_activation_function": )" + activation + R"(
+                            },
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                } ],
+                "buffers" : [
+                    { },
+                    { }
+                ]
+            }
+        )";
+        Setup();
+    }
+};
+
+struct SimpleDivFixture : public DivFixture
+{
+    SimpleDivFixture() : DivFixture("[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDiv, SimpleDivFixture)
+{
+    using armnn::DataType;
+    float Inf = std::numeric_limits<float>::infinity();
+    float NaN = std::numeric_limits<float>::quiet_NaN();
+
+    RunTest<4, DataType::Float32>(0, {{ "inputTensor1", { 0.0f,  1.0f,  2.0f,
+                                                          3.0f,  4.0f,  5.0f,
+                                                          6.0f,  7.0f,  8.0f,
+                                                          9.0f, 10.0f, -11.0f } },
+                                      { "inputTensor2", { 0.0f,  0.0f,  4.0f,
+                                                          3.0f, 40.0f,  5.0f,
+                                                          6.0f,  7.0f,  8.0f,
+                                                          9.0f, 10.0f, 11.0f} } },
+                                     {{ "outputTensor", { NaN,  Inf,  0.5f,
+                                                          1.0f, 0.1f, 1.0f,
+                                                          1.0f, 1.0f, 1.0f,
+                                                          1.0f, 1.0f, -1.0f } } });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
-- 
2.7.4
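For reference, beyond the unit test above, a minimal sketch of how the DIV support added by this patch could be exercised end to end through the public Arm NN C++ API. This is not part of the patch: the model file name "div_model.tflite", the tensor names and the 1x2x2x3 shape are assumptions chosen to mirror the test fixture, and they would have to match whatever model is actually loaded.

// Hypothetical driver: parse a DIV-containing .tflite model and run it on the CpuRef backend.
#include <armnn/ArmNN.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>

#include <iostream>
#include <vector>

int main()
{
    // Parse the flatbuffer; with this patch applied, DIV operators map to AddDivisionLayer.
    armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("div_model.tflite"); // assumed file name

    // Binding info for the assumed tensor names (must match the real model).
    auto input0 = parser->GetNetworkInputBindingInfo(0, "inputTensor1");
    auto input1 = parser->GetNetworkInputBindingInfo(0, "inputTensor2");
    auto output = parser->GetNetworkOutputBindingInfo(0, "outputTensor");

    // Optimise for the reference backend and load the network into a runtime.
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, { armnn::Compute::CpuRef }, runtime->GetDeviceSpec());
    armnn::NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));

    // Element-wise division: out[i] = a[i] / b[i]; 12 elements assumes a 1x2x2x3 model.
    std::vector<float> a(12, 6.0f);
    std::vector<float> b(12, 3.0f);
    std::vector<float> out(12, 0.0f);

    armnn::InputTensors inputTensors
    {
        { input0.first, armnn::ConstTensor(input0.second, a.data()) },
        { input1.first, armnn::ConstTensor(input1.second, b.data()) }
    };
    armnn::OutputTensors outputTensors
    {
        { output.first, armnn::Tensor(output.second, out.data()) }
    };

    runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);
    std::cout << "out[0] = " << out[0] << std::endl; // expected 2.0
    return 0;
}

The sketch targets CpuRef so it runs without accelerator support; any backend registered with the runtime could be substituted in the backend-preference list passed to armnn::Optimize.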