Add DIV support to TFLite parser
author Darshan Patel <darsh.jp@gmail.com>
Mon, 25 May 2020 17:00:07 +0000 (22:30 +0530)
committer TeresaARM <teresa.charlinreyes@arm.com>
Mon, 25 May 2020 20:23:00 +0000 (20:23 +0000)
* Added unit tests
* Updated Documentation

Signed-off-by: Darshan Patel <darsh.jp@gmail.com>
Change-Id: Iadb5c76139d74d755d7f4be4b023b4417efe1e92

CMakeLists.txt
docs/01_parsers.dox
src/armnnTfLiteParser/TensorFlowLiteSupport.md
src/armnnTfLiteParser/TfLiteParser.cpp
src/armnnTfLiteParser/TfLiteParser.hpp
src/armnnTfLiteParser/test/Div.cpp [new file with mode: 0644]

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 692bc04..c2c4cc6 100644
@@ -769,6 +769,7 @@ if(BUILD_UNIT_TESTS)
              src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
              src/armnnTfLiteParser/test/Dequantize.cpp
              src/armnnTfLiteParser/test/DetectionPostProcess.cpp
+             src/armnnTfLiteParser/test/Div.cpp
              src/armnnTfLiteParser/test/Exp.cpp
              src/armnnTfLiteParser/test/FullyConnected.cpp
              src/armnnTfLiteParser/test/L2Normalization.cpp
diff --git a/docs/01_parsers.dox b/docs/01_parsers.dox
index 4d3189a..2856411 100644
@@ -157,6 +157,7 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
 - CONCATENATION, Supported Fused Activation: RELU , RELU6 , TANH, NONE
 - CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
 - DEPTHWISE_CONV_2D, Supported Fused Activation: RELU , RELU6 , TANH, NONE
+- DIV
 - EXP
 - FULLY_CONNECTED, Supported Fused Activation: RELU , RELU6 , TANH, NONE
 - LOGISTIC
diff --git a/src/armnnTfLiteParser/TensorFlowLiteSupport.md b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
index f782862..b69da0a 100644
@@ -20,6 +20,8 @@ The Arm NN SDK TensorFlow Lite parser currently supports the following operators
 
 * DEQUANTIZE
 
+* DIV
+
 * EXP
 
 * FULLY_CONNECTED, Supported Fused Activation: RELU , RELU6 , TANH, NONE
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 1ae2de0..dcdae34 100644
@@ -529,7 +529,7 @@ TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& o
     m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParser::ParseTranspose;
     m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParser::ParseTransposeConv;
     m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParser::ParseUnpack;
-
+    m_ParserFunctions[tflite::BuiltinOperator_DIV]                     = &TfLiteParser::ParseDiv;
     // register supported custom operators
     m_CustomParserFunctions["TFLite_Detection_PostProcess"]      = &TfLiteParser::ParseDetectionPostProcess;
 }
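
For context, the table populated above is how the parser dispatches each operator in the flatbuffer to its handler: the operator's builtin code is used to look up the registered member function, which is then invoked with the subgraph and operator indices. Below is a minimal, self-contained sketch of that dispatch pattern (simplified names, not the actual Arm NN source).

#include <cstddef>
#include <iostream>
#include <map>

// Hypothetical stand-in for tflite::BuiltinOperator; the real enum comes from
// the TensorFlow Lite schema.
enum class BuiltinOperator { ADD, SUB, DIV };

class MiniParser
{
public:
    MiniParser()
    {
        // Registration mirrors the pattern in TfLiteParser's constructor:
        // one entry per supported builtin operator.
        m_ParserFunctions[BuiltinOperator::DIV] = &MiniParser::ParseDiv;
    }

    void ParseOperator(BuiltinOperator op, size_t subgraphIndex, size_t operatorIndex)
    {
        auto it = m_ParserFunctions.find(op);
        if (it == m_ParserFunctions.end())
        {
            std::cout << "unsupported operator\n";
            return;
        }
        // Invoke the registered member function for this operator.
        (this->*(it->second))(subgraphIndex, operatorIndex);
    }

private:
    void ParseDiv(size_t subgraphIndex, size_t operatorIndex)
    {
        std::cout << "ParseDiv called for subgraph " << subgraphIndex
                  << ", operator " << operatorIndex << "\n";
    }

    using ParsingFunction = void (MiniParser::*)(size_t, size_t);
    std::map<BuiltinOperator, ParsingFunction> m_ParserFunctions;
};

int main()
{
    MiniParser parser;
    parser.ParseOperator(BuiltinOperator::DIV, 0, 0); // dispatches to ParseDiv
}

In the real parser, builtin codes without a registered handler are reported as unsupported operators.
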
@@ -1668,6 +1668,44 @@ void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParser::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    const auto * options = operatorPtr->builtin_options.AsDivOptions();
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
+    armnn::TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
+
+    auto layerName = boost::str(boost::format("Div:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
+
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    if (inputTensorInfo.GetNumDimensions() != input1TensorInfo.GetNumDimensions())
+    {
+        AddBroadcastReshapeLayer(subgraphIndex, operatorIndex, layer);
+    }
+    else
+    {
+        RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0], inputTensorIndexes[1]});
+    }
+
+    layer = AddFusedActivationLayer(layer, 0, options->fused_activation_function);
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
 void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
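
With ParseDiv registered, a TensorFlow Lite model containing a DIV operator can be loaded through the public ITfLiteParser API and mapped onto an armnn DivisionLayer like any other supported operator. A minimal usage sketch follows; the file name "model_with_div.tflite" is a placeholder and error handling is omitted.

#include <armnn/ArmNN.hpp>
#include <armnnTfLiteParser/ITfLiteParser.hpp>

int main()
{
    using namespace armnn;

    // Parse the TFLite flatbuffer into an armnn INetwork; DIV operators are
    // now handled by TfLiteParser::ParseDiv.
    auto parser = armnnTfLiteParser::ITfLiteParser::Create();
    INetworkPtr network = parser->CreateNetworkFromBinaryFile("model_with_div.tflite");

    // Optimize for the reference backend and load the result into a runtime.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    std::vector<BackendId> backends = { Compute::CpuRef };
    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());

    NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optNet));
    return 0;
}
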
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index c72f7ad..6ed6d83 100644
@@ -129,6 +129,7 @@ private:
     void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
     void ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex);
     void ParseSub(size_t subgraphIndex, size_t operatorIndex);
+    void ParseDiv(size_t subgraphIndex, size_t operatorIndex);
     void ParseTanH(size_t subgraphIndex, size_t operatorIndex);
     void ParseTranspose(size_t subgraphIndex, size_t operatorIndex);
     void ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/Div.cpp b/src/armnnTfLiteParser/test/Div.cpp
new file mode 100644
index 0000000..f83e455
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Div.cpp
@@ -0,0 +1,115 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct DivFixture : public ParserFlatbuffersFixture
+{
+    explicit DivFixture(const std::string & inputShape1,
+                        const std::string & inputShape2,
+                        const std::string & outputShape,
+                        const std::string & activation="NONE")
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": "DIV" } ],
+                "subgraphs": [ {
+                    "tensors": [
+                        {
+                            "shape": )" + inputShape1 + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 0,
+                            "name": "inputTensor1",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + inputShape2 + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 1,
+                            "name": "inputTensor2",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                            "shape": )" + outputShape + R"( ,
+                            "type": "FLOAT32",
+                            "buffer": 2,
+                            "name": "outputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        }
+                    ],
+                    "inputs": [ 0, 1 ],
+                    "outputs": [ 2 ],
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": [ 0, 1 ],
+                            "outputs": [ 2 ],
+                            "builtin_options_type": "DivOptions",
+                            "builtin_options": {
+                                "fused_activation_function": )" + activation + R"(
+                            },
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                } ],
+                "buffers" : [
+                    { },
+                    { }
+                ]
+            }
+        )";
+        Setup();
+    }
+};
+
+struct SimpleDivFixture : public DivFixture
+{
+    SimpleDivFixture() : DivFixture("[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDiv, SimpleDivFixture)
+{
+    using armnn::DataType;
+    float Inf = std::numeric_limits<float>::infinity();
+    float NaN = std::numeric_limits<float>::quiet_NaN();
+
+    RunTest<4, DataType::Float32>(0, {{ "inputTensor1", { 0.0f,  1.0f,  2.0f,
+                                                          3.0f,  4.0f,  5.0f,
+                                                          6.0f,  7.0f,  8.0f,
+                                                          9.0f, 10.0f, -11.0f } },
+                                      { "inputTensor2", { 0.0f,  0.0f,  4.0f,
+                                                          3.0f,  40.0f,  5.0f,
+                                                          6.0f,  7.0f,  8.0f,
+                                                          9.0f,  10.0f,  11.0f} } },
+                                     {{ "outputTensor", { NaN,   Inf,  0.5f,
+                                                          1.0f,  0.1f, 1.0f,
+                                                          1.0f,  1.0f, 1.0f,
+                                                          1.0f,  1.0f, -1.0f } } });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
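
The committed test only exercises inputs of identical shape, so the AddBroadcastReshapeLayer branch in ParseDiv is not covered. A hypothetical extra case (not part of this commit) could reuse DivFixture with a rank-1 second input; it would sit before BOOST_AUTO_TEST_SUITE_END() in Div.cpp, for example:

struct BroadcastDivFixture : public DivFixture
{
    // Second input is rank 1, forcing the parser down the broadcast-reshape path.
    BroadcastDivFixture() : DivFixture("[ 1, 2, 2, 3 ]", "[ 1 ]", "[ 1, 2, 2, 3 ]") {}
};

BOOST_FIXTURE_TEST_CASE(ParseBroadcastDiv, BroadcastDivFixture)
{
    using armnn::DataType;
    RunTest<4, DataType::Float32>(0, {{ "inputTensor1", {  2.0f,  4.0f,  6.0f,
                                                            8.0f, 10.0f, 12.0f,
                                                           14.0f, 16.0f, 18.0f,
                                                           20.0f, 22.0f, 24.0f } },
                                      { "inputTensor2", {  2.0f } } },
                                     {{ "outputTensor", {  1.0f,  2.0f,  3.0f,
                                                            4.0f,  5.0f,  6.0f,
                                                            7.0f,  8.0f,  9.0f,
                                                           10.0f, 11.0f, 12.0f } } });
}
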