IVGCVSW-4191 Add DEQUANTIZE to the TfLite Parser
author Finn Williams <Finn.Williams@arm.com>
Fri, 6 Dec 2019 09:55:55 +0000 (09:55 +0000)
committer Finn Williams <Finn.Williams@arm.com>
Tue, 10 Dec 2019 12:55:18 +0000 (12:55 +0000)
!armnn:2421

Change-Id: Icdb02b7248ed408c3c8ad2e3e38df5b7cda1c545
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
CMakeLists.txt
src/armnnTfLiteParser/TfLiteParser.cpp
src/armnnTfLiteParser/TfLiteParser.hpp
src/armnnTfLiteParser/test/Dequantize.cpp [new file with mode: 0644]

index 8f6d794..7af6a94 100644 (file)
@@ -706,6 +706,7 @@ if(BUILD_UNIT_TESTS)
              src/armnnTfLiteParser/test/Constant.cpp
              src/armnnTfLiteParser/test/Conv2D.cpp
              src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
+             src/armnnTfLiteParser/test/Dequantize.cpp
              src/armnnTfLiteParser/test/DetectionPostProcess.cpp
              src/armnnTfLiteParser/test/FullyConnected.cpp
              src/armnnTfLiteParser/test/L2Normalization.cpp
index f06e244..6853512 100644 (file)
@@ -314,6 +314,12 @@ armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::
         case tflite::TensorType_FLOAT32:
             type = armnn::DataType::Float32;
             break;
+        case tflite::TensorType_INT8:
+            type = armnn::DataType::QSymmS8;
+            break;
+        case tflite::TensorType_INT16:
+            type = armnn::DataType::QuantisedSymm16;
+            break;
         case tflite::TensorType_INT32:
             type = armnn::DataType::Signed32;
             break;
@@ -440,6 +446,7 @@ TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& o
     m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParser::ParseConcatenation;
     m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParser::ParseConv2D;
     m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParser::ParseDepthwiseConv2D;
+    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParser::ParseDequantize;
     m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParser::ParseCustomOperator;
     m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParser::ParseFullyConnected;
     m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParser::ParseLogistic;
@@ -923,6 +930,31 @@ void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorInd
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = boost::str(boost::format("Dequantize:%1%:%2%") % subgraphIndex % operatorIndex);
+
+    IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
+    BOOST_ASSERT(layer != nullptr);
+
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
+}
+
 void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
index 7f97b6d..a8241f6 100644 (file)
@@ -100,6 +100,7 @@ private:
     void ParseConcatenation(size_t subgraphIndex, size_t operatorIndex);
     void ParseConv2D(size_t subgraphIndex, size_t operatorIndex);
     void ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex);
+    void ParseDequantize(size_t subgraphIndex, size_t operatorIndex);
     void ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex);
     void ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex);
     void ParseLogistic(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/Dequantize.cpp b/src/armnnTfLiteParser/test/Dequantize.cpp
new file mode 100644 (file)
index 0000000..2f98c07
--- /dev/null
@@ -0,0 +1,121 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+    struct DequantizeFixture : public ParserFlatbuffersFixture
+    {
+        explicit DequantizeFixture(const std::string & inputShape,
+                                   const std::string & outputShape,
+                                   const std::string & dataType)
+        {
+            m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": "DEQUANTIZE" } ],
+                "subgraphs": [ {
+                    "tensors": [
+                        {
+                            "shape": )" + inputShape + R"(,
+                            "type": )" + dataType + R"(,
+                            "buffer": 0,
+                            "name": "inputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.5 ],
+                                "zero_point": [ 0 ]
+                            }
+                        },
+                        {
+                            "shape": )" + outputShape + R"( ,
+                            "type": "FLOAT32",
+                            "buffer": 1,
+                            "name": "outputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ]
+                            }
+                        }
+                    ],
+                    "inputs": [ 0 ],
+                    "outputs": [ 1 ],
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": [ 0 ],
+                            "outputs": [ 1 ],
+                            "builtin_options_type": "DequantizeOptions",
+                            "builtin_options": {
+                            },
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                ]
+                } ],
+                "buffers" : [
+                    { },
+                    { }
+                ]
+            }
+        )";
+            SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+        }
+    };
+
+    struct SimpleDequantizeFixtureQAsymm8 : DequantizeFixture
+    {
+        SimpleDequantizeFixtureQAsymm8() : DequantizeFixture("[ 1, 6 ]",
+                                                             "[ 1, 6 ]",
+                                                             "UINT8") {}
+    };
+
+    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymm8, SimpleDequantizeFixtureQAsymm8)
+    {
+        RunTest<2, armnn::DataType::QuantisedAsymm8 , armnn::DataType::Float32>(
+                0,
+                {{"inputTensor",  { 0u,   1u,   5u,   100u,   200u,   255u }}},
+                {{"outputTensor", { 0.0f, 1.5f, 7.5f, 150.0f, 300.0f, 382.5f }}});
+    }
+
+    struct SimpleDequantizeFixtureQSymm16 : DequantizeFixture
+    {
+        SimpleDequantizeFixtureQSymm16() : DequantizeFixture("[ 1, 6 ]",
+                                                             "[ 1, 6 ]",
+                                                             "INT16") {}
+    };
+
+    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQSymm16, SimpleDequantizeFixtureQSymm16)
+    {
+        RunTest<2, armnn::DataType::QuantisedSymm16 , armnn::DataType::Float32>(
+                0,
+                {{"inputTensor",  { 0,    1,    5,    32767,    -1,   -32768 }}},
+                {{"outputTensor", { 0.0f, 1.5f, 7.5f, 49150.5f, -1.5f,-49152.0f }}});
+    }
+
+    struct SimpleDequantizeFixtureQSymmS8 : DequantizeFixture
+    {
+        SimpleDequantizeFixtureQSymmS8() : DequantizeFixture("[ 1, 6 ]",
+                                                             "[ 1, 6 ]",
+                                                             "INT8") {}
+    };
+
+    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQSymmS8, SimpleDequantizeFixtureQSymmS8)
+    {
+        RunTest<2, armnn::DataType::QSymmS8 , armnn::DataType::Float32>(
+                0,
+                {{"inputTensor",  { 0,    1,    5,    127,    -128,   -1 }}},
+                {{"outputTensor", { 0.0f, 1.5f, 7.5f, 190.5f, -192.0f, -1.5f }}});
+    }
+
+BOOST_AUTO_TEST_SUITE_END()