Added TfLiteParser test for InceptionV4Quantized
authorBruno Goncalves <bruno.slackware@gmail.com>
Fri, 28 Dec 2018 12:09:53 +0000 (10:09 -0200)
committerMatteo Martincigh <matteo.martincigh@arm.com>
Thu, 7 Feb 2019 09:23:03 +0000 (09:23 +0000)
Change-Id: Iad57395b0231a0960e7fc27c1d487bdee8a1f464

tests/CMakeLists.txt
tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp [new file with mode: 0644]

index 352c358..de40137 100644 (file)
@@ -189,6 +189,12 @@ if (BUILD_TF_LITE_PARSER)
         ImagePreprocessor.cpp)
     TfLiteParserTest(TfLiteInceptionV3Quantized-Armnn "${TfLiteInceptionV3Quantized-Armnn_sources}")
 
+    set(TfLiteInceptionV4Quantized-Armnn_sources
+        TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
+        ImagePreprocessor.hpp
+        ImagePreprocessor.cpp)
+    TfLiteParserTest(TfLiteInceptionV4Quantized-Armnn "${TfLiteInceptionV4Quantized-Armnn_sources}")
+
     set(TfLiteResNetV2-Armnn_sources
         TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
         ImagePreprocessor.hpp
diff --git a/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp b/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
new file mode 100644 (file)
index 0000000..bcb9db8
--- /dev/null
@@ -0,0 +1,63 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "../InferenceTest.hpp"
+#include "../ImagePreprocessor.hpp"
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+
+using namespace armnnTfLiteParser;
+
+int main(int argc, char* argv[])
+{
+    int retVal = EXIT_FAILURE;
+    try
+    {
+        std::vector<ImageSet> imageSet =
+        {
+            {"Dog.jpg", 209},
+            {"Cat.jpg", 283},
+            {"shark.jpg", 3},
+
+        };
+
+        armnn::TensorShape inputTensorShape({ 1, 299, 299, 3  });
+
+        using DataType = uint8_t;
+        using DatabaseType = ImagePreprocessor<DataType>;
+        using ParserType = armnnTfLiteParser::ITfLiteParser;
+        using ModelType = InferenceModel<ParserType, DataType>;
+
+        // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
+        retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType,
+                                                          ParserType>(
+                     argc, argv,
+                     "inception_v4_299_quant.tflite",     // model name
+                     true,                                // model is binary
+                     "input",                             // input tensor name
+                     "InceptionV4/Logits/Predictions",    // output tensor name
+                     { 0, 1, 2 },                         // indices into imageSet above to test
+                     [&imageSet](const char* dataDir, const ModelType & model) {
+                         // Read the input quantization scale/offset from the parsed
+                         // model so the preprocessor can quantize the raw images.
+                         auto inputBinding = model.GetInputBindingInfo();
+                         return DatabaseType(
+                             dataDir,
+                             299,
+                             299,
+                             imageSet,
+                             inputBinding.second.GetQuantizationScale(),
+                             inputBinding.second.GetQuantizationOffset());
+                     },
+                     &inputTensorShape);
+    }
+    catch (const std::exception& e)
+    {
+        // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
+        // exception of type std::length_error.
+        // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
+        std::cerr << "WARNING: " << *argv << ": An error has occurred when running "
+                     "the classifier inference tests: " << e.what() << std::endl;
+    }
+    return retVal;
+}