IVGCVSW-5284 Refactor ExecuteNetwork
authorJan Eilers <jan.eilers@arm.com>
Thu, 15 Oct 2020 17:34:43 +0000 (18:34 +0100)
committerJan Eilers <jan.eilers@arm.com>
Tue, 20 Oct 2020 12:48:50 +0000 (13:48 +0100)
 * Removed boost program options and replaced it with cxxopts
 * Unified adding, parsing and validation of program options
   into the struct ProgramOptions
 * Program options are now parsed directly into ExecuteNetworkParams
   which can be passed directly to MainImpl
 * Split NetworkExecutionUtils into header and source
 * Removed RunTest
 * Removed RunCsvTest
 * Removed RunClTuning
 * Moved MainImpl back to ExecuteNetwork.cpp
 * Added additional util functions
The functionality of ExecuteNetwork remains the same, with two
exceptions: CL tuning runs now need to be started separately,
and there is no short option for fp16-turbo-mode because -h is
reserved in cxxopts for printing the help message.
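
For context, a minimal, illustrative sketch of the cxxopts pattern this change
adopts (options bound directly into a parameter struct, boolean flags via
default/implicit values). The struct and option names below are placeholders,
not the actual ExecuteNetwork code:

    #include <cxxopts.hpp>
    #include <cstdlib>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for ExecuteNetworkParams; not part of this commit.
    struct ExampleParams
    {
        std::string m_ModelPath;
        bool        m_EnableFp16TurboMode = false;
    };

    int main(int argc, char* argv[])
    {
        ExampleParams params;
        cxxopts::Options options("example", "Binds options straight into a parameter struct");

        options.add_options()
            ("m,model-path", "Path to the model file",
             cxxopts::value<std::string>(params.m_ModelPath))
            ("fp16-turbo-mode", "Convert FP32 layers to FP16 where the backend supports it",
             cxxopts::value<bool>(params.m_EnableFp16TurboMode)
                 ->default_value("false")->implicit_value("true"))
            ("h,help", "Display usage information");

        auto result = options.parse(argc, argv);
        if (result.count("help"))
        {
            std::cout << options.help() << std::endl;
            return EXIT_SUCCESS;
        }

        std::cout << "model-path=" << params.m_ModelPath
                  << " fp16-turbo-mode=" << params.m_EnableFp16TurboMode << std::endl;
        return EXIT_SUCCESS;
    }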

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ib9689375c81e1a184c17bb3ea66c3550430bbe09

tests/CMakeLists.txt
tests/ExecuteNetwork/ExecuteNetwork.cpp
tests/ExecuteNetwork/ExecuteNetworkParams.cpp [new file with mode: 0644]
tests/ExecuteNetwork/ExecuteNetworkParams.hpp [new file with mode: 0644]
tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp [new file with mode: 0644]
tests/ExecuteNetwork/ExecuteNetworkProgramOptions.hpp [new file with mode: 0644]
tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp [new file with mode: 0644]
tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp

index b3496b4..7141edf 100644 (file)
@@ -255,12 +255,19 @@ endif()
 
 if (BUILD_ARMNN_SERIALIZER OR BUILD_CAFFE_PARSER OR BUILD_TF_PARSER OR BUILD_TF_LITE_PARSER OR BUILD_ONNX_PARSER)
     set(ExecuteNetwork_sources
-        ExecuteNetwork/ExecuteNetwork.cpp)
+        ExecuteNetwork/ExecuteNetwork.cpp
+        ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
+        ExecuteNetwork/ExecuteNetworkProgramOptions.hpp
+        ExecuteNetwork/ExecuteNetworkParams.cpp
+        ExecuteNetwork/ExecuteNetworkParams.hpp
+        NetworkExecutionUtils/NetworkExecutionUtils.cpp
+        NetworkExecutionUtils/NetworkExecutionUtils.hpp)
 
     add_executable_ex(ExecuteNetwork ${ExecuteNetwork_sources})
     target_include_directories(ExecuteNetwork PRIVATE ../src/armnn)
     target_include_directories(ExecuteNetwork PRIVATE ../src/armnnUtils)
     target_include_directories(ExecuteNetwork PRIVATE ../src/backends)
+    target_include_directories(ExecuteNetwork PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
 
     if (BUILD_ARMNN_SERIALIZER)
         target_link_libraries(ExecuteNetwork armnnSerializer)
index 58f1bd3..c17eabd 100644 (file)
 // SPDX-License-Identifier: MIT
 //
 
-#include "../NetworkExecutionUtils/NetworkExecutionUtils.hpp"
+#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
+#include "ExecuteNetworkProgramOptions.hpp"
 
-// MAIN
-int main(int argc, const char* argv[])
-{
-    // Configures logging for both the ARMNN library and this test program.
-#ifdef NDEBUG
-    armnn::LogSeverity level = armnn::LogSeverity::Info;
-#else
-    armnn::LogSeverity level = armnn::LogSeverity::Debug;
-#endif
-    armnn::ConfigureLogging(true, true, level);
-
-    std::string testCasesFile;
+#include <armnn/Logging.hpp>
+#include <Filesystem.hpp>
+#include <InferenceTest.hpp>
 
-    std::string modelFormat;
-    std::string modelPath;
-    std::string inputNames;
-    std::string inputTensorShapes;
-    std::string inputTensorDataFilePaths;
-    std::string outputNames;
-    std::string inputTypes;
-    std::string outputTypes;
-    std::string dynamicBackendsPath;
-    std::string outputTensorFiles;
-
-    // external profiling parameters
-    std::string outgoingCaptureFile;
-    std::string incomingCaptureFile;
-    uint32_t counterCapturePeriod;
-    std::string fileFormat;
+#if defined(ARMNN_SERIALIZER)
+#include "armnnDeserializer/IDeserializer.hpp"
+#endif
+#if defined(ARMNN_CAFFE_PARSER)
+#include "armnnCaffeParser/ICaffeParser.hpp"
+#endif
+#if defined(ARMNN_TF_PARSER)
+#include "armnnTfParser/ITfParser.hpp"
+#endif
+#if defined(ARMNN_TF_LITE_PARSER)
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#endif
+#if defined(ARMNN_ONNX_PARSER)
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#endif
 
-    size_t iterations = 1;
-    int tuningLevel = 0;
-    std::string tuningPath;
+#include <future>
 
-    double thresholdTime = 0.0;
+template<typename TParser, typename TDataType>
+int MainImpl(const ExecuteNetworkParams& params,
+             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
+{
+    using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
 
-    size_t subgraphId = 0;
+    std::vector<TContainer> inputDataContainers;
 
-    const std::string backendsMessage = "REQUIRED: Which device to run layers on by default. Possible choices: "
-                                      + armnn::BackendRegistryInstance().GetBackendIdsAsString();
-    po::options_description desc("Options");
     try
     {
-        desc.add_options()
-            ("help", "Display usage information")
-            ("compute,c", po::value<std::vector<std::string>>()->multitoken()->required(),
-             backendsMessage.c_str())
-            ("test-cases,t", po::value(&testCasesFile), "Path to a CSV file containing test cases to run. "
-             "If set, further parameters -- with the exception of compute device and concurrency -- will be ignored, "
-             "as they are expected to be defined in the file for each test in particular.")
-            ("concurrent,n", po::bool_switch()->default_value(false),
-             "Whether or not the test cases should be executed in parallel")
-            ("model-format,f", po::value(&modelFormat)->required(),
-             "armnn-binary, caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or "
-             "tensorflow-text.")
-            ("model-path,m", po::value(&modelPath)->required(), "Path to model file, e.g. .armnn, .caffemodel, "
-             ".prototxt, .tflite, .onnx")
-            ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
-             "Path where to load any available dynamic backend from. "
-             "If left empty (the default), dynamic backends will not be used.")
-            ("input-name,i", po::value(&inputNames),
-             "Identifier of the input tensors in the network separated by comma.")
-            ("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be executed."
-              "Defaults to 0")
-            ("input-tensor-shape,s", po::value(&inputTensorShapes),
-             "The shape of the input tensors in the network as a flat array of integers separated by comma."
-             "Several shapes can be passed by separating them with a colon (:)."
-             "This parameter is optional, depending on the network.")
-            ("input-tensor-data,d", po::value(&inputTensorDataFilePaths)->default_value(""),
-             "Path to files containing the input data as a flat array separated by whitespace. "
-             "Several paths can be passed by separating them with a comma. If not specified, the network will be run "
-             "with dummy data (useful for profiling).")
-            ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
-             "If unset, defaults to \"float\" for all defined inputs. "
-             "Accepted values (float, int or qasymm8)")
-            ("quantize-input,q",po::bool_switch()->default_value(false),
-             "If this option is enabled, all float inputs will be quantized to qasymm8. "
-             "If unset, default to not quantized. "
-             "Accepted values (true or false)")
-            ("output-type,z",po::value(&outputTypes),
-             "The type of the output tensors in the network separated by comma. "
-             "If unset, defaults to \"float\" for all defined outputs. "
-             "Accepted values (float, int or qasymm8).")
-            ("dequantize-output,l",po::bool_switch()->default_value(false),
-             "If this option is enabled, all quantized outputs will be dequantized to float. "
-             "If unset, default to not get dequantized. "
-             "Accepted values (true or false)")
-            ("output-name,o", po::value(&outputNames),
-             "Identifier of the output tensors in the network separated by comma.")
-            ("write-outputs-to-file,w", po::value(&outputTensorFiles),
-             "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
-             "If left empty (the default), the output tensors will not be written to a file.")
-            ("event-based-profiling,e", po::bool_switch()->default_value(false),
-             "Enables built in profiler. If unset, defaults to off.")
-            ("visualize-optimized-model,v", po::bool_switch()->default_value(false),
-             "Enables built optimized model visualizer. If unset, defaults to off.")
-            ("bf16-turbo-mode", po::bool_switch()->default_value(false), "If this option is enabled, FP32 layers, "
-             "weights and biases will be converted to BFloat16 where the backend supports it")
-            ("fp16-turbo-mode,h", po::bool_switch()->default_value(false), "If this option is enabled, FP32 layers, "
-             "weights and biases will be converted to FP16 where the backend supports it")
-            ("threshold-time,r", po::value<double>(&thresholdTime)->default_value(0.0),
-             "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
-             "inference time is greater than the threshold time, the test will fail. By default, no threshold "
-             "time is used.")
-            ("print-intermediate-layers,p", po::bool_switch()->default_value(false),
-             "If this option is enabled, the output of every graph layer will be printed.")
-            ("enable-external-profiling,a", po::bool_switch()->default_value(false),
-             "If enabled external profiling will be switched on")
-            ("timeline-profiling", po::bool_switch()->default_value(false),
-             "If enabled timeline profiling will be switched on, requires external profiling")
-            ("outgoing-capture-file,j", po::value(&outgoingCaptureFile),
-             "If specified the outgoing external profiling packets will be captured in this binary file")
-            ("incoming-capture-file,k", po::value(&incomingCaptureFile),
-             "If specified the incoming external profiling packets will be captured in this binary file")
-            ("file-only-external-profiling,g", po::bool_switch()->default_value(false),
-             "If enabled then the 'file-only' test mode of external profiling will be enabled")
-            ("counter-capture-period,u", po::value<uint32_t>(&counterCapturePeriod)->default_value(150u),
-             "If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test")
-            ("file-format", po::value(&fileFormat)->default_value("binary"),
-             "If profiling is enabled specifies the output file format")
-            ("iterations", po::value<size_t>(&iterations)->default_value(1),
-             "Number of iterations to run the network for, default is set to 1")
-            ("tuning-path", po::value(&tuningPath),
-            "Path to tuning file. Enables use of CL tuning")
-            ("tuning-level", po::value<int>(&tuningLevel)->default_value(0),
-             "Sets the tuning level which enables a tuning run which will update/create a tuning file. "
-             "Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). "
-             "Requires tuning-path to be set, default is set to 0 (No tuning run)")
-            ("parse-unsupported", po::bool_switch()->default_value(false),
-                "Add unsupported operators as stand-in layers (where supported by parser)")
-            ("infer-output-shape", po::bool_switch()->default_value(false),
-                "Infers output tensor shape from input tensor shape and validate where applicable (where supported by "
-                "parser)")
-            ("enable-fast-math", po::bool_switch()->default_value(false),
-             "Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
-             "performance improvements but may result in reduced or different precision.");
-    }
-    catch (const std::exception& e)
-    {
-        // Coverity points out that default_value(...) can throw a bad_lexical_cast,
-        // and that desc.add_options() can throw boost::io::too_few_args.
-        // They really won't in any of these cases.
-        ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
-        ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
-        return EXIT_FAILURE;
-    }
+        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
+        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
+        inferenceModelParams.m_ModelPath                      = params.m_ModelPath;
+        inferenceModelParams.m_IsModelBinary                  = params.m_IsModelBinary;
+        inferenceModelParams.m_ComputeDevices                 = params.m_ComputeDevices;
+        inferenceModelParams.m_DynamicBackendsPath            = params.m_DynamicBackendsPath;
+        inferenceModelParams.m_PrintIntermediateLayers        = params.m_PrintIntermediate;
+        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
+        inferenceModelParams.m_ParseUnsupported               = params.m_ParseUnsupported;
+        inferenceModelParams.m_InferOutputShape               = params.m_InferOutputShape;
+        inferenceModelParams.m_EnableFastMath                 = params.m_EnableFastMath;
 
-    // Parses the command-line.
-    po::variables_map vm;
-    try
-    {
-        po::store(po::parse_command_line(argc, argv, desc), vm);
+        for(const std::string& inputName: params.m_InputNames)
+        {
+            inferenceModelParams.m_InputBindings.push_back(inputName);
+        }
 
-        if (CheckOption(vm, "help") || argc <= 1)
+        for(unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
         {
-            std::cout << "Executes a neural network model using the provided input tensor. " << std::endl;
-            std::cout << "Prints the resulting output tensor." << std::endl;
-            std::cout << std::endl;
-            std::cout << desc << std::endl;
-            return EXIT_SUCCESS;
+            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
         }
 
-        po::notify(vm);
-    }
-    catch (const po::error& e)
-    {
-        std::cerr << e.what() << std::endl << std::endl;
-        std::cerr << desc << std::endl;
-        return EXIT_FAILURE;
-    }
+        for(const std::string& outputName: params.m_OutputNames)
+        {
+            inferenceModelParams.m_OutputBindings.push_back(outputName);
+        }
 
-    // Get the value of the switch arguments.
-    bool concurrent = vm["concurrent"].as<bool>();
-    bool enableProfiling = vm["event-based-profiling"].as<bool>();
-    bool enableLayerDetails = vm["visualize-optimized-model"].as<bool>();
-    bool enableBf16TurboMode = vm["bf16-turbo-mode"].as<bool>();
-    bool enableFp16TurboMode = vm["fp16-turbo-mode"].as<bool>();
-    bool quantizeInput = vm["quantize-input"].as<bool>();
-    bool dequantizeOutput = vm["dequantize-output"].as<bool>();
-    bool printIntermediate = vm["print-intermediate-layers"].as<bool>();
-    bool enableExternalProfiling = vm["enable-external-profiling"].as<bool>();
-    bool fileOnlyExternalProfiling = vm["file-only-external-profiling"].as<bool>();
-    bool parseUnsupported = vm["parse-unsupported"].as<bool>();
-    bool timelineEnabled = vm["timeline-profiling"].as<bool>();
-    bool inferOutputShape = vm["infer-output-shape"].as<bool>();
-    bool enableFastMath = vm["enable-fast-math"].as<bool>();
-
-    if (enableBf16TurboMode && enableFp16TurboMode)
-    {
-        ARMNN_LOG(fatal) << "BFloat16 and Float16 turbo mode cannot be enabled at the same time.";
-        return EXIT_FAILURE;
-    }
+        inferenceModelParams.m_SubgraphId          = params.m_SubgraphId;
+        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
+        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;
 
-    // Create runtime
-    armnn::IRuntime::CreationOptions options;
-    options.m_EnableGpuProfiling                     = enableProfiling;
-    options.m_DynamicBackendsPath                    = dynamicBackendsPath;
-    options.m_ProfilingOptions.m_EnableProfiling     = enableExternalProfiling;
-    options.m_ProfilingOptions.m_IncomingCaptureFile = incomingCaptureFile;
-    options.m_ProfilingOptions.m_OutgoingCaptureFile = outgoingCaptureFile;
-    options.m_ProfilingOptions.m_FileOnly            = fileOnlyExternalProfiling;
-    options.m_ProfilingOptions.m_CapturePeriod       = counterCapturePeriod;
-    options.m_ProfilingOptions.m_FileFormat          = fileFormat;
-    options.m_ProfilingOptions.m_TimelineEnabled     = timelineEnabled;
-
-    if (timelineEnabled && !enableExternalProfiling)
-    {
-        ARMNN_LOG(fatal) << "Timeline profiling requires external profiling to be turned on";
-        return EXIT_FAILURE;
-    }
+        InferenceModel<TParser, TDataType> model(inferenceModelParams,
+                                                 params.m_EnableProfiling,
+                                                 params.m_DynamicBackendsPath,
+                                                 runtime);
 
-    // Check whether we have to load test cases from a file.
-    if (CheckOption(vm, "test-cases"))
-    {
-        // Check that the file exists.
-        if (!fs::exists(testCasesFile))
+        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
+        for(unsigned int i = 0; i < numInputs; ++i)
         {
-            ARMNN_LOG(fatal) << "Given file \"" << testCasesFile << "\" does not exist";
-            return EXIT_FAILURE;
+            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
+                                                          armnn::MakeOptional<QuantizationParams>(
+                                                                  model.GetInputQuantizationParams()) :
+                                                          armnn::EmptyOptional();
+
+            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
+                                                    armnn::EmptyOptional() :
+                                                    armnn::MakeOptional<std::string>(
+                                                            params.m_InputTensorDataFilePaths[i]);
+
+            unsigned int numElements = model.GetInputSize(i);
+            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
+            {
+                // If the user has provided a tensor shape for the current input,
+                // override numElements
+                numElements = params.m_InputTensorShapes[i]->GetNumElements();
+            }
+
+            TContainer tensorData;
+            PopulateTensorWithData(tensorData,
+                                   numElements,
+                                   params.m_InputTypes[i],
+                                   qParams,
+                                   dataFile);
+
+            inputDataContainers.push_back(tensorData);
         }
 
-        // Parse CSV file and extract test cases
-        armnnUtils::CsvReader reader;
-        std::vector<armnnUtils::CsvRow> testCases = reader.ParseFile(testCasesFile);
+        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
+        std::vector<TContainer> outputDataContainers;
 
-        // Check that there is at least one test case to run
-        if (testCases.empty())
+        for (unsigned int i = 0; i < numOutputs; ++i)
         {
-            ARMNN_LOG(fatal) << "Given file \"" << testCasesFile << "\" has no test cases";
-            return EXIT_FAILURE;
+            if (params.m_OutputTypes[i].compare("float") == 0)
+            {
+                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
+            }
+            else if (params.m_OutputTypes[i].compare("int") == 0)
+            {
+                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
+            }
+            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
+            {
+                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
+            }
+            else
+            {
+                ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
+                return EXIT_FAILURE;
+            }
         }
-        // Create runtime
-        std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options));
-
-        const std::string executableName("ExecuteNetwork");
 
-        // Check whether we need to run the test cases concurrently
-        if (concurrent)
+        for (size_t x = 0; x < params.m_Iterations; x++)
         {
-            std::vector<std::future<int>> results;
-            results.reserve(testCases.size());
+            // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
+            auto inference_duration = model.Run(inputDataContainers, outputDataContainers);
 
-            // Run each test case in its own thread
-            for (auto&  testCase : testCases)
+            if (params.m_GenerateTensorData)
             {
-                testCase.values.insert(testCase.values.begin(), executableName);
-                results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
-                                             enableProfiling, enableFp16TurboMode, enableBf16TurboMode, thresholdTime,
-                                             printIntermediate, enableLayerDetails, parseUnsupported,
-                                             inferOutputShape, enableFastMath));
+                ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
             }
 
-            // Check results
-            for (auto& result : results)
+            // Print output tensors
+            const auto& infosOut = model.GetOutputBindingInfos();
+            for (size_t i = 0; i < numOutputs; i++)
             {
-                if (result.get() != EXIT_SUCCESS)
-                {
-                    return EXIT_FAILURE;
-                }
+                const armnn::TensorInfo& infoOut = infosOut[i].second;
+                auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];
+
+                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
+                                      infoOut,
+                                      outputTensorFile,
+                                      params.m_DequantizeOutput);
+                mapbox::util::apply_visitor(printer, outputDataContainers[i]);
             }
-        }
-        else
-        {
-            // Run tests sequentially
-            for (auto&  testCase : testCases)
+
+            ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
+                            << std::fixed << inference_duration.count() << " ms\n";
+
+            // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
+            if (params.m_ThresholdTime != 0.0)
             {
-                testCase.values.insert(testCase.values.begin(), executableName);
-                if (RunCsvTest(testCase, runtime, enableProfiling,
-                               enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate,
-                               enableLayerDetails, parseUnsupported, inferOutputShape, enableFastMath) != EXIT_SUCCESS)
+                ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
+                                << std::fixed << params.m_ThresholdTime << " ms";
+                auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
+                ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
+                                << std::fixed << thresholdMinusInference << " ms" << "\n";
+
+                if (thresholdMinusInference < 0)
                 {
-                    return EXIT_FAILURE;
+                    std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
+                    ARMNN_LOG(fatal) << errorMessage;
                 }
             }
         }
-
-        return EXIT_SUCCESS;
     }
-    else // Run single test
+    catch (const armnn::Exception& e)
     {
-        // Get the preferred order of compute devices. If none are specified, default to using CpuRef
-        const std::string computeOption("compute");
-        std::vector<std::string> computeDevicesAsStrings =
-                CheckOption(vm, computeOption.c_str()) ?
-                    vm[computeOption].as<std::vector<std::string>>() :
-                    std::vector<std::string>();
-        std::vector<armnn::BackendId> computeDevices(computeDevicesAsStrings.begin(), computeDevicesAsStrings.end());
+        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
+        return EXIT_FAILURE;
+    }
 
-        // Remove duplicates from the list of compute devices.
-        RemoveDuplicateDevices(computeDevices);
+    return EXIT_SUCCESS;
+}
 
-#if defined(ARMCOMPUTECL_ENABLED)
-        std::shared_ptr<armnn::IGpuAccTunedParameters> tuned_params;
 
-        if (tuningPath != "")
-        {
-            if (tuningLevel != 0)
-            {
-                RunCLTuning(tuningPath, tuningLevel, modelFormat, inputTensorShapes, computeDevices,
-                    dynamicBackendsPath, modelPath, inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput,
-                    outputTypes, outputNames, outputTensorFiles, dequantizeOutput, enableProfiling,
-                    enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate, subgraphId,
-                    enableLayerDetails, parseUnsupported, inferOutputShape, enableFastMath);
-            }
-            ARMNN_LOG(info) << "Using tuning params: " << tuningPath << "\n";
-            options.m_BackendOptions.emplace_back(
-                armnn::BackendOptions
-                {
-                    "GpuAcc",
-                    {
-                        {"TuningLevel", 0},
-                        {"TuningFile", tuningPath.c_str()},
-                        {"KernelProfilingEnabled", enableProfiling}
-                    }
-                }
-            );
-        }
-#endif
-        try
-        {
-            CheckOptionDependencies(vm);
-        }
-        catch (const po::error& e)
-        {
-            std::cerr << e.what() << std::endl << std::endl;
-            std::cerr << desc << std::endl;
-            return EXIT_FAILURE;
-        }
-        // Create runtime
-        std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options));
-
-        return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath,
-            inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
-            outputTensorFiles, dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
-            thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported, inferOutputShape,
-            enableFastMath, iterations, runtime);
+// MAIN
+int main(int argc, const char* argv[])
+{
+    // Configures logging for both the ARMNN library and this test program.
+    #ifdef NDEBUG
+    armnn::LogSeverity level = armnn::LogSeverity::Info;
+    #else
+    armnn::LogSeverity level = armnn::LogSeverity::Debug;
+    #endif
+    armnn::ConfigureLogging(true, true, level);
+
+
+    // Get ExecuteNetwork parameters and runtime options from command line
+    ProgramOptions ProgramOptions(argc, argv);
+
+    // Create runtime
+    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(ProgramOptions.m_RuntimeOptions));
+
+    std::string modelFormat = ProgramOptions.m_ExNetParams.m_ModelFormat;
+
+    // Forward to implementation based on the parser type
+    if (modelFormat.find("armnn") != std::string::npos)
+    {
+    #if defined(ARMNN_SERIALIZER)
+        return MainImpl<armnnDeserializer::IDeserializer, float>(ProgramOptions.m_ExNetParams, runtime);
+    #else
+        ARMNN_LOG(fatal) << "Not built with serialization support.";
+        return EXIT_FAILURE;
+    #endif
+    }
+    else if (modelFormat.find("caffe") != std::string::npos)
+    {
+    #if defined(ARMNN_CAFFE_PARSER)
+        return MainImpl<armnnCaffeParser::ICaffeParser, float>(ProgramOptions.m_ExNetParams, runtime);
+    #else
+        ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
+        return EXIT_FAILURE;
+    #endif
+    }
+    else if (modelFormat.find("onnx") != std::string::npos)
+    {
+    #if defined(ARMNN_ONNX_PARSER)
+        return MainImpl<armnnOnnxParser::IOnnxParser, float>(ProgramOptions.m_ExNetParams, runtime);
+    #else
+        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
+        return EXIT_FAILURE;
+    #endif
+    }
+    else if (modelFormat.find("tensorflow") != std::string::npos)
+    {
+    #if defined(ARMNN_TF_PARSER)
+        return MainImpl<armnnTfParser::ITfParser, float>(ProgramOptions.m_ExNetParams, runtime);
+    #else
+        ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
+        return EXIT_FAILURE;
+    #endif
+    }
+    else if(modelFormat.find("tflite") != std::string::npos)
+    {
+    #if defined(ARMNN_TF_LITE_PARSER)
+        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
+    #else
+        ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
+        return EXIT_FAILURE;
+    #endif
+    }
+    else
+    {
+        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
+                         << "'. Please include 'armnn', 'caffe', 'tensorflow', 'tflite' or 'onnx'";
+        return EXIT_FAILURE;
     }
 }
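
The output-printing loop above holds each tensor in a mapbox::util::variant
(TContainer) and visits it with TensorPrinter via apply_visitor. As a rough
illustration of that visitor pattern, here is a self-contained sketch that uses
std::variant instead of the mapbox variant, with a hypothetical PrintSize
visitor standing in for TensorPrinter:

    #include <cstdint>
    #include <iostream>
    #include <variant>
    #include <vector>

    // Hypothetical visitor; the real TensorPrinter also handles dequantization
    // and optionally writes the tensor to a file.
    struct PrintSize
    {
        template <typename T>
        void operator()(const std::vector<T>& data) const
        {
            std::cout << "tensor with " << data.size() << " elements\n";
        }
    };

    int main()
    {
        using TContainer = std::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>;

        std::vector<TContainer> outputs;
        outputs.push_back(std::vector<float>(10));   // e.g. a float output
        outputs.push_back(std::vector<uint8_t>(4));  // e.g. a qasymm8 output

        for (const auto& out : outputs)
        {
            std::visit(PrintSize{}, out);            // picks the overload for the held type
        }
        return 0;
    }
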
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.cpp b/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
new file mode 100644 (file)
index 0000000..c298bd6
--- /dev/null
@@ -0,0 +1,212 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ExecuteNetworkParams.hpp"
+
+#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
+#include <InferenceModel.hpp>
+#include <armnn/Logging.hpp>
+
+#include <fmt/format.h>
+
+bool IsModelBinary(const std::string& modelFormat)
+{
+    // Parse model binary flag from the model-format string we got from the command-line
+    if (modelFormat.find("binary") != std::string::npos)
+    {
+        return true;
+    }
+    else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
+    {
+        return false;
+    }
+    else
+    {
+        throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. "
+                                                          "Please include 'binary' or 'text'",
+                                                          modelFormat));
+    }
+}
+
+void CheckModelFormat(const std::string& modelFormat)
+{
+    // Forward to implementation based on the parser type
+    if (modelFormat.find("armnn") != std::string::npos)
+    {
+#if defined(ARMNN_SERIALIZER)
+#else
+    throw armnn::InvalidArgumentException("Can't run model in armnn format without a "
+                                          "version built with serialization support.");
+#endif
+    }
+    else if (modelFormat.find("caffe") != std::string::npos)
+    {
+#if defined(ARMNN_CAFFE_PARSER)
+#else
+        throw armnn::InvalidArgumentException("Can't run model in caffe format without a "
+                                              "version built with Caffe parser support.");
+#endif
+    }
+    else if (modelFormat.find("onnx") != std::string::npos)
+    {
+#if defined(ARMNN_ONNX_PARSER)
+#else
+        throw armnn::InvalidArgumentException("Can't run model in onnx format without a "
+                                              "version built with Onnx parser support.");
+#endif
+    }
+    else if (modelFormat.find("tensorflow") != std::string::npos)
+    {
+#if defined(ARMNN_TF_PARSER)
+#else
+        throw armnn::InvalidArgumentException("Can't run model in tensorflow format without a "
+                                              "version built with Tensorflow parser support.");
+#endif
+    }
+    else if(modelFormat.find("tflite") != std::string::npos)
+    {
+#if defined(ARMNN_TF_LITE_PARSER)
+        if (!IsModelBinary(modelFormat))
+        {
+            throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. Only 'binary' format "
+                                                              "supported for tflite files",
+                                                              modelFormat));
+        }
+#else
+        throw armnn::InvalidArgumentException("Can't run model in tflite format without a "
+                                              "version built with Tensorflow Lite parser support.");
+#endif
+    }
+    else
+    {
+        throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. "
+                                                          "Please include 'armnn', 'caffe', 'tensorflow', 'tflite' or 'onnx'",
+                                                          modelFormat));
+    }
+}
+
+void CheckClTuningParameter(const int& tuningLevel,
+                            const std::string& tuningPath,
+                            const std::vector<armnn::BackendId> computeDevices)
+{
+    if (!tuningPath.empty())
+    {
+        if(tuningLevel == 0)
+        {
+            ARMNN_LOG(info) << "Using cl tuning file: " << tuningPath << "\n";
+            if(!ValidatePath(tuningPath, true))
+            {
+                throw armnn::InvalidArgumentException("The tuning path is not valid");
+            }
+        }
+        else if ((1 <= tuningLevel) && (tuningLevel <= 3))
+        {
+            ARMNN_LOG(info) << "Starting execution to generate a cl tuning file: " << tuningPath << "\n"
+                            << "Tuning level in use: " << tuningLevel << "\n";
+        }
+        else if ((tuningLevel < 0) || (tuningLevel > 3))
+        {
+            throw armnn::InvalidArgumentException(fmt::format("The tuning level {} is not valid.", tuningLevel));
+        }
+
+        // Ensure that GpuAcc is enabled. Otherwise no tuning data is used or generated.
+        // Only warn if it's not enabled
+        auto it = std::find(computeDevices.begin(), computeDevices.end(), "GpuAcc");
+        if (it == computeDevices.end())
+        {
+            ARMNN_LOG(warning) << "To use Cl Tuning the compute device GpuAcc needs to be active.";
+        }
+    }
+}
+
+void ExecuteNetworkParams::ValidateParams()
+{
+    // Check compute devices
+    std::string invalidBackends;
+    if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
+    {
+        throw armnn::InvalidArgumentException(fmt::format("Some of the requested compute devices are invalid. "
+                                                          "\nInvalid devices: {} \nAvailable devices are: {}",
+                                                          invalidBackends,
+                                                          armnn::BackendRegistryInstance().GetBackendIdsAsString()));
+    }
+
+    CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);
+
+    // Check turbo modes
+    if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
+    {
+        throw armnn::InvalidArgumentException("BFloat16 and Float16 turbo mode cannot be enabled at the same time.");
+    }
+
+    m_IsModelBinary = IsModelBinary(m_ModelFormat);
+
+    CheckModelFormat(m_ModelFormat);
+
+    // Check input tensor shapes
+    if ((m_InputTensorShapes.size() != 0) &&
+        (m_InputTensorShapes.size() != m_InputNames.size()))
+    {
+        throw armnn::InvalidArgumentException("input-name and input-tensor-shape must "
+                                              "have the same number of elements.");
+    }
+
+    if (m_InputTensorDataFilePaths.size() != 0)
+    {
+        if (!ValidatePaths(m_InputTensorDataFilePaths, true))
+        {
+            throw armnn::InvalidArgumentException("One or more input data file paths are not valid.");
+        }
+
+        if (m_InputTensorDataFilePaths.size() != m_InputNames.size())
+        {
+            throw armnn::InvalidArgumentException("input-name and input-tensor-data must have "
+                                                  "the same number of elements.");
+        }
+    }
+
+    if ((m_OutputTensorFiles.size() != 0) &&
+        (m_OutputTensorFiles.size() != m_OutputNames.size()))
+    {
+        throw armnn::InvalidArgumentException("output-name and write-outputs-to-file must have the "
+                                              "same number of elements.");
+    }
+
+    if (m_InputTypes.size() == 0)
+    {
+        // Defaults the value of all inputs to "float"
+        m_InputTypes.assign(m_InputNames.size(), "float");
+    }
+    else if ((m_InputTypes.size() != 0) &&
+             (m_InputTypes.size() != m_InputNames.size()))
+    {
+        throw armnn::InvalidArgumentException("input-name and input-type must have the same number of elements.");
+    }
+
+    if (m_OutputTypes.size() == 0)
+    {
+        // Defaults the value of all outputs to "float"
+        m_OutputTypes.assign(m_OutputNames.size(), "float");
+    }
+    else if ((m_OutputTypes.size() != 0) &&
+             (m_OutputTypes.size() != m_OutputNames.size()))
+    {
+        throw armnn::InvalidArgumentException("output-name and output-type must have the same number of elements.");
+    }
+
+    // Check that threshold time is not less than zero
+    if (m_ThresholdTime < 0)
+    {
+        throw armnn::InvalidArgumentException("Threshold time supplied as a command line argument is less than zero.");
+    }
+
+    // Warn if ExecuteNetwork will generate dummy input data
+    if (m_GenerateTensorData)
+    {
+        ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
+    }
+}
\ No newline at end of file
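
ValidateParams reports problems by throwing armnn::InvalidArgumentException
rather than returning error codes; in this commit those exceptions propagate
out of ProgramOptions::ParseOptions. A hedged sketch of how a caller could
surface such failures (CheckParamsOrFail is a hypothetical helper, not part of
this change, and assumes the headers added here are on the include path):

    #include "ExecuteNetworkParams.hpp"

    #include <armnn/Exceptions.hpp>

    #include <cstdlib>
    #include <iostream>

    // Hypothetical helper, not part of this commit.
    int CheckParamsOrFail(ExecuteNetworkParams& params)
    {
        try
        {
            params.ValidateParams();   // throws armnn::InvalidArgumentException on inconsistent options
        }
        catch (const armnn::InvalidArgumentException& e)
        {
            std::cerr << "Invalid ExecuteNetwork options: " << e.what() << std::endl;
            return EXIT_FAILURE;
        }
        return EXIT_SUCCESS;
    }
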
diff --git a/tests/ExecuteNetwork/ExecuteNetworkParams.hpp b/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
new file mode 100644 (file)
index 0000000..5490230
--- /dev/null
@@ -0,0 +1,48 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/BackendId.hpp>
+#include <armnn/Tensor.hpp>
+
+/// Holds all parameters necessary to execute a network
+/// Check ExecuteNetworkProgramOptions.cpp for a description of each parameter
+struct ExecuteNetworkParams
+{
+    using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;
+
+    std::vector<armnn::BackendId> m_ComputeDevices;
+    bool                          m_DequantizeOutput;
+    std::string                   m_DynamicBackendsPath;
+    bool                          m_EnableBf16TurboMode;
+    bool                          m_EnableFastMath = false;
+    bool                          m_EnableFp16TurboMode;
+    bool                          m_EnableLayerDetails = false;
+    bool                          m_EnableProfiling;
+    bool                          m_GenerateTensorData;
+    bool                          m_InferOutputShape = false;
+    std::vector<std::string>      m_InputNames;
+    std::vector<std::string>      m_InputTensorDataFilePaths;
+    std::vector<TensorShapePtr>   m_InputTensorShapes;
+    std::vector<std::string>      m_InputTypes;
+    bool                          m_IsModelBinary;
+    size_t                        m_Iterations;
+    std::string                   m_ModelFormat;
+    std::string                   m_ModelPath;
+    std::vector<std::string>      m_OutputNames;
+    std::vector<std::string>      m_OutputTensorFiles;
+    std::vector<std::string>      m_OutputTypes;
+    bool                          m_ParseUnsupported = false;
+    bool                          m_PrintIntermediate;
+    bool                          m_QuantizeInput;
+    size_t                        m_SubgraphId;
+    double                        m_ThresholdTime;
+    int                           m_TuningLevel;
+    std::string                   m_TuningPath;
+
+    // Ensures that the parameters for ExecuteNetwork fit together
+    void ValidateParams();
+};
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp
new file mode 100644 (file)
index 0000000..8434adf
--- /dev/null
@@ -0,0 +1,414 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ExecuteNetworkProgramOptions.hpp"
+#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
+#include "InferenceTest.hpp"
+
+#include <armnn/BackendRegistry.hpp>
+#include <armnn/Exceptions.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/StringUtils.hpp>
+#include <armnn/Logging.hpp>
+
+#include <fmt/format.h>
+
+bool CheckOption(const cxxopts::ParseResult& result,
+                 const char* option)
+{
+    // Check that the given option is valid.
+    if (option == nullptr)
+    {
+        return false;
+    }
+
+    // Check whether 'option' is provided.
+    return ((result.count(option)) ? true : false);
+}
+
+void CheckOptionDependency(const cxxopts::ParseResult& result,
+                           const char* option,
+                           const char* required)
+{
+    // Check that the given options are valid.
+    if (option == nullptr || required == nullptr)
+    {
+        throw cxxopts::OptionParseException("Invalid option to check dependency for");
+    }
+
+    // Check that if 'option' is provided, 'required' is also provided.
+    if (CheckOption(result, option) && !result[option].has_default())
+    {
+        if (CheckOption(result, required) == 0 || result[required].has_default())
+        {
+            throw cxxopts::OptionParseException(
+                    std::string("Option '") + option + "' requires option '" + required + "'.");
+        }
+    }
+}
+
+void CheckOptionDependencies(const cxxopts::ParseResult& result)
+{
+    CheckOptionDependency(result, "model-path", "model-format");
+    CheckOptionDependency(result, "input-tensor-shape", "model-path");
+    CheckOptionDependency(result, "tuning-level", "tuning-path");
+}
+
+void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
+{
+    // Mark the duplicate devices as 'Undefined'.
+    for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
+    {
+        for (auto j = std::next(i); j != computeDevices.end(); ++j)
+        {
+            if (*j == *i)
+            {
+                *j = armnn::Compute::Undefined;
+            }
+        }
+    }
+
+    // Remove 'Undefined' devices.
+    computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
+                         computeDevices.end());
+}
+
+/// Takes a vector of backend strings and returns a vector of backendIDs. Removes duplicate entries.
+std::vector<armnn::BackendId> GetBackendIDs(const std::vector<std::string>& backendStrings)
+{
+    std::vector<armnn::BackendId> backendIDs;
+    for (const auto& b : backendStrings)
+    {
+        backendIDs.push_back(armnn::BackendId(b));
+    }
+
+    RemoveDuplicateDevices(backendIDs);
+
+    return backendIDs;
+}
+
+/// Provides a segfault safe way to get cxxopts option values by checking if the option was defined.
+/// If the option wasn't defined it returns an empty object.
+template<typename optionType>
+optionType GetOptionValue(std::string&& optionName, const cxxopts::ParseResult& result)
+{
+    optionType out;
+    if(result.count(optionName))
+    {
+        out = result[optionName].as<optionType>();
+    }
+    return out;
+}
+
+void LogAndThrowFatal(std::string errorMessage)
+{
+    throw armnn::InvalidArgumentException (errorMessage);
+}
+
+void CheckRequiredOptions(const cxxopts::ParseResult& result)
+{
+    // For each option in option-group "a) Required"
+    std::vector<std::string> requiredOptions{"compute",
+                                             "model-format",
+                                             "model-path",
+                                             "input-name",
+                                             "output-name"};
+
+    bool requiredMissing = false;
+    for(auto const&  str : requiredOptions)
+    {
+        if(!(result.count(str) > 0))
+        {
+            ARMNN_LOG(error) << fmt::format("The program option '{}' is mandatory but wasn't provided.", str);
+            requiredMissing = true;
+        }
+    }
+    if(requiredMissing)
+    {
+        throw armnn::InvalidArgumentException ("Some required arguments are missing");
+    }
+}
+
+void ProgramOptions::ValidateExecuteNetworkParams()
+{
+    m_ExNetParams.ValidateParams();
+}
+
+void ProgramOptions::ValidateRuntimeOptions()
+{
+    if (m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled &&
+        !m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
+    {
+        LogAndThrowFatal("Timeline profiling requires external profiling to be turned on");
+    }
+}
+
+
+ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
+                                                "Executes a neural network model using the provided input "
+                                                "tensor. Prints the resulting output tensor."}
+{
+    try
+    {
+        // cxxopts doesn't provide a mechanism to ensure required options are given. There is a
+        // separate function CheckRequiredOptions() for that.
+        m_CxxOptions.add_options("a) Required")
+                ("c,compute",
+                 "Which device to run layers on by default. Possible choices: "
+                 + armnn::BackendRegistryInstance().GetBackendIdsAsString()
+                 + " NOTE: Compute devices need to be passed as a comma separated list without whitespaces "
+                   "e.g. CpuRef,CpuAcc",
+                 cxxopts::value<std::string>())
+
+                ("f,model-format",
+                 "armnn-binary, caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or "
+                 "tensorflow-text.",
+                 cxxopts::value<std::string>())
+
+                ("m,model-path",
+                 "Path to model file, e.g. .armnn, .caffemodel, .prototxt, .tflite, .onnx",
+                 cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))
+
+                ("i,input-name",
+                 "Identifier of the input tensors in the network separated by comma.",
+                 cxxopts::value<std::string>())
+
+                ("o,output-name",
+                 "Identifier of the output tensors in the network separated by comma.",
+                 cxxopts::value<std::string>());
+
+        m_CxxOptions.add_options("b) General")
+                ("b,dynamic-backends-path",
+                 "Path where to load any available dynamic backend from. "
+                 "If left empty (the default), dynamic backends will not be used.",
+                 cxxopts::value<std::string>(m_RuntimeOptions.m_DynamicBackendsPath))
+
+                ("d,input-tensor-data",
+                 "Path to files containing the input data as a flat array separated by whitespace. "
+                 "Several paths can be passed by separating them with a comma. If not specified, the network will be "
+                 "run with dummy data (useful for profiling).",
+                 cxxopts::value<std::string>()->default_value(""))
+
+                ("h,help", "Display usage information")
+
+                ("infer-output-shape",
+                 "Infers output tensor shape from input tensor shape and validates where applicable (where supported by "
+                 "parser)",
+                 cxxopts::value<bool>(m_ExNetParams.m_InferOutputShape)->default_value("false")->implicit_value("true"))
+
+                ("iterations",
+                 "Number of iterations to run the network for, default is set to 1",
+                 cxxopts::value<size_t>(m_ExNetParams.m_Iterations)->default_value("1"))
+
+                ("l,dequantize-output",
+                 "If this option is enabled, all quantized outputs will be dequantized to float. "
+                 "If unset, default to not get dequantized. "
+                 "Accepted values (true or false)",
+                 cxxopts::value<bool>(m_ExNetParams.m_DequantizeOutput)->default_value("false")->implicit_value("true"))
+
+                ("p,print-intermediate-layers",
+                 "If this option is enabled, the output of every graph layer will be printed.",
+                 cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediate)->default_value("false")
+                 ->implicit_value("true"))
+
+                ("parse-unsupported",
+                 "Add unsupported operators as stand-in layers (where supported by parser)",
+                 cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))
+
+                ("q,quantize-input",
+                 "If this option is enabled, all float inputs will be quantized to qasymm8. "
+                 "If unset, default to not quantized. Accepted values (true or false)",
+                 cxxopts::value<bool>(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true"))
+
+                ("r,threshold-time",
+                 "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
+                 "inference time is greater than the threshold time, the test will fail. By default, no threshold "
+                 "time is used.",
+                 cxxopts::value<double>(m_ExNetParams.m_ThresholdTime)->default_value("0.0"))
+
+                ("s,input-tensor-shape",
+                 "The shape of the input tensors in the network as a flat array of integers separated by comma."
+                 "Several shapes can be passed by separating them with a colon (:).",
+                 cxxopts::value<std::string>())
+
+                ("v,visualize-optimized-model",
+                 "Enables the optimized model visualizer. If unset, defaults to off.",
+                 cxxopts::value<bool>(m_ExNetParams.m_EnableLayerDetails)->default_value("false")
+                 ->implicit_value("true"))
+
+                ("w,write-outputs-to-file",
+                 "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
+                 "If left empty (the default), the output tensors will not be written to a file.",
+                 cxxopts::value<std::string>())
+
+                ("x,subgraph-number",
+                 "Id of the subgraph to be executed. Defaults to 0.",
+                 cxxopts::value<size_t>(m_ExNetParams.m_SubgraphId)->default_value("0"))
+
+                ("y,input-type",
+                 "The type of the input tensors in the network separated by comma. "
+                 "If unset, defaults to \"float\" for all defined inputs. "
+                 "Accepted values (float, int or qasymm8).",
+                 cxxopts::value<std::string>())
+
+                ("z,output-type",
+                 "The type of the output tensors in the network separated by comma. "
+                 "If unset, defaults to \"float\" for all defined outputs. "
+                 "Accepted values (float, int or qasymm8).",
+                 cxxopts::value<std::string>());
+
+        m_CxxOptions.add_options("c) Optimization")
+                ("bf16-turbo-mode",
+                 "If this option is enabled, FP32 layers, "
+                 "weights and biases will be converted to BFloat16 where the backend supports it",
+                 cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
+                 ->default_value("false")->implicit_value("true"))
+
+                ("enable-fast-math",
+                 "Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
+                 "performance improvements but may result in reduced or different precision.",
+                 cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true"))
+
+                ("fp16-turbo-mode",
+                 "If this option is enabled, FP32 layers, "
+                 "weights and biases will be converted to FP16 where the backend supports it",
+                 cxxopts::value<bool>(m_ExNetParams.m_EnableFp16TurboMode)
+                 ->default_value("false")->implicit_value("true"))
+
+                ("tuning-level",
+                 "Sets the tuning level which enables a tuning run which will update/create a tuning file. "
+                 "Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). "
+                 "Requires tuning-path to be set, default is set to 0 (No tuning run)",
+                 cxxopts::value<int>(m_ExNetParams.m_TuningLevel)->default_value("0"))
+
+                ("tuning-path",
+                 "Path to tuning file. Enables use of CL tuning",
+                 cxxopts::value<std::string>(m_ExNetParams.m_TuningPath));
+
+        m_CxxOptions.add_options("d) Profiling")
+                ("a,enable-external-profiling",
+                 "If enabled external profiling will be switched on",
+                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
+                         ->default_value("false")->implicit_value("true"))
+
+                ("e,event-based-profiling",
+                 "Enables the built-in profiler. If unset, defaults to off.",
+                 cxxopts::value<bool>(m_ExNetParams.m_EnableProfiling)->default_value("false")->implicit_value("true"))
+
+                ("g,file-only-external-profiling",
+                 "If enabled then the 'file-only' test mode of external profiling will be enabled",
+                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_FileOnly)
+                 ->default_value("false")->implicit_value("true"))
+
+                ("file-format",
+                 "If profiling is enabled specifies the output file format",
+                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_FileFormat)->default_value("binary"))
+
+                ("j,outgoing-capture-file",
+                 "If specified the outgoing external profiling packets will be captured in this binary file",
+                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_OutgoingCaptureFile))
+
+                ("k,incoming-capture-file",
+                 "If specified the incoming external profiling packets will be captured in this binary file",
+                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_IncomingCaptureFile))
+
+                ("timeline-profiling",
+                 "If enabled timeline profiling will be switched on, requires external profiling",
+                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled)
+                 ->default_value("false")->implicit_value("true"))
+
+                ("u,counter-capture-period",
+                 "If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test",
+                 cxxopts::value<uint32_t>(m_RuntimeOptions.m_ProfilingOptions.m_CapturePeriod)->default_value("150"));
+    }
+    catch (const std::exception& e)
+    {
+        ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
+        ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
+        exit(EXIT_FAILURE);
+    }
+}
+
+ProgramOptions::ProgramOptions(int ac, const char* av[]): ProgramOptions()
+{
+    ParseOptions(ac, av);
+}
+
+void ProgramOptions::ParseOptions(int ac, const char* av[])
+{
+    // Parses the command-line.
+    m_CxxResult = m_CxxOptions.parse(ac, av);
+
+    if (m_CxxResult.count("help") || ac <= 1)
+    {
+        std::cout << m_CxxOptions.help() << std::endl;
+        exit(EXIT_SUCCESS);
+    }
+
+    CheckRequiredOptions(m_CxxResult);
+    CheckOptionDependencies(m_CxxResult);
+
+    // Some options can't be assigned directly because they need some post-processing:
+    auto computeDevices = GetOptionValue<std::string>("compute", m_CxxResult);
+    m_ExNetParams.m_ComputeDevices =
+            GetBackendIDs(ParseStringList(computeDevices, ","));
+    m_ExNetParams.m_ModelFormat =
+            armnn::stringUtils::StringTrimCopy(GetOptionValue<std::string>("model-format", m_CxxResult));
+    m_ExNetParams.m_InputNames =
+            ParseStringList(GetOptionValue<std::string>("input-name", m_CxxResult), ",");
+    m_ExNetParams.m_InputTensorDataFilePaths =
+            ParseStringList(GetOptionValue<std::string>("input-tensor-data", m_CxxResult), ",");
+    m_ExNetParams.m_OutputNames =
+            ParseStringList(GetOptionValue<std::string>("output-name", m_CxxResult), ",");
+    m_ExNetParams.m_InputTypes =
+            ParseStringList(GetOptionValue<std::string>("input-type", m_CxxResult), ",");
+    m_ExNetParams.m_OutputTypes =
+            ParseStringList(GetOptionValue<std::string>("output-type", m_CxxResult), ",");
+    m_ExNetParams.m_OutputTensorFiles =
+            ParseStringList(GetOptionValue<std::string>("write-outputs-to-file", m_CxxResult), ",");
+    m_ExNetParams.m_GenerateTensorData =
+            m_ExNetParams.m_InputTensorDataFilePaths.empty();
+
+    // Parse input tensor shape from the string we got from the command-line.
+    std::vector<std::string> inputTensorShapesVector =
+            ParseStringList(GetOptionValue<std::string>("input-tensor-shape", m_CxxResult), ":");
+
+    if (!inputTensorShapesVector.empty())
+    {
+        m_ExNetParams.m_InputTensorShapes.reserve(inputTensorShapesVector.size());
+
+        for(const std::string& shape : inputTensorShapesVector)
+        {
+            std::stringstream ss(shape);
+            std::vector<unsigned int> dims = ParseArray(ss);
+
+            m_ExNetParams.m_InputTensorShapes.push_back(
+                    std::make_unique<armnn::TensorShape>(static_cast<unsigned int>(dims.size()), dims.data()));
+        }
+    }
+
+    // We have to validate ExecuteNetworkParams first so that the tuning path and level is validated
+    ValidateExecuteNetworkParams();
+
+    // Parse CL tuning parameters to runtime options
+    if (!m_ExNetParams.m_TuningPath.empty())
+    {
+        m_RuntimeOptions.m_BackendOptions.emplace_back(
+            armnn::BackendOptions
+            {
+                "GpuAcc",
+                {
+                    {"TuningLevel", m_ExNetParams.m_TuningLevel},
+                    {"TuningFile", m_ExNetParams.m_TuningPath.c_str()},
+                    {"KernelProfilingEnabled", m_ExNetParams.m_EnableProfiling}
+                }
+            }
+        );
+    }
+
+    ValidateRuntimeOptions();
+}
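Note: with RunCLTuning removed, a CL tuning file is now produced by a separate ExecuteNetwork run that sets both of the options added above, for example (long option names as registered in this file; the remaining arguments are illustrative): ExecuteNetwork --compute GpuAcc --model-format tflite-binary --model-path model.tflite --input-name input --output-name output --tuning-level 2 --tuning-path tuned.bin. A later run that passes only --tuning-path leaves the tuning level at its default of 0 and reuses the generated file.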
+
diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.hpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.hpp
new file mode 100644 (file)
index 0000000..0b43176
--- /dev/null
@@ -0,0 +1,46 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ExecuteNetworkParams.hpp"
+#include <armnn/IRuntime.hpp>
+
+/*
+ * Historically we use the ',' character to separate dimensions in a tensor shape. However, cxxopts will read this
+ * as an array of values, which is fine until we have multiple tensors specified. This lumps the values of all shapes
+ * together in a single array that we cannot break up again. We therefore change the vector delimiter to '.',
+ * defining it as close as possible to the cxxopts include to avoid polluting other possible uses.
+ */
+#define CXXOPTS_VECTOR_DELIMITER '.'
+#include <cxxopts/cxxopts.hpp>
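For illustration, a minimal, hypothetical sketch of the effect of this override (the option name and values are made up; the parse call mirrors the usage in ProgramOptions::ParseOptions):

    // Hypothetical sketch: with cxxopts' default ',' delimiter, a value such as
    // "1,1,224,224" parsed into std::vector<std::string> is split into four elements,
    // so several shapes given via repeated options can no longer be told apart.
    // With '.' as the delimiter each occurrence stays one intact, comma-separated string.
    #define CXXOPTS_VECTOR_DELIMITER '.'
    #include <cxxopts/cxxopts.hpp>

    #include <iostream>
    #include <string>
    #include <vector>

    int main(int argc, const char* argv[])
    {
        cxxopts::Options options("DelimiterDemo", "Shows the effect of the vector delimiter override");
        options.add_options()
            ("s,input-tensor-shape", "One shape per occurrence of this option",
             cxxopts::value<std::vector<std::string>>());

        auto result = options.parse(argc, argv);

        // e.g. ./DelimiterDemo -s 1,1,224,224 -s 1,10 prints each shape on its own line
        for (const auto& shape : result["input-tensor-shape"].as<std::vector<std::string>>())
        {
            std::cout << shape << std::endl;
        }
        return 0;
    }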
+
+/// Holds and parses program options for the ExecuteNetwork application
+struct ProgramOptions
+{
+    /// Initializes ProgramOptions by adding options to the underlying cxxopts::options object.
+    /// (Does not parse any options)
+    ProgramOptions();
+
+    /// Runs ParseOptions() on initialization
+    ProgramOptions(int ac, const char* av[]);
+
+    /// Parses program options from the command line or another source and stores
+    /// the values in member variables. It also checks the validity of the parsed parameters.
+    /// Throws a cxxopts exception if parsing fails or an armnn exception if parameters are not valid.
+    void ParseOptions(int ac, const char* av[]);
+
+    /// Ensures that the parameters for ExecuteNetwork fit together
+    void ValidateExecuteNetworkParams();
+
+    /// Ensures that the runtime options are valid
+    void ValidateRuntimeOptions();
+
+    cxxopts::Options m_CxxOptions;
+    cxxopts::ParseResult m_CxxResult;
+
+    ExecuteNetworkParams m_ExNetParams;
+    armnn::IRuntime::CreationOptions m_RuntimeOptions;
+};
\ No newline at end of file
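For reference, a minimal sketch of how a caller such as ExecuteNetwork.cpp can drive this struct; the MainImpl call is left as a comment because its exact signature is an assumption here:

    #include "ExecuteNetworkProgramOptions.hpp"

    #include <armnn/IRuntime.hpp>

    #include <memory>

    int main(int argc, const char* argv[])
    {
        // Adds, parses and validates all program options; prints help and exits if requested.
        ProgramOptions options(argc, argv);

        // The validated runtime options can be used to create the runtime up front...
        std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options.m_RuntimeOptions));

        // ...and the parsed network parameters handed straight on, e.g.
        // return MainImpl(options.m_ExNetParams, runtime);   // signature assumed
        return 0;
    }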
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
new file mode 100644 (file)
index 0000000..3e7c87d
--- /dev/null
@@ -0,0 +1,292 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NetworkExecutionUtils.hpp"
+
+#include <Filesystem.hpp>
+#include <InferenceTest.hpp>
+#include <ResolveType.hpp>
+
+#if defined(ARMNN_SERIALIZER)
+#include "armnnDeserializer/IDeserializer.hpp"
+#endif
+#if defined(ARMNN_CAFFE_PARSER)
+#include "armnnCaffeParser/ICaffeParser.hpp"
+#endif
+#if defined(ARMNN_TF_PARSER)
+#include "armnnTfParser/ITfParser.hpp"
+#endif
+#if defined(ARMNN_TF_LITE_PARSER)
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+#endif
+#if defined(ARMNN_ONNX_PARSER)
+#include "armnnOnnxParser/IOnnxParser.hpp"
+#endif
+
+
+template<typename T, typename TParseElementFunc>
+std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc, const char* chars = "\t ,:")
+{
+    std::vector<T> result;
+    // Processes line-by-line.
+    std::string line;
+    while (std::getline(stream, line))
+    {
+        std::vector<std::string> tokens = armnn::stringUtils::StringTokenizer(line, chars);
+        for (const std::string& token : tokens)
+        {
+            if (!token.empty()) // See https://stackoverflow.com/questions/10437406/
+            {
+                try
+                {
+                    result.push_back(parseElementFunc(token));
+                }
+                catch (const std::exception&)
+                {
+                    ARMNN_LOG(error) << "'" << token << "' is not a valid number. It has been ignored.";
+                }
+            }
+        }
+    }
+
+    return result;
+}
+
+
+template<armnn::DataType NonQuantizedType>
+auto ParseDataArray(std::istream& stream);
+
+template<armnn::DataType QuantizedType>
+auto ParseDataArray(std::istream& stream,
+                    const float& quantizationScale,
+                    const int32_t& quantizationOffset);
+
+template<>
+auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
+{
+    return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
+}
+
+template<>
+auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
+{
+    return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
+}
+
+template<>
+auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
+{
+    return ParseArrayImpl<uint8_t>(stream,
+                                   [](const std::string& s) { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });
+}
+
+template<>
+auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
+                                               const float& quantizationScale,
+                                               const int32_t& quantizationOffset)
+{
+    return ParseArrayImpl<uint8_t>(stream,
+                                   [&quantizationScale, &quantizationOffset](const std::string& s)
+                                   {
+                                       return armnn::numeric_cast<uint8_t>(
+                                               armnn::Quantize<uint8_t>(std::stof(s),
+                                                                        quantizationScale,
+                                                                        quantizationOffset));
+                                   });
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+std::vector<T> GenerateDummyTensorData(unsigned int numElements)
+{
+    return std::vector<T>(numElements, static_cast<T>(0));
+}
+
+
+std::vector<unsigned int> ParseArray(std::istream& stream)
+{
+    return ParseArrayImpl<unsigned int>(
+            stream,
+            [](const std::string& s) { return armnn::numeric_cast<unsigned int>(std::stoi(s)); });
+}
+
+std::vector<std::string> ParseStringList(const std::string& inputString, const char* delimiter)
+{
+    std::stringstream stream(inputString);
+    return ParseArrayImpl<std::string>(stream, [](const std::string& s) {
+        return armnn::stringUtils::StringTrimCopy(s); }, delimiter);
+}
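A small usage sketch of the two helpers above (the input strings are illustrative):

    #include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"

    #include <sstream>
    #include <string>
    #include <vector>

    void ParsingHelpersExample()
    {
        // ParseArray tokenizes on whitespace, ',' and ':' and skips tokens that are not numbers.
        std::stringstream shape("1,3,224,224");
        const std::vector<unsigned int> dims = ParseArray(shape);                        // {1, 3, 224, 224}

        // ParseStringList splits on the given delimiter and trims every element.
        const std::vector<std::string> names = ParseStringList("input_a, input_b", ","); // {"input_a", "input_b"}
    }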
+
+
+TensorPrinter::TensorPrinter(const std::string& binding,
+                             const armnn::TensorInfo& info,
+                             const std::string& outputTensorFile,
+                             bool dequantizeOutput)
+                             : m_OutputBinding(binding)
+                             , m_Scale(info.GetQuantizationScale())
+                             , m_Offset(info.GetQuantizationOffset())
+                             , m_OutputTensorFile(outputTensorFile)
+                             , m_DequantizeOutput(dequantizeOutput) {}
+
+void TensorPrinter::operator()(const std::vector<float>& values)
+{
+    ForEachValue(values, [](float value)
+    {
+        printf("%f ", value);
+    });
+    WriteToFile(values);
+}
+
+void TensorPrinter::operator()(const std::vector<uint8_t>& values)
+{
+    if(m_DequantizeOutput)
+    {
+        auto& scale = m_Scale;
+        auto& offset = m_Offset;
+        std::vector<float> dequantizedValues;
+        ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
+        {
+            auto dequantizedValue = armnn::Dequantize(value, scale, offset);
+            printf("%f ", dequantizedValue);
+            dequantizedValues.push_back(dequantizedValue);
+        });
+        WriteToFile(dequantizedValues);
+    }
+    else
+    {
+        const std::vector<int> intValues(values.begin(), values.end());
+        operator()(intValues);
+    }
+}
+
+void TensorPrinter::operator()(const std::vector<int>& values)
+{
+    ForEachValue(values, [](int value)
+    {
+        printf("%d ", value);
+    });
+    WriteToFile(values);
+}
+
+template<typename Container, typename Delegate>
+void TensorPrinter::ForEachValue(const Container& c, Delegate delegate)
+{
+    std::cout << m_OutputBinding << ": ";
+    for (const auto& value : c)
+    {
+        delegate(value);
+    }
+    printf("\n");
+}
+
+template<typename T>
+void TensorPrinter::WriteToFile(const std::vector<T>& values)
+{
+    if (!m_OutputTensorFile.empty())
+    {
+        std::ofstream outputTensorFile;
+        outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
+        if (outputTensorFile.is_open())
+        {
+            outputTensorFile << m_OutputBinding << ": ";
+            std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
+        }
+        else
+        {
+            ARMNN_LOG(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
+        }
+        outputTensorFile.close();
+    }
+}
+
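A hedged usage sketch of TensorPrinter (the tensor name, shape and values are illustrative); the visitor dispatch matches how ExecuteNetwork applies it to its output containers:

    #include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"

    #include <armnn/Tensor.hpp>

    #include <vector>

    void PrintOutputExample()
    {
        const unsigned int dims[] = { 1, 4 };
        const armnn::TensorInfo info(2, dims, armnn::DataType::Float32);

        TContainer output = std::vector<float>{ 0.1f, 0.2f, 0.3f, 0.4f };

        // An empty file path means print to stdout only; 'false' skips dequantization.
        TensorPrinter printer("output_0", info, "", false);
        mapbox::util::apply_visitor(printer, output);
    }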
+using TContainer         = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
+using QuantizationParams = std::pair<float, int32_t>;
+
+void PopulateTensorWithData(TContainer& tensorData,
+                            unsigned int numElements,
+                            const std::string& dataTypeStr,
+                            const armnn::Optional<QuantizationParams>& qParams,
+                            const armnn::Optional<std::string>& dataFile)
+{
+    const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
+    const bool quantizeData = qParams.has_value();
+
+    std::ifstream inputTensorFile;
+    if (readFromFile)
+    {
+        inputTensorFile = std::ifstream(dataFile.value());
+    }
+
+    if (dataTypeStr.compare("float") == 0)
+    {
+        if (quantizeData)
+        {
+            const float qScale  = qParams.value().first;
+            const int   qOffset = qParams.value().second;
+
+            tensorData = readFromFile ?
+                         ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
+                         GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
+        }
+        else
+        {
+            tensorData = readFromFile ?
+                         ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
+                         GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
+        }
+    }
+    else if (dataTypeStr.compare("int") == 0)
+    {
+        tensorData = readFromFile ?
+                     ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
+                     GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
+    }
+    else if (dataTypeStr.compare("qasymm8") == 0)
+    {
+        tensorData = readFromFile ?
+                     ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
+                     GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
+    }
+    else
+    {
+        std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
+        ARMNN_LOG(fatal) << errorMessage;
+
+        inputTensorFile.close();
+        throw armnn::Exception(errorMessage);
+    }
+
+    inputTensorFile.close();
+}
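And a hedged sketch of filling an input container with the helper above (the file name and element count are made up):

    #include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"

    #include <string>

    void PopulateInputExample()
    {
        TContainer input;

        // Reads 1*3*224*224 float values from a whitespace/comma separated text file.
        // Passing an empty optional as the data file fills the tensor with zeros instead.
        PopulateTensorWithData(input,
                               1 * 3 * 224 * 224,
                               "float",
                               armnn::EmptyOptional(),                            // no quantization
                               armnn::MakeOptional<std::string>("input_0.txt"));  // hypothetical file
    }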
+
+bool ValidatePath(const std::string& file, const bool expectFile)
+{
+    if (!fs::exists(file))
+    {
+        std::cerr << "Given file path '" << file << "' does not exist" << std::endl;
+        return false;
+    }
+    if (!fs::is_regular_file(file) && expectFile)
+    {
+        std::cerr << "Given file path '" << file << "' is not a regular file" << std::endl;
+        return false;
+    }
+    return true;
+}
+
+bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFile)
+{
+    bool allPathsValid = true;
+    for (auto const& file : fileVec)
+    {
+        if(!ValidatePath(file, expectFile))
+        {
+            allPathsValid = false;
+        }
+    }
+    return allPathsValid;
+}
+
+
+
index f79d630..d101d4a 100644 (file)
 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <armnn/ArmNN.hpp>
-#include <armnn/TypesUtils.hpp>
-#include <armnn/utility/NumericCast.hpp>
-#include <armnn/utility/Timer.hpp>
 
-#if defined(ARMNN_SERIALIZER)
-#include "armnnDeserializer/IDeserializer.hpp"
-#endif
-#if defined(ARMNN_CAFFE_PARSER)
-#include "armnnCaffeParser/ICaffeParser.hpp"
-#endif
-#if defined(ARMNN_TF_PARSER)
-#include "armnnTfParser/ITfParser.hpp"
-#endif
-#if defined(ARMNN_TF_LITE_PARSER)
-#include "armnnTfLiteParser/ITfLiteParser.hpp"
-#endif
-#if defined(ARMNN_ONNX_PARSER)
-#include "armnnOnnxParser/IOnnxParser.hpp"
-#endif
-#include "CsvReader.hpp"
-#include "../InferenceTest.hpp"
+#pragma once
 
-#include <Profiling.hpp>
-#include <ResolveType.hpp>
+#include "CsvReader.hpp"
+#include <armnn/IRuntime.hpp>
+#include <armnn/Types.hpp>
 
-#include <boost/program_options.hpp>
 #include <mapbox/variant.hpp>
 
 #include <iostream>
-#include <fstream>
-#include <functional>
-#include <future>
-#include <algorithm>
-#include <iterator>
-
-namespace
-{
-
-// Configure boost::program_options for command-line parsing and validation.
-namespace po = boost::program_options;
-
-template<typename T, typename TParseElementFunc>
-std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc, const char * chars = "\t ,:")
-{
-    std::vector<T> result;
-    // Processes line-by-line.
-    std::string line;
-    while (std::getline(stream, line))
-    {
-        std::vector<std::string> tokens = armnn::stringUtils::StringTokenizer(line, chars);
-        for (const std::string& token : tokens)
-        {
-            if (!token.empty()) // See https://stackoverflow.com/questions/10437406/
-            {
-                try
-                {
-                    result.push_back(parseElementFunc(token));
-                }
-                catch (const std::exception&)
-                {
-                    ARMNN_LOG(error) << "'" << token << "' is not a valid number. It has been ignored.";
-                }
-            }
-        }
-    }
 
-    return result;
-}
 
-bool CheckOption(const po::variables_map& vm,
-                 const char* option)
-{
-    // Check that the given option is valid.
-    if (option == nullptr)
-    {
-        return false;
-    }
-
-    // Check whether 'option' is provided.
-    return vm.find(option) != vm.end();
-}
-
-void CheckOptionDependency(const po::variables_map& vm,
-                           const char* option,
-                           const char* required)
-{
-    // Check that the given options are valid.
-    if (option == nullptr || required == nullptr)
-    {
-        throw po::error("Invalid option to check dependency for");
-    }
-
-    // Check that if 'option' is provided, 'required' is also provided.
-    if (CheckOption(vm, option) && !vm[option].defaulted())
-    {
-        if (CheckOption(vm, required) == 0 || vm[required].defaulted())
-        {
-            throw po::error(std::string("Option '") + option + "' requires option '" + required + "'.");
-        }
-    }
-}
-
-void CheckOptionDependencies(const po::variables_map& vm)
-{
-    CheckOptionDependency(vm, "model-path", "model-format");
-    CheckOptionDependency(vm, "model-path", "input-name");
-    CheckOptionDependency(vm, "model-path", "output-name");
-    CheckOptionDependency(vm, "input-tensor-shape", "model-path");
-}
+std::vector<unsigned int> ParseArray(std::istream& stream);
 
-template<armnn::DataType NonQuantizedType>
-auto ParseDataArray(std::istream & stream);
-
-template<armnn::DataType QuantizedType>
-auto ParseDataArray(std::istream& stream,
-                    const float& quantizationScale,
-                    const int32_t& quantizationOffset);
-
-template<>
-auto ParseDataArray<armnn::DataType::Float32>(std::istream & stream)
-{
-    return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
-}
-
-template<>
-auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream)
-{
-    return ParseArrayImpl<int>(stream, [](const std::string & s) { return std::stoi(s); });
-}
-
-template<>
-auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
-{
-    return ParseArrayImpl<uint8_t>(stream,
-                                   [](const std::string& s) { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });
-}
-
-template<>
-auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
-                                                      const float& quantizationScale,
-                                                      const int32_t& quantizationOffset)
-{
-    return ParseArrayImpl<uint8_t>(stream,
-                                   [&quantizationScale, &quantizationOffset](const std::string & s)
-                                   {
-                                       return armnn::numeric_cast<uint8_t>(
-                                           armnn::Quantize<uint8_t>(std::stof(s),
-                                                                     quantizationScale,
-                                                                     quantizationOffset));
-                                   });
-}
-std::vector<unsigned int> ParseArray(std::istream& stream)
-{
-    return ParseArrayImpl<unsigned int>(stream,
-        [](const std::string& s) { return armnn::numeric_cast<unsigned int>(std::stoi(s)); });
-}
-
-std::vector<std::string> ParseStringList(const std::string & inputString, const char * delimiter)
-{
-    std::stringstream stream(inputString);
-    return ParseArrayImpl<std::string>(stream, [](const std::string& s) {
-           return armnn::stringUtils::StringTrimCopy(s); }, delimiter);
-}
-
-void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
-{
-    // Mark the duplicate devices as 'Undefined'.
-    for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
-    {
-        for (auto j = std::next(i); j != computeDevices.end(); ++j)
-        {
-            if (*j == *i)
-            {
-                *j = armnn::Compute::Undefined;
-            }
-        }
-    }
-
-    // Remove 'Undefined' devices.
-    computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
-                         computeDevices.end());
-}
+/// Splits a given string at every occurrence of the delimiter into a vector of strings
+std::vector<std::string> ParseStringList(const std::string& inputString, const char* delimiter);
 
 struct TensorPrinter
 {
     TensorPrinter(const std::string& binding,
                   const armnn::TensorInfo& info,
                   const std::string& outputTensorFile,
-                  bool dequantizeOutput)
-        : m_OutputBinding(binding)
-        , m_Scale(info.GetQuantizationScale())
-        , m_Offset(info.GetQuantizationOffset())
-        , m_OutputTensorFile(outputTensorFile)
-        , m_DequantizeOutput(dequantizeOutput)
-    {}
+                  bool dequantizeOutput);
 
-    void operator()(const std::vector<float>& values)
-    {
-        ForEachValue(values, [](float value)
-            {
-                printf("%f ", value);
-            });
-        WriteToFile(values);
-    }
+    void operator()(const std::vector<float>& values);
 
-    void operator()(const std::vector<uint8_t>& values)
-    {
-        if(m_DequantizeOutput)
-        {
-            auto& scale = m_Scale;
-            auto& offset = m_Offset;
-            std::vector<float> dequantizedValues;
-            ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
-            {
-                auto dequantizedValue = armnn::Dequantize(value, scale, offset);
-                printf("%f ", dequantizedValue);
-                dequantizedValues.push_back(dequantizedValue);
-            });
-            WriteToFile(dequantizedValues);
-        }
-        else
-        {
-            const std::vector<int> intValues(values.begin(), values.end());
-            operator()(intValues);
-        }
-    }
+    void operator()(const std::vector<uint8_t>& values);
 
-    void operator()(const std::vector<int>& values)
-    {
-        ForEachValue(values, [](int value)
-            {
-                printf("%d ", value);
-            });
-        WriteToFile(values);
-    }
+    void operator()(const std::vector<int>& values);
 
 private:
     template<typename Container, typename Delegate>
-    void ForEachValue(const Container& c, Delegate delegate)
-    {
-        std::cout << m_OutputBinding << ": ";
-        for (const auto& value : c)
-        {
-            delegate(value);
-        }
-        printf("\n");
-    }
+    void ForEachValue(const Container& c, Delegate delegate);
 
     template<typename T>
-    void WriteToFile(const std::vector<T>& values)
-    {
-        if (!m_OutputTensorFile.empty())
-        {
-            std::ofstream outputTensorFile;
-            outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
-            if (outputTensorFile.is_open())
-            {
-                outputTensorFile << m_OutputBinding << ": ";
-                std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
-            }
-            else
-            {
-                ARMNN_LOG(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
-            }
-            outputTensorFile.close();
-        }
-    }
+    void WriteToFile(const std::vector<T>& values);
 
     std::string m_OutputBinding;
-    float m_Scale=0.0f;
-    int m_Offset=0;
+    float m_Scale;
+    int m_Offset;
     std::string m_OutputTensorFile;
-    bool m_DequantizeOutput = false;
+    bool m_DequantizeOutput;
 };
 
-
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-std::vector<T> GenerateDummyTensorData(unsigned int numElements)
-{
-    return std::vector<T>(numElements, static_cast<T>(0));
-}
-
 using TContainer         = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
 using QuantizationParams = std::pair<float, int32_t>;
 
@@ -293,648 +53,20 @@ void PopulateTensorWithData(TContainer& tensorData,
                             unsigned int numElements,
                             const std::string& dataTypeStr,
                             const armnn::Optional<QuantizationParams>& qParams,
-                            const armnn::Optional<std::string>& dataFile)
-{
-    const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
-    const bool quantizeData = qParams.has_value();
-
-    std::ifstream inputTensorFile;
-    if (readFromFile)
-    {
-        inputTensorFile = std::ifstream(dataFile.value());
-    }
-
-    if (dataTypeStr.compare("float") == 0)
-    {
-        if (quantizeData)
-        {
-            const float qScale  = qParams.value().first;
-            const int   qOffset = qParams.value().second;
-
-            tensorData = readFromFile ?
-                ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
-                GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
-        }
-        else
-        {
-            tensorData = readFromFile ?
-                ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
-                GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
-        }
-    }
-    else if (dataTypeStr.compare("int") == 0)
-    {
-        tensorData = readFromFile ?
-            ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
-            GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
-    }
-    else if (dataTypeStr.compare("qasymm8") == 0)
-    {
-         tensorData = readFromFile ?
-            ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
-            GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
-    }
-    else
-    {
-        std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
-        ARMNN_LOG(fatal) << errorMessage;
-
-        inputTensorFile.close();
-        throw armnn::Exception(errorMessage);
-    }
-
-    inputTensorFile.close();
-}
-
-} // anonymous namespace
-
-bool generateTensorData = true;
-
-struct ExecuteNetworkParams
-{
-    using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;
-
-    const char*                   m_ModelPath;
-    bool                          m_IsModelBinary;
-    std::vector<armnn::BackendId> m_ComputeDevices;
-    std::string                   m_DynamicBackendsPath;
-    std::vector<string>           m_InputNames;
-    std::vector<TensorShapePtr>   m_InputTensorShapes;
-    std::vector<string>           m_InputTensorDataFilePaths;
-    std::vector<string>           m_InputTypes;
-    bool                          m_QuantizeInput;
-    std::vector<string>           m_OutputTypes;
-    std::vector<string>           m_OutputNames;
-    std::vector<string>           m_OutputTensorFiles;
-    bool                          m_DequantizeOutput;
-    bool                          m_EnableProfiling;
-    bool                          m_EnableFp16TurboMode;
-    bool                          m_EnableBf16TurboMode;
-    double                        m_ThresholdTime;
-    bool                          m_PrintIntermediate;
-    size_t                        m_SubgraphId;
-    bool                          m_EnableLayerDetails = false;
-    bool                          m_GenerateTensorData;
-    bool                          m_ParseUnsupported = false;
-    bool                          m_InferOutputShape = false;
-    bool                          m_EnableFastMath   = false;
-};
-
-template<typename TParser, typename TDataType>
-int MainImpl(const ExecuteNetworkParams& params,
-             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr,
-             size_t iterations = 1)
-{
-    using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
-
-    std::vector<TContainer> inputDataContainers;
-
-    try
-    {
-        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
-        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
-        inferenceModelParams.m_ModelPath                      = params.m_ModelPath;
-        inferenceModelParams.m_IsModelBinary                  = params.m_IsModelBinary;
-        inferenceModelParams.m_ComputeDevices                 = params.m_ComputeDevices;
-        inferenceModelParams.m_DynamicBackendsPath            = params.m_DynamicBackendsPath;
-        inferenceModelParams.m_PrintIntermediateLayers        = params.m_PrintIntermediate;
-        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
-        inferenceModelParams.m_ParseUnsupported               = params.m_ParseUnsupported;
-        inferenceModelParams.m_InferOutputShape               = params.m_InferOutputShape;
-        inferenceModelParams.m_EnableFastMath                 = params.m_EnableFastMath;
-
-        for(const std::string& inputName: params.m_InputNames)
-        {
-            inferenceModelParams.m_InputBindings.push_back(inputName);
-        }
-
-        for(unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
-        {
-            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
-        }
-
-        for(const std::string& outputName: params.m_OutputNames)
-        {
-            inferenceModelParams.m_OutputBindings.push_back(outputName);
-        }
-
-        inferenceModelParams.m_SubgraphId          = params.m_SubgraphId;
-        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
-        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;
-
-        InferenceModel<TParser, TDataType> model(inferenceModelParams,
-                                                 params.m_EnableProfiling,
-                                                 params.m_DynamicBackendsPath,
-                                                 runtime);
-
-        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
-        for(unsigned int i = 0; i < numInputs; ++i)
-        {
-            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
-                armnn::MakeOptional<QuantizationParams>(model.GetInputQuantizationParams()) :
-                armnn::EmptyOptional();
-
-            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
-                armnn::EmptyOptional() :
-                armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[i]);
-
-            unsigned int numElements = model.GetInputSize(i);
-            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
-            {
-                // If the user has provided a tensor shape for the current input,
-                // override numElements
-                numElements = params.m_InputTensorShapes[i]->GetNumElements();
-            }
-
-            TContainer tensorData;
-            PopulateTensorWithData(tensorData,
-                                   numElements,
-                                   params.m_InputTypes[i],
-                                   qParams,
-                                   dataFile);
-
-            inputDataContainers.push_back(tensorData);
-        }
-
-        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
-        std::vector<TContainer> outputDataContainers;
-
-        for (unsigned int i = 0; i < numOutputs; ++i)
-        {
-            if (params.m_OutputTypes[i].compare("float") == 0)
-            {
-                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
-            }
-            else if (params.m_OutputTypes[i].compare("int") == 0)
-            {
-                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
-            }
-            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
-            {
-                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
-            }
-            else
-            {
-                ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
-                return EXIT_FAILURE;
-            }
-        }
-
-        for (size_t x = 0; x < iterations; x++)
-        {
-            // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
-            auto inference_duration = model.Run(inputDataContainers, outputDataContainers);
-
-            if (params.m_GenerateTensorData)
-            {
-                ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
-            }
-
-            // Print output tensors
-            const auto& infosOut = model.GetOutputBindingInfos();
-            for (size_t i = 0; i < numOutputs; i++)
-            {
-                const armnn::TensorInfo& infoOut = infosOut[i].second;
-                auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];
-
-                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
-                                    infoOut,
-                                    outputTensorFile,
-                                    params.m_DequantizeOutput);
-                mapbox::util::apply_visitor(printer, outputDataContainers[i]);
-            }
-
-            ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
-                                    << std::fixed << inference_duration.count() << " ms\n";
-
-            // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
-            if (params.m_ThresholdTime != 0.0)
-            {
-                ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
-                                        << std::fixed << params.m_ThresholdTime << " ms";
-                auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
-                ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
-                                        << std::fixed << thresholdMinusInference << " ms" << "\n";
-
-                if (thresholdMinusInference < 0)
-                {
-                    std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
-                    ARMNN_LOG(fatal) << errorMessage;
-                }
-            }
-        }
-    }
-    catch (const armnn::Exception& e)
-    {
-        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
-        return EXIT_FAILURE;
-    }
-
-    return EXIT_SUCCESS;
-}
-
-// This will run a test
-int RunTest(const std::string& format,
-            const std::string& inputTensorShapesStr,
-            const vector<armnn::BackendId>& computeDevices,
-            const std::string& dynamicBackendsPath,
-            const std::string& path,
-            const std::string& inputNames,
-            const std::string& inputTensorDataFilePaths,
-            const std::string& inputTypes,
-            bool quantizeInput,
-            const std::string& outputTypes,
-            const std::string& outputNames,
-            const std::string& outputTensorFiles,
-            bool dequantizeOuput,
-            bool enableProfiling,
-            bool enableFp16TurboMode,
-            bool enableBf16TurboMode,
-            const double& thresholdTime,
-            bool printIntermediate,
-            const size_t subgraphId,
-            bool enableLayerDetails = false,
-            bool parseUnsupported = false,
-            bool inferOutputShape = false,
-            bool enableFastMath   = false,
-            const size_t iterations = 1,
-            const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
-{
-    std::string modelFormat = armnn::stringUtils::StringTrimCopy(format);
-    std::string modelPath = armnn::stringUtils::StringTrimCopy(path);
-    std::vector<std::string> inputNamesVector = ParseStringList(inputNames, ",");
-    std::vector<std::string> inputTensorShapesVector = ParseStringList(inputTensorShapesStr, ":");
-    std::vector<std::string> inputTensorDataFilePathsVector = ParseStringList(
-        inputTensorDataFilePaths, ",");
-    std::vector<std::string> outputNamesVector = ParseStringList(outputNames, ",");
-    std::vector<std::string> inputTypesVector = ParseStringList(inputTypes, ",");
-    std::vector<std::string> outputTypesVector = ParseStringList(outputTypes, ",");
-    std::vector<std::string> outputTensorFilesVector = ParseStringList(outputTensorFiles, ",");
-
-    // Parse model binary flag from the model-format string we got from the command-line
-    bool isModelBinary;
-    if (modelFormat.find("bin") != std::string::npos)
-    {
-        isModelBinary = true;
-    }
-    else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
-    {
-        isModelBinary = false;
-    }
-    else
-    {
-        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'";
-        return EXIT_FAILURE;
-    }
-
-    if ((inputTensorShapesVector.size() != 0) && (inputTensorShapesVector.size() != inputNamesVector.size()))
-    {
-        ARMNN_LOG(fatal) << "input-name and input-tensor-shape must have the same amount of elements.";
-        return EXIT_FAILURE;
-    }
-
-    if ((inputTensorDataFilePathsVector.size() != 0) &&
-        (inputTensorDataFilePathsVector.size() != inputNamesVector.size()))
-    {
-        ARMNN_LOG(fatal) << "input-name and input-tensor-data must have the same amount of elements.";
-        return EXIT_FAILURE;
-    }
-
-    if ((outputTensorFilesVector.size() != 0) &&
-        (outputTensorFilesVector.size() != outputNamesVector.size()))
-    {
-        ARMNN_LOG(fatal) << "output-name and write-outputs-to-file must have the same amount of elements.";
-        return EXIT_FAILURE;
-    }
-
-    if (inputTypesVector.size() == 0)
-    {
-        //Defaults the value of all inputs to "float"
-        inputTypesVector.assign(inputNamesVector.size(), "float");
-    }
-    else if ((inputTypesVector.size() != 0) && (inputTypesVector.size() != inputNamesVector.size()))
-    {
-        ARMNN_LOG(fatal) << "input-name and input-type must have the same amount of elements.";
-        return EXIT_FAILURE;
-    }
-
-    if (outputTypesVector.size() == 0)
-    {
-        //Defaults the value of all outputs to "float"
-        outputTypesVector.assign(outputNamesVector.size(), "float");
-    }
-    else if ((outputTypesVector.size() != 0) && (outputTypesVector.size() != outputNamesVector.size()))
-    {
-        ARMNN_LOG(fatal) << "output-name and output-type must have the same amount of elements.";
-        return EXIT_FAILURE;
-    }
-
-    // Parse input tensor shape from the string we got from the command-line.
-    std::vector<std::unique_ptr<armnn::TensorShape>> inputTensorShapes;
-
-    if (!inputTensorShapesVector.empty())
-    {
-        inputTensorShapes.reserve(inputTensorShapesVector.size());
-
-        for(const std::string& shape : inputTensorShapesVector)
-        {
-            std::stringstream ss(shape);
-            std::vector<unsigned int> dims = ParseArray(ss);
-
-            try
-            {
-                // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
-                inputTensorShapes.push_back(
-                    std::make_unique<armnn::TensorShape>(static_cast<unsigned int>(dims.size()), dims.data()));
-            }
-            catch (const armnn::InvalidArgumentException& e)
-            {
-                ARMNN_LOG(fatal) << "Cannot create tensor shape: " << e.what();
-                return EXIT_FAILURE;
-            }
-        }
-    }
-
-    // Check that threshold time is not less than zero
-    if (thresholdTime < 0)
-    {
-        ARMNN_LOG(fatal) << "Threshold time supplied as a command line argument is less than zero.";
-        return EXIT_FAILURE;
-    }
-
-    ExecuteNetworkParams params;
-    params.m_ModelPath                = modelPath.c_str();
-    params.m_IsModelBinary            = isModelBinary;
-    params.m_ComputeDevices           = computeDevices;
-    params.m_DynamicBackendsPath      = dynamicBackendsPath;
-    params.m_InputNames               = inputNamesVector;
-    params.m_InputTensorShapes        = std::move(inputTensorShapes);
-    params.m_InputTensorDataFilePaths = inputTensorDataFilePathsVector;
-    params.m_InputTypes               = inputTypesVector;
-    params.m_QuantizeInput            = quantizeInput;
-    params.m_OutputTypes              = outputTypesVector;
-    params.m_OutputNames              = outputNamesVector;
-    params.m_OutputTensorFiles        = outputTensorFilesVector;
-    params.m_DequantizeOutput         = dequantizeOuput;
-    params.m_EnableProfiling          = enableProfiling;
-    params.m_EnableFp16TurboMode      = enableFp16TurboMode;
-    params.m_EnableBf16TurboMode      = enableBf16TurboMode;
-    params.m_ThresholdTime            = thresholdTime;
-    params.m_PrintIntermediate        = printIntermediate;
-    params.m_SubgraphId               = subgraphId;
-    params.m_EnableLayerDetails       = enableLayerDetails;
-    params.m_GenerateTensorData       = inputTensorDataFilePathsVector.empty();
-    params.m_ParseUnsupported         = parseUnsupported;
-    params.m_InferOutputShape         = inferOutputShape;
-    params.m_EnableFastMath           = enableFastMath;
-
-    // Warn if ExecuteNetwork will generate dummy input data
-    if (params.m_GenerateTensorData)
-    {
-        ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
-    }
-
-    // Forward to implementation based on the parser type
-    if (modelFormat.find("armnn") != std::string::npos)
-    {
-#if defined(ARMNN_SERIALIZER)
-        return MainImpl<armnnDeserializer::IDeserializer, float>(params, runtime, iterations);
-#else
-        ARMNN_LOG(fatal) << "Not built with serialization support.";
-        return EXIT_FAILURE;
-#endif
-    }
-    else if (modelFormat.find("caffe") != std::string::npos)
-    {
-#if defined(ARMNN_CAFFE_PARSER)
-        return MainImpl<armnnCaffeParser::ICaffeParser, float>(params, runtime, iterations);
-#else
-        ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
-        return EXIT_FAILURE;
-#endif
-    }
-    else if (modelFormat.find("onnx") != std::string::npos)
-    {
-#if defined(ARMNN_ONNX_PARSER)
-        return MainImpl<armnnOnnxParser::IOnnxParser, float>(params, runtime, iterations);
-#else
-        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
-        return EXIT_FAILURE;
-#endif
-    }
-    else if (modelFormat.find("tensorflow") != std::string::npos)
-    {
-#if defined(ARMNN_TF_PARSER)
-        return MainImpl<armnnTfParser::ITfParser, float>(params, runtime, iterations);
-#else
-        ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
-        return EXIT_FAILURE;
-#endif
-    }
-    else if(modelFormat.find("tflite") != std::string::npos)
-    {
-#if defined(ARMNN_TF_LITE_PARSER)
-        if (! isModelBinary)
-        {
-            ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
-                << "'. Only 'binary' format supported for tflite files";
-            return EXIT_FAILURE;
-        }
-        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(params, runtime, iterations);
-#else
-        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
-            << "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
-        return EXIT_FAILURE;
-#endif
-    }
-    else
-    {
-        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
-            << "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
-        return EXIT_FAILURE;
-    }
-}
-
-int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IRuntime>& runtime,
-               const bool enableProfiling, const bool enableFp16TurboMode, const bool enableBf16TurboMode,
-               const double& thresholdTime, const bool printIntermediate, bool enableLayerDetails = false,
-               bool parseUnuspported = false, bool inferOutputShape = false, bool enableFastMath = false)
-{
-    IgnoreUnused(runtime);
-    std::string modelFormat;
-    std::string modelPath;
-    std::string inputNames;
-    std::string inputTensorShapes;
-    std::string inputTensorDataFilePaths;
-    std::string outputNames;
-    std::string inputTypes;
-    std::string outputTypes;
-    std::string dynamicBackendsPath;
-    std::string outputTensorFiles;
-
-    size_t subgraphId = 0;
-
-    const std::string backendsMessage = std::string("The preferred order of devices to run layers on by default. ")
-                                      + std::string("Possible choices: ")
-                                      + armnn::BackendRegistryInstance().GetBackendIdsAsString();
-
-    po::options_description desc("Options");
-    try
-    {
-        desc.add_options()
-        ("model-format,f", po::value(&modelFormat),
-         "armnn-binary, caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or "
-         "tensorflow-text.")
-        ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .armnn, .caffemodel, .prototxt, "
-         ".tflite, .onnx")
-        ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
-         backendsMessage.c_str())
-        ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
-         "Path where to load any available dynamic backend from. "
-         "If left empty (the default), dynamic backends will not be used.")
-        ("input-name,i", po::value(&inputNames), "Identifier of the input tensors in the network separated by comma.")
-        ("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
-         "executed. Defaults to 0.")
-        ("input-tensor-shape,s", po::value(&inputTensorShapes),
-         "The shape of the input tensors in the network as a flat array of integers separated by comma. "
-         "Several shapes can be passed separating them by semicolon. "
-         "This parameter is optional, depending on the network.")
-        ("input-tensor-data,d", po::value(&inputTensorDataFilePaths)->default_value(""),
-         "Path to files containing the input data as a flat array separated by whitespace. "
-         "Several paths can be passed separating them by comma. If not specified, the network will be run with dummy "
-         "data (useful for profiling).")
-        ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
-         "If unset, defaults to \"float\" for all defined inputs. "
-         "Accepted values (float, int or qasymm8).")
-        ("quantize-input,q",po::bool_switch()->default_value(false),
-         "If this option is enabled, all float inputs will be quantized to qasymm8. "
-         "If unset, default to not quantized. "
-         "Accepted values (true or false)")
-        ("output-type,z",po::value(&outputTypes), "The type of the output tensors in the network separated by comma. "
-         "If unset, defaults to \"float\" for all defined outputs. "
-         "Accepted values (float, int or qasymm8).")
-        ("output-name,o", po::value(&outputNames),
-         "Identifier of the output tensors in the network separated by comma.")
-        ("dequantize-output,l",po::bool_switch()->default_value(false),
-         "If this option is enabled, all quantized outputs will be dequantized to float. "
-         "If unset, default to not get dequantized. "
-         "Accepted values (true or false)")
-        ("write-outputs-to-file,w", po::value(&outputTensorFiles),
-         "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
-         "If left empty (the default), the output tensors will not be written to a file.");
-    }
-    catch (const std::exception& e)
-    {
-        // Coverity points out that default_value(...) can throw a bad_lexical_cast,
-        // and that desc.add_options() can throw boost::io::too_few_args.
-        // They really won't in any of these cases.
-        ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
-        ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
-        return EXIT_FAILURE;
-    }
-
-    std::vector<const char*> clOptions;
-    clOptions.reserve(csvRow.values.size());
-    for (const std::string& value : csvRow.values)
-    {
-        clOptions.push_back(value.c_str());
-    }
-
-    po::variables_map vm;
-    try
-    {
-        po::store(po::parse_command_line(static_cast<int>(clOptions.size()), clOptions.data(), desc), vm);
-
-        po::notify(vm);
-
-        CheckOptionDependencies(vm);
-    }
-    catch (const po::error& e)
-    {
-        std::cerr << e.what() << std::endl << std::endl;
-        std::cerr << desc << std::endl;
-        return EXIT_FAILURE;
-    }
-
-    // Get the value of the switch arguments.
-    bool quantizeInput = vm["quantize-input"].as<bool>();
-    bool dequantizeOutput = vm["dequantize-output"].as<bool>();
-
-    // Get the preferred order of compute devices.
-    std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
-
-    // Remove duplicates from the list of compute devices.
-    RemoveDuplicateDevices(computeDevices);
-
-    // Check that the specified compute devices are valid.
-    std::string invalidBackends;
-    if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional<std::string&>(invalidBackends)))
-    {
-        ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
-                                 << invalidBackends;
-        return EXIT_FAILURE;
-    }
-
-    return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
-                   inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles,
-                   dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
-                   thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnuspported,
-                   inferOutputShape, enableFastMath);
-}
-
-#if defined(ARMCOMPUTECL_ENABLED)
-int RunCLTuning(const std::string& tuningPath,
-            const int tuningLevel,
-            const std::string& modelFormat,
-            const std::string& inputTensorShapes,
-            const vector<armnn::BackendId>& computeDevices,
-            const std::string& dynamicBackendsPath,
-            const std::string& modelPath,
-            const std::string& inputNames,
-            const std::string& inputTensorDataFilePaths,
-            const std::string& inputTypes,
-            bool quantizeInput,
-            const std::string& outputTypes,
-            const std::string& outputNames,
-            const std::string& outputTensorFiles,
-            bool dequantizeOutput,
-            bool enableProfiling,
-            bool enableFp16TurboMode,
-            bool enableBf16TurboMode,
-            const double& thresholdTime,
-            bool printIntermediate,
-            const size_t subgraphId,
-            bool enableLayerDetails = false,
-            bool parseUnsupported = false,
-            bool inferOutputShape = false,
-            bool enableFastMath = false)
-{
-    armnn::IRuntime::CreationOptions options;
-    options.m_BackendOptions.emplace_back(
-        armnn::BackendOptions
-        {
-            "GpuAcc",
-            {
-                {"TuningLevel", tuningLevel},
-                {"TuningFile", tuningPath.c_str()},
-                {"KernelProfilingEnabled", enableProfiling}
-            }
-        }
-    );
-
-    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options));
-    const auto start_time = armnn::GetTimeNow();
-
-    ARMNN_LOG(info) << "Tuning run...\n";
-    int state = RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
-                        inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
-                        outputTensorFiles, dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
-                        thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnsupported,
-                        inferOutputShape, enableFastMath, 1, runtime);
-
-    ARMNN_LOG(info) << "Tuning time: " << std::setprecision(2)
-                    << std::fixed << armnn::GetTimeDuration(start_time).count() << " ms\n";
-
-    return state;
-}
-#endif
\ No newline at end of file
+                            const armnn::Optional<std::string>& dataFile);
+
+/**
+ * Verifies if the given string is a valid path. Reports invalid paths to std::cerr.
+ * @param file string - A string containing the path to check
+ * @param expectFile bool - If true, checks for a regular file.
+ * @return bool - True if the given string is a valid path, false otherwise.
+ */
+bool ValidatePath(const std::string& file, const bool expectFile);
+
+/**
+ * Verifies that all strings in a given vector are valid paths. Reports invalid paths to std::cerr.
+ * @param fileVec vector of strings - A vector of strings containing the paths to check
+ * @param expectFile bool - If true, checks for a regular file.
+ * @return bool - True if all given strings are valid paths, false otherwise.
+ */
+bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFile);
\ No newline at end of file