IVGCVSW-3547 Use ExecuteNetwork to run a dynamic backend end to end test
author    Matteo Martincigh <matteo.martincigh@arm.com>
Wed, 14 Aug 2019 10:42:30 +0000 (11:42 +0100)
committer Matteo Martincigh <matteo.martincigh@arm.com>
Fri, 23 Aug 2019 12:44:27 +0000 (12:44 +0000)
 * Added a path override for dynamic backend loading (see the sketch below)
 * Do not default to CpuRef, as dynamic backends may be loaded at runtime
 * Do not validate the requested backends up front, as additional backends
   may be registered at runtime as dynamic backends
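
The new option simply threads a directory path down to the runtime creation
options. A minimal sketch of the resulting flow, assuming an ArmNN build with
dynamic backend support (the helper function here is illustrative, not part of
this change):

    #include <armnn/ArmNN.hpp> // IRuntime, IRuntimePtr, CreationOptions
    #include <string>

    // Mirrors what the InferenceModel constructor below now does.
    armnn::IRuntimePtr CreateRuntime(const std::string& dynamicBackendsPath,
                                     bool enableProfiling)
    {
        armnn::IRuntime::CreationOptions options;
        options.m_EnableGpuProfiling  = enableProfiling;
        // An empty path (the default) means no dynamic backends are loaded.
        options.m_DynamicBackendsPath = dynamicBackendsPath;
        return armnn::IRuntime::Create(options);
    }

Because the runtime is created before CheckRequestedBackendsAreValid runs (see
the InferenceModel constructor below), backend IDs passed on the command line
may refer to backends that only become available once dynamic loading has
happened, which is why the early check in ExecuteNetwork.cpp is dropped.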

Change-Id: If23f79aa1480b8dfce57e49b1746c23b6b9e6f82
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
tests/ExecuteNetwork/ExecuteNetwork.cpp
tests/InferenceModel.hpp
tests/InferenceTest.hpp
tests/InferenceTest.inl
tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp

diff --git a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
index c136672..d563faa 100644
@@ -40,7 +40,9 @@ int main(int argc, char* argv[])
                         modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
                         modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
 
-                        return std::make_unique<YoloInferenceModel>(modelParams, commonOptions.m_EnableProfiling);
+                        return std::make_unique<YoloInferenceModel>(modelParams,
+                                                                    commonOptions.m_EnableProfiling,
+                                                                    commonOptions.m_DynamicBackendsPath);
                 });
             });
     }
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index a8f3b3d..1a03062 100644
@@ -27,6 +27,7 @@ int main(int argc, const char* argv[])
     std::string outputNames;
     std::string inputTypes;
     std::string outputTypes;
+    std::string dynamicBackendsPath;
 
     double thresholdTime = 0.0;
 
@@ -52,6 +53,9 @@ int main(int argc, const char* argv[])
              ".prototxt, .tflite, .onnx")
             ("compute,c", po::value<std::vector<std::string>>()->multitoken(),
              backendsMessage.c_str())
+            ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
+             "Path from which to load any available dynamic backends. "
+             "If left empty (the default), dynamic backends will not be used.")
             ("input-name,i", po::value(&inputNames),
              "Identifier of the input tensors in the network separated by comma.")
             ("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be executed."
@@ -62,7 +66,7 @@ int main(int argc, const char* argv[])
              "This parameter is optional, depending on the network.")
             ("input-tensor-data,d", po::value(&inputTensorDataFilePaths),
              "Path to files containing the input data as a flat array separated by whitespace. "
-             "Several paths can be passed separating them by comma. ")
+             "Several paths can be passed separating them by comma.")
             ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
              "If unset, defaults to \"float\" for all defined inputs. "
              "Accepted values (float, int or qasymm8)")
@@ -196,23 +200,15 @@ int main(int argc, const char* argv[])
     {
-        // Get the preferred order of compute devices. If none are specified, default to using CpuRef
+        // Get the preferred order of compute devices.
         const std::string computeOption("compute");
-        std::vector<std::string> computeDevicesAsStrings = CheckOption(vm, computeOption.c_str()) ?
-            vm[computeOption].as<std::vector<std::string>>() :
-            std::vector<std::string>({ "CpuRef" });
+        std::vector<std::string> computeDevicesAsStrings =
+                CheckOption(vm, computeOption.c_str()) ?
+                    vm[computeOption].as<std::vector<std::string>>() :
+                    std::vector<std::string>();
         std::vector<armnn::BackendId> computeDevices(computeDevicesAsStrings.begin(), computeDevicesAsStrings.end());
 
         // Remove duplicates from the list of compute devices.
         RemoveDuplicateDevices(computeDevices);
 
-        // Check that the specified compute devices are valid.
-        std::string invalidBackends;
-        if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional<std::string&>(invalidBackends)))
-        {
-            BOOST_LOG_TRIVIAL(fatal) << "The list of preferred devices contains invalid backend IDs: "
-                                     << invalidBackends;
-            return EXIT_FAILURE;
-        }
-
         try
         {
             CheckOptionDependencies(vm);
@@ -224,7 +220,7 @@ int main(int argc, const char* argv[])
             return EXIT_FAILURE;
         }
 
-        return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames,
+        return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                        inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
                        enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
     }
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 4ad5872..13e8031 100644
@@ -86,13 +86,14 @@ struct Params
     std::vector<armnn::TensorShape> m_InputShapes;
     std::vector<std::string>        m_OutputBindings;
     std::vector<armnn::BackendId>   m_ComputeDevices;
+    std::string                     m_DynamicBackendsPath;
     size_t                          m_SubgraphId;
     bool                            m_IsModelBinary;
     bool                            m_VisualizePostOptimizationModel;
     bool                            m_EnableFp16TurboMode;
 
     Params()
-        : m_ComputeDevices{"CpuRef"}
+        : m_ComputeDevices{}
         , m_SubgraphId(0)
         , m_IsModelBinary(true)
         , m_VisualizePostOptimizationModel(false)
@@ -318,6 +319,7 @@ public:
     {
         std::string m_ModelDir;
         std::vector<std::string> m_ComputeDevices;
+        std::string m_DynamicBackendsPath;
         bool m_VisualizePostOptimizationModel;
         bool m_EnableFp16TurboMode;
         std::string m_Labels;
@@ -345,6 +347,9 @@ public:
             ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
                 default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
                 multitoken(), backendsMessage.c_str())
+            ("dynamic-backends-path,b", po::value(&options.m_DynamicBackendsPath),
+                "Path from which to load any available dynamic backends. "
+                "If left empty (the default), dynamic backends will not be used.")
             ("labels,l", po::value<std::string>(&options.m_Labels),
                 "Text file containing one image filename - correct label pair per line, "
                 "used to test the accuracy of the network.")
@@ -359,8 +364,10 @@ public:
 
     InferenceModel(const Params& params,
                    bool enableProfiling,
+                   const std::string& dynamicBackendsPath,
                    const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
         : m_EnableProfiling(enableProfiling)
+        , m_DynamicBackendsPath(dynamicBackendsPath)
     {
         if (runtime)
         {
@@ -370,6 +377,7 @@ public:
         {
             armnn::IRuntime::CreationOptions options;
             options.m_EnableGpuProfiling = m_EnableProfiling;
+            options.m_DynamicBackendsPath = m_DynamicBackendsPath;
             m_Runtime = std::move(armnn::IRuntime::Create(options));
         }
 
@@ -379,10 +387,9 @@ public:
             throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
         }
 
-        armnn::INetworkPtr network =
-            CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
+        armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
 
-        armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork *){}};
+        armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
         {
             ARMNN_SCOPED_HEAP_PROFILING("Optimizing");
 
@@ -544,6 +551,7 @@ private:
     std::vector<armnn::BindingPointInfo> m_InputBindings;
     std::vector<armnn::BindingPointInfo> m_OutputBindings;
     bool m_EnableProfiling;
+    std::string m_DynamicBackendsPath;
 
     template<typename TContainer>
     armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 40c9e5e..f2b8c63 100644
@@ -58,10 +58,12 @@ struct InferenceTestOptions
     unsigned int m_IterationCount;
     std::string m_InferenceTimesFile;
     bool m_EnableProfiling;
+    std::string m_DynamicBackendsPath;
 
     InferenceTestOptions()
-        : m_IterationCount(0),
-          m_EnableProfiling(0)
+        : m_IterationCount(0)
+        , m_EnableProfiling(0)
+        , m_DynamicBackendsPath()
     {}
 };
 
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 04cae99..c91193f 100644
@@ -397,7 +397,9 @@ int ClassifierInferenceTestMain(int argc,
                     modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
                     modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
 
-                    return std::make_unique<InferenceModel>(modelParams, commonOptions.m_EnableProfiling);
+                    return std::make_unique<InferenceModel>(modelParams,
+                                                            commonOptions.m_EnableProfiling,
+                                                            commonOptions.m_DynamicBackendsPath);
             });
         });
 }
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 440dcf9..810f499 100644
@@ -254,6 +254,7 @@ template<typename TParser, typename TDataType>
 int MainImpl(const char* modelPath,
              bool isModelBinary,
              const std::vector<armnn::BackendId>& computeDevices,
+             const std::string& dynamicBackendsPath,
              const std::vector<string>& inputNames,
              const std::vector<std::unique_ptr<armnn::TensorShape>>& inputTensorShapes,
              const std::vector<string>& inputTensorDataFilePaths,
@@ -278,6 +279,7 @@ int MainImpl(const char* modelPath,
         params.m_ModelPath = modelPath;
         params.m_IsModelBinary = isModelBinary;
         params.m_ComputeDevices = computeDevices;
+        params.m_DynamicBackendsPath = dynamicBackendsPath;
 
         for(const std::string& inputName: inputNames)
         {
@@ -296,7 +298,7 @@ int MainImpl(const char* modelPath,
 
         params.m_SubgraphId = subgraphId;
         params.m_EnableFp16TurboMode = enableFp16TurboMode;
-        InferenceModel<TParser, TDataType> model(params, enableProfiling, runtime);
+        InferenceModel<TParser, TDataType> model(params, enableProfiling, dynamicBackendsPath, runtime);
 
         for(unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i)
         {
@@ -407,6 +409,7 @@ int MainImpl(const char* modelPath,
 int RunTest(const std::string& format,
             const std::string& inputTensorShapesStr,
             const vector<armnn::BackendId>& computeDevice,
+            const std::string& dynamicBackendsPath,
             const std::string& path,
             const std::string& inputNames,
             const std::string& inputTensorDataFilePaths,
@@ -513,7 +516,7 @@ int RunTest(const std::string& format,
 #if defined(ARMNN_SERIALIZER)
     return MainImpl<armnnDeserializer::IDeserializer, float>(
         modelPath.c_str(), isModelBinary, computeDevice,
-        inputNamesVector, inputTensorShapes,
+        dynamicBackendsPath, inputNamesVector, inputTensorShapes,
         inputTensorDataFilePathsVector, inputTypesVector, quantizeInput,
         outputTypesVector, outputNamesVector, enableProfiling,
         enableFp16TurboMode, thresholdTime, subgraphId, runtime);
@@ -526,6 +529,7 @@ int RunTest(const std::string& format,
     {
 #if defined(ARMNN_CAFFE_PARSER)
         return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                               dynamicBackendsPath,
                                                                inputNamesVector, inputTensorShapes,
                                                                inputTensorDataFilePathsVector, inputTypesVector,
                                                                quantizeInput, outputTypesVector, outputNamesVector,
@@ -540,6 +544,7 @@ int RunTest(const std::string& format,
 {
 #if defined(ARMNN_ONNX_PARSER)
     return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                         dynamicBackendsPath,
                                                          inputNamesVector, inputTensorShapes,
                                                          inputTensorDataFilePathsVector, inputTypesVector,
                                                          quantizeInput, outputTypesVector, outputNamesVector,
@@ -554,6 +559,7 @@ int RunTest(const std::string& format,
     {
 #if defined(ARMNN_TF_PARSER)
         return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                         dynamicBackendsPath,
                                                          inputNamesVector, inputTensorShapes,
                                                          inputTensorDataFilePathsVector, inputTypesVector,
                                                          quantizeInput, outputTypesVector, outputNamesVector,
@@ -574,6 +580,7 @@ int RunTest(const std::string& format,
             return EXIT_FAILURE;
         }
         return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+                                                                 dynamicBackendsPath,
                                                                  inputNamesVector, inputTensorShapes,
                                                                  inputTensorDataFilePathsVector, inputTypesVector,
                                                                  quantizeInput, outputTypesVector, outputNamesVector,
@@ -604,6 +611,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
     std::string outputNames;
     std::string inputTypes;
     std::string outputTypes;
+    std::string dynamicBackendsPath;
 
     size_t subgraphId = 0;
 
@@ -622,6 +630,9 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
          ".tflite, .onnx")
         ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
          backendsMessage.c_str())
+        ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
+         "Path from which to load any available dynamic backends. "
+         "If left empty (the default), dynamic backends will not be used.")
         ("input-name,i", po::value(&inputNames), "Identifier of the input tensors in the network separated by comma.")
         ("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
          "executed. Defaults to 0.")
@@ -696,7 +707,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
         return EXIT_FAILURE;
     }
 
-    return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames,
+    return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                    inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
                    enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
-}
\ No newline at end of file
+}
diff --git a/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp b/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
index 0619647..787102e 100644
@@ -64,7 +64,9 @@ int main(int argc, char* argv[])
                         modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
                         modelParams.m_EnableFp16TurboMode            = modelOptions.m_EnableFp16TurboMode;
 
-                        return std::make_unique<Model>(modelParams, commonOptions.m_EnableProfiling);
+                        return std::make_unique<Model>(modelParams,
+                                                       commonOptions.m_EnableProfiling,
+                                                       commonOptions.m_DynamicBackendsPath);
                 });
             });
     }
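
With the dynamic backends path plumbed through InferenceModel, RunTest and
RunCsvTest, an end-to-end run against a dynamically loaded backend can now be
driven entirely from ExecuteNetwork, roughly along the lines of
"-c <DynamicBackendId> -b /path/to/dynamic/backends" plus the usual model and
input options; the backend id and path here are illustrative and must match
whatever backend the shared object found under the given path registers.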