From 339bcae73515c66899432b5844d7c239c570c4b8 Mon Sep 17 00:00:00 2001
From: Aron Virginas-Tar
Date: Thu, 31 Jan 2019 16:44:26 +0000
Subject: [PATCH] IVGCVSW-2604 Fix bug that made it impossible to execute
 inference tests on certain backends

* Read compute devices from the CL as strings and convert them into
  BackendId objects afterwards

Change-Id: Icded1c572778f5a213644e3052ff6dfe7022128b
Signed-off-by: Aron Virginas-Tar
---
 tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp |  2 +-
 tests/ExecuteNetwork/ExecuteNetwork.cpp   |  4 ++--
 tests/InferenceModel.hpp                  | 27 +++++++++++++++-------
 tests/InferenceTest.inl                   |  2 +-
 .../TfLiteMobileNetSsd-Armnn.cpp          |  2 +-
 5 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
index b752c7c..c6ffe3d 100644
--- a/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
+++ b/tests/CaffeYolo-Armnn/CaffeYolo-Armnn.cpp
@@ -35,7 +35,7 @@ int main(int argc, char* argv[])
             modelParams.m_OutputBindings = { "fc12" };
             modelParams.m_InputShapes = { inputTensorShape };
             modelParams.m_IsModelBinary = true;
-            modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice;
+            modelParams.m_ComputeDevices = modelOptions.GetComputeDevicesAsBackendIds();
             modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
             modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
 
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index 2978010..afde986 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -171,7 +171,7 @@ void RemoveDuplicateDevices(std::vector& computeDevices)
 template
 int MainImpl(const char* modelPath,
              bool isModelBinary,
-             const std::vector<armnn::BackendId>& computeDevice,
+             const std::vector<armnn::BackendId>& computeDevices,
              const char* inputName,
              const armnn::TensorShape* inputTensorShape,
              const char* inputTensorDataFilePath,
@@ -200,7 +200,7 @@ int MainImpl(const char* modelPath,
         typename InferenceModel::Params params;
         params.m_ModelPath = modelPath;
         params.m_IsModelBinary = isModelBinary;
-        params.m_ComputeDevice = computeDevice;
+        params.m_ComputeDevices = computeDevices;
         params.m_InputBindings = { inputName };
         params.m_InputShapes = { *inputTensorShape };
         params.m_OutputBindings = { outputName };
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 1c89238..7e33866 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -16,6 +16,7 @@
 #include
+#include
 #include
 #include
 #include
@@ -24,6 +25,8 @@
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -78,7 +81,7 @@ struct Params
     std::vector<std::string> m_InputBindings;
     std::vector<armnn::TensorShape> m_InputShapes;
     std::vector<std::string> m_OutputBindings;
-    std::vector<armnn::BackendId> m_ComputeDevice;
+    std::vector<armnn::BackendId> m_ComputeDevices;
     bool m_EnableProfiling;
     size_t m_SubgraphId;
     bool m_IsModelBinary;
@@ -86,7 +89,7 @@ struct Params
     bool m_EnableFp16TurboMode;
 
     Params()
-        : m_ComputeDevice{armnn::Compute::CpuRef}
+        : m_ComputeDevices{"CpuRef"}
        , m_EnableProfiling(false)
        , m_SubgraphId(0)
        , m_IsModelBinary(true)
@@ -319,16 +322,23 @@ public:
    struct CommandLineOptions
    {
        std::string m_ModelDir;
-       std::vector<armnn::Compute> m_ComputeDevice;
+       std::vector<std::string> m_ComputeDevices;
        bool m_VisualizePostOptimizationModel;
        bool m_EnableFp16TurboMode;
+
+       std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
+       {
+           std::vector<armnn::BackendId> backendIds;
+           std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
+           return backendIds;
+       }
    };
 
    static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
    {
        namespace po = boost::program_options;
 
-       std::vector<armnn::Compute> defaultBackends = {armnn::Compute::CpuAcc, armnn::Compute::CpuRef};
+       const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };
 
        const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
                                          + armnn::BackendRegistryInstance().GetBackendIdsAsString();
@@ -336,8 +346,9 @@ public:
        desc.add_options()
            ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
                "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
-           ("compute,c", po::value<std::vector<armnn::Compute>>(&options.m_ComputeDevice)->default_value
-            (defaultBackends), backendsMessage.c_str())
+           ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
+            default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
+            multitoken(), backendsMessage.c_str())
            ("visualize-optimized-model,v",
                po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
                "Produce a dot file useful for visualizing the graph post optimization."
@@ -362,7 +373,7 @@ public:
        }
 
        std::string invalidBackends;
-       if (!CheckRequestedBackendsAreValid(params.m_ComputeDevice, armnn::Optional<std::string&>(invalidBackends)))
+       if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
        }
@@ -377,7 +388,7 @@ public:
        armnn::OptimizerOptions options;
        options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
 
-       optNet = armnn::Optimize(*network, params.m_ComputeDevice, m_Runtime->GetDeviceSpec(), options);
+       optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
        if (!optNet)
        {
            throw armnn::Exception("Optimize returned nullptr");
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 4dde354..07a20d5 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -342,7 +342,7 @@ int ClassifierInferenceTestMain(int argc,
            }
 
            modelParams.m_IsModelBinary = isModelBinary;
-           modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice;
+           modelParams.m_ComputeDevices = modelOptions.GetComputeDevicesAsBackendIds();
            modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
            modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
 
diff --git a/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp b/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
index b1bc0f6..3328339 100644
--- a/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
+++ b/tests/TfLiteMobileNetSsd-Armnn/TfLiteMobileNetSsd-Armnn.cpp
@@ -59,7 +59,7 @@ int main(int argc, char* argv[])
            modelParams.m_InputShapes = { inputTensorShape };
            modelParams.m_IsModelBinary = true;
 
-           modelParams.m_ComputeDevice = modelOptions.m_ComputeDevice;
+           modelParams.m_ComputeDevices = modelOptions.GetComputeDevicesAsBackendIds();
            modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
            modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
 
--
2.7.4
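
Note (not part of the patch): the conversion works because armnn::BackendId is implicitly constructible from std::string, so std::copy with a std::back_inserter turns each command-line string into a BackendId as it is inserted, exactly as CommandLineOptions::GetComputeDevicesAsBackendIds() does above. The following is a minimal standalone sketch of that idea; the free function name StringsToBackendIds is hypothetical and only the armnn::BackendId type and its header are taken from Arm NN itself.

    // Sketch of the string -> BackendId conversion used by the patch.
    // Assumes armnn::BackendId has an implicit std::string constructor,
    // which is what the std::back_inserter copy relies on.
    #include <algorithm>
    #include <iterator>
    #include <string>
    #include <vector>

    #include <armnn/BackendId.hpp>

    std::vector<armnn::BackendId> StringsToBackendIds(const std::vector<std::string>& computeDevices)
    {
        std::vector<armnn::BackendId> backendIds;
        backendIds.reserve(computeDevices.size());
        // Each std::string converts to armnn::BackendId on insertion.
        std::copy(computeDevices.begin(), computeDevices.end(), std::back_inserter(backendIds));
        return backendIds;
    }

    // Example: StringsToBackendIds({ "CpuAcc", "GpuAcc", "CpuRef" }) produces
    // BackendIds suitable for Params::m_ComputeDevices and armnn::Optimize,
    // including custom backend names that the old armnn::Compute enum parsing rejected.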