//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <armnn/ArmNN.hpp>
#include <armnn/BackendRegistry.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#endif
#if defined(ARMNN_ONNX_PARSER)
#include <armnnOnnxParser/IOnnxParser.hpp>
#endif
#include <HeapProfiling.hpp>
#include <TensorIOUtils.hpp>

#include <boost/algorithm/string/join.hpp>
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/log/trivial.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/variant.hpp>

#include <algorithm>
#include <chrono>
#include <fstream>
#include <iostream>
#include <iterator>
#include <map>
#include <string>
#include <vector>
#include <type_traits>
namespace
{

// Returns true if every requested backend id is registered with the backend registry.
// If invalidBackendIds is given, the unknown ids are appended to it, comma-separated.
inline bool CheckRequestedBackendsAreValid(const std::vector<armnn::BackendId>& backendIds,
                                           armnn::Optional<std::string&> invalidBackendIds = armnn::EmptyOptional())
{
    if (backendIds.empty())
    {
        return false;
    }

    armnn::BackendIdSet validBackendIds = armnn::BackendRegistryInstance().GetBackendIds();

    bool allValid = true;
    for (const auto& backendId : backendIds)
    {
        if (std::find(validBackendIds.begin(), validBackendIds.end(), backendId) == validBackendIds.end())
        {
            allValid = false;
            if (invalidBackendIds)
            {
                if (!invalidBackendIds.value().empty())
                {
                    invalidBackendIds.value() += ", ";
                }
                invalidBackendIds.value() += backendId.Get();
            }
        }
    }
    return allValid;
}

} // anonymous namespace
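// Usage sketch (illustrative only; "CpuAcc"/"CpuRef" are standard Arm NN backend ids,
// the surrounding variable names are hypothetical):
//
//     std::string invalid;
//     std::vector<armnn::BackendId> requested = { "CpuAcc", "MadeUpBackend" };
//     if (!CheckRequestedBackendsAreValid(requested, armnn::Optional<std::string&>(invalid)))
//     {
//         std::cerr << "Invalid backend IDs: " << invalid << "\n";
//     }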
namespace InferenceModelInternal
{
using BindingPointInfo = armnn::BindingPointInfo;

using QuantizationParams = std::pair<float,int32_t>;

struct Params
{
    std::string                     m_ModelPath;
    std::vector<std::string>        m_InputBindings;
    std::vector<armnn::TensorShape> m_InputShapes;
    std::vector<std::string>        m_OutputBindings;
    std::vector<armnn::BackendId>   m_ComputeDevices;
    std::string                     m_DynamicBackendsPath;
    size_t                          m_SubgraphId;
    bool                            m_IsModelBinary;
    bool                            m_VisualizePostOptimizationModel;
    bool                            m_EnableFp16TurboMode;
    bool                            m_PrintIntermediateLayers;

    Params()
        : m_ComputeDevices{}
        , m_SubgraphId(0)
        , m_IsModelBinary(true)
        , m_VisualizePostOptimizationModel(false)
        , m_EnableFp16TurboMode(false)
        , m_PrintIntermediateLayers(false)
    {}
};

} // namespace InferenceModelInternal
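// Usage sketch (hypothetical model path and layer names):
//
//     InferenceModelInternal::Params params;
//     params.m_ModelPath      = "model.tflite";
//     params.m_InputBindings  = { "input" };
//     params.m_OutputBindings = { "output" };
//     params.m_ComputeDevices = { "CpuAcc", "CpuRef" };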
template <typename IParser>
struct CreateNetworkImpl
{
public:
    using Params = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        std::map<std::string, armnn::TensorShape> inputShapes;
        if (!params.m_InputShapes.empty())
        {
            const size_t numInputShapes   = params.m_InputShapes.size();
            const size_t numInputBindings = params.m_InputBindings.size();
            if (numInputShapes < numInputBindings)
            {
                throw armnn::Exception(boost::str(boost::format(
                    "Not every input has its tensor shape specified: expected=%1%, got=%2%")
                    % numInputBindings % numInputShapes));
            }

            for (size_t i = 0; i < numInputShapes; i++)
            {
                inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i];
            }
        }

        std::vector<std::string> requestedOutputs = params.m_OutputBindings;
        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            // Handle text and binary input differently by calling the corresponding parser function
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) :
                parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            inputBindings.push_back(parser->GetNetworkInputBindingInfo(inputLayerName));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            outputBindings.push_back(parser->GetNetworkOutputBindingInfo(outputLayerName));
        }

        return network;
    }
};
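// Dispatch sketch (illustrative; requires the corresponding parser to be compiled in):
// the parser type selects either the primary template above or one of the
// specializations below, so callers never branch on the model format themselves.
//
//     std::vector<armnn::BindingPointInfo> inputBindings, outputBindings;
//     armnn::INetworkPtr net =
//         CreateNetworkImpl<armnnTfLiteParser::ITfLiteParser>::Create(params, inputBindings, outputBindings);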
#if defined(ARMNN_SERIALIZER)
template <>
struct CreateNetworkImpl<armnnDeserializer::IDeserializer>
{
public:
    using IParser = armnnDeserializer::IDeserializer;
    using Params  = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        auto parser(IParser::Create());
        BOOST_ASSERT(parser);

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");

            boost::system::error_code errorCode;
            boost::filesystem::path pathToFile(params.m_ModelPath);
            if (!boost::filesystem::exists(pathToFile, errorCode))
            {
                throw armnn::FileNotFoundException(boost::str(
                    boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
                    params.m_ModelPath %
                    errorCode %
                    CHECK_LOCATION().AsString()));
            }
            std::ifstream file(params.m_ModelPath, std::ios::binary);

            network = parser->CreateNetworkFromBinary(file);
        }

        unsigned int subgraphId = boost::numeric_cast<unsigned int>(params.m_SubgraphId);

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnnDeserializer::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(subgraphId, inputLayerName);
            inputBindings.push_back(std::make_pair(inputBinding.m_BindingId, inputBinding.m_TensorInfo));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnnDeserializer::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(subgraphId, outputLayerName);
            outputBindings.push_back(std::make_pair(outputBinding.m_BindingId, outputBinding.m_TensorInfo));
        }

        return network;
    }
};
#endif
#if defined(ARMNN_TF_LITE_PARSER)
template <>
struct CreateNetworkImpl<armnnTfLiteParser::ITfLiteParser>
{
public:
    using IParser = armnnTfLiteParser::ITfLiteParser;
    using Params  = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnn::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(params.m_SubgraphId, inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnn::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif
#if defined(ARMNN_ONNX_PARSER)
template <>
struct CreateNetworkImpl<armnnOnnxParser::IOnnxParser>
{
public:
    using IParser          = armnnOnnxParser::IOnnxParser;
    using Params           = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str()) :
                parser->CreateNetworkFromTextFile(modelPath.c_str()));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif
template <typename IParser, typename TDataType>
class InferenceModel
{
public:
    using DataType           = TDataType;
    using Params             = InferenceModelInternal::Params;
    using QuantizationParams = InferenceModelInternal::QuantizationParams;
    using TContainer         = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    struct CommandLineOptions
    {
        std::string m_ModelDir;
        std::vector<std::string> m_ComputeDevices;
        std::string m_DynamicBackendsPath;
        bool m_VisualizePostOptimizationModel;
        bool m_EnableFp16TurboMode;
        std::string m_Labels;

        std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
        {
            std::vector<armnn::BackendId> backendIds;
            std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
            return backendIds;
        }
    };
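    // Usage sketch (hypothetical values): the device strings parsed from the command
    // line convert implicitly to armnn::BackendId.
    //
    //     CommandLineOptions options;
    //     options.m_ComputeDevices = { "GpuAcc", "CpuRef" };
    //     std::vector<armnn::BackendId> ids = options.GetComputeDevicesAsBackendIds();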
    static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
    {
        namespace po = boost::program_options;

        const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };

        const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
                                          + armnn::BackendRegistryInstance().GetBackendIdsAsString();

        desc.add_options()
            ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
                "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
            ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
                default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
                multitoken(), backendsMessage.c_str())
            ("dynamic-backends-path,b", po::value(&options.m_DynamicBackendsPath),
                "Path from which to load any available dynamic backends. "
                "If left empty (the default), dynamic backends will not be used.")
            ("labels,l", po::value<std::string>(&options.m_Labels),
                "Text file containing one image filename - correct label pair per line, "
                "used to test the accuracy of the network.")
            ("visualize-optimized-model,v",
                po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
                "Produce a dot file useful for visualizing the graph post optimization. "
                "The file will have the same name as the model with the .dot extension.")
            ("fp16-turbo-mode", po::value<bool>(&options.m_EnableFp16TurboMode)->default_value(false),
                "If this option is enabled, FP32 layers, weights and biases will be converted "
                "to FP16 where the backend supports it.");
    }
    InferenceModel(const Params& params,
                   bool enableProfiling,
                   const std::string& dynamicBackendsPath,
                   const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
        : m_EnableProfiling(enableProfiling)
        , m_DynamicBackendsPath(dynamicBackendsPath)
    {
        // Use the caller-supplied runtime if there is one; otherwise create our own.
        if (runtime)
        {
            m_Runtime = runtime;
        }
        else
        {
            armnn::IRuntime::CreationOptions options;
            options.m_EnableGpuProfiling  = m_EnableProfiling;
            options.m_DynamicBackendsPath = m_DynamicBackendsPath;
            m_Runtime = std::move(armnn::IRuntime::Create(options));
        }

        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
        }

        armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);

        armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
        {
            ARMNN_SCOPED_HEAP_PROFILING("Optimizing");

            armnn::OptimizerOptions options;
            options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
            options.m_Debug            = params.m_PrintIntermediateLayers;

            optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
            if (!optNet)
            {
                throw armnn::Exception("Optimize returned nullptr");
            }
        }

        if (params.m_VisualizePostOptimizationModel)
        {
            boost::filesystem::path filename = params.m_ModelPath;
            filename.replace_extension("dot");
            std::fstream file(filename.c_str(), std::ios_base::out);
            optNet->SerializeToDot(file);
        }

        armnn::Status ret;
        {
            ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
            ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet));
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::LoadNetwork failed");
        }
    }
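    // Construction sketch (hypothetical parser choice and params; throws
    // armnn::Exception on invalid backends or optimize/load failure):
    //
    //     using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;
    //     Model model(params, /*enableProfiling=*/false, /*dynamicBackendsPath=*/"");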
    void CheckInputIndexIsValid(unsigned int inputIndex) const
    {
        if (m_InputBindings.size() < inputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
        }
    }

    void CheckOutputIndexIsValid(unsigned int outputIndex) const
    {
        if (m_OutputBindings.size() < outputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
        }
    }

    unsigned int GetInputSize(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex].second.GetNumElements();
    }

    unsigned int GetOutputSize(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex].second.GetNumElements();
    }
    std::chrono::duration<double, std::milli> Run(
            const std::vector<TContainer>& inputContainers,
            std::vector<TContainer>& outputContainers)
    {
        // Check that each output container is large enough for its binding.
        for (unsigned int i = 0; i < outputContainers.size(); ++i)
        {
            const unsigned int expectedOutputDataSize = GetOutputSize(i);

            boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
            {
                const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(value.size());
                if (actualOutputDataSize < expectedOutputDataSize)
                {
                    unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
                    throw armnn::Exception(
                        boost::str(boost::format("Not enough data for output #%1%: expected "
                        "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
                }
            },
            outputContainers[i]);
        }

        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
        if (profiler)
        {
            profiler->EnableProfiling(m_EnableProfiling);
        }

        // Start timer to record inference time in EnqueueWorkload (in milliseconds)
        const auto start_time = GetCurrentTime();

        armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
                                                       MakeInputTensors(inputContainers),
                                                       MakeOutputTensors(outputContainers));

        const auto end_time = GetCurrentTime();

        // If profiling is enabled, print out the results
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::EnqueueWorkload failed");
        }

        return std::chrono::duration<double, std::milli>(end_time - start_time);
    }
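    // Run sketch (hypothetical single-input, single-output model; reuses the Model
    // alias from the construction sketch above). Each output container must already
    // be sized to at least GetOutputSize(i) elements, or Run throws.
    //
    //     std::vector<Model::TContainer> inputs  = { std::vector<float>(model.GetInputSize()) };
    //     std::vector<Model::TContainer> outputs = { std::vector<float>(model.GetOutputSize()) };
    //     auto elapsedMs = model.Run(inputs, outputs);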
    const armnn::BindingPointInfo& GetInputBindingInfo(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetInputBindingInfos() const
    {
        return m_InputBindings;
    }

    const armnn::BindingPointInfo& GetOutputBindingInfo(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetOutputBindingInfos() const
    {
        return m_OutputBindings;
    }

    QuantizationParams GetQuantizationParams(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
                              m_OutputBindings[outputIndex].second.GetQuantizationOffset());
    }

    QuantizationParams GetInputQuantizationParams(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return std::make_pair(m_InputBindings[inputIndex].second.GetQuantizationScale(),
                              m_InputBindings[inputIndex].second.GetQuantizationOffset());
    }

    std::vector<QuantizationParams> GetAllQuantizationParams() const
    {
        std::vector<QuantizationParams> quantizationParams;
        for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
        {
            quantizationParams.push_back(GetQuantizationParams(i));
        }
        return quantizationParams;
    }
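    // Dequantization sketch (q is a hypothetical quantized output value): with the
    // (scale, offset) pair returned above, real = scale * (q - offset).
    //
    //     Model::QuantizationParams qp = model.GetQuantizationParams();
    //     float real = qp.first * (static_cast<int32_t>(q) - qp.second);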
private:
    armnn::NetworkId m_NetworkIdentifier;
    std::shared_ptr<armnn::IRuntime> m_Runtime;

    std::vector<armnn::BindingPointInfo> m_InputBindings;
    std::vector<armnn::BindingPointInfo> m_OutputBindings;
    bool m_EnableProfiling;
    std::string m_DynamicBackendsPath;

    template<typename TContainer>
    armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
    {
        return armnnUtils::MakeInputTensors(m_InputBindings, inputDataContainers);
    }

    template<typename TContainer>
    armnn::OutputTensors MakeOutputTensors(std::vector<TContainer>& outputDataContainers)
    {
        return armnnUtils::MakeOutputTensors(m_OutputBindings, outputDataContainers);
    }

    std::chrono::high_resolution_clock::time_point GetCurrentTime()
    {
        return std::chrono::high_resolution_clock::now();
    }

    std::chrono::duration<double, std::milli> GetTimeDuration(
            std::chrono::high_resolution_clock::time_point& start_time,
            std::chrono::high_resolution_clock::time_point& end_time)
    {
        return std::chrono::duration<double, std::milli>(end_time - start_time);
    }
};
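// End-to-end usage sketch (all names and paths hypothetical; assumes the TfLite
// parser was built in, i.e. ARMNN_TF_LITE_PARSER is defined):
//
//     using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;
//     Model::Params params;
//     params.m_ModelPath      = "mobilenet_v1.tflite";
//     params.m_InputBindings  = { "input" };
//     params.m_OutputBindings = { "MobilenetV1/Predictions/Reshape_1" };
//     params.m_ComputeDevices = { "CpuAcc", "CpuRef" };
//
//     Model model(params, /*enableProfiling=*/false, /*dynamicBackendsPath=*/"");
//     std::vector<Model::TContainer> in  = { std::vector<float>(model.GetInputSize()) };
//     std::vector<Model::TContainer> out = { std::vector<float>(model.GetOutputSize()) };
//     std::cout << "Inference took " << model.Run(in, out).count() << " ms\n";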