//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once
#include <armnn/ArmNN.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Timer.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/utility/Assert.hpp>
#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#endif
#if defined(ARMNN_ONNX_PARSER)
#include <armnnOnnxParser/IOnnxParser.hpp>
#endif
#include <Filesystem.hpp>
#include <HeapProfiling.hpp>
#include <TensorIOUtils.hpp>

#include "armnn/utility/StringUtils.hpp"
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/program_options.hpp>
#include <boost/variant.hpp>

#include <algorithm>
#include <chrono>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <iterator>
#include <map>
#include <string>
#include <type_traits>
#include <vector>
namespace
{

inline bool CheckRequestedBackendsAreValid(const std::vector<armnn::BackendId>& backendIds,
                                           armnn::Optional<std::string&> invalidBackendIds = armnn::EmptyOptional())
{
    if (backendIds.empty())
    {
        return false;
    }

    armnn::BackendIdSet validBackendIds = armnn::BackendRegistryInstance().GetBackendIds();

    bool allValid = true;
    for (const auto& backendId : backendIds)
    {
        // A backend ID is valid only if it is registered with the backend registry
        if (std::find(validBackendIds.begin(), validBackendIds.end(), backendId) == validBackendIds.end())
        {
            allValid = false;
            if (invalidBackendIds)
            {
                if (!invalidBackendIds.value().empty())
                {
                    invalidBackendIds.value() += ", ";
                }
                invalidBackendIds.value() += backendId;
            }
        }
    }
    return allValid;
}

} // anonymous namespace
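
// Illustrative sketch (not part of the original file): a caller might validate
// user-supplied backends like this, collecting the invalid IDs for the error message:
//
//     std::string invalidBackends;
//     if (!CheckRequestedBackendsAreValid({"CpuAcc", "GpuAcc"},
//                                         armnn::Optional<std::string&>(invalidBackends)))
//     {
//         ARMNN_LOG(warning) << "Unknown backend IDs: " << invalidBackends;
//     }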
namespace InferenceModelInternal
{
using BindingPointInfo = armnn::BindingPointInfo;

using QuantizationParams = std::pair<float, int32_t>;

struct Params
{
    std::string                     m_ModelPath;
    std::vector<std::string>        m_InputBindings;
    std::vector<armnn::TensorShape> m_InputShapes;
    std::vector<std::string>        m_OutputBindings;
    std::vector<armnn::BackendId>   m_ComputeDevices;
    std::string                     m_DynamicBackendsPath;
    size_t                          m_SubgraphId;
    bool                            m_IsModelBinary;
    bool                            m_VisualizePostOptimizationModel;
    bool                            m_EnableFp16TurboMode;
    bool                            m_EnableBf16TurboMode;
    bool                            m_PrintIntermediateLayers;
    bool                            m_ParseUnsupported;

    Params()
        : m_ComputeDevices{}
        , m_SubgraphId(0)
        , m_IsModelBinary(true)
        , m_VisualizePostOptimizationModel(false)
        , m_EnableFp16TurboMode(false)
        , m_EnableBf16TurboMode(false)
        , m_PrintIntermediateLayers(false)
        , m_ParseUnsupported(false)
    {}
};

} // namespace InferenceModelInternal
template <typename IParser>
struct CreateNetworkImpl
{
public:
    using Params = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        std::map<std::string, armnn::TensorShape> inputShapes;
        if (!params.m_InputShapes.empty())
        {
            const size_t numInputShapes   = params.m_InputShapes.size();
            const size_t numInputBindings = params.m_InputBindings.size();
            if (numInputShapes < numInputBindings)
            {
                throw armnn::Exception(boost::str(boost::format(
                    "Not every input has its tensor shape specified: expected=%1%, got=%2%")
                    % numInputBindings % numInputShapes));
            }

            for (size_t i = 0; i < numInputShapes; i++)
            {
                inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i];
            }
        }

        std::vector<std::string> requestedOutputs = params.m_OutputBindings;
        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            // Handle text and binary input differently by calling the corresponding parser function
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) :
                parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            inputBindings.push_back(parser->GetNetworkInputBindingInfo(inputLayerName));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            outputBindings.push_back(parser->GetNetworkOutputBindingInfo(outputLayerName));
        }

        return network;
    }
};
#if defined(ARMNN_SERIALIZER)
template <>
struct CreateNetworkImpl<armnnDeserializer::IDeserializer>
{
public:
    using IParser = armnnDeserializer::IDeserializer;
    using Params  = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        auto parser(IParser::Create());
        ARMNN_ASSERT(parser);

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");

            std::error_code errorCode;
            fs::path pathToFile(params.m_ModelPath);
            if (!fs::exists(pathToFile, errorCode))
            {
                throw armnn::FileNotFoundException(boost::str(
                    boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
                    params.m_ModelPath %
                    errorCode %
                    CHECK_LOCATION().AsString()));
            }
            std::ifstream file(params.m_ModelPath, std::ios::binary);

            network = parser->CreateNetworkFromBinary(file);
        }

        unsigned int subgraphId = boost::numeric_cast<unsigned int>(params.m_SubgraphId);

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnnDeserializer::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(subgraphId, inputLayerName);
            inputBindings.push_back(std::make_pair(inputBinding.m_BindingId, inputBinding.m_TensorInfo));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnnDeserializer::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(subgraphId, outputLayerName);
            outputBindings.push_back(std::make_pair(outputBinding.m_BindingId, outputBinding.m_TensorInfo));
        }

        return network;
    }
};
#endif
#if defined(ARMNN_TF_LITE_PARSER)
template <>
struct CreateNetworkImpl<armnnTfLiteParser::ITfLiteParser>
{
public:
    using IParser = armnnTfLiteParser::ITfLiteParser;
    using Params  = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        IParser::TfLiteParserOptions options;
        options.m_StandInLayerForUnsupported = params.m_ParseUnsupported;
        auto parser(IParser::Create(options));

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnn::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(params.m_SubgraphId, inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnn::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif
#if defined(ARMNN_ONNX_PARSER)
template <>
struct CreateNetworkImpl<armnnOnnxParser::IOnnxParser>
{
public:
    using IParser          = armnnOnnxParser::IOnnxParser;
    using Params           = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str()) :
                parser->CreateNetworkFromTextFile(modelPath.c_str()));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif
template <typename IParser, typename TDataType>
class InferenceModel
{
public:
    using DataType           = TDataType;
    using Params             = InferenceModelInternal::Params;
    using QuantizationParams = InferenceModelInternal::QuantizationParams;
    using TContainer         = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    struct CommandLineOptions
    {
        std::string m_ModelDir;
        std::vector<std::string> m_ComputeDevices;
        std::string m_DynamicBackendsPath;
        bool m_VisualizePostOptimizationModel;
        bool m_EnableFp16TurboMode;
        bool m_EnableBf16TurboMode;
        std::string m_Labels;

        std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
        {
            std::vector<armnn::BackendId> backendIds;
            std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
            return backendIds;
        }
    };
    static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
    {
        namespace po = boost::program_options;

        const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };

        const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
                                          + armnn::BackendRegistryInstance().GetBackendIdsAsString();

        desc.add_options()
            ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
             "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
            ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
             default_value(defaultComputes, armnn::stringUtils::StringConcat(defaultComputes, ", "))->
             multitoken(), backendsMessage.c_str())
            ("dynamic-backends-path,b", po::value(&options.m_DynamicBackendsPath),
             "Path where to load any available dynamic backend from. "
             "If left empty (the default), dynamic backends will not be used.")
            ("labels,l", po::value<std::string>(&options.m_Labels),
             "Text file containing one image filename - correct label pair per line, "
             "used to test the accuracy of the network.")
            ("visualize-optimized-model,v",
             po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
             "Produce a dot file useful for visualizing the graph post optimization. "
             "The file will have the same name as the model with the .dot extension.")
            ("fp16-turbo-mode", po::value<bool>(&options.m_EnableFp16TurboMode)->default_value(false),
             "If this option is enabled, FP32 layers, weights and biases will be converted "
             "to FP16 where the backend supports it.")
            ("bf16-turbo-mode", po::value<bool>(&options.m_EnableBf16TurboMode)->default_value(false),
             "If this option is enabled, FP32 layers, weights and biases will be converted "
             "to BF16 where the backend supports it.");
    }
    InferenceModel(const Params& params,
                   bool enableProfiling,
                   const std::string& dynamicBackendsPath,
                   const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
        : m_EnableProfiling(enableProfiling)
        , m_DynamicBackendsPath(dynamicBackendsPath)
    {
        // Reuse the caller's runtime if one was supplied, otherwise create our own
        if (runtime)
        {
            m_Runtime = runtime;
        }
        else
        {
            armnn::IRuntime::CreationOptions options;
            options.m_EnableGpuProfiling  = m_EnableProfiling;
            options.m_DynamicBackendsPath = m_DynamicBackendsPath;
            m_Runtime = armnn::IRuntime::Create(options);
        }

        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
        }

        const auto parsing_start_time = armnn::GetTimeNow();
        armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);

        ARMNN_LOG(info) << "Network parsing time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(parsing_start_time).count() << " ms\n";

        armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
        {
            ARMNN_SCOPED_HEAP_PROFILING("Optimizing");

            armnn::OptimizerOptions options;
            options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
            options.m_ReduceFp32ToBf16 = params.m_EnableBf16TurboMode;
            options.m_Debug            = params.m_PrintIntermediateLayers;

            const auto optimization_start_time = armnn::GetTimeNow();
            optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);

            ARMNN_LOG(info) << "Optimization time: " << std::setprecision(2)
                            << std::fixed << armnn::GetTimeDuration(optimization_start_time).count() << " ms\n";

            if (!optNet)
            {
                throw armnn::Exception("Optimize returned nullptr");
            }
        }

        if (params.m_VisualizePostOptimizationModel)
        {
            fs::path filename = params.m_ModelPath;
            filename.replace_extension("dot");
            std::fstream file(filename.c_str(), std::ios_base::out);
            optNet->SerializeToDot(file);
        }

        armnn::Status ret;
        {
            ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
            ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet));
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::LoadNetwork failed");
        }
    }
    void CheckInputIndexIsValid(unsigned int inputIndex) const
    {
        if (m_InputBindings.size() < inputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
        }
    }

    void CheckOutputIndexIsValid(unsigned int outputIndex) const
    {
        if (m_OutputBindings.size() < outputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
        }
    }

    unsigned int GetInputSize(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex].second.GetNumElements();
    }

    unsigned int GetOutputSize(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex].second.GetNumElements();
    }
    std::chrono::duration<double, std::milli> Run(
            const std::vector<TContainer>& inputContainers,
            std::vector<TContainer>& outputContainers)
    {
        // Sanity-check that every output container is large enough for its tensor
        for (unsigned int i = 0; i < outputContainers.size(); ++i)
        {
            const unsigned int expectedOutputDataSize = GetOutputSize(i);

            boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
            {
                const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(value.size());
                if (actualOutputDataSize < expectedOutputDataSize)
                {
                    unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
                    throw armnn::Exception(
                        boost::str(boost::format("Not enough data for output #%1%: expected "
                        "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
                }
            },
            outputContainers[i]);
        }

        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
        if (profiler)
        {
            profiler->EnableProfiling(m_EnableProfiling);
        }

        // Start timer to record inference time in EnqueueWorkload (in milliseconds)
        const auto start_time = armnn::GetTimeNow();

        armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
                                                       MakeInputTensors(inputContainers),
                                                       MakeOutputTensors(outputContainers));
        const auto duration = armnn::GetTimeDuration(start_time);

        // If profiling is enabled, print out the results
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::EnqueueWorkload failed");
        }

        return duration;
    }
    const armnn::BindingPointInfo& GetInputBindingInfo(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetInputBindingInfos() const
    {
        return m_InputBindings;
    }

    const armnn::BindingPointInfo& GetOutputBindingInfo(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetOutputBindingInfos() const
    {
        return m_OutputBindings;
    }

    QuantizationParams GetQuantizationParams(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
                              m_OutputBindings[outputIndex].second.GetQuantizationOffset());
    }

    QuantizationParams GetInputQuantizationParams(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return std::make_pair(m_InputBindings[inputIndex].second.GetQuantizationScale(),
                              m_InputBindings[inputIndex].second.GetQuantizationOffset());
    }

    std::vector<QuantizationParams> GetAllQuantizationParams() const
    {
        std::vector<QuantizationParams> quantizationParams;
        for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
        {
            quantizationParams.push_back(GetQuantizationParams(i));
        }
        return quantizationParams;
    }
private:
    armnn::NetworkId m_NetworkIdentifier;
    std::shared_ptr<armnn::IRuntime> m_Runtime;

    std::vector<armnn::BindingPointInfo> m_InputBindings;
    std::vector<armnn::BindingPointInfo> m_OutputBindings;
    bool m_EnableProfiling;
    std::string m_DynamicBackendsPath;

    template<typename TContainer>
    armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
    {
        return armnnUtils::MakeInputTensors(m_InputBindings, inputDataContainers);
    }

    template<typename TContainer>
    armnn::OutputTensors MakeOutputTensors(std::vector<TContainer>& outputDataContainers)
    {
        return armnnUtils::MakeOutputTensors(m_OutputBindings, outputDataContainers);
    }
};
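
// Illustrative sketch (not part of the original file): putting the pieces together,
// a TfLite model might be loaded and run as follows. The binding names "input" and
// "output" are placeholders and must match the actual model.
//
//     using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;
//     Model::Params params;
//     params.m_ModelPath      = "model.tflite";
//     params.m_InputBindings  = { "input" };
//     params.m_OutputBindings = { "output" };
//     params.m_ComputeDevices = { "CpuAcc", "CpuRef" };
//
//     Model model(params, /*enableProfiling=*/false, /*dynamicBackendsPath=*/"");
//     std::vector<Model::TContainer> inputs  = { std::vector<float>(model.GetInputSize()) };
//     std::vector<Model::TContainer> outputs = { std::vector<float>(model.GetOutputSize()) };
//     auto inferenceTimeMs = model.Run(inputs, outputs);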