//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/utility/Assert.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#endif
#if defined(ARMNN_ONNX_PARSER)
#include <armnnOnnxParser/IOnnxParser.hpp>
#endif

#include <HeapProfiling.hpp>
#include <TensorIOUtils.hpp>

#include <boost/algorithm/string/join.hpp>
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
#include <boost/variant.hpp>

#include <algorithm>
#include <chrono>
#include <fstream>
#include <iostream>
#include <iterator>
#include <map>
#include <string>
#include <type_traits>
#include <vector>

namespace
{
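
// Returns true if every requested backend ID is registered with the BackendRegistry.
// When an Optional string reference is supplied, the unknown IDs are appended to it
// as a comma-separated list so the caller can report them.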
inline bool CheckRequestedBackendsAreValid(const std::vector<armnn::BackendId>& backendIds,
                                           armnn::Optional<std::string&> invalidBackendIds = armnn::EmptyOptional())
{
    if (backendIds.empty())
    {
        return false;
    }

    armnn::BackendIdSet validBackendIds = armnn::BackendRegistryInstance().GetBackendIds();

    bool allValid = true;
    for (const auto& backendId : backendIds)
    {
        if (std::find(validBackendIds.begin(), validBackendIds.end(), backendId) == validBackendIds.end())
        {
            allValid = false;
            if (invalidBackendIds)
            {
                if (!invalidBackendIds.value().empty())
                {
                    invalidBackendIds.value() += ", ";
                }
                invalidBackendIds.value() += backendId;
            }
        }
    }
    return allValid;
}

} // anonymous namespace

namespace InferenceModelInternal
{
using BindingPointInfo = armnn::BindingPointInfo;

using QuantizationParams = std::pair<float,int32_t>;

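// Aggregates everything needed to parse, optimize and load a model: the file
// location, the names and shapes of the input/output bindings, the target
// backends and the optimization flags.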
struct Params
{
    std::string                     m_ModelPath;
    std::vector<std::string>        m_InputBindings;
    std::vector<armnn::TensorShape> m_InputShapes;
    std::vector<std::string>        m_OutputBindings;
    std::vector<armnn::BackendId>   m_ComputeDevices;
    std::string                     m_DynamicBackendsPath;
    size_t                          m_SubgraphId;
    bool                            m_IsModelBinary;
    bool                            m_VisualizePostOptimizationModel;
    bool                            m_EnableFp16TurboMode;
    bool                            m_EnableBf16TurboMode;
    bool                            m_PrintIntermediateLayers;
    bool                            m_ParseUnsupported;

    Params()
        : m_SubgraphId(0)
        , m_IsModelBinary(true)
        , m_VisualizePostOptimizationModel(false)
        , m_EnableFp16TurboMode(false)
        , m_EnableBf16TurboMode(false)
        , m_PrintIntermediateLayers(false)
        , m_ParseUnsupported(false)
    {}
};

} // namespace InferenceModelInternal

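// Generic model-loading path, used by parsers whose CreateNetworkFrom*File
// functions take explicit input shapes and requested outputs. It parses the
// model file and records the binding info the parser reports for each named
// input and output layer.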
template <typename IParser>
struct CreateNetworkImpl
{
public:
    using Params = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        std::map<std::string, armnn::TensorShape> inputShapes;
        if (!params.m_InputShapes.empty())
        {
            const size_t numInputShapes   = params.m_InputShapes.size();
            const size_t numInputBindings = params.m_InputBindings.size();
            if (numInputShapes < numInputBindings)
            {
                throw armnn::Exception(boost::str(boost::format(
                    "Not every input has its tensor shape specified: expected=%1%, got=%2%")
                    % numInputBindings % numInputShapes));
            }

            for (size_t i = 0; i < numInputShapes; i++)
            {
                inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i];
            }
        }

        std::vector<std::string> requestedOutputs = params.m_OutputBindings;
        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            // Handle text and binary input differently by calling the corresponding parser function
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) :
                parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            inputBindings.push_back(parser->GetNetworkInputBindingInfo(inputLayerName));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            outputBindings.push_back(parser->GetNetworkOutputBindingInfo(outputLayerName));
        }

        return network;
    }
};

#if defined(ARMNN_SERIALIZER)
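// Specialization for networks saved with the Arm NN serializer: the input is
// always a binary file, and bindings are looked up per (subgraph, layer name).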
template <>
struct CreateNetworkImpl<armnnDeserializer::IDeserializer>
{
public:
    using IParser = armnnDeserializer::IDeserializer;
    using Params  = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        auto parser(IParser::Create());
        ARMNN_ASSERT(parser);

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");

            boost::system::error_code errorCode;
            boost::filesystem::path pathToFile(params.m_ModelPath);
            if (!boost::filesystem::exists(pathToFile, errorCode))
            {
                throw armnn::FileNotFoundException(boost::str(
                    boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
                    params.m_ModelPath %
                    errorCode %
                    CHECK_LOCATION().AsString()));
            }
            std::ifstream file(params.m_ModelPath, std::ios::binary);

            network = parser->CreateNetworkFromBinary(file);
        }

        unsigned int subgraphId = boost::numeric_cast<unsigned int>(params.m_SubgraphId);

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnnDeserializer::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(subgraphId, inputLayerName);
            inputBindings.push_back(std::make_pair(inputBinding.m_BindingId, inputBinding.m_TensorInfo));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnnDeserializer::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(subgraphId, outputLayerName);
            outputBindings.push_back(std::make_pair(outputBinding.m_BindingId, outputBinding.m_TensorInfo));
        }

        return network;
    }
};
#endif

#if defined(ARMNN_TF_LITE_PARSER)
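// Specialization for TensorFlow Lite: .tflite files are always binary
// flatbuffers, and unsupported operators can optionally be replaced by
// stand-in layers instead of failing the parse.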
template <>
struct CreateNetworkImpl<armnnTfLiteParser::ITfLiteParser>
{
public:
    using IParser = armnnTfLiteParser::ITfLiteParser;
    using Params  = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        IParser::TfLiteParserOptions options;
        options.m_StandInLayerForUnsupported = params.m_ParseUnsupported;
        auto parser(IParser::Create(options));

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnn::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(params.m_SubgraphId, inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnn::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif

#if defined(ARMNN_ONNX_PARSER)
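// Specialization for ONNX: models may be text or binary protobufs, and the
// parser resolves bindings by layer name alone, with no subgraph ID.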
template <>
struct CreateNetworkImpl<armnnOnnxParser::IOnnxParser>
{
public:
    using IParser          = armnnOnnxParser::IOnnxParser;
    using Params           = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str()) :
                parser->CreateNetworkFromTextFile(modelPath.c_str()));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif

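// Owns the runtime, the parsed and optimized network, and its I/O bindings,
// and exposes a Run() call that measures EnqueueWorkload time.
//
// Typical usage (illustrative sketch only; the model path and binding names
// below are hypothetical):
//
//     using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;
//     Model::Params params;
//     params.m_ModelPath      = "mobilenet.tflite";
//     params.m_InputBindings  = { "input" };
//     params.m_OutputBindings = { "output" };
//     params.m_ComputeDevices = { "CpuAcc" };
//     Model model(params, /*enableProfiling=*/false, /*dynamicBackendsPath=*/"");
//     std::vector<Model::TContainer> inputs  = { std::vector<float>(model.GetInputSize()) };
//     std::vector<Model::TContainer> outputs = { std::vector<float>(model.GetOutputSize()) };
//     auto duration = model.Run(inputs, outputs);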
template <typename IParser, typename TDataType>
class InferenceModel
{
public:
    using DataType           = TDataType;
    using Params             = InferenceModelInternal::Params;
    using QuantizationParams = InferenceModelInternal::QuantizationParams;
    using TContainer         = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    struct CommandLineOptions
    {
        std::string m_ModelDir;
        std::vector<std::string> m_ComputeDevices;
        std::string m_DynamicBackendsPath;
        bool m_VisualizePostOptimizationModel;
        bool m_EnableFp16TurboMode;
        bool m_EnableBf16TurboMode;
        std::string m_Labels;

        std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
        {
            std::vector<armnn::BackendId> backendIds;
            std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
            return backendIds;
        }
    };

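    // Registers the command line switches that populate CommandLineOptions.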
    static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
    {
        namespace po = boost::program_options;

        const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };

        const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
                                          + armnn::BackendRegistryInstance().GetBackendIdsAsString();

        desc.add_options()
            ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
             "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
            ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
             default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
             multitoken(), backendsMessage.c_str())
            ("dynamic-backends-path,b", po::value(&options.m_DynamicBackendsPath),
             "Path where to load any available dynamic backend from. "
             "If left empty (the default), dynamic backends will not be used.")
            ("labels,l", po::value<std::string>(&options.m_Labels),
             "Text file containing one image filename - correct label pair per line, "
             "used to test the accuracy of the network.")
            ("visualize-optimized-model,v",
             po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
             "Produce a dot file useful for visualizing the graph post optimization. "
             "The file will have the same name as the model with the .dot extension.")
            ("fp16-turbo-mode", po::value<bool>(&options.m_EnableFp16TurboMode)->default_value(false),
             "If this option is enabled, FP32 layers, weights and biases will be converted "
             "to FP16 where the backend supports it.")
            ("bf16-turbo-mode", po::value<bool>(&options.m_EnableBf16TurboMode)->default_value(false),
             "If this option is enabled, FP32 layers, weights and biases will be converted "
             "to BF16 where the backend supports it.");
    }

    InferenceModel(const Params& params,
                   bool enableProfiling,
                   const std::string& dynamicBackendsPath,
                   const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
        : m_EnableProfiling(enableProfiling)
        , m_DynamicBackendsPath(dynamicBackendsPath)
    {
        if (runtime)
        {
            m_Runtime = runtime;
        }
        else
        {
            armnn::IRuntime::CreationOptions options;
            options.m_EnableGpuProfiling  = m_EnableProfiling;
            options.m_DynamicBackendsPath = m_DynamicBackendsPath;
            m_Runtime = std::move(armnn::IRuntime::Create(options));
        }

        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
        }

        armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);

        armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
        {
            ARMNN_SCOPED_HEAP_PROFILING("Optimizing");

            armnn::OptimizerOptions options;
            options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
            options.m_ReduceFp32ToBf16 = params.m_EnableBf16TurboMode;
            options.m_Debug            = params.m_PrintIntermediateLayers;

            optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
            if (!optNet)
            {
                throw armnn::Exception("Optimize returned nullptr");
            }
        }

        if (params.m_VisualizePostOptimizationModel)
        {
            boost::filesystem::path filename = params.m_ModelPath;
            filename.replace_extension("dot");
            std::fstream file(filename.c_str(), std::ios_base::out);
            optNet->SerializeToDot(file);
        }

        armnn::Status ret;
        {
            ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
            ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet));
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::LoadNetwork failed");
        }
    }

    void CheckInputIndexIsValid(unsigned int inputIndex) const
    {
        if (m_InputBindings.size() < inputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
        }
    }

    void CheckOutputIndexIsValid(unsigned int outputIndex) const
    {
        if (m_OutputBindings.size() < outputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
        }
    }

    unsigned int GetInputSize(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex].second.GetNumElements();
    }

    unsigned int GetOutputSize(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex].second.GetNumElements();
    }

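    // Runs one inference and returns the wall-clock duration of EnqueueWorkload.
    // Every output container must already be sized to hold its output tensor.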
    std::chrono::duration<double, std::milli> Run(
            const std::vector<TContainer>& inputContainers,
            std::vector<TContainer>& outputContainers)
    {
        for (unsigned int i = 0; i < outputContainers.size(); ++i)
        {
            const unsigned int expectedOutputDataSize = GetOutputSize(i);

            boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
            {
                const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(value.size());
                if (actualOutputDataSize < expectedOutputDataSize)
                {
                    unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
                    throw armnn::Exception(
                        boost::str(boost::format("Not enough data for output #%1%: expected "
                        "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
                }
            },
            outputContainers[i]);
        }

        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
        if (profiler)
        {
            profiler->EnableProfiling(m_EnableProfiling);
        }

        // Start timer to record inference time in EnqueueWorkload (in milliseconds)
        const auto start_time = GetCurrentTime();

        armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
                                                       MakeInputTensors(inputContainers),
                                                       MakeOutputTensors(outputContainers));

        const auto end_time = GetCurrentTime();

        // If profiling is enabled, print out the results
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::EnqueueWorkload failed");
        }

        return std::chrono::duration<double, std::milli>(end_time - start_time);
    }

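    // Accessors for the binding metadata and quantization parameters recorded
    // at parse time.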
    const armnn::BindingPointInfo& GetInputBindingInfo(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetInputBindingInfos() const
    {
        return m_InputBindings;
    }

    const armnn::BindingPointInfo& GetOutputBindingInfo(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetOutputBindingInfos() const
    {
        return m_OutputBindings;
    }

    QuantizationParams GetQuantizationParams(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
                              m_OutputBindings[outputIndex].second.GetQuantizationOffset());
    }

    QuantizationParams GetInputQuantizationParams(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return std::make_pair(m_InputBindings[inputIndex].second.GetQuantizationScale(),
                              m_InputBindings[inputIndex].second.GetQuantizationOffset());
    }

    std::vector<QuantizationParams> GetAllQuantizationParams() const
    {
        std::vector<QuantizationParams> quantizationParams;
        for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
        {
            quantizationParams.push_back(GetQuantizationParams(i));
        }
        return quantizationParams;
    }

private:
    armnn::NetworkId m_NetworkIdentifier;
    std::shared_ptr<armnn::IRuntime> m_Runtime;

    std::vector<armnn::BindingPointInfo> m_InputBindings;
    std::vector<armnn::BindingPointInfo> m_OutputBindings;
    bool m_EnableProfiling;
    std::string m_DynamicBackendsPath;

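    // Pairs each user-supplied container with its binding info to build the
    // InputTensors/OutputTensors lists that EnqueueWorkload expects.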
    template<typename TContainer>
    armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
    {
        return armnnUtils::MakeInputTensors(m_InputBindings, inputDataContainers);
    }

    template<typename TContainer>
    armnn::OutputTensors MakeOutputTensors(std::vector<TContainer>& outputDataContainers)
    {
        return armnnUtils::MakeOutputTensors(m_OutputBindings, outputDataContainers);
    }

    std::chrono::high_resolution_clock::time_point GetCurrentTime()
    {
        return std::chrono::high_resolution_clock::now();
    }

    std::chrono::duration<double, std::milli> GetTimeDuration(
            std::chrono::high_resolution_clock::time_point& start_time,
            std::chrono::high_resolution_clock::time_point& end_time)
    {
        return std::chrono::duration<double, std::milli>(end_time - start_time);
    }
};