//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#endif
#if defined(ARMNN_ONNX_PARSER)
#include <armnnOnnxParser/IOnnxParser.hpp>
#endif

#include <HeapProfiling.hpp>
#include <TensorIOUtils.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/algorithm/string/join.hpp>
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/log/trivial.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/variant.hpp>

#include <algorithm>
#include <chrono>
#include <fstream>
#include <iostream>
#include <iterator>
#include <map>
#include <string>
#include <type_traits>
#include <vector>

namespace
{
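
// Returns true only when every requested backend ID names a backend that is
// registered with the BackendRegistry. Each invalid ID found is appended,
// comma-separated, to the optional invalidBackendIds string for error reporting.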
inline bool CheckRequestedBackendsAreValid(const std::vector<armnn::BackendId>& backendIds,
                                           armnn::Optional<std::string&> invalidBackendIds = armnn::EmptyOptional())
{
    if (backendIds.empty())
    {
        return false;
    }

    armnn::BackendIdSet validBackendIds = armnn::BackendRegistryInstance().GetBackendIds();

    bool allValid = true;
    for (const auto& backendId : backendIds)
    {
        if (std::find(validBackendIds.begin(), validBackendIds.end(), backendId) == validBackendIds.end())
        {
            allValid = false;
            if (invalidBackendIds)
            {
                if (!invalidBackendIds.value().empty())
                {
                    invalidBackendIds.value() += ", ";
                }
                invalidBackendIds.value() += backendId;
            }
        }
    }
    return allValid;
}

} // anonymous namespace
namespace InferenceModelInternal
{
using BindingPointInfo = armnn::BindingPointInfo;

using QuantizationParams = std::pair<float, int32_t>;
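
// Parameters shared by all of the parser-specific network-creation paths below.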
struct Params
{
    std::string                     m_ModelPath;
    std::vector<std::string>        m_InputBindings;
    std::vector<armnn::TensorShape> m_InputShapes;
    std::vector<std::string>        m_OutputBindings;
    std::vector<armnn::BackendId>   m_ComputeDevices;
    std::string                     m_DynamicBackendsPath;
    size_t                          m_SubgraphId;
    bool                            m_IsModelBinary;
    bool                            m_VisualizePostOptimizationModel;
    bool                            m_EnableFp16TurboMode;
    bool                            m_PrintIntermediateLayers;

    Params()
        : m_ComputeDevices{}
        , m_SubgraphId(0)
        , m_IsModelBinary(true)
        , m_VisualizePostOptimizationModel(false)
        , m_EnableFp16TurboMode(false)
        , m_PrintIntermediateLayers(false)
    {}
};

} // namespace InferenceModelInternal
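
// Parses a model file with the given IParser and collects the requested
// input/output binding information. This generic implementation assumes a
// parser that distinguishes text and binary model files; format-specific
// specializations follow.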
template <typename IParser>
struct CreateNetworkImpl
{
public:
    using Params = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk.
        auto parser(IParser::Create());

        std::map<std::string, armnn::TensorShape> inputShapes;
        if (!params.m_InputShapes.empty())
        {
            const size_t numInputShapes   = params.m_InputShapes.size();
            const size_t numInputBindings = params.m_InputBindings.size();
            if (numInputShapes < numInputBindings)
            {
                throw armnn::Exception(boost::str(boost::format(
                    "Not every input has its tensor shape specified: expected=%1%, got=%2%")
                    % numInputBindings % numInputShapes));
            }

            for (size_t i = 0; i < numInputShapes; i++)
            {
                inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i];
            }
        }

        std::vector<std::string> requestedOutputs = params.m_OutputBindings;
        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            // Handle text and binary input differently by calling the corresponding parser function.
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) :
                parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            inputBindings.push_back(parser->GetNetworkInputBindingInfo(inputLayerName));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            outputBindings.push_back(parser->GetNetworkOutputBindingInfo(outputLayerName));
        }

        return network;
    }
};
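
// Specialization for ArmNN's own serialized file format, read back through the
// deserializer. Binding lookups here are resolved per subgraph.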
#if defined(ARMNN_SERIALIZER)
template <>
struct CreateNetworkImpl<armnnDeserializer::IDeserializer>
{
public:
    using IParser = armnnDeserializer::IDeserializer;
    using Params  = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        auto parser(IParser::Create());
        BOOST_ASSERT(parser);

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");

            boost::system::error_code errorCode;
            boost::filesystem::path pathToFile(params.m_ModelPath);
            if (!boost::filesystem::exists(pathToFile, errorCode))
            {
                throw armnn::FileNotFoundException(boost::str(
                    boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
                    params.m_ModelPath %
                    errorCode %
                    CHECK_LOCATION().AsString()));
            }
            std::ifstream file(params.m_ModelPath, std::ios::binary);

            network = parser->CreateNetworkFromBinary(file);
        }

        unsigned int subgraphId = boost::numeric_cast<unsigned int>(params.m_SubgraphId);

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnnDeserializer::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(subgraphId, inputLayerName);
            inputBindings.push_back(std::make_pair(inputBinding.m_BindingId, inputBinding.m_TensorInfo));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnnDeserializer::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(subgraphId, outputLayerName);
            outputBindings.push_back(std::make_pair(outputBinding.m_BindingId, outputBinding.m_TensorInfo));
        }

        return network;
    }
};
#endif
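
// Specialization for TensorFlow Lite models. The TfLite parser consumes binary
// flatbuffer files only, so there is no text-file code path here.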
#if defined(ARMNN_TF_LITE_PARSER)
template <>
struct CreateNetworkImpl<armnnTfLiteParser::ITfLiteParser>
{
public:
    using IParser = armnnTfLiteParser::ITfLiteParser;
    using Params  = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk.
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnn::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(params.m_SubgraphId, inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnn::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif
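
// Specialization for ONNX models. The ONNX parser accepts both binary and text
// protobuf files, selected by Params::m_IsModelBinary.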
#if defined(ARMNN_ONNX_PARSER)
template <>
struct CreateNetworkImpl<armnnOnnxParser::IOnnxParser>
{
public:
    using IParser          = armnnOnnxParser::IOnnxParser;
    using Params           = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk.
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork*){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str()) :
                parser->CreateNetworkFromTextFile(modelPath.c_str()));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif
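
// Wraps the full lifecycle of a network: parsing (via CreateNetworkImpl),
// backend validation, optimization, loading into an IRuntime, and execution.
// IParser selects the model format; TDataType is the element type the caller
// exchanges with the network.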
template <typename IParser, typename TDataType>
class InferenceModel
{
public:
    using DataType           = TDataType;
    using Params             = InferenceModelInternal::Params;
    using QuantizationParams = InferenceModelInternal::QuantizationParams;
    using TContainer         = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
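
    // Options usually populated from the command line; see AddCommandLineOptions().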
    struct CommandLineOptions
    {
        std::string m_ModelDir;
        std::vector<std::string> m_ComputeDevices;
        std::string m_DynamicBackendsPath;
        bool m_VisualizePostOptimizationModel;
        bool m_EnableFp16TurboMode;
        std::string m_Labels;

        std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
        {
            std::vector<armnn::BackendId> backendIds;
            std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
            return backendIds;
        }
    };
    static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
    {
        namespace po = boost::program_options;

        const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };

        const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
                                          + armnn::BackendRegistryInstance().GetBackendIdsAsString();

        desc.add_options()
            ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
                "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
            ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
                default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
                multitoken(), backendsMessage.c_str())
            ("dynamic-backends-path,b", po::value(&options.m_DynamicBackendsPath),
                "Path from which to load any available dynamic backends. "
                "If left empty (the default), dynamic backends will not be used.")
            ("labels,l", po::value<std::string>(&options.m_Labels),
                "Text file containing one image-filename/correct-label pair per line, "
                "used to test the accuracy of the network.")
            ("visualize-optimized-model,v",
                po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
                "Produce a dot file useful for visualizing the graph post optimization. "
                "The file will have the same name as the model with the .dot extension.")
            ("fp16-turbo-mode", po::value<bool>(&options.m_EnableFp16TurboMode)->default_value(false),
                "If this option is enabled, FP32 layers, weights and biases will be converted "
                "to FP16 where the backend supports it.");
    }
    InferenceModel(const Params& params,
                   bool enableProfiling,
                   const std::string& dynamicBackendsPath,
                   const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
        : m_EnableProfiling(enableProfiling)
        , m_DynamicBackendsPath(dynamicBackendsPath)
    {
        if (runtime)
        {
            m_Runtime = runtime;
        }
        else
        {
            armnn::IRuntime::CreationOptions options;
            options.m_EnableGpuProfiling  = m_EnableProfiling;
            options.m_DynamicBackendsPath = m_DynamicBackendsPath;
            m_Runtime = std::move(armnn::IRuntime::Create(options));
        }

        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
        }

        armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);

        armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
        {
            ARMNN_SCOPED_HEAP_PROFILING("Optimizing");

            armnn::OptimizerOptions options;
            options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
            options.m_Debug            = params.m_PrintIntermediateLayers;

            optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
            if (!optNet)
            {
                throw armnn::Exception("Optimize returned nullptr");
            }
        }

        if (params.m_VisualizePostOptimizationModel)
        {
            boost::filesystem::path filename = params.m_ModelPath;
            filename.replace_extension("dot");
            std::fstream file(filename.c_str(), std::ios_base::out);
            optNet->SerializeToDot(file);
        }

        armnn::Status ret;
        {
            ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
            ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet));
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::LoadNetwork failed");
        }
    }
    void CheckInputIndexIsValid(unsigned int inputIndex) const
    {
        if (m_InputBindings.size() < inputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
        }
    }

    void CheckOutputIndexIsValid(unsigned int outputIndex) const
    {
        if (m_OutputBindings.size() < outputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
        }
    }
    unsigned int GetOutputSize(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex].second.GetNumElements();
    }
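
    // Runs a single inference and returns the wall-clock duration of
    // EnqueueWorkload. Throws if any output container holds fewer elements
    // than the corresponding output tensor, or if execution fails.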
    std::chrono::duration<double, std::milli> Run(
            const std::vector<TContainer>& inputContainers,
            std::vector<TContainer>& outputContainers)
    {
        for (unsigned int i = 0; i < outputContainers.size(); ++i)
        {
            const unsigned int expectedOutputDataSize = GetOutputSize(i);

            boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
            {
                const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(value.size());
                if (actualOutputDataSize < expectedOutputDataSize)
                {
                    unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
                    throw armnn::Exception(
                        boost::str(boost::format("Not enough data for output #%1%: expected "
                        "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
                }
            },
            outputContainers[i]);
        }

        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
        if (profiler)
        {
            profiler->EnableProfiling(m_EnableProfiling);
        }

        // Start timer to record inference time of EnqueueWorkload (in milliseconds).
        const auto start_time = GetCurrentTime();

        armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
                                                       MakeInputTensors(inputContainers),
                                                       MakeOutputTensors(outputContainers));

        const auto end_time = GetCurrentTime();

        // If profiling is enabled, print out the results.
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::EnqueueWorkload failed");
        }

        return std::chrono::duration<double, std::milli>(end_time - start_time);
    }
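
    // A minimal usage sketch (the parser type, model path, binding names and
    // input element count below are illustrative assumptions):
    //
    //   using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;
    //   Model::Params params;
    //   params.m_ModelPath      = "model.tflite";
    //   params.m_InputBindings  = { "input" };
    //   params.m_OutputBindings = { "output" };
    //   params.m_ComputeDevices = { "CpuAcc", "CpuRef" };
    //   Model model(params, /*enableProfiling=*/false, /*dynamicBackendsPath=*/"");
    //
    //   std::vector<Model::TContainer> inputs  = { std::vector<float>(inputElementCount) };
    //   std::vector<Model::TContainer> outputs = { std::vector<float>(model.GetOutputSize()) };
    //   auto runDuration = model.Run(inputs, outputs); // duration in milliseconds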
    const armnn::BindingPointInfo& GetInputBindingInfo(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetInputBindingInfos() const
    {
        return m_InputBindings;
    }

    const armnn::BindingPointInfo& GetOutputBindingInfo(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetOutputBindingInfos() const
    {
        return m_OutputBindings;
    }

    QuantizationParams GetQuantizationParams(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
                              m_OutputBindings[outputIndex].second.GetQuantizationOffset());
    }

    QuantizationParams GetInputQuantizationParams(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return std::make_pair(m_InputBindings[inputIndex].second.GetQuantizationScale(),
                              m_InputBindings[inputIndex].second.GetQuantizationOffset());
    }

    std::vector<QuantizationParams> GetAllQuantizationParams() const
    {
        std::vector<QuantizationParams> quantizationParams;
        for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
        {
            quantizationParams.push_back(GetQuantizationParams(i));
        }
        return quantizationParams;
    }

private:
    armnn::NetworkId m_NetworkIdentifier;
    std::shared_ptr<armnn::IRuntime> m_Runtime;

    std::vector<armnn::BindingPointInfo> m_InputBindings;
    std::vector<armnn::BindingPointInfo> m_OutputBindings;
    bool m_EnableProfiling;
    std::string m_DynamicBackendsPath;
    template<typename TContainer>
    armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
    {
        return armnnUtils::MakeInputTensors(m_InputBindings, inputDataContainers);
    }

    template<typename TContainer>
    armnn::OutputTensors MakeOutputTensors(std::vector<TContainer>& outputDataContainers)
    {
        return armnnUtils::MakeOutputTensors(m_OutputBindings, outputDataContainers);
    }

    std::chrono::high_resolution_clock::time_point GetCurrentTime()
    {
        return std::chrono::high_resolution_clock::now();
    }

    std::chrono::duration<double, std::milli> GetTimeDuration(
            std::chrono::high_resolution_clock::time_point& start_time,
            std::chrono::high_resolution_clock::time_point& end_time)
    {
        return std::chrono::duration<double, std::milli>(end_time - start_time);
    }
};