// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#endif
#if defined(ARMNN_ONNX_PARSER)
#include <armnnOnnxParser/IOnnxParser.hpp>
#endif

#include <HeapProfiling.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/algorithm/string/join.hpp>
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/log/trivial.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/variant.hpp>

#include <algorithm>
#include <chrono>
#include <fstream>
#include <iostream>
#include <map>
#include <string>
#include <vector>
#include <type_traits>
namespace
{

inline bool CheckRequestedBackendsAreValid(const std::vector<armnn::BackendId>& backendIds,
                                           armnn::Optional<std::string&> invalidBackendIds = armnn::EmptyOptional())
{
    if (backendIds.empty())
    {
        return false;
    }

    armnn::BackendIdSet validBackendIds = armnn::BackendRegistryInstance().GetBackendIds();

    bool allValid = true;
    for (const auto& backendId : backendIds)
    {
        if (std::find(validBackendIds.begin(), validBackendIds.end(), backendId) == validBackendIds.end())
        {
            allValid = false;
            if (invalidBackendIds)
            {
                if (!invalidBackendIds.value().empty())
                {
                    invalidBackendIds.value() += ", ";
                }
                invalidBackendIds.value() += backendId;
            }
        }
    }
    return allValid;
}

} // anonymous namespace
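
// Example (illustrative sketch only, not part of the original harness): validating a
// user-supplied backend list with the helper above. The backend names are placeholders.
//
//     std::string invalidBackends;
//     std::vector<armnn::BackendId> requested = { "CpuAcc", "GpuAcc" };
//     if (!CheckRequestedBackendsAreValid(requested, armnn::Optional<std::string&>(invalidBackends)))
//     {
//         std::cerr << "Invalid backend IDs: " << invalidBackends << std::endl;
//     }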

namespace InferenceModelInternal
{
// This needs to go when the armnnCaffeParser, armnnTfParser and armnnTfLiteParser
// definitions of BindingPointInfo are consolidated.
using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;

using QuantizationParams = std::pair<float,int32_t>;

struct Params
{
    std::string m_ModelPath;
    std::vector<std::string> m_InputBindings;
    std::vector<armnn::TensorShape> m_InputShapes;
    std::vector<std::string> m_OutputBindings;
    std::vector<armnn::BackendId> m_ComputeDevices;
    bool m_EnableProfiling;
    size_t m_SubgraphId;
    bool m_IsModelBinary;
    bool m_VisualizePostOptimizationModel;
    bool m_EnableFp16TurboMode;

    Params()
        : m_ComputeDevices{"CpuRef"}
        , m_EnableProfiling(false)
        , m_SubgraphId(0)
        , m_IsModelBinary(true)
        , m_VisualizePostOptimizationModel(false)
        , m_EnableFp16TurboMode(false)
    {}
};

} // namespace InferenceModelInternal
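
// Example (illustrative sketch only; the file path and layer names are hypothetical):
// a typical way to populate Params before handing it to CreateNetworkImpl / InferenceModel.
//
//     InferenceModelInternal::Params params;
//     params.m_ModelPath      = "model.tflite";        // hypothetical model file
//     params.m_InputBindings  = { "input" };           // hypothetical input layer name
//     params.m_OutputBindings = { "output" };          // hypothetical output layer name
//     params.m_ComputeDevices = { "CpuAcc", "CpuRef" };
//     params.m_IsModelBinary  = true;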

template <typename IParser>
struct CreateNetworkImpl
{
public:
    using Params = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        std::map<std::string, armnn::TensorShape> inputShapes;
        if (!params.m_InputShapes.empty())
        {
            const size_t numInputShapes   = params.m_InputShapes.size();
            const size_t numInputBindings = params.m_InputBindings.size();
            if (numInputShapes < numInputBindings)
            {
                throw armnn::Exception(boost::str(boost::format(
                    "Not every input has its tensor shape specified: expected=%1%, got=%2%")
                    % numInputBindings % numInputShapes));
            }

            for (size_t i = 0; i < numInputShapes; i++)
            {
                inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i];
            }
        }

        std::vector<std::string> requestedOutputs = params.m_OutputBindings;
        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            // Handle text and binary input differently by calling the corresponding parser function
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) :
                parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            inputBindings.push_back(parser->GetNetworkInputBindingInfo(inputLayerName));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            outputBindings.push_back(parser->GetNetworkOutputBindingInfo(outputLayerName));
        }

        return network;
    }
};
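
// The primary template above assumes a parser (such as the Caffe or TensorFlow parsers) whose
// CreateNetworkFrom*File overloads take explicit input shapes and a list of requested outputs,
// and whose binding lookups are keyed by layer name alone. The specializations below adapt
// parsers with different factory and binding interfaces.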

#if defined(ARMNN_SERIALIZER)
template <>
struct CreateNetworkImpl<armnnDeserializer::IDeserializer>
{
public:
    using IParser = armnnDeserializer::IDeserializer;
    using Params = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        auto parser(IParser::Create());
        BOOST_ASSERT(parser);

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");

            boost::system::error_code errorCode;
            boost::filesystem::path pathToFile(params.m_ModelPath);
            if (!boost::filesystem::exists(pathToFile, errorCode))
            {
                throw armnn::FileNotFoundException(boost::str(
                    boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
                    params.m_ModelPath %
                    errorCode %
                    CHECK_LOCATION().AsString()));
            }
            std::ifstream file(params.m_ModelPath, std::ios::binary);

            network = parser->CreateNetworkFromBinary(file);
        }

        unsigned int subGraphId = boost::numeric_cast<unsigned int>(params.m_SubgraphId);

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnnDeserializer::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(subGraphId, inputLayerName);
            inputBindings.push_back(std::make_pair(inputBinding.m_BindingId, inputBinding.m_TensorInfo));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnnDeserializer::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(subGraphId, outputLayerName);
            outputBindings.push_back(std::make_pair(outputBinding.m_BindingId, outputBinding.m_TensorInfo));
        }

        return network;
    }
};
#endif

#if defined(ARMNN_TF_LITE_PARSER)
template <>
struct CreateNetworkImpl<armnnTfLiteParser::ITfLiteParser>
{
public:
    using IParser = armnnTfLiteParser::ITfLiteParser;
    using Params = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(params.m_SubgraphId, inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif

#if defined(ARMNN_ONNX_PARSER)
template <>
struct CreateNetworkImpl<armnnOnnxParser::IOnnxParser>
{
public:
    using IParser = armnnOnnxParser::IOnnxParser;
    using Params = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str()) :
                parser->CreateNetworkFromTextFile(modelPath.c_str()));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif
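
// Note: unlike the TfLite and deserializer specializations above, the ONNX parser resolves
// bindings by layer name only, with no subgraph id, and already returns them in the
// (LayerBindingId, TensorInfo) form used by this harness.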

template<typename TContainer>
inline armnn::InputTensors MakeInputTensors(
    const std::vector<InferenceModelInternal::BindingPointInfo>& inputBindings,
    const std::vector<TContainer>& inputDataContainers)
{
    armnn::InputTensors inputTensors;

    const size_t numInputs = inputBindings.size();
    if (numInputs != inputDataContainers.size())
    {
        throw armnn::Exception(boost::str(boost::format("Number of inputs does not match number of "
            "tensor data containers: %1% != %2%") % numInputs % inputDataContainers.size()));
    }

    for (size_t i = 0; i < numInputs; i++)
    {
        const InferenceModelInternal::BindingPointInfo& inputBinding = inputBindings[i];
        const TContainer& inputData = inputDataContainers[i];

        boost::apply_visitor([&](auto&& value)
        {
            if (value.size() != inputBinding.second.GetNumElements())
            {
                throw armnn::Exception("Input tensor has incorrect size");
            }

            armnn::ConstTensor inputTensor(inputBinding.second, value.data());
            inputTensors.push_back(std::make_pair(inputBinding.first, inputTensor));
        },
        inputData);
    }

    return inputTensors;
}

template<typename TContainer>
inline armnn::OutputTensors MakeOutputTensors(
    const std::vector<InferenceModelInternal::BindingPointInfo>& outputBindings,
    std::vector<TContainer>& outputDataContainers)
{
    armnn::OutputTensors outputTensors;

    const size_t numOutputs = outputBindings.size();
    if (numOutputs != outputDataContainers.size())
    {
        throw armnn::Exception(boost::str(boost::format("Number of outputs does not match number of "
            "tensor data containers: %1% != %2%") % numOutputs % outputDataContainers.size()));
    }

    for (size_t i = 0; i < numOutputs; i++)
    {
        const InferenceModelInternal::BindingPointInfo& outputBinding = outputBindings[i];
        TContainer& outputData = outputDataContainers[i];

        boost::apply_visitor([&](auto&& value)
        {
            if (value.size() != outputBinding.second.GetNumElements())
            {
                throw armnn::Exception("Output tensor has incorrect size");
            }

            armnn::Tensor outputTensor(outputBinding.second, value.data());
            outputTensors.push_back(std::make_pair(outputBinding.first, outputTensor));
        },
        outputData);
    }

    return outputTensors;
}
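
// Example (illustrative sketch only; the binding vectors and buffer sizes are placeholders):
// pairing bindings with pre-allocated buffers via the helpers above.
//
//     using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
//     std::vector<TContainer> inputData  = { std::vector<float>(inputBindings[0].second.GetNumElements()) };
//     std::vector<TContainer> outputData = { std::vector<float>(outputBindings[0].second.GetNumElements()) };
//     armnn::InputTensors  inputTensors  = MakeInputTensors(inputBindings, inputData);
//     armnn::OutputTensors outputTensors = MakeOutputTensors(outputBindings, outputData);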

template <typename IParser, typename TDataType>
class InferenceModel
{
public:
    using DataType = TDataType;
    using Params = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;
    using QuantizationParams = InferenceModelInternal::QuantizationParams;
    using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
    struct CommandLineOptions
    {
        std::string m_ModelDir;
        std::vector<std::string> m_ComputeDevices;
        bool m_VisualizePostOptimizationModel;
        bool m_EnableFp16TurboMode;

        std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
        {
            std::vector<armnn::BackendId> backendIds;
            std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
            return backendIds;
        }
    };

    static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
    {
        namespace po = boost::program_options;

        const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };

        const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
                                          + armnn::BackendRegistryInstance().GetBackendIdsAsString();

        desc.add_options()
            ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
             "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
            ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
             default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
             multitoken(), backendsMessage.c_str())
            ("visualize-optimized-model,v",
             po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
             "Produce a dot file useful for visualizing the graph post optimization. "
             "The file will have the same name as the model with the .dot extension.")
            ("fp16-turbo-mode", po::value<bool>(&options.m_EnableFp16TurboMode)->default_value(false),
             "If this option is enabled, FP32 layers, weights and biases will be converted "
             "to FP16 where the backend supports it.");
    }
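
    // Example (illustrative only; the binary name and paths are placeholders): the options added
    // above are typically consumed from a test harness command line such as
    //
    //     <test-binary> --model-dir=/path/to/models --compute CpuAcc CpuRef --fp16-turbo-mode=true
    //
    // where --compute accepts multiple tokens and falls back to "CpuAcc, CpuRef" when omitted.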

    InferenceModel(const Params& params, const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
        : m_EnableProfiling(params.m_EnableProfiling)
    {
        if (runtime)
        {
            m_Runtime = runtime;
        }
        else
        {
            armnn::IRuntime::CreationOptions options;
            options.m_EnableGpuProfiling = m_EnableProfiling;
            m_Runtime = std::move(armnn::IRuntime::Create(options));
        }

        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
        }

        armnn::INetworkPtr network =
            CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);

        armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork *){}};
        {
            ARMNN_SCOPED_HEAP_PROFILING("Optimizing");

            armnn::OptimizerOptions options;
            options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;

            optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
            if (!optNet)
            {
                throw armnn::Exception("Optimize returned nullptr");
            }
        }

        if (params.m_VisualizePostOptimizationModel)
        {
            boost::filesystem::path filename = params.m_ModelPath;
            filename.replace_extension("dot");
            std::fstream file(filename.c_str(), std::ios_base::out);
            optNet->SerializeToDot(file);
        }

        armnn::Status ret;
        {
            ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
            ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet));
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::LoadNetwork failed");
        }
    }
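
    // The constructor above follows the usual Arm NN workflow: parse the model into an INetwork,
    // optimize it for the requested backends (optionally reducing FP32 to FP16 and dumping a .dot
    // file of the optimized graph), then load the optimized network into the runtime.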

    void CheckInputIndexIsValid(unsigned int inputIndex) const
    {
        if (m_InputBindings.size() < inputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
        }
    }

    void CheckOutputIndexIsValid(unsigned int outputIndex) const
    {
        if (m_OutputBindings.size() < outputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
        }
    }

    unsigned int GetOutputSize(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex].second.GetNumElements();
    }

    std::chrono::duration<double, std::milli> Run(
            const std::vector<TContainer>& inputContainers,
            std::vector<TContainer>& outputContainers)
    {
        for (unsigned int i = 0; i < outputContainers.size(); ++i)
        {
            const unsigned int expectedOutputDataSize = GetOutputSize(i);

            boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
            {
                const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(value.size());
                if (actualOutputDataSize < expectedOutputDataSize)
                {
                    unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
                    throw armnn::Exception(
                        boost::str(boost::format("Not enough data for output #%1%: expected "
                        "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
                }
            },
            outputContainers[i]);
        }

        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
        if (profiler)
        {
            profiler->EnableProfiling(m_EnableProfiling);
        }

        // Start timer to record inference time in EnqueueWorkload (in milliseconds)
        const auto start_time = GetCurrentTime();

        armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
                                                       MakeInputTensors(inputContainers),
                                                       MakeOutputTensors(outputContainers));

        const auto end_time = GetCurrentTime();

        // If profiling is enabled, print out the results
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::EnqueueWorkload failed");
        }

        return std::chrono::duration<double, std::milli>(end_time - start_time);
    }
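
    // Example (illustrative sketch only; the parser type, model path and buffer sizes are
    // placeholders): constructing a model and running a single inference.
    //
    //     using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;
    //     Model::Params params;
    //     params.m_ModelPath      = "model.tflite";
    //     params.m_InputBindings  = { "input" };
    //     params.m_OutputBindings = { "output" };
    //     params.m_ComputeDevices = { "CpuAcc" };
    //     Model model(params);
    //     std::vector<Model::TContainer> inputs  = { std::vector<float>(model.GetInputBindingInfo().second.GetNumElements()) };
    //     std::vector<Model::TContainer> outputs = { std::vector<float>(model.GetOutputSize()) };
    //     auto duration = model.Run(inputs, outputs); // wall-clock time spent in EnqueueWorkload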

    const BindingPointInfo& GetInputBindingInfo(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex];
    }

    const std::vector<BindingPointInfo>& GetInputBindingInfos() const
    {
        return m_InputBindings;
    }

    const BindingPointInfo& GetOutputBindingInfo(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex];
    }

    const std::vector<BindingPointInfo>& GetOutputBindingInfos() const
    {
        return m_OutputBindings;
    }

    QuantizationParams GetQuantizationParams(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
                              m_OutputBindings[outputIndex].second.GetQuantizationOffset());
    }

    QuantizationParams GetInputQuantizationParams(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return std::make_pair(m_InputBindings[inputIndex].second.GetQuantizationScale(),
                              m_InputBindings[inputIndex].second.GetQuantizationOffset());
    }

    std::vector<QuantizationParams> GetAllQuantizationParams() const
    {
        std::vector<QuantizationParams> quantizationParams;
        for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
        {
            quantizationParams.push_back(GetQuantizationParams(i));
        }
        return quantizationParams;
    }
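
    // The quantization parameters returned above are the (scale, offset) pair of the bound tensor;
    // for an asymmetrically quantized 8-bit tensor a real value is recovered as
    // real = scale * (quantized - offset), e.g. scale 0.5 and offset 10 map the stored value 16 to 3.0.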

private:
    armnn::NetworkId m_NetworkIdentifier;
    std::shared_ptr<armnn::IRuntime> m_Runtime;

    std::vector<InferenceModelInternal::BindingPointInfo> m_InputBindings;
    std::vector<InferenceModelInternal::BindingPointInfo> m_OutputBindings;
    bool m_EnableProfiling;

    template<typename TContainer>
    armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
    {
        return ::MakeInputTensors(m_InputBindings, inputDataContainers);
    }

    template<typename TContainer>
    armnn::OutputTensors MakeOutputTensors(std::vector<TContainer>& outputDataContainers)
    {
        return ::MakeOutputTensors(m_OutputBindings, outputDataContainers);
    }

    std::chrono::high_resolution_clock::time_point GetCurrentTime()
    {
        return std::chrono::high_resolution_clock::now();
    }

    std::chrono::duration<double, std::milli> GetTimeDuration(
            std::chrono::high_resolution_clock::time_point& start_time,
            std::chrono::high_resolution_clock::time_point& end_time)
    {
        return std::chrono::duration<double, std::milli>(end_time - start_time);
    }
};