//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>
#if defined(ARMNN_TF_LITE_PARSER)
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#endif

#include <HeapProfiling.hpp>

#if defined(ARMNN_ONNX_PARSER)
#include <armnnOnnxParser/IOnnxParser.hpp>
#endif
#include <backendsCommon/BackendRegistry.hpp>

#include <boost/algorithm/string/join.hpp>
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/log/trivial.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/variant.hpp>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <map>
#include <string>
#include <type_traits>
#include <vector>
namespace
{

// Returns true if every requested backend id is registered with the backend registry.
// If invalidBackendIds is provided, the names of any unknown backends are appended to it.
inline bool CheckRequestedBackendsAreValid(const std::vector<armnn::BackendId>& backendIds,
                                           armnn::Optional<std::string&> invalidBackendIds = armnn::EmptyOptional())
{
    if (backendIds.empty())
    {
        return false;
    }

    armnn::BackendIdSet validBackendIds = armnn::BackendRegistryInstance().GetBackendIds();

    bool allValid = true;
    for (const auto& backendId : backendIds)
    {
        if (std::find(validBackendIds.begin(), validBackendIds.end(), backendId) == validBackendIds.end())
        {
            allValid = false;
            if (invalidBackendIds)
            {
                if (!invalidBackendIds.value().empty())
                {
                    invalidBackendIds.value() += ", ";
                }
                invalidBackendIds.value() += backendId;
            }
        }
    }
    return allValid;
}

} // anonymous namespace
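
// Example (illustrative only): validating user-requested backends and collecting the
// names of any that are not registered. "MadeUp" is a hypothetical invalid backend id.
//
//     std::string invalid;
//     if (!CheckRequestedBackendsAreValid({"CpuRef", "MadeUp"},
//                                         armnn::Optional<std::string&>(invalid)))
//     {
//         // invalid now contains "MadeUp"
//     }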
namespace InferenceModelInternal
{

// This needs to go when the armnnCaffeParser, armnnTfParser and armnnTfLiteParser
// definitions of BindingPointInfo are consolidated.
using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;

using QuantizationParams = std::pair<float,int32_t>;

struct Params
{
    std::string m_ModelPath;
    std::vector<std::string> m_InputBindings;
    std::vector<armnn::TensorShape> m_InputShapes;
    std::vector<std::string> m_OutputBindings;
    std::vector<armnn::BackendId> m_ComputeDevices;
    bool m_EnableProfiling;
    size_t m_SubgraphId;
    bool m_IsModelBinary;
    bool m_VisualizePostOptimizationModel;
    bool m_EnableFp16TurboMode;

    Params()
        : m_ComputeDevices{"CpuRef"}
        , m_EnableProfiling(false)
        , m_SubgraphId(0)
        , m_IsModelBinary(true)
        , m_VisualizePostOptimizationModel(false)
        , m_EnableFp16TurboMode(false)
    {}
};

} // namespace InferenceModelInternal
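
// CreateNetworkImpl loads a model file into an armnn::INetwork and records the
// input/output binding info reported by the parser. The primary template below is
// written for parsers whose CreateNetworkFrom*File overloads take input shapes and
// requested outputs (e.g. the Caffe and TF parsers); the specialisations that follow
// adapt the TfLite and ONNX parsers, whose factory functions have different signatures.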
template <typename IParser>
struct CreateNetworkImpl
{
public:
    using Params = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        std::map<std::string, armnn::TensorShape> inputShapes;
        if (!params.m_InputShapes.empty())
        {
            const size_t numInputShapes = params.m_InputShapes.size();
            const size_t numInputBindings = params.m_InputBindings.size();
            if (numInputShapes < numInputBindings)
            {
                throw armnn::Exception(boost::str(boost::format(
                    "Not every input has its tensor shape specified: expected=%1%, got=%2%")
                    % numInputBindings % numInputShapes));
            }

            for (size_t i = 0; i < numInputShapes; i++)
            {
                inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i];
            }
        }

        std::vector<std::string> requestedOutputs = params.m_OutputBindings;
        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            // Handle text and binary input differently by calling the corresponding parser function
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) :
                parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            inputBindings.push_back(parser->GetNetworkInputBindingInfo(inputLayerName));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            outputBindings.push_back(parser->GetNetworkOutputBindingInfo(outputLayerName));
        }

        return network;
    }
};
#if defined(ARMNN_TF_LITE_PARSER)
template <>
struct CreateNetworkImpl<armnnTfLiteParser::ITfLiteParser>
{
public:
    using IParser = armnnTfLiteParser::ITfLiteParser;
    using Params = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(params.m_SubgraphId, inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif
#if defined(ARMNN_ONNX_PARSER)
template <>
struct CreateNetworkImpl<armnnOnnxParser::IOnnxParser>
{
public:
    using IParser = armnnOnnxParser::IOnnxParser;
    using Params = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str()) :
                parser->CreateNetworkFromTextFile(modelPath.c_str()));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif
template<typename TContainer>
inline armnn::InputTensors MakeInputTensors(
    const std::vector<InferenceModelInternal::BindingPointInfo>& inputBindings,
    const std::vector<TContainer>& inputDataContainers)
{
    armnn::InputTensors inputTensors;

    const size_t numInputs = inputBindings.size();
    if (numInputs != inputDataContainers.size())
    {
        throw armnn::Exception(boost::str(boost::format("Number of inputs does not match number of "
            "tensor data containers: %1% != %2%") % numInputs % inputDataContainers.size()));
    }

    for (size_t i = 0; i < numInputs; i++)
    {
        const InferenceModelInternal::BindingPointInfo& inputBinding = inputBindings[i];
        const TContainer& inputData = inputDataContainers[i];

        boost::apply_visitor([&](auto&& value)
        {
            if (value.size() != inputBinding.second.GetNumElements())
            {
                throw armnn::Exception("Input tensor has incorrect size");
            }

            armnn::ConstTensor inputTensor(inputBinding.second, value.data());
            inputTensors.push_back(std::make_pair(inputBinding.first, inputTensor));
        },
        inputData);
    }

    return inputTensors;
}
template<typename TContainer>
inline armnn::OutputTensors MakeOutputTensors(
    const std::vector<InferenceModelInternal::BindingPointInfo>& outputBindings,
    std::vector<TContainer>& outputDataContainers)
{
    armnn::OutputTensors outputTensors;

    const size_t numOutputs = outputBindings.size();
    if (numOutputs != outputDataContainers.size())
    {
        throw armnn::Exception(boost::str(boost::format("Number of outputs does not match number of "
            "tensor data containers: %1% != %2%") % numOutputs % outputDataContainers.size()));
    }

    for (size_t i = 0; i < numOutputs; i++)
    {
        const InferenceModelInternal::BindingPointInfo& outputBinding = outputBindings[i];
        TContainer& outputData = outputDataContainers[i];

        boost::apply_visitor([&](auto&& value)
        {
            if (value.size() != outputBinding.second.GetNumElements())
            {
                throw armnn::Exception("Output tensor has incorrect size");
            }

            armnn::Tensor outputTensor(outputBinding.second, value.data());
            outputTensors.push_back(std::make_pair(outputBinding.first, outputTensor));
        },
        outputData);
    }

    return outputTensors;
}
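
// Note: inputs are wrapped as armnn::ConstTensor and outputs as armnn::Tensor; both
// simply reference the caller's container storage. The containers must therefore
// outlive (and not reallocate during) the EnqueueWorkload call that consumes them.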
template <typename IParser, typename TDataType>
class InferenceModel
{
public:
    using DataType = TDataType;
    using Params = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;
    using QuantizationParams = InferenceModelInternal::QuantizationParams;
    using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    struct CommandLineOptions
    {
        std::string m_ModelDir;
        std::vector<std::string> m_ComputeDevices;
        bool m_VisualizePostOptimizationModel;
        bool m_EnableFp16TurboMode;

        std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
        {
            std::vector<armnn::BackendId> backendIds;
            std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
            return backendIds;
        }
    };

    static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
    {
        namespace po = boost::program_options;

        const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };

        const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
                                          + armnn::BackendRegistryInstance().GetBackendIdsAsString();

        desc.add_options()
            ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
             "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
            ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
             default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
             multitoken(), backendsMessage.c_str())
            ("visualize-optimized-model,v",
             po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
             "Produce a dot file useful for visualizing the graph post optimization. "
             "The file will have the same name as the model, with the .dot extension.")
            ("fp16-turbo-mode", po::value<bool>(&options.m_EnableFp16TurboMode)->default_value(false),
             "If this option is enabled, FP32 layers, weights and biases will be converted "
             "to FP16 where the backend supports it.");
    }
    InferenceModel(const Params& params, const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
        : m_EnableProfiling(params.m_EnableProfiling)
    {
        if (runtime)
        {
            m_Runtime = runtime;
        }
        else
        {
            armnn::IRuntime::CreationOptions options;
            options.m_EnableGpuProfiling = m_EnableProfiling;
            m_Runtime = std::move(armnn::IRuntime::Create(options));
        }

        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
        }

        armnn::INetworkPtr network =
            CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);

        armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork *){}};
        {
            ARMNN_SCOPED_HEAP_PROFILING("Optimizing");

            armnn::OptimizerOptions options;
            options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;

            optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
            if (!optNet)
            {
                throw armnn::Exception("Optimize returned nullptr");
            }
        }

        if (params.m_VisualizePostOptimizationModel)
        {
            boost::filesystem::path filename = params.m_ModelPath;
            filename.replace_extension("dot");
            std::fstream file(filename.c_str(), file.out);
            optNet->SerializeToDot(file);
        }

        armnn::Status ret;
        {
            ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
            ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet));
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::LoadNetwork failed");
        }
    }
    void CheckInputIndexIsValid(unsigned int inputIndex) const
    {
        if (m_InputBindings.size() < inputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
        }
    }

    void CheckOutputIndexIsValid(unsigned int outputIndex) const
    {
        if (m_OutputBindings.size() < outputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
        }
    }

    unsigned int GetOutputSize(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex].second.GetNumElements();
    }
    void Run(const std::vector<TContainer>& inputContainers, std::vector<TContainer>& outputContainers)
    {
        // Sanity-check the output containers up front: each must be large enough
        // to hold the corresponding output tensor.
        for (unsigned int i = 0; i < outputContainers.size(); ++i)
        {
            const unsigned int expectedOutputDataSize = GetOutputSize(i);

            boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
            {
                const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(value.size());
                if (actualOutputDataSize < expectedOutputDataSize)
                {
                    unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
                    throw armnn::Exception(
                        boost::str(boost::format("Not enough data for output #%1%: expected "
                        "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
                }
            },
            outputContainers[i]);
        }

        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
        if (profiler)
        {
            profiler->EnableProfiling(m_EnableProfiling);
        }

        armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
                                                       MakeInputTensors(inputContainers),
                                                       MakeOutputTensors(outputContainers));

        // If profiling is enabled, print out the results
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::EnqueueWorkload failed");
        }
    }
    const BindingPointInfo& GetInputBindingInfo(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex];
    }

    const std::vector<BindingPointInfo>& GetInputBindingInfos() const
    {
        return m_InputBindings;
    }

    const BindingPointInfo& GetOutputBindingInfo(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex];
    }

    const std::vector<BindingPointInfo>& GetOutputBindingInfos() const
    {
        return m_OutputBindings;
    }

    QuantizationParams GetQuantizationParams(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
                              m_OutputBindings[outputIndex].second.GetQuantizationOffset());
    }

    std::vector<QuantizationParams> GetAllQuantizationParams() const
    {
        std::vector<QuantizationParams> quantizationParams;
        for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
        {
            quantizationParams.push_back(GetQuantizationParams(i));
        }
        return quantizationParams;
    }
private:
    armnn::NetworkId m_NetworkIdentifier;
    std::shared_ptr<armnn::IRuntime> m_Runtime;

    std::vector<InferenceModelInternal::BindingPointInfo> m_InputBindings;
    std::vector<InferenceModelInternal::BindingPointInfo> m_OutputBindings;
    bool m_EnableProfiling;

    template<typename TContainer>
    armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
    {
        return ::MakeInputTensors(m_InputBindings, inputDataContainers);
    }

    template<typename TContainer>
    armnn::OutputTensors MakeOutputTensors(std::vector<TContainer>& outputDataContainers)
    {
        return ::MakeOutputTensors(m_OutputBindings, outputDataContainers);
    }
};
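
// Usage sketch (illustrative, not part of this header's API): a minimal caller,
// assuming a TfLite build and a hypothetical single-input/single-output model file
// "model.tflite" with binding names "input" and "output".
//
//     using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;
//
//     Model::Params params;
//     params.m_ModelPath      = "model.tflite";
//     params.m_InputBindings  = { "input" };
//     params.m_OutputBindings = { "output" };
//     params.m_ComputeDevices = { "CpuRef" };
//     Model model(params);
//
//     std::vector<Model::TContainer> inputs =
//         { std::vector<float>(model.GetInputBindingInfo().second.GetNumElements()) };
//     std::vector<Model::TContainer> outputs =
//         { std::vector<float>(model.GetOutputSize()) };
//     model.Run(inputs, outputs);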