//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#endif
#if defined(ARMNN_ONNX_PARSER)
#include <armnnOnnxParser/IOnnxParser.hpp>
#endif

#include <HeapProfiling.hpp>
#include <TensorIOUtils.hpp>

#include <backendsCommon/BackendRegistry.hpp>

#include <boost/algorithm/string/join.hpp>
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/log/trivial.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/variant.hpp>

#include <algorithm>
#include <chrono>
#include <fstream>
#include <iostream>
#include <iterator>
#include <map>
#include <string>
#include <type_traits>
#include <vector>
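
// Helpers for building and running a network with a specific parser front-end.
// The InferenceModel class below wraps the parse -> optimise -> load -> run
// sequence for a model.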
namespace
{

// Returns true only if every requested backend id is registered; unknown ids are appended to invalidBackendIds.
inline bool CheckRequestedBackendsAreValid(const std::vector<armnn::BackendId>& backendIds,
                                           armnn::Optional<std::string&> invalidBackendIds = armnn::EmptyOptional())
{
    if (backendIds.empty())
    {
        return false;
    }

    armnn::BackendIdSet validBackendIds = armnn::BackendRegistryInstance().GetBackendIds();

    bool allValid = true;
    for (const auto& backendId : backendIds)
    {
        if (std::find(validBackendIds.begin(), validBackendIds.end(), backendId) == validBackendIds.end())
        {
            allValid = false;
            if (invalidBackendIds)
            {
                if (!invalidBackendIds.value().empty())
                {
                    invalidBackendIds.value() += ", ";
                }
                invalidBackendIds.value() += backendId;
            }
        }
    }
    return allValid;
}

} // anonymous namespace
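
// Parameters shared by all parser front-ends: model location, input/output
// binding names, optional input shape overrides and the backends to run on.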
namespace InferenceModelInternal
{

using BindingPointInfo = armnn::BindingPointInfo;

using QuantizationParams = std::pair<float, int32_t>;

struct Params
{
    std::string                     m_ModelPath;
    std::vector<std::string>        m_InputBindings;
    std::vector<armnn::TensorShape> m_InputShapes;
    std::vector<std::string>        m_OutputBindings;
    std::vector<armnn::BackendId>   m_ComputeDevices;
    size_t                          m_SubgraphId;
    bool                            m_IsModelBinary;
    bool                            m_VisualizePostOptimizationModel;
    bool                            m_EnableFp16TurboMode;

    Params()
        : m_ComputeDevices{"CpuRef"}
        , m_SubgraphId(0)
        , m_IsModelBinary(true)
        , m_VisualizePostOptimizationModel(false)
        , m_EnableFp16TurboMode(false)
    {}
};

} // namespace InferenceModelInternal
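
// Generic network creation: parses a model file with the given IParser and
// records the input/output binding information requested in Params.
// Parser-specific specialisations follow below.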
template <typename IParser>
struct CreateNetworkImpl
{
public:
    using Params = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        std::map<std::string, armnn::TensorShape> inputShapes;
        if (!params.m_InputShapes.empty())
        {
            const size_t numInputShapes   = params.m_InputShapes.size();
            const size_t numInputBindings = params.m_InputBindings.size();
            if (numInputShapes < numInputBindings)
            {
                throw armnn::Exception(boost::str(boost::format(
                    "Not every input has its tensor shape specified: expected=%1%, got=%2%")
                    % numInputBindings % numInputShapes));
            }

            for (size_t i = 0; i < numInputShapes; i++)
            {
                inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i];
            }
        }

        std::vector<std::string> requestedOutputs = params.m_OutputBindings;
        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            // Handle text and binary input differently by calling the corresponding parser function
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) :
                parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            inputBindings.push_back(parser->GetNetworkInputBindingInfo(inputLayerName));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            outputBindings.push_back(parser->GetNetworkOutputBindingInfo(outputLayerName));
        }

        return network;
    }
};
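
// Specialisation for networks serialised with the ArmNN serializer: the model
// is deserialised from a binary stream rather than parsed from a
// framework-specific model file.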
#if defined(ARMNN_SERIALIZER)
template <>
struct CreateNetworkImpl<armnnDeserializer::IDeserializer>
{
public:
    using IParser = armnnDeserializer::IDeserializer;
    using Params  = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        auto parser(IParser::Create());
        BOOST_ASSERT(parser);

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");

            boost::system::error_code errorCode;
            boost::filesystem::path pathToFile(params.m_ModelPath);
            if (!boost::filesystem::exists(pathToFile, errorCode))
            {
                throw armnn::FileNotFoundException(boost::str(
                    boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
                    params.m_ModelPath %
                    errorCode %
                    CHECK_LOCATION().AsString()));
            }
            std::ifstream file(params.m_ModelPath, std::ios::binary);

            network = parser->CreateNetworkFromBinary(file);
        }

        unsigned int subgraphId = boost::numeric_cast<unsigned int>(params.m_SubgraphId);

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnnDeserializer::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(subgraphId, inputLayerName);
            inputBindings.push_back(std::make_pair(inputBinding.m_BindingId, inputBinding.m_TensorInfo));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnnDeserializer::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(subgraphId, outputLayerName);
            outputBindings.push_back(std::make_pair(outputBinding.m_BindingId, outputBinding.m_TensorInfo));
        }

        return network;
    }
};
#endif
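
// Specialisation for the TensorFlow Lite parser, which only accepts binary
// .tflite files and resolves bindings per subgraph.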
#if defined(ARMNN_TF_LITE_PARSER)
template <>
struct CreateNetworkImpl<armnnTfLiteParser::ITfLiteParser>
{
public:
    using IParser = armnnTfLiteParser::ITfLiteParser;
    using Params  = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnn::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(params.m_SubgraphId, inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnn::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif
#if defined(ARMNN_ONNX_PARSER)
template <>
struct CreateNetworkImpl<armnnOnnxParser::IOnnxParser>
{
public:
    using IParser          = armnnOnnxParser::IOnnxParser;
    using Params           = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str()) :
                parser->CreateNetworkFromTextFile(modelPath.c_str()));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif
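
// InferenceModel ties everything together: it builds a network with
// CreateNetworkImpl<IParser>, optimises it for the requested backends, loads it
// into an IRuntime and exposes a Run() call that times EnqueueWorkload.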
template <typename IParser, typename TDataType>
class InferenceModel
{
public:
    using DataType           = TDataType;
    using Params             = InferenceModelInternal::Params;
    using QuantizationParams = InferenceModelInternal::QuantizationParams;
    using TContainer         = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    struct CommandLineOptions
    {
        std::string m_ModelDir;
        std::vector<std::string> m_ComputeDevices;
        bool m_VisualizePostOptimizationModel;
        bool m_EnableFp16TurboMode;
        std::string m_Labels;

        std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
        {
            std::vector<armnn::BackendId> backendIds;
            std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
            return backendIds;
        }
    };

    static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
    {
        namespace po = boost::program_options;

        const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };

        const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
                                          + armnn::BackendRegistryInstance().GetBackendIdsAsString();

        desc.add_options()
            ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
                "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
            ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
                default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
                multitoken(), backendsMessage.c_str())
            ("labels,l", po::value<std::string>(&options.m_Labels),
                "Text file containing one image filename - correct label pair per line, "
                "used to test the accuracy of the network.")
            ("visualize-optimized-model,v",
                po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
                "Produce a dot file useful for visualizing the graph post optimization. "
                "The file will have the same name as the model with the .dot extension.")
            ("fp16-turbo-mode", po::value<bool>(&options.m_EnableFp16TurboMode)->default_value(false),
                "If this option is enabled FP32 layers, weights and biases will be converted "
                "to FP16 where the backend supports it.");
    }
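
    // Parses the model described by params, optimises it for the requested
    // backends (optionally reducing FP32 to FP16), and loads it into the
    // runtime. Throws armnn::Exception on invalid backends or load failure.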
    InferenceModel(const Params& params,
                   bool enableProfiling,
                   const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
        : m_EnableProfiling(enableProfiling)
    {
        if (runtime)
        {
            m_Runtime = runtime;
        }
        else
        {
            armnn::IRuntime::CreationOptions options;
            options.m_EnableGpuProfiling = m_EnableProfiling;
            m_Runtime = std::move(armnn::IRuntime::Create(options));
        }

        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
        }

        armnn::INetworkPtr network =
            CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);

        armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork *){}};
        {
            ARMNN_SCOPED_HEAP_PROFILING("Optimizing");

            armnn::OptimizerOptions options;
            options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;

            optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
            if (!optNet)
            {
                throw armnn::Exception("Optimize returned nullptr");
            }
        }

        if (params.m_VisualizePostOptimizationModel)
        {
            boost::filesystem::path filename = params.m_ModelPath;
            filename.replace_extension("dot");
            std::fstream file(filename.c_str(), std::ios_base::out);
            optNet->SerializeToDot(file);
        }

        armnn::Status ret;
        {
            ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
            ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet));
        }
        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::LoadNetwork failed");
        }
    }
    void CheckInputIndexIsValid(unsigned int inputIndex) const
    {
        if (m_InputBindings.size() < inputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
        }
    }

    void CheckOutputIndexIsValid(unsigned int outputIndex) const
    {
        if (m_OutputBindings.size() < outputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
        }
    }

    unsigned int GetOutputSize(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex].second.GetNumElements();
    }
    std::chrono::duration<double, std::milli> Run(
            const std::vector<TContainer>& inputContainers,
            std::vector<TContainer>& outputContainers)
    {
        // Check that each output container is large enough before running inference.
        for (unsigned int i = 0; i < outputContainers.size(); ++i)
        {
            const unsigned int expectedOutputDataSize = GetOutputSize(i);

            boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
            {
                const unsigned int actualOutputDataSize = boost::numeric_cast<unsigned int>(value.size());
                if (actualOutputDataSize < expectedOutputDataSize)
                {
                    unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
                    throw armnn::Exception(
                        boost::str(boost::format("Not enough data for output #%1%: expected "
                        "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
                }
            },
            outputContainers[i]);
        }

        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
        if (profiler)
        {
            profiler->EnableProfiling(m_EnableProfiling);
        }

        // Start timer to record inference time in EnqueueWorkload (in milliseconds)
        const auto start_time = GetCurrentTime();

        armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
                                                       MakeInputTensors(inputContainers),
                                                       MakeOutputTensors(outputContainers));

        const auto end_time = GetCurrentTime();

        // If profiling is enabled print out the results
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::EnqueueWorkload failed");
        }

        return std::chrono::duration<double, std::milli>(end_time - start_time);
    }
    const armnn::BindingPointInfo& GetInputBindingInfo(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetInputBindingInfos() const
    {
        return m_InputBindings;
    }

    const armnn::BindingPointInfo& GetOutputBindingInfo(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetOutputBindingInfos() const
    {
        return m_OutputBindings;
    }

    QuantizationParams GetQuantizationParams(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
                              m_OutputBindings[outputIndex].second.GetQuantizationOffset());
    }

    QuantizationParams GetInputQuantizationParams(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return std::make_pair(m_InputBindings[inputIndex].second.GetQuantizationScale(),
                              m_InputBindings[inputIndex].second.GetQuantizationOffset());
    }

    std::vector<QuantizationParams> GetAllQuantizationParams() const
    {
        std::vector<QuantizationParams> quantizationParams;
        for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
        {
            quantizationParams.push_back(GetQuantizationParams(i));
        }
        return quantizationParams;
    }
private:
    armnn::NetworkId m_NetworkIdentifier;
    std::shared_ptr<armnn::IRuntime> m_Runtime;

    std::vector<armnn::BindingPointInfo> m_InputBindings;
    std::vector<armnn::BindingPointInfo> m_OutputBindings;
    bool m_EnableProfiling;

    template<typename TContainer>
    armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
    {
        return armnnUtils::MakeInputTensors(m_InputBindings, inputDataContainers);
    }

    template<typename TContainer>
    armnn::OutputTensors MakeOutputTensors(std::vector<TContainer>& outputDataContainers)
    {
        return armnnUtils::MakeOutputTensors(m_OutputBindings, outputDataContainers);
    }

    std::chrono::high_resolution_clock::time_point GetCurrentTime()
    {
        return std::chrono::high_resolution_clock::now();
    }

    std::chrono::duration<double, std::milli> GetTimeDuration(
            std::chrono::high_resolution_clock::time_point& start_time,
            std::chrono::high_resolution_clock::time_point& end_time)
    {
        return std::chrono::duration<double, std::milli>(end_time - start_time);
    }
};
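
// Illustrative usage sketch; the model path, binding names and float data type
// below are placeholders, not values taken from this header:
//
//     using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;
//     Model::Params params;
//     params.m_ModelPath      = "model.tflite";
//     params.m_InputBindings  = { "input" };
//     params.m_OutputBindings = { "output" };
//     params.m_ComputeDevices = { "CpuAcc", "CpuRef" };
//     Model model(params, /*enableProfiling=*/false);
//
//     std::vector<Model::TContainer> inputs =
//         { std::vector<float>(model.GetInputBindingInfo().second.GetNumElements()) };
//     std::vector<Model::TContainer> outputs =
//         { std::vector<float>(model.GetOutputSize()) };
//     auto inferenceTimeMs = model.Run(inputs, outputs);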