//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/Logging.hpp>
#include <armnn/utility/Timer.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/utility/Assert.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#endif
#if defined(ARMNN_ONNX_PARSER)
#include <armnnOnnxParser/IOnnxParser.hpp>
#endif

#include <Filesystem.hpp>
#include <HeapProfiling.hpp>
#include <TensorIOUtils.hpp>

#include "armnn/utility/StringUtils.hpp"
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
#include <boost/variant.hpp>

#include <algorithm>
#include <iterator>
#include <fstream>
#include <map>
#include <string>
#include <vector>
#include <type_traits>

namespace
{

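// Returns true only if every requested backend ID is registered with the BackendRegistry.
// When the optional 'invalidBackendIds' string is supplied, the names of any unknown backends
// are appended to it as a comma-separated list for error reporting.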
inline bool CheckRequestedBackendsAreValid(const std::vector<armnn::BackendId>& backendIds,
                                           armnn::Optional<std::string&> invalidBackendIds = armnn::EmptyOptional())
{
    if (backendIds.empty())
    {
        return false;
    }

    armnn::BackendIdSet validBackendIds = armnn::BackendRegistryInstance().GetBackendIds();

    bool allValid = true;
    for (const auto& backendId : backendIds)
    {
        if (std::find(validBackendIds.begin(), validBackendIds.end(), backendId) == validBackendIds.end())
        {
            allValid = false;
            if (invalidBackendIds)
            {
                if (!invalidBackendIds.value().empty())
                {
                    invalidBackendIds.value() += ", ";
                }
                invalidBackendIds.value() += backendId;
            }
        }
    }
    return allValid;
}

} // anonymous namespace

namespace InferenceModelInternal
{
using BindingPointInfo = armnn::BindingPointInfo;

using QuantizationParams = std::pair<float,int32_t>;

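// Aggregates everything needed to build an InferenceModel: the model file, the input/output
// binding names (and optional input shapes), the backends to run on, and the parsing and
// optimization switches consumed by CreateNetworkImpl and the InferenceModel constructor.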
struct Params
{
    std::string                     m_ModelPath;
    std::vector<std::string>        m_InputBindings;
    std::vector<armnn::TensorShape> m_InputShapes;
    std::vector<std::string>        m_OutputBindings;
    std::vector<armnn::BackendId>   m_ComputeDevices;
    std::string                     m_DynamicBackendsPath;
    size_t                          m_SubgraphId;
    bool                            m_IsModelBinary;
    bool                            m_VisualizePostOptimizationModel;
    bool                            m_EnableFp16TurboMode;
    bool                            m_EnableBf16TurboMode;
    bool                            m_PrintIntermediateLayers;
    bool                            m_ParseUnsupported;

    Params()
        : m_ComputeDevices{}
        , m_SubgraphId(0)
        , m_IsModelBinary(true)
        , m_VisualizePostOptimizationModel(false)
        , m_EnableFp16TurboMode(false)
        , m_EnableBf16TurboMode(false)
        , m_PrintIntermediateLayers(false)
        , m_ParseUnsupported(false)
    {}
};

} // namespace InferenceModelInternal

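// Generic network-creation helper: parses the model file with the given IParser and fills in the
// input/output binding information. The specializations below handle parsers whose interfaces
// differ from the generic one (the Arm NN deserializer, the TfLite parser and the ONNX parser).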
template <typename IParser>
struct CreateNetworkImpl
{
public:
    using Params = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        std::map<std::string, armnn::TensorShape> inputShapes;
        if (!params.m_InputShapes.empty())
        {
            const size_t numInputShapes   = params.m_InputShapes.size();
            const size_t numInputBindings = params.m_InputBindings.size();
            if (numInputShapes < numInputBindings)
            {
                throw armnn::Exception(boost::str(boost::format(
                    "Not every input has its tensor shape specified: expected=%1%, got=%2%")
                    % numInputBindings % numInputShapes));
            }

            for (size_t i = 0; i < numInputShapes; i++)
            {
                inputShapes[params.m_InputBindings[i]] = params.m_InputShapes[i];
            }
        }

        std::vector<std::string> requestedOutputs = params.m_OutputBindings;
        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            // Handle text and binary input differently by calling the corresponding parser function
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str(), inputShapes, requestedOutputs) :
                parser->CreateNetworkFromTextFile(modelPath.c_str(), inputShapes, requestedOutputs));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            inputBindings.push_back(parser->GetNetworkInputBindingInfo(inputLayerName));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            outputBindings.push_back(parser->GetNetworkOutputBindingInfo(outputLayerName));
        }

        return network;
    }
};

#if defined(ARMNN_SERIALIZER)
template <>
struct CreateNetworkImpl<armnnDeserializer::IDeserializer>
{
public:
    using IParser          = armnnDeserializer::IDeserializer;
    using Params           = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        auto parser(IParser::Create());
        ARMNN_ASSERT(parser);

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");

            std::error_code errorCode;
            fs::path pathToFile(params.m_ModelPath);
            if (!fs::exists(pathToFile, errorCode))
            {
                throw armnn::FileNotFoundException(boost::str(
                                                   boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
                                                   params.m_ModelPath %
                                                   errorCode %
                                                   CHECK_LOCATION().AsString()));
            }
            std::ifstream file(params.m_ModelPath, std::ios::binary);

            network = parser->CreateNetworkFromBinary(file);
        }

        unsigned int subgraphId = boost::numeric_cast<unsigned int>(params.m_SubgraphId);

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnnDeserializer::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(subgraphId, inputLayerName);
            inputBindings.push_back(std::make_pair(inputBinding.m_BindingId, inputBinding.m_TensorInfo));
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnnDeserializer::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(subgraphId, outputLayerName);
            outputBindings.push_back(std::make_pair(outputBinding.m_BindingId, outputBinding.m_TensorInfo));
        }

        return network;
    }
};
#endif

#if defined(ARMNN_TF_LITE_PARSER)
template <>
struct CreateNetworkImpl<armnnTfLiteParser::ITfLiteParser>
{
public:
    using IParser = armnnTfLiteParser::ITfLiteParser;
    using Params = InferenceModelInternal::Params;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<armnn::BindingPointInfo>& inputBindings,
                                     std::vector<armnn::BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        IParser::TfLiteParserOptions options;
        options.m_StandInLayerForUnsupported = params.m_ParseUnsupported;
        auto parser(IParser::Create(options));

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            armnn::BindingPointInfo inputBinding =
                parser->GetNetworkInputBindingInfo(params.m_SubgraphId, inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            armnn::BindingPointInfo outputBinding =
                parser->GetNetworkOutputBindingInfo(params.m_SubgraphId, outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif

#if defined(ARMNN_ONNX_PARSER)
template <>
struct CreateNetworkImpl<armnnOnnxParser::IOnnxParser>
{
public:
    using IParser = armnnOnnxParser::IOnnxParser;
    using Params = InferenceModelInternal::Params;
    using BindingPointInfo = InferenceModelInternal::BindingPointInfo;

    static armnn::INetworkPtr Create(const Params& params,
                                     std::vector<BindingPointInfo>& inputBindings,
                                     std::vector<BindingPointInfo>& outputBindings)
    {
        const std::string& modelPath = params.m_ModelPath;

        // Create a network from a file on disk
        auto parser(IParser::Create());

        armnn::INetworkPtr network{nullptr, [](armnn::INetwork *){}};

        {
            ARMNN_SCOPED_HEAP_PROFILING("Parsing");
            network = (params.m_IsModelBinary ?
                parser->CreateNetworkFromBinaryFile(modelPath.c_str()) :
                parser->CreateNetworkFromTextFile(modelPath.c_str()));
        }

        for (const std::string& inputLayerName : params.m_InputBindings)
        {
            BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(inputLayerName);
            inputBindings.push_back(inputBinding);
        }

        for (const std::string& outputLayerName : params.m_OutputBindings)
        {
            BindingPointInfo outputBinding = parser->GetNetworkOutputBindingInfo(outputLayerName);
            outputBindings.push_back(outputBinding);
        }

        return network;
    }
};
#endif

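// Owns a parsed, optimized and loaded network together with its runtime, and exposes helpers to
// query input/output bindings and quantization parameters and to run inference on user-supplied
// input/output containers.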
template <typename IParser, typename TDataType>
class InferenceModel
{
public:
    using DataType           = TDataType;
    using Params             = InferenceModelInternal::Params;
    using QuantizationParams = InferenceModelInternal::QuantizationParams;
    using TContainer         = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    struct CommandLineOptions
    {
        std::string m_ModelDir;
        std::vector<std::string> m_ComputeDevices;
        std::string m_DynamicBackendsPath;
        bool m_VisualizePostOptimizationModel;
        bool m_EnableFp16TurboMode;
        bool m_EnableBf16TurboMode;
        std::string m_Labels;

        std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
        {
            std::vector<armnn::BackendId> backendIds;
            std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
            return backendIds;
        }
    };

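    // Registers the command-line switches understood by this model (model directory, compute
    // devices, dynamic backend path, labels file and the FP16/BF16 turbo modes) with the given
    // boost::program_options description, binding them to the supplied CommandLineOptions.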
    static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
    {
        namespace po = boost::program_options;

        const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };

        const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
                                          + armnn::BackendRegistryInstance().GetBackendIdsAsString();

        desc.add_options()
            ("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
                "Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
            ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
                default_value(defaultComputes, armnn::stringUtils::StringConcat(defaultComputes, ", "))->
                multitoken(), backendsMessage.c_str())
            ("dynamic-backends-path,b", po::value(&options.m_DynamicBackendsPath),
                "Path from which to load any available dynamic backends. "
                "If left empty (the default), dynamic backends will not be used.")
            ("labels,l", po::value<std::string>(&options.m_Labels),
                "Text file containing one image filename - correct label pair per line, "
                "used to test the accuracy of the network.")
            ("visualize-optimized-model,v",
                po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
                "Produce a dot file useful for visualizing the graph post optimization. "
                "The file will have the same name as the model with the .dot extension.")
            ("fp16-turbo-mode", po::value<bool>(&options.m_EnableFp16TurboMode)->default_value(false),
                "If this option is enabled, FP32 layers, weights and biases will be converted "
                "to FP16 where the backend supports it.")
            ("bf16-turbo-mode", po::value<bool>(&options.m_EnableBf16TurboMode)->default_value(false),
                "If this option is enabled, FP32 layers, weights and biases will be converted "
                "to BF16 where the backend supports it.");
    }

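    // Builds the model ready for execution: obtains (or creates) the runtime, validates the
    // requested backends, parses the model file, optimizes the resulting network for the chosen
    // backends (optionally dumping it as a .dot file) and finally loads it onto the runtime.
    // Throws armnn::Exception if any of these steps fail.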
    InferenceModel(const Params& params,
                   bool enableProfiling,
                   const std::string& dynamicBackendsPath,
                   const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
        : m_EnableProfiling(enableProfiling)
        , m_DynamicBackendsPath(dynamicBackendsPath)
    {
        if (runtime)
        {
            m_Runtime = runtime;
        }
        else
        {
            armnn::IRuntime::CreationOptions options;
            options.m_EnableGpuProfiling = m_EnableProfiling;
            options.m_DynamicBackendsPath = m_DynamicBackendsPath;
            m_Runtime = std::move(armnn::IRuntime::Create(options));
        }

        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
        }

        const auto parsing_start_time = armnn::GetTimeNow();
        armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);

        ARMNN_LOG(info) << "Network parsing time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(parsing_start_time).count() << " ms\n";

        armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
        {
            ARMNN_SCOPED_HEAP_PROFILING("Optimizing");

            armnn::OptimizerOptions options;
            options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
            options.m_ReduceFp32ToBf16 = params.m_EnableBf16TurboMode;
            options.m_Debug = params.m_PrintIntermediateLayers;

            const auto optimization_start_time = armnn::GetTimeNow();
            optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);

            ARMNN_LOG(info) << "Optimization time: " << std::setprecision(2)
                            << std::fixed << armnn::GetTimeDuration(optimization_start_time).count() << " ms\n";

            if (!optNet)
            {
                throw armnn::Exception("Optimize returned nullptr");
            }
        }

        if (params.m_VisualizePostOptimizationModel)
        {
            fs::path filename = params.m_ModelPath;
            filename.replace_extension("dot");
            std::fstream file(filename.c_str(), std::ios_base::out);
            optNet->SerializeToDot(file);
        }

        armnn::Status ret;
        {
            ARMNN_SCOPED_HEAP_PROFILING("LoadNetwork");
            ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, std::move(optNet));
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::LoadNetwork failed");
        }
    }

    void CheckInputIndexIsValid(unsigned int inputIndex) const
    {
        if (m_InputBindings.size() < inputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Input index out of range: %1%") % inputIndex));
        }
    }

    void CheckOutputIndexIsValid(unsigned int outputIndex) const
    {
        if (m_OutputBindings.size() < outputIndex + 1)
        {
            throw armnn::Exception(boost::str(boost::format("Output index out of range: %1%") % outputIndex));
        }
    }

    unsigned int GetInputSize(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex].second.GetNumElements();
    }

    unsigned int GetOutputSize(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex].second.GetNumElements();
    }

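    // Runs a single inference: validates that every output container is large enough for its
    // binding, enqueues the workload on the runtime and returns the wall-clock duration of
    // EnqueueWorkload in milliseconds. Profiling results are printed when profiling is enabled.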
    std::chrono::duration<double, std::milli> Run(
            const std::vector<TContainer>& inputContainers,
            std::vector<TContainer>& outputContainers)
    {
        for (unsigned int i = 0; i < outputContainers.size(); ++i)
        {
            const unsigned int expectedOutputDataSize = GetOutputSize(i);

            boost::apply_visitor([expectedOutputDataSize, i](auto&& value)
            {
                const unsigned int actualOutputDataSize   = boost::numeric_cast<unsigned int>(value.size());
                if (actualOutputDataSize < expectedOutputDataSize)
                {
                    unsigned int outputIndex = boost::numeric_cast<unsigned int>(i);
                    throw armnn::Exception(
                            boost::str(boost::format("Not enough data for output #%1%: expected "
                            "%2% elements, got %3%") % outputIndex % expectedOutputDataSize % actualOutputDataSize));
                }
            },
            outputContainers[i]);
        }

        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkIdentifier);
        if (profiler)
        {
            profiler->EnableProfiling(m_EnableProfiling);
        }

        // Start timer to record inference time in EnqueueWorkload (in milliseconds)
        const auto start_time = armnn::GetTimeNow();

        armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
                                                       MakeInputTensors(inputContainers),
                                                       MakeOutputTensors(outputContainers));

        const auto duration = armnn::GetTimeDuration(start_time);

        // if profiling is enabled print out the results
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }

        if (ret == armnn::Status::Failure)
        {
            throw armnn::Exception("IRuntime::EnqueueWorkload failed");
        }
        else
        {
            return duration;
        }
    }

    const armnn::BindingPointInfo& GetInputBindingInfo(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return m_InputBindings[inputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetInputBindingInfos() const
    {
        return m_InputBindings;
    }

    const armnn::BindingPointInfo& GetOutputBindingInfo(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return m_OutputBindings[outputIndex];
    }

    const std::vector<armnn::BindingPointInfo>& GetOutputBindingInfos() const
    {
        return m_OutputBindings;
    }

    QuantizationParams GetQuantizationParams(unsigned int outputIndex = 0u) const
    {
        CheckOutputIndexIsValid(outputIndex);
        return std::make_pair(m_OutputBindings[outputIndex].second.GetQuantizationScale(),
                              m_OutputBindings[outputIndex].second.GetQuantizationOffset());
    }

    QuantizationParams GetInputQuantizationParams(unsigned int inputIndex = 0u) const
    {
        CheckInputIndexIsValid(inputIndex);
        return std::make_pair(m_InputBindings[inputIndex].second.GetQuantizationScale(),
                              m_InputBindings[inputIndex].second.GetQuantizationOffset());
    }

    std::vector<QuantizationParams> GetAllQuantizationParams() const
    {
        std::vector<QuantizationParams> quantizationParams;
        for (unsigned int i = 0u; i < m_OutputBindings.size(); i++)
        {
            quantizationParams.push_back(GetQuantizationParams(i));
        }
        return quantizationParams;
    }

private:
    armnn::NetworkId m_NetworkIdentifier;
    std::shared_ptr<armnn::IRuntime> m_Runtime;

    std::vector<armnn::BindingPointInfo> m_InputBindings;
    std::vector<armnn::BindingPointInfo> m_OutputBindings;
    bool m_EnableProfiling;
    std::string m_DynamicBackendsPath;

    template<typename TContainer>
    armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
    {
        return armnnUtils::MakeInputTensors(m_InputBindings, inputDataContainers);
    }

    template<typename TContainer>
    armnn::OutputTensors MakeOutputTensors(std::vector<TContainer>& outputDataContainers)
    {
        return armnnUtils::MakeOutputTensors(m_OutputBindings, outputDataContainers);
    }
};
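
// A minimal usage sketch (illustrative only): the parser type, model path and binding names
// below are hypothetical and depend on how the model was built and exported.
//
//     using Model = InferenceModel<armnnTfLiteParser::ITfLiteParser, float>;
//
//     Model::Params params;
//     params.m_ModelPath      = "model.tflite";               // hypothetical path
//     params.m_InputBindings  = { "input" };                  // hypothetical tensor name
//     params.m_OutputBindings = { "output" };                 // hypothetical tensor name
//     params.m_ComputeDevices = { armnn::Compute::CpuRef };
//
//     Model model(params, /*enableProfiling=*/false, /*dynamicBackendsPath=*/"");
//
//     std::vector<Model::TContainer> inputs  = { std::vector<float>(model.GetInputSize()) };
//     std::vector<Model::TContainer> outputs = { std::vector<float>(model.GetOutputSize()) };
//     auto inferenceDuration = model.Run(inputs, outputs);    // duration in milliseconds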