// tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/TypesUtils.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_CAFFE_PARSER)
#include "armnnCaffeParser/ICaffeParser.hpp"
#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#include "CsvReader.hpp"
#include "../InferenceTest.hpp"

#include <Logging.hpp>
#include <Profiling.hpp>

#include <boost/algorithm/string/trim.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/program_options.hpp>
#include <boost/variant.hpp>

#include <iostream>
#include <iomanip>
#include <fstream>
#include <sstream>
#include <functional>
#include <future>
#include <algorithm>
#include <iterator>

namespace
{

// Alias for boost::program_options, used throughout for command-line parsing and validation.
namespace po = boost::program_options;

template<typename T, typename TParseElementFunc>
std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc, const char* chars = "\t ,:")
{
    std::vector<T> result;
    // Processes line-by-line.
    std::string line;
    while (std::getline(stream, line))
    {
        std::vector<std::string> tokens;
        try
        {
            // Coverity fix: boost::split() may throw an exception of type boost::bad_function_call.
            boost::split(tokens, line, boost::algorithm::is_any_of(chars), boost::token_compress_on);
        }
        catch (const std::exception& e)
        {
            BOOST_LOG_TRIVIAL(error) << "An error occurred when splitting tokens: " << e.what();
            continue;
        }
        for (const std::string& token : tokens)
        {
            if (!token.empty()) // See https://stackoverflow.com/questions/10437406/
            {
                try
                {
                    result.push_back(parseElementFunc(token));
                }
                catch (const std::exception&)
                {
                    BOOST_LOG_TRIVIAL(error) << "'" << token << "' is not a valid number. It has been ignored.";
                }
            }
        }
    }

    return result;
}
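
// Usage sketch (illustrative): given a stream containing "1 2,3\n4", the
// default separators tokenize on tabs, spaces, commas and colons, empty
// tokens are skipped, and unparsable tokens are logged and ignored.
//
//     std::istringstream input("1 2,3\n4");
//     std::vector<int> values =
//         ParseArrayImpl<int>(input, [](const std::string& s) { return std::stoi(s); });
//     // values == {1, 2, 3, 4}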

bool CheckOption(const po::variables_map& vm,
                 const char* option)
{
    // Check that the given option is valid.
    if (option == nullptr)
    {
        return false;
    }

    // Check whether 'option' is provided.
    return vm.find(option) != vm.end();
}

void CheckOptionDependency(const po::variables_map& vm,
                           const char* option,
                           const char* required)
{
    // Check that the given options are valid.
    if (option == nullptr || required == nullptr)
    {
        throw po::error("Invalid option to check dependency for");
    }

    // Check that if 'option' is provided, 'required' is also provided.
    if (CheckOption(vm, option) && !vm[option].defaulted())
    {
        if (!CheckOption(vm, required) || vm[required].defaulted())
        {
            throw po::error(std::string("Option '") + option + "' requires option '" + required + "'.");
        }
    }
}

void CheckOptionDependencies(const po::variables_map& vm)
{
    CheckOptionDependency(vm, "model-path", "model-format");
    CheckOptionDependency(vm, "model-path", "input-name");
    CheckOptionDependency(vm, "model-path", "input-tensor-data");
    CheckOptionDependency(vm, "model-path", "output-name");
    CheckOptionDependency(vm, "input-tensor-shape", "model-path");
}
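
// Example: a command line containing "--model-path model.caffemodel" but no
// "--model-format" fails CheckOptionDependencies with a po::error reading
// "Option 'model-path' requires option 'model-format'.".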

template<armnn::DataType NonQuantizedType>
auto ParseDataArray(std::istream& stream);

template<armnn::DataType QuantizedType>
auto ParseDataArray(std::istream& stream,
                    const float& quantizationScale,
                    const int32_t& quantizationOffset);

template<>
auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
{
    return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
}

template<>
auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
{
    return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
}

template<>
auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream)
{
    return ParseArrayImpl<uint8_t>(stream,
                                   [](const std::string& s) { return boost::numeric_cast<uint8_t>(std::stoi(s)); });
}

template<>
auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
                                                      const float& quantizationScale,
                                                      const int32_t& quantizationOffset)
{
    return ParseArrayImpl<uint8_t>(stream,
                                   [&quantizationScale, &quantizationOffset](const std::string& s)
                                   {
                                       return boost::numeric_cast<uint8_t>(
                                           armnn::Quantize<uint8_t>(std::stof(s),
                                                                    quantizationScale,
                                                                    quantizationOffset));
                                   });
}
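
// Worked example (illustrative values): with quantizationScale = 0.5f and
// quantizationOffset = 10, the token "2.0" quantizes to
// round(2.0 / 0.5) + 10 = 14, and "-5.0" to round(-5.0 / 0.5) + 10 = 0; the
// result is clamped to the [0, 255] range of uint8_t.
//
//     std::istringstream input("2.0 -5.0");
//     std::vector<uint8_t> quantized =
//         ParseDataArray<armnn::DataType::QuantisedAsymm8>(input, 0.5f, 10);
//     // quantized == {14, 0}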

std::vector<unsigned int> ParseArray(std::istream& stream)
{
    return ParseArrayImpl<unsigned int>(stream,
        [](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); });
}

std::vector<std::string> ParseStringList(const std::string& inputString, const char* delimiter)
{
    std::stringstream stream(inputString);
    return ParseArrayImpl<std::string>(stream, [](const std::string& s) { return boost::trim_copy(s); }, delimiter);
}
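
// Example: ParseStringList("input_a, input_b", ",") splits on the delimiter
// and trims surrounding whitespace from each token, yielding
// {"input_a", "input_b"}.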

void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
{
    // Mark the duplicate devices as 'Undefined'.
    for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
    {
        for (auto j = std::next(i); j != computeDevices.end(); ++j)
        {
            if (*j == *i)
            {
                *j = armnn::Compute::Undefined;
            }
        }
    }

    // Remove 'Undefined' devices.
    computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
                         computeDevices.end());
}
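
// Example: {CpuAcc, GpuAcc, CpuAcc, CpuRef} becomes {CpuAcc, GpuAcc, CpuRef};
// the first occurrence of each backend keeps its position in the preference
// order.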

struct TensorPrinter : public boost::static_visitor<>
{
    TensorPrinter(const std::string& binding, const armnn::TensorInfo& info, const std::string& outputTensorFile)
        : m_OutputBinding(binding)
        , m_Scale(info.GetQuantizationScale())
        , m_Offset(info.GetQuantizationOffset())
        , m_OutputTensorFile(outputTensorFile)
    {}

    void operator()(const std::vector<float>& values)
    {
        ForEachValue(values, [](float value)
            {
                printf("%f ", value);
            });
        WriteToFile(values);
    }

    void operator()(const std::vector<uint8_t>& values)
    {
        auto& scale = m_Scale;
        auto& offset = m_Offset;
        std::vector<float> dequantizedValues;
        ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
            {
                auto dequantizedValue = armnn::Dequantize(value, scale, offset);
                printf("%f ", dequantizedValue);
                dequantizedValues.push_back(dequantizedValue);
            });
        WriteToFile(dequantizedValues);
    }

    void operator()(const std::vector<int>& values)
    {
        ForEachValue(values, [](int value)
            {
                printf("%d ", value);
            });
        WriteToFile(values);
    }

private:
    template<typename Container, typename Delegate>
    void ForEachValue(const Container& c, Delegate delegate)
    {
        std::cout << m_OutputBinding << ": ";
        for (const auto& value : c)
        {
            delegate(value);
        }
        printf("\n");
    }

    template<typename T>
    void WriteToFile(const std::vector<T>& values)
    {
        if (!m_OutputTensorFile.empty())
        {
            std::ofstream outputTensorFile;
            outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
            if (outputTensorFile.is_open())
            {
                outputTensorFile << m_OutputBinding << ": ";
                std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
            }
            else
            {
                BOOST_LOG_TRIVIAL(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
            }
            outputTensorFile.close();
        }
    }

    std::string m_OutputBinding;
    float m_Scale = 0.0f;
    int m_Offset = 0;
    std::string m_OutputTensorFile;
};
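
// Usage sketch (illustrative; the binding name, TensorInfo and file path are
// hypothetical): TensorPrinter is applied to each output container via
// boost::apply_visitor, which dispatches to the operator() overload matching
// the element type currently held by the variant.
//
//     boost::variant<std::vector<float>, std::vector<int>,
//                    std::vector<uint8_t>> output = std::vector<float>{1.0f, 2.0f};
//     TensorPrinter printer("softmax:0", outputTensorInfo, "output.txt");
//     boost::apply_visitor(printer, output); // prints "softmax:0: 1.000000 2.000000"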

} // namespace

template<typename TParser, typename TDataType>
int MainImpl(const char* modelPath,
             bool isModelBinary,
             const std::vector<armnn::BackendId>& computeDevices,
             const std::string& dynamicBackendsPath,
             const std::vector<std::string>& inputNames,
             const std::vector<std::unique_ptr<armnn::TensorShape>>& inputTensorShapes,
             const std::vector<std::string>& inputTensorDataFilePaths,
             const std::vector<std::string>& inputTypes,
             bool quantizeInput,
             const std::vector<std::string>& outputTypes,
             const std::vector<std::string>& outputNames,
             const std::vector<std::string>& outputTensorFiles,
             bool enableProfiling,
             bool enableFp16TurboMode,
             const double& thresholdTime,
             bool printIntermediate,
             const size_t subgraphId,
             bool enableLayerDetails = false,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    std::vector<TContainer> inputDataContainers;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params params;
        params.m_ModelPath = modelPath;
        params.m_IsModelBinary = isModelBinary;
        params.m_ComputeDevices = computeDevices;
        params.m_DynamicBackendsPath = dynamicBackendsPath;
        params.m_PrintIntermediateLayers = printIntermediate;
        params.m_VisualizePostOptimizationModel = enableLayerDetails;

        for (const std::string& inputName : inputNames)
        {
            params.m_InputBindings.push_back(inputName);
        }

        for (unsigned int i = 0; i < inputTensorShapes.size(); ++i)
        {
            params.m_InputShapes.push_back(*inputTensorShapes[i]);
        }

        for (const std::string& outputName : outputNames)
        {
            params.m_OutputBindings.push_back(outputName);
        }

        params.m_SubgraphId = subgraphId;
        params.m_EnableFp16TurboMode = enableFp16TurboMode;
        InferenceModel<TParser, TDataType> model(params, enableProfiling, dynamicBackendsPath, runtime);

        for (unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i)
        {
            std::ifstream inputTensorFile(inputTensorDataFilePaths[i]);

            if (inputTypes[i].compare("float") == 0)
            {
                if (quantizeInput)
                {
                    auto inputBinding = model.GetInputBindingInfo();
                    inputDataContainers.push_back(
                        ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile,
                                                                         inputBinding.second.GetQuantizationScale(),
                                                                         inputBinding.second.GetQuantizationOffset()));
                }
                else
                {
                    inputDataContainers.push_back(
                        ParseDataArray<armnn::DataType::Float32>(inputTensorFile));
                }
            }
            else if (inputTypes[i].compare("int") == 0)
            {
                inputDataContainers.push_back(
                    ParseDataArray<armnn::DataType::Signed32>(inputTensorFile));
            }
            else if (inputTypes[i].compare("qasymm8") == 0)
            {
                inputDataContainers.push_back(
                    ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile));
            }
            else
            {
                BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << inputTypes[i] << "\". ";
                return EXIT_FAILURE;
            }

            inputTensorFile.close();
        }

        const size_t numOutputs = params.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (outputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (outputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (outputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else
            {
                BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << outputTypes[i] << "\". ";
                return EXIT_FAILURE;
            }
        }

        // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds).
        auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

        // Print the output tensors.
        const auto& infosOut = model.GetOutputBindingInfos();
        for (size_t i = 0; i < numOutputs; i++)
        {
            const armnn::TensorInfo& infoOut = infosOut[i].second;
            auto outputTensorFile = outputTensorFiles.empty() ? "" : outputTensorFiles[i];
            TensorPrinter printer(params.m_OutputBindings[i], infoOut, outputTensorFile);
            boost::apply_visitor(printer, outputDataContainers[i]);
        }

        BOOST_LOG_TRIVIAL(info) << "\nInference time: " << std::setprecision(2)
                                << std::fixed << inference_duration.count() << " ms";

        // If thresholdTime == 0.0 (default), then it hasn't been supplied at the command line.
        if (thresholdTime != 0.0)
        {
            BOOST_LOG_TRIVIAL(info) << "Threshold time: " << std::setprecision(2)
                                    << std::fixed << thresholdTime << " ms";
            auto thresholdMinusInference = thresholdTime - inference_duration.count();
            BOOST_LOG_TRIVIAL(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                    << std::fixed << thresholdMinusInference << " ms" << "\n";

            if (thresholdMinusInference < 0)
            {
                BOOST_LOG_TRIVIAL(fatal) << "Elapsed inference time is greater than provided threshold time.\n";
                return EXIT_FAILURE;
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        BOOST_LOG_TRIVIAL(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}

// Runs a single test, parsing the command-line strings into typed arguments.
int RunTest(const std::string& format,
            const std::string& inputTensorShapesStr,
            const std::vector<armnn::BackendId>& computeDevice,
            const std::string& dynamicBackendsPath,
            const std::string& path,
            const std::string& inputNames,
            const std::string& inputTensorDataFilePaths,
            const std::string& inputTypes,
            bool quantizeInput,
            const std::string& outputTypes,
            const std::string& outputNames,
            const std::string& outputTensorFiles,
            bool enableProfiling,
            bool enableFp16TurboMode,
            const double& thresholdTime,
            bool printIntermediate,
            const size_t subgraphId,
            bool enableLayerDetails = false,
            const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    std::string modelFormat = boost::trim_copy(format);
    std::string modelPath = boost::trim_copy(path);
    std::vector<std::string> inputNamesVector = ParseStringList(inputNames, ",");
    std::vector<std::string> inputTensorShapesVector = ParseStringList(inputTensorShapesStr, ";");
    std::vector<std::string> inputTensorDataFilePathsVector = ParseStringList(
        inputTensorDataFilePaths, ",");
    std::vector<std::string> outputNamesVector = ParseStringList(outputNames, ",");
    std::vector<std::string> inputTypesVector = ParseStringList(inputTypes, ",");
    std::vector<std::string> outputTypesVector = ParseStringList(outputTypes, ",");
    std::vector<std::string> outputTensorFilesVector = ParseStringList(outputTensorFiles, ",");

    // Parse the model binary flag from the model-format string we got from the command line.
    bool isModelBinary;
    if (modelFormat.find("bin") != std::string::npos)
    {
        isModelBinary = true;
    }
    else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
    {
        isModelBinary = false;
    }
    else
    {
        BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'";
        return EXIT_FAILURE;
    }

    if ((inputTensorShapesVector.size() != 0) && (inputTensorShapesVector.size() != inputNamesVector.size()))
    {
        BOOST_LOG_TRIVIAL(fatal) << "input-name and input-tensor-shape must have the same number of elements.";
        return EXIT_FAILURE;
    }

    if ((inputTensorDataFilePathsVector.size() != 0) &&
        (inputTensorDataFilePathsVector.size() != inputNamesVector.size()))
    {
        BOOST_LOG_TRIVIAL(fatal) << "input-name and input-tensor-data must have the same number of elements.";
        return EXIT_FAILURE;
    }

    if ((outputTensorFilesVector.size() != 0) &&
        (outputTensorFilesVector.size() != outputNamesVector.size()))
    {
        BOOST_LOG_TRIVIAL(fatal) << "output-name and write-outputs-to-file must have the same number of elements.";
        return EXIT_FAILURE;
    }

    if (inputTypesVector.size() == 0)
    {
        // Default the type of all inputs to "float".
        inputTypesVector.assign(inputNamesVector.size(), "float");
    }
    else if (inputTypesVector.size() != inputNamesVector.size())
    {
        BOOST_LOG_TRIVIAL(fatal) << "input-name and input-type must have the same number of elements.";
        return EXIT_FAILURE;
    }

    if (outputTypesVector.size() == 0)
    {
        // Default the type of all outputs to "float".
        outputTypesVector.assign(outputNamesVector.size(), "float");
    }
    else if (outputTypesVector.size() != outputNamesVector.size())
    {
        BOOST_LOG_TRIVIAL(fatal) << "output-name and output-type must have the same number of elements.";
        return EXIT_FAILURE;
    }

    // Parse the input tensor shapes from the strings we got from the command line.
    std::vector<std::unique_ptr<armnn::TensorShape>> inputTensorShapes;

    if (!inputTensorShapesVector.empty())
    {
        inputTensorShapes.reserve(inputTensorShapesVector.size());

        for (const std::string& shape : inputTensorShapesVector)
        {
            std::stringstream ss(shape);
            std::vector<unsigned int> dims = ParseArray(ss);

            try
            {
                // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
                inputTensorShapes.push_back(std::make_unique<armnn::TensorShape>(dims.size(), dims.data()));
            }
            catch (const armnn::InvalidArgumentException& e)
            {
                BOOST_LOG_TRIVIAL(fatal) << "Cannot create tensor shape: " << e.what();
                return EXIT_FAILURE;
            }
        }
    }

    // Check that the threshold time is not less than zero.
    if (thresholdTime < 0)
    {
        BOOST_LOG_TRIVIAL(fatal) << "Threshold time supplied as a command line argument is less than zero.";
        return EXIT_FAILURE;
    }

    // Forward to the implementation based on the parser type.
    if (modelFormat.find("armnn") != std::string::npos)
    {
#if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(
            modelPath.c_str(), isModelBinary, computeDevice,
            dynamicBackendsPath, inputNamesVector, inputTensorShapes,
            inputTensorDataFilePathsVector, inputTypesVector, quantizeInput,
            outputTypesVector, outputNamesVector, outputTensorFilesVector, enableProfiling,
            enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId, enableLayerDetails, runtime);
#else
        BOOST_LOG_TRIVIAL(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
#endif
    }
    else if (modelFormat.find("caffe") != std::string::npos)
    {
#if defined(ARMNN_CAFFE_PARSER)
        return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                               dynamicBackendsPath,
                                                               inputNamesVector, inputTensorShapes,
                                                               inputTensorDataFilePathsVector, inputTypesVector,
                                                               quantizeInput, outputTypesVector, outputNamesVector,
                                                               outputTensorFilesVector, enableProfiling,
                                                               enableFp16TurboMode, thresholdTime,
                                                               printIntermediate, subgraphId, enableLayerDetails,
                                                               runtime);
#else
        BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
        return EXIT_FAILURE;
#endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
#if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                             dynamicBackendsPath,
                                                             inputNamesVector, inputTensorShapes,
                                                             inputTensorDataFilePathsVector, inputTypesVector,
                                                             quantizeInput, outputTypesVector, outputNamesVector,
                                                             outputTensorFilesVector, enableProfiling,
                                                             enableFp16TurboMode, thresholdTime, printIntermediate,
                                                             subgraphId, enableLayerDetails, runtime);
#else
        BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
        return EXIT_FAILURE;
#endif
    }
    else if (modelFormat.find("tensorflow") != std::string::npos)
    {
#if defined(ARMNN_TF_PARSER)
        return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                         dynamicBackendsPath,
                                                         inputNamesVector, inputTensorShapes,
                                                         inputTensorDataFilePathsVector, inputTypesVector,
                                                         quantizeInput, outputTypesVector, outputNamesVector,
                                                         outputTensorFilesVector, enableProfiling, enableFp16TurboMode,
                                                         thresholdTime, printIntermediate, subgraphId,
                                                         enableLayerDetails, runtime);
#else
        BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
        return EXIT_FAILURE;
#endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
#if defined(ARMNN_TF_LITE_PARSER)
        if (!isModelBinary)
        {
            BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat
                                     << "'. Only 'binary' format supported for tflite files";
            return EXIT_FAILURE;
        }
        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
                                                                 dynamicBackendsPath,
                                                                 inputNamesVector, inputTensorShapes,
                                                                 inputTensorDataFilePathsVector, inputTypesVector,
                                                                 quantizeInput, outputTypesVector, outputNamesVector,
                                                                 outputTensorFilesVector, enableProfiling,
                                                                 enableFp16TurboMode, thresholdTime, printIntermediate,
                                                                 subgraphId, enableLayerDetails, runtime);
#else
        BOOST_LOG_TRIVIAL(fatal) << "Not built with Tflite parser support.";
        return EXIT_FAILURE;
#endif
    }
    else
    {
        BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat
                                 << "'. Please include 'armnn', 'caffe', 'tensorflow', 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}
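
// Example call (hypothetical values): run a binary TfLite model on the Neon
// backend, feeding one float input tensor from a whitespace-separated file.
//
//     RunTest("tflite-binary", "1,224,224,3", {armnn::Compute::CpuAcc}, "",
//             "model.tflite", "input", "input.txt", "float",
//             false, "float", "output", "", false, false, 0.0, false, 0);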

int RunCsvTest(const armnnUtils::CsvRow& csvRow, const std::shared_ptr<armnn::IRuntime>& runtime,
               const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime,
               const bool printIntermediate, bool enableLayerDetails = false)
{
    std::string modelFormat;
    std::string modelPath;
    std::string inputNames;
    std::string inputTensorShapes;
    std::string inputTensorDataFilePaths;
    std::string outputNames;
    std::string inputTypes;
    std::string outputTypes;
    std::string dynamicBackendsPath;
    std::string outputTensorFiles;

    size_t subgraphId = 0;

    const std::string backendsMessage = std::string("The preferred order of devices to run layers on by default. ")
                                      + std::string("Possible choices: ")
                                      + armnn::BackendRegistryInstance().GetBackendIdsAsString();

    po::options_description desc("Options");
    try
    {
        desc.add_options()
        ("model-format,f", po::value(&modelFormat),
         "armnn-binary, caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or "
         "tensorflow-text.")
        ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .armnn, .caffemodel, .prototxt, "
         ".tflite, .onnx")
        ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
         backendsMessage.c_str())
        ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
         "Path where to load any available dynamic backend from. "
         "If left empty (the default), dynamic backends will not be used.")
        ("input-name,i", po::value(&inputNames),
         "Identifier of the input tensors in the network, separated by commas.")
        ("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
         "executed. Defaults to 0.")
        ("input-tensor-shape,s", po::value(&inputTensorShapes),
         "The shape of the input tensors in the network as a flat array of integers separated by commas. "
         "Several shapes can be passed by separating them with semicolons. "
         "This parameter is optional, depending on the network.")
        ("input-tensor-data,d", po::value(&inputTensorDataFilePaths),
         "Paths to files containing the input data as a flat array separated by whitespace. "
         "Several paths can be passed by separating them with commas.")
        ("input-type,y", po::value(&inputTypes), "The type of the input tensors in the network, separated by commas. "
         "If unset, defaults to \"float\" for all defined inputs. "
         "Accepted values (float, int or qasymm8).")
        ("quantize-input,q", po::bool_switch()->default_value(false),
         "If this option is enabled, all float inputs will be quantized to qasymm8. "
         "If unset, defaults to not quantized. "
         "Accepted values (true or false)")
        ("output-type,z", po::value(&outputTypes),
         "The type of the output tensors in the network, separated by commas. "
         "If unset, defaults to \"float\" for all defined outputs. "
         "Accepted values (float, int or qasymm8).")
        ("output-name,o", po::value(&outputNames),
         "Identifier of the output tensors in the network, separated by commas.")
        ("write-outputs-to-file,w", po::value(&outputTensorFiles),
         "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
         "If left empty (the default), the output tensors will not be written to a file.");
    }
    catch (const std::exception& e)
    {
        // Coverity points out that default_value(...) can throw a bad_lexical_cast,
        // and that desc.add_options() can throw boost::io::too_few_args.
        // They really won't in any of these cases.
        BOOST_ASSERT_MSG(false, "Caught unexpected exception");
        BOOST_LOG_TRIVIAL(fatal) << "Fatal internal error: " << e.what();
        return EXIT_FAILURE;
    }

    std::vector<const char*> clOptions;
    clOptions.reserve(csvRow.values.size());
    for (const std::string& value : csvRow.values)
    {
        clOptions.push_back(value.c_str());
    }

    po::variables_map vm;
    try
    {
        po::store(po::parse_command_line(static_cast<int>(clOptions.size()), clOptions.data(), desc), vm);

        po::notify(vm);

        CheckOptionDependencies(vm);
    }
    catch (const po::error& e)
    {
        std::cerr << e.what() << std::endl << std::endl;
        std::cerr << desc << std::endl;
        return EXIT_FAILURE;
    }

    // Get the value of the switch arguments.
    bool quantizeInput = vm["quantize-input"].as<bool>();

    // Get the preferred order of compute devices.
    std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();

    // Remove duplicates from the list of compute devices.
    RemoveDuplicateDevices(computeDevices);

    // Check that the specified compute devices are valid.
    std::string invalidBackends;
    if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional<std::string&>(invalidBackends)))
    {
        BOOST_LOG_TRIVIAL(fatal) << "The list of preferred devices contains invalid backend IDs: "
                                 << invalidBackends;
        return EXIT_FAILURE;
    }

    return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                   inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles,
                   enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId,
                   enableLayerDetails);
}
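
// Illustrative CSV row (hypothetical values): each row supplies one complete
// command line, e.g.
//
//     ExecuteNetwork, --model-format=tflite-binary, --model-path=model.tflite,
//     --compute=CpuAcc, --input-name=input, --input-tensor-data=input.txt,
//     --output-name=output
//
// Because the row is re-parsed with po::parse_command_line, its first value
// is consumed as the program name (argv[0]) and the rest as options.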