// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
5 #include "armnn/ArmNN.hpp"
7 #include <armnn/TypesUtils.hpp>
9 #if defined(ARMNN_CAFFE_PARSER)
10 #include "armnnCaffeParser/ICaffeParser.hpp"
12 #if defined(ARMNN_TF_PARSER)
13 #include "armnnTfParser/ITfParser.hpp"
15 #if defined(ARMNN_TF_LITE_PARSER)
16 #include "armnnTfLiteParser/ITfLiteParser.hpp"
18 #if defined(ARMNN_ONNX_PARSER)
19 #include "armnnOnnxParser/IOnnxParser.hpp"
21 #include "CsvReader.hpp"
22 #include "../InferenceTest.hpp"
24 #include <Logging.hpp>
25 #include <Profiling.hpp>
27 #include <boost/algorithm/string/trim.hpp>
28 #include <boost/algorithm/string/split.hpp>
29 #include <boost/algorithm/string/classification.hpp>
30 #include <boost/program_options.hpp>
42 // Configure boost::program_options for command-line parsing and validation.
43 namespace po = boost::program_options;
45 template<typename T, typename TParseElementFunc>
46 std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc)
48 std::vector<T> result;
49 // Processes line-by-line.
51 while (std::getline(stream, line))
53 std::vector<std::string> tokens;
56 // Coverity fix: boost::split() may throw an exception of type boost::bad_function_call.
57 boost::split(tokens, line, boost::algorithm::is_any_of("\t ,;:"), boost::token_compress_on);
59 catch (const std::exception& e)
61 BOOST_LOG_TRIVIAL(error) << "An error occurred when splitting tokens: " << e.what();
64 for (const std::string& token : tokens)
66 if (!token.empty()) // See https://stackoverflow.com/questions/10437406/
70 result.push_back(parseElementFunc(token));
72 catch (const std::exception&)
74 BOOST_LOG_TRIVIAL(error) << "'" << token << "' is not a valid number. It has been ignored.";
83 bool CheckOption(const po::variables_map& vm,
86 // Check that the given option is valid.
87 if (option == nullptr)
92 // Check whether 'option' is provided.
93 return vm.find(option) != vm.end();
96 void CheckOptionDependency(const po::variables_map& vm,
100 // Check that the given options are valid.
101 if (option == nullptr || required == nullptr)
103 throw po::error("Invalid option to check dependency for");
106 // Check that if 'option' is provided, 'required' is also provided.
107 if (CheckOption(vm, option) && !vm[option].defaulted())
109 if (CheckOption(vm, required) == 0 || vm[required].defaulted())
111 throw po::error(std::string("Option '") + option + "' requires option '" + required + "'.");
116 void CheckOptionDependencies(const po::variables_map& vm)
118 CheckOptionDependency(vm, "model-path", "model-format");
119 CheckOptionDependency(vm, "model-path", "input-name");
120 CheckOptionDependency(vm, "model-path", "input-tensor-data");
121 CheckOptionDependency(vm, "model-path", "output-name");
122 CheckOptionDependency(vm, "input-tensor-shape", "model-path");
126 std::vector<T> ParseArray(std::istream& stream);
129 std::vector<float> ParseArray(std::istream& stream)
131 return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
135 std::vector<unsigned int> ParseArray(std::istream& stream)
137 return ParseArrayImpl<unsigned int>(stream,
138 [](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); });
// Prints the elements of 'v'.
// NOTE(review): the loop body is truncated in this extract — presumably each
// element is written to stdout; confirm against the full file.
141 void PrintArray(const std::vector<float>& v)
143 for (size_t i = 0; i < v.size(); i++)
150 void RemoveDuplicateDevices(std::vector<armnn::Compute>& computeDevices)
152 // Mark the duplicate devices as 'Undefined'.
153 for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
155 for (auto j = std::next(i); j != computeDevices.end(); ++j)
159 *j = armnn::Compute::Undefined;
164 // Remove 'Undefined' devices.
165 computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
166 computeDevices.end());
169 bool CheckDevicesAreValid(const std::vector<armnn::Compute>& computeDevices)
171 return (!computeDevices.empty()
172 && std::none_of(computeDevices.begin(), computeDevices.end(),
173 [](armnn::Compute c){ return c == armnn::Compute::Undefined; }));
// Parses the network with TParser, loads it into an InferenceModel (which
// creates an IRuntime internally when 'runtime' is null), reads the input
// tensor from 'inputTensorDataFilePath', runs one inference and prints the
// resulting output tensor.
// NOTE(review): this extract is missing lines — e.g. the 'isModelBinary'
// parameter (used below at params.m_IsModelBinary) was declared around the
// original line 180, and braces/try/return statements are absent. Comments
// here only describe what the visible code shows.
178 template<typename TParser, typename TDataType>
179 int MainImpl(const char* modelPath,
181 const std::vector<armnn::Compute>& computeDevice,
182 const char* inputName,
183 const armnn::TensorShape* inputTensorShape,
184 const char* inputTensorDataFilePath,
185 const char* outputName,
186 bool enableProfiling,
187 const size_t subgraphId,
188 const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
190 // Loads input tensor.
191 std::vector<TDataType> input;
193 std::ifstream inputTensorFile(inputTensorDataFilePath);
// Fail fast when the data file cannot be opened or read.
194 if (!inputTensorFile.good())
196 BOOST_LOG_TRIVIAL(fatal) << "Failed to load input tensor data file from " << inputTensorDataFilePath;
// Flat, separator-delimited values parsed via ParseArray<TDataType>.
199 input = ParseArray<TDataType>(inputTensorFile);
204 // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
205 typename InferenceModel<TParser, TDataType>::Params params;
206 params.m_ModelPath = modelPath;
207 params.m_IsModelBinary = isModelBinary;
208 params.m_ComputeDevice = computeDevice;
209 params.m_InputBinding = inputName;
210 params.m_InputTensorShape = inputTensorShape;
211 params.m_OutputBinding = outputName;
212 params.m_EnableProfiling = enableProfiling;
213 params.m_SubgraphId = subgraphId;
214 InferenceModel<TParser, TDataType> model(params, runtime);
216 // Executes the model.
217 std::vector<TDataType> output(model.GetOutputSize());
218 model.Run(input, output);
220 // Prints the output tensor.
// Any ArmNN failure during load/run is reported as fatal.
223 catch (armnn::Exception const& e)
225 BOOST_LOG_TRIVIAL(fatal) << "Armnn Error: " << e.what();
// Runs one inference test: decodes the binary/text flag and the optional
// input tensor shape from their command-line strings, then dispatches to
// MainImpl<Parser, float> according to the parser named in 'modelFormat'
// ("caffe", "tensorflow", "tflite" or "onnx"). Fails fatally when the format
// is unknown or the matching parser was not compiled in.
// NOTE(review): braces, #else/#endif lines and return statements are missing
// from this extract; the numbered gaps mark where they were.
232 // This will run a test
233 int RunTest(const std::string& modelFormat,
234 const std::string& inputTensorShapeStr,
235 const vector<armnn::Compute>& computeDevice,
236 const std::string& modelPath,
237 const std::string& inputName,
238 const std::string& inputTensorDataFilePath,
239 const std::string& outputName,
240 bool enableProfiling,
241 const size_t subgraphId,
242 const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
244 // Parse model binary flag from the model-format string we got from the command-line
// Substring match: "caffe-binary" etc. contain "bin"; "*-text"/"*-txt" mean
// a textual model file.
246 if (modelFormat.find("bin") != std::string::npos)
248 isModelBinary = true;
250 else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
252 isModelBinary = false;
256 BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'";
260 // Parse input tensor shape from the string we got from the command-line.
// Left null when no shape string was supplied; MainImpl passes it through
// as an optional override.
261 std::unique_ptr<armnn::TensorShape> inputTensorShape;
262 if (!inputTensorShapeStr.empty())
264 std::stringstream ss(inputTensorShapeStr);
265 std::vector<unsigned int> dims = ParseArray<unsigned int>(ss);
269 // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
270 inputTensorShape = std::make_unique<armnn::TensorShape>(dims.size(), dims.data());
272 catch (const armnn::InvalidArgumentException& e)
274 BOOST_LOG_TRIVIAL(fatal) << "Cannot create tensor shape: " << e.what();
279 // Forward to implementation based on the parser type
// Each branch is compiled only when the corresponding parser was enabled at
// build time; otherwise it logs a fatal "not built with ... support" error.
280 if (modelFormat.find("caffe") != std::string::npos)
282 #if defined(ARMNN_CAFFE_PARSER)
283 return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
284 inputName.c_str(), inputTensorShape.get(),
285 inputTensorDataFilePath.c_str(), outputName.c_str(),
286 enableProfiling, subgraphId, runtime);
288 BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
292 else if (modelFormat.find("onnx") != std::string::npos)
294 #if defined(ARMNN_ONNX_PARSER)
295 return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
296 inputName.c_str(), inputTensorShape.get(),
297 inputTensorDataFilePath.c_str(), outputName.c_str(),
298 enableProfiling, subgraphId, runtime);
300 BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
304 else if (modelFormat.find("tensorflow") != std::string::npos)
306 #if defined(ARMNN_TF_PARSER)
307 return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
308 inputName.c_str(), inputTensorShape.get(),
309 inputTensorDataFilePath.c_str(), outputName.c_str(),
310 enableProfiling, subgraphId, runtime);
312 BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
// TFLite additionally rejects text-format models (only 'binary' supported).
316 else if(modelFormat.find("tflite") != std::string::npos)
318 #if defined(ARMNN_TF_LITE_PARSER)
321 BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Only 'binary' format supported \
325 return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
326 inputName.c_str(), inputTensorShape.get(),
327 inputTensorDataFilePath.c_str(), outputName.c_str(),
328 enableProfiling, subgraphId, runtime);
330 BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
331 "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
// Fallback for a format string matching none of the known parser names.
337 BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat <<
338 "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
// Runs one test case described by a CSV row: re-parses the row's values as a
// command line (so each row accepts the same options as the program itself),
// validates options and compute devices, then delegates to RunTest on the
// shared 'runtime'.
// NOTE(review): braces and return statements are missing from this extract;
// the numbered gaps mark where they were.
343 int RunCsvTest(const armnnUtils::CsvRow &csvRow,
344 const std::shared_ptr<armnn::IRuntime>& runtime)
346 std::string modelFormat;
347 std::string modelPath;
348 std::string inputName;
349 std::string inputTensorShapeStr;
350 std::string inputTensorDataFilePath;
351 std::string outputName;
353 size_t subgraphId = 0;
// Mirrors the option set declared in main(); keep the two in sync.
355 po::options_description desc("Options");
359 ("model-format,f", po::value(&modelFormat),
360 "caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or tensorflow-text.")
361 ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .caffemodel, .prototxt, .tflite,"
363 ("compute,c", po::value<std::vector<armnn::Compute>>()->multitoken(),
364 "The preferred order of devices to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
365 ("input-name,i", po::value(&inputName), "Identifier of the input tensor in the network.")
366 ("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
367 "executed. Defaults to 0")
368 ("input-tensor-shape,s", po::value(&inputTensorShapeStr),
369 "The shape of the input tensor in the network as a flat array of integers separated by whitespace. "
370 "This parameter is optional, depending on the network.")
371 ("input-tensor-data,d", po::value(&inputTensorDataFilePath),
372 "Path to a file containing the input data as a flat array separated by whitespace.")
373 ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.")
374 ("event-based-profiling,e", po::bool_switch()->default_value(false),
375 "Enables built in profiler. If unset, defaults to off.");
377 catch (const std::exception& e)
379 // Coverity points out that default_value(...) can throw a bad_lexical_cast,
380 // and that desc.add_options() can throw boost::io::too_few_args.
381 // They really won't in any of these cases.
382 BOOST_ASSERT_MSG(false, "Caught unexpected exception");
383 BOOST_LOG_TRIVIAL(fatal) << "Fatal internal error: " << e.what();
// Convert the CSV values into an argv-style array for parse_command_line.
// The c_str() pointers stay valid because csvRow outlives this function.
387 std::vector<const char*> clOptions;
388 clOptions.reserve(csvRow.values.size());
389 for (const std::string& value : csvRow.values)
391 clOptions.push_back(value.c_str());
394 po::variables_map vm;
397 po::store(po::parse_command_line(static_cast<int>(clOptions.size()), clOptions.data(), desc), vm);
401 CheckOptionDependencies(vm);
403 catch (const po::error& e)
405 std::cerr << e.what() << std::endl << std::endl;
406 std::cerr << desc << std::endl;
410 // Remove leading and trailing whitespaces from the parsed arguments.
411 boost::trim(modelFormat);
412 boost::trim(modelPath);
413 boost::trim(inputName);
414 boost::trim(inputTensorShapeStr);
415 boost::trim(inputTensorDataFilePath);
416 boost::trim(outputName);
418 // Get the value of the switch arguments.
419 bool enableProfiling = vm["event-based-profiling"].as<bool>();
421 // Get the preferred order of compute devices.
422 std::vector<armnn::Compute> computeDevices = vm["compute"].as<std::vector<armnn::Compute>>();
424 // Remove duplicates from the list of compute devices.
425 RemoveDuplicateDevices(computeDevices);
427 // Check that the specified compute devices are valid.
428 if (!CheckDevicesAreValid(computeDevices))
430 BOOST_LOG_TRIVIAL(fatal) << "The list of preferred devices contains an invalid compute";
// All inputs validated; hand off to the shared test runner.
434 return RunTest(modelFormat, inputTensorShapeStr, computeDevices,
435 modelPath, inputName, inputTensorDataFilePath, outputName, enableProfiling, subgraphId, runtime);
// Entry point. Configures logging, declares and parses the command-line
// options, then either (a) loads a CSV file of test cases and runs each row
// through RunCsvTest — sequentially or concurrently via std::async — on a
// single shared IRuntime, or (b) runs the single test described directly on
// the command line via RunTest.
// NOTE(review): braces, #if/#else/#endif guards and return statements are
// missing from this extract; the numbered gaps mark where they were.
438 int main(int argc, const char* argv[])
440 // Configures logging for both the ARMNN library and this test program.
// NOTE(review): the two 'level' assignments below were presumably selected
// by an #if NDEBUG / #else guard around original lines 441-445 (missing
// here) — Info for release builds, Debug otherwise; confirm against the
// full file.
442 armnn::LogSeverity level = armnn::LogSeverity::Info;
444 armnn::LogSeverity level = armnn::LogSeverity::Debug;
446 armnn::ConfigureLogging(true, true, level);
447 armnnUtils::ConfigureLogging(boost::log::core::get().get(), true, true, level);
449 std::string testCasesFile;
451 std::string modelFormat;
452 std::string modelPath;
453 std::string inputName;
454 std::string inputTensorShapeStr;
455 std::string inputTensorDataFilePath;
456 std::string outputName;
458 size_t subgraphId = 0;
// Mirrors the option set declared in RunCsvTest(); keep the two in sync.
460 po::options_description desc("Options");
464 ("help", "Display usage information")
465 ("test-cases,t", po::value(&testCasesFile), "Path to a CSV file containing test cases to run. "
466 "If set, further parameters -- with the exception of compute device and concurrency -- will be ignored, "
467 "as they are expected to be defined in the file for each test in particular.")
468 ("concurrent,n", po::bool_switch()->default_value(false),
469 "Whether or not the test cases should be executed in parallel")
470 ("model-format,f", po::value(&modelFormat),
471 "caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or tensorflow-text.")
472 ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .caffemodel, .prototxt,"
474 ("compute,c", po::value<std::vector<armnn::Compute>>()->multitoken(),
475 "The preferred order of devices to run layers on by default. Possible choices: CpuAcc, CpuRef, GpuAcc")
476 ("input-name,i", po::value(&inputName), "Identifier of the input tensor in the network.")
477 ("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be executed."
479 ("input-tensor-shape,s", po::value(&inputTensorShapeStr),
480 "The shape of the input tensor in the network as a flat array of integers separated by whitespace. "
481 "This parameter is optional, depending on the network.")
482 ("input-tensor-data,d", po::value(&inputTensorDataFilePath),
483 "Path to a file containing the input data as a flat array separated by whitespace.")
484 ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.")
485 ("event-based-profiling,e", po::bool_switch()->default_value(false),
486 "Enables built in profiler. If unset, defaults to off.")
488 catch (const std::exception& e)
490 // Coverity points out that default_value(...) can throw a bad_lexical_cast,
491 // and that desc.add_options() can throw boost::io::too_few_args.
492 // They really won't in any of these cases.
493 BOOST_ASSERT_MSG(false, "Caught unexpected exception");
494 BOOST_LOG_TRIVIAL(fatal) << "Fatal internal error: " << e.what();
498 // Parses the command-line.
499 po::variables_map vm;
502 po::store(po::parse_command_line(argc, argv, desc), vm);
// Show usage when --help was given or no arguments were supplied at all.
504 if (CheckOption(vm, "help") || argc <= 1)
506 std::cout << "Executes a neural network model using the provided input tensor. " << std::endl;
507 std::cout << "Prints the resulting output tensor." << std::endl;
508 std::cout << std::endl;
509 std::cout << desc << std::endl;
515 catch (const po::error& e)
517 std::cerr << e.what() << std::endl << std::endl;
518 std::cerr << desc << std::endl;
522 // Get the value of the switch arguments.
523 bool concurrent = vm["concurrent"].as<bool>();
524 bool enableProfiling = vm["event-based-profiling"].as<bool>();
526 // Check whether we have to load test cases from a file.
527 if (CheckOption(vm, "test-cases"))
529 // Check that the file exists.
530 if (!boost::filesystem::exists(testCasesFile))
532 BOOST_LOG_TRIVIAL(fatal) << "Given file \"" << testCasesFile << "\" does not exist";
536 // Parse CSV file and extract test cases
537 armnnUtils::CsvReader reader;
538 std::vector<armnnUtils::CsvRow> testCases = reader.ParseFile(testCasesFile);
540 // Check that there is at least one test case to run
541 if (testCases.empty())
543 BOOST_LOG_TRIVIAL(fatal) << "Given file \"" << testCasesFile << "\" has no test cases";
// One runtime is shared across all test cases (sequential or concurrent).
548 armnn::IRuntime::CreationOptions options;
549 std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options));
// Prepended to each CSV row so parse_command_line sees a normal argv[0].
551 const std::string executableName("ExecuteNetwork");
553 // Check whether we need to run the test cases concurrently
556 std::vector<std::future<int>> results;
557 results.reserve(testCases.size());
559 // Run each test case in its own thread
560 for (auto& testCase : testCases)
562 testCase.values.insert(testCase.values.begin(), executableName);
563 results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime)));
// Harvest every future; overall success requires every case to succeed.
567 for (auto& result : results)
569 if (result.get() != EXIT_SUCCESS)
577 // Run tests sequentially
578 for (auto& testCase : testCases)
580 testCase.values.insert(testCase.values.begin(), executableName);
581 if (RunCsvTest(testCase, runtime) != EXIT_SUCCESS)
590 else // Run single test
592 // Get the preferred order of compute devices.
593 std::vector<armnn::Compute> computeDevices = vm["compute"].as<std::vector<armnn::Compute>>();
595 // Remove duplicates from the list of compute devices.
596 RemoveDuplicateDevices(computeDevices);
598 // Check that the specified compute devices are valid.
599 if (!CheckDevicesAreValid(computeDevices))
601 BOOST_LOG_TRIVIAL(fatal) << "The list of preferred devices contains an invalid compute";
// Dependency validation is deferred to here so --test-cases runs (which
// define these options per row) are not rejected up front.
607 CheckOptionDependencies(vm);
609 catch (const po::error& e)
611 std::cerr << e.what() << std::endl << std::endl;
612 std::cerr << desc << std::endl;
// No 'runtime' argument: RunTest defaults it to nullptr and the
// InferenceModel creates its own.
616 return RunTest(modelFormat, inputTensorShapeStr, computeDevices,
617 modelPath, inputName, inputTensorDataFilePath, outputName, enableProfiling, subgraphId);