#include "../NetworkExecutionUtils/NetworkExecutionUtils.hpp"

int main(int argc, const char* argv[])
{
    // Configures logging for both the ARMNN library and this test program.
    #ifdef NDEBUG
    armnn::LogSeverity level = armnn::LogSeverity::Info;
    #else
    armnn::LogSeverity level = armnn::LogSeverity::Debug;
    #endif
    armnn::ConfigureLogging(true, true, level);

    std::string testCasesFile;

    std::string modelFormat;
    std::string modelPath;
    std::string inputNames;
    std::string inputTensorShapes;
    std::string inputTensorDataFilePaths;
    std::string outputNames;
    std::string inputTypes;
    std::string outputTypes;
    std::string dynamicBackendsPath;
    std::string outputTensorFiles;

    // External profiling parameters.
    std::string outgoingCaptureFile;
    std::string incomingCaptureFile;
    uint32_t counterCapturePeriod;

    double thresholdTime = 0.0;

    size_t subgraphId = 0;

    const std::string backendsMessage = "REQUIRED: Which device to run layers on by default. Possible choices: "
                                      + armnn::BackendRegistryInstance().GetBackendIdsAsString();

    po::options_description desc("Options");
    try
    {
        desc.add_options()
            ("help", "Display usage information")
            ("compute,c", po::value<std::vector<std::string>>()->multitoken()->required(),
             backendsMessage.c_str())
            ("test-cases,t", po::value(&testCasesFile), "Path to a CSV file containing test cases to run. "
             "If set, further parameters -- with the exception of compute device and concurrency -- will be ignored, "
             "as they are expected to be defined in the file for each test in particular.")
            ("concurrent,n", po::bool_switch()->default_value(false),
             "Whether or not the test cases should be executed in parallel")
            ("model-format,f", po::value(&modelFormat)->required(),
             "armnn-binary, caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or "
             "tensorflow-text.")
            ("model-path,m", po::value(&modelPath)->required(), "Path to model file, e.g. .armnn, .caffemodel, "
             ".prototxt, .tflite, .onnx")
            ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
             "Path where to load any available dynamic backend from. "
             "If left empty (the default), dynamic backends will not be used.")
            ("input-name,i", po::value(&inputNames),
             "Identifier of the input tensors in the network separated by comma.")
            ("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0),
             "Id of the subgraph to be executed.")
            ("input-tensor-shape,s", po::value(&inputTensorShapes),
             "The shape of the input tensors in the network as a flat array of integers separated by comma. "
             "Several shapes can be passed by separating them with a colon (:). "
             "This parameter is optional, depending on the network.")
            ("input-tensor-data,d", po::value(&inputTensorDataFilePaths)->default_value(""),
             "Path to files containing the input data as a flat array separated by whitespace. "
             "Several paths can be passed by separating them with a comma. If not specified, the network will be run "
             "with dummy data (useful for profiling).")
            ("input-type,y", po::value(&inputTypes),
             "The type of the input tensors in the network separated by comma. "
             "If unset, defaults to \"float\" for all defined inputs. "
             "Accepted values (float, int or qasymm8)")
            ("quantize-input,q", po::bool_switch()->default_value(false),
             "If this option is enabled, all float inputs will be quantized to qasymm8. "
             "If unset, default to not quantized. "
             "Accepted values (true or false)")
            ("output-type,z", po::value(&outputTypes),
             "The type of the output tensors in the network separated by comma. "
             "If unset, defaults to \"float\" for all defined outputs. "
             "Accepted values (float, int or qasymm8).")
            ("output-name,o", po::value(&outputNames),
             "Identifier of the output tensors in the network separated by comma.")
            ("write-outputs-to-file,w", po::value(&outputTensorFiles),
             "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
             "If left empty (the default), the output tensors will not be written to a file.")
            ("event-based-profiling,e", po::bool_switch()->default_value(false),
             "Enables built in profiler. If unset, defaults to off.")
            ("visualize-optimized-model,v", po::bool_switch()->default_value(false),
             "Enables built optimized model visualizer. If unset, defaults to off.")
            ("fp16-turbo-mode,h", po::bool_switch()->default_value(false), "If this option is enabled, FP32 layers, "
             "weights and biases will be converted to FP16 where the backend supports it")
            ("threshold-time,r", po::value<double>(&thresholdTime)->default_value(0.0),
             "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
             "inference time is greater than the threshold time, the test will fail. By default, no threshold "
             "time is used.")
            ("print-intermediate-layers,p", po::bool_switch()->default_value(false),
             "If this option is enabled, the output of every graph layer will be printed.")
            ("enable-external-profiling,a", po::bool_switch()->default_value(false),
             "If enabled external profiling will be switched on")
            ("outgoing-capture-file,j", po::value(&outgoingCaptureFile),
             "If specified the outgoing external profiling packets will be captured in this binary file")
            ("incoming-capture-file,k", po::value(&incomingCaptureFile),
             "If specified the incoming external profiling packets will be captured in this binary file")
            ("file-only-external-profiling,g", po::bool_switch()->default_value(false),
             "If enabled then the 'file-only' test mode of external profiling will be enabled")
            ("counter-capture-period,u", po::value<uint32_t>(&counterCapturePeriod)->default_value(150u),
             "If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test")
            ("parse-unsupported", po::bool_switch()->default_value(false),
             "Add unsupported operators as stand-in layers (where supported by parser)");
    }
    catch (const std::exception& e)
    {
        // Coverity points out that default_value(...) can throw a bad_lexical_cast,
        // and that desc.add_options() can throw boost::io::too_few_args.
        // They really won't in any of these cases.
        BOOST_ASSERT_MSG(false, "Caught unexpected exception");
        ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
        return EXIT_FAILURE;
    }
    // Parses the command-line.
    po::variables_map vm;
    try
    {
        po::store(po::parse_command_line(argc, argv, desc), vm);

        if (CheckOption(vm, "help") || argc <= 1)
        {
            std::cout << "Executes a neural network model using the provided input tensor. " << std::endl;
            std::cout << "Prints the resulting output tensor." << std::endl;
            std::cout << std::endl;
            std::cout << desc << std::endl;
            return EXIT_SUCCESS;
        }

        po::notify(vm);
    }
    catch (const po::error& e)
    {
        std::cerr << e.what() << std::endl << std::endl;
        std::cerr << desc << std::endl;
        return EXIT_FAILURE;
    }
    bool concurrent = vm["concurrent"].as<bool>();
    bool enableProfiling = vm["event-based-profiling"].as<bool>();
    bool enableLayerDetails = vm["visualize-optimized-model"].as<bool>();
    bool enableFp16TurboMode = vm["fp16-turbo-mode"].as<bool>();
    bool quantizeInput = vm["quantize-input"].as<bool>();
    bool printIntermediate = vm["print-intermediate-layers"].as<bool>();
    bool enableExternalProfiling = vm["enable-external-profiling"].as<bool>();
    bool fileOnlyExternalProfiling = vm["file-only-external-profiling"].as<bool>();
    bool parseUnsupported = vm["parse-unsupported"].as<bool>();
    // Check whether we have to load test cases from a file.
    if (CheckOption(vm, "test-cases"))
    {
        // Check that the file exists.
        if (!boost::filesystem::exists(testCasesFile))
        {
            ARMNN_LOG(fatal) << "Given file \"" << testCasesFile << "\" does not exist";
            return EXIT_FAILURE;
        }

        // Parse the CSV file and extract the test cases.
        armnnUtils::CsvReader reader;
        std::vector<armnnUtils::CsvRow> testCases = reader.ParseFile(testCasesFile);

        // Check that there is at least one test case to run.
        if (testCases.empty())
        {
            ARMNN_LOG(fatal) << "Given file \"" << testCasesFile << "\" has no test cases";
            return EXIT_FAILURE;
        }
        // Create the runtime shared by all test cases.
        armnn::IRuntime::CreationOptions options;
        options.m_EnableGpuProfiling                     = enableProfiling;
        options.m_DynamicBackendsPath                    = dynamicBackendsPath;
        options.m_ProfilingOptions.m_EnableProfiling     = enableExternalProfiling;
        options.m_ProfilingOptions.m_IncomingCaptureFile = incomingCaptureFile;
        options.m_ProfilingOptions.m_OutgoingCaptureFile = outgoingCaptureFile;
        options.m_ProfilingOptions.m_FileOnly            = fileOnlyExternalProfiling;
        options.m_ProfilingOptions.m_CapturePeriod       = counterCapturePeriod;
        std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options));

        const std::string executableName("ExecuteNetwork");

        // Check whether the test cases should be run concurrently.
        if (concurrent)
        {
            std::vector<std::future<int>> results;
            results.reserve(testCases.size());

            // Launch each test case in its own thread.
            for (auto& testCase : testCases)
            {
                testCase.values.insert(testCase.values.begin(), executableName);
                results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
                                             enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate,
                                             enableLayerDetails, parseUnsupported));
            }

            // Check the results.
            for (auto& result : results)
            {
                if (result.get() != EXIT_SUCCESS)
                {
                    return EXIT_FAILURE;
                }
            }
        }
        else
        {
            // Run the test cases sequentially.
            for (auto& testCase : testCases)
            {
                testCase.values.insert(testCase.values.begin(), executableName);
                if (RunCsvTest(testCase, runtime, enableProfiling,
                               enableFp16TurboMode, thresholdTime, printIntermediate,
                               enableLayerDetails, parseUnsupported) != EXIT_SUCCESS)
                {
                    return EXIT_FAILURE;
                }
            }
        }

        return EXIT_SUCCESS;
    }
    else // Run a single test with the provided options.
    {
        // Get the preferred order of compute devices.
        const std::string computeOption("compute");
        std::vector<std::string> computeDevicesAsStrings =
            CheckOption(vm, computeOption.c_str()) ?
                vm[computeOption].as<std::vector<std::string>>() :
                std::vector<std::string>();
        std::vector<armnn::BackendId> computeDevices(computeDevicesAsStrings.begin(), computeDevicesAsStrings.end());

        // Remove duplicates from the list of compute devices.
        RemoveDuplicateDevices(computeDevices);

        try
        {
            CheckOptionDependencies(vm);
        }
        catch (const po::error& e)
        {
            std::cerr << e.what() << std::endl << std::endl;
            std::cerr << desc << std::endl;
            return EXIT_FAILURE;
        }

        // Create the runtime used to execute the single test.
        armnn::IRuntime::CreationOptions options;
        options.m_EnableGpuProfiling                     = enableProfiling;
        options.m_DynamicBackendsPath                    = dynamicBackendsPath;
        options.m_ProfilingOptions.m_EnableProfiling     = enableExternalProfiling;
        options.m_ProfilingOptions.m_IncomingCaptureFile = incomingCaptureFile;
        options.m_ProfilingOptions.m_OutgoingCaptureFile = outgoingCaptureFile;
        options.m_ProfilingOptions.m_FileOnly            = fileOnlyExternalProfiling;
        options.m_ProfilingOptions.m_CapturePeriod       = counterCapturePeriod;
        std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options));

        return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                       inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
                       outputTensorFiles, enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate,
                       subgraphId, enableLayerDetails, parseUnsupported, runtime);
    }
}
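
Note: the concurrent branch above fans each CSV test case out with std::async and then joins on the futures, failing as soon as any case returns a non-zero exit code. The stand-alone sketch below isolates that pattern using only the C++ standard library; RunOneCase is a hypothetical stand-in for RunCsvTest and is not part of the listing above.

#include <cstdlib>
#include <future>
#include <iostream>
#include <vector>

// Hypothetical stand-in for RunCsvTest: pretends to run one test case.
int RunOneCase(int caseIndex)
{
    return (caseIndex < 4) ? EXIT_SUCCESS : EXIT_FAILURE;
}

int main()
{
    const int numCases = 4;

    // Launch every case on its own thread and keep the futures.
    std::vector<std::future<int>> results;
    results.reserve(numCases);
    for (int i = 0; i < numCases; ++i)
    {
        results.push_back(std::async(std::launch::async, RunOneCase, i));
    }

    // Join: get() blocks until the corresponding case has finished.
    for (auto& result : results)
    {
        if (result.get() != EXIT_SUCCESS)
        {
            std::cout << "At least one case failed" << std::endl;
            return EXIT_FAILURE;
        }
    }

    std::cout << "All cases passed" << std::endl;
    return EXIT_SUCCESS;
}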
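
Note: the option-handling flow used by main() (declare options, po::store, an early help/usage exit, then po::notify so that ->required() checks run and bound variables are filled, with po::error reported against the usage text) can be reduced to the minimal sketch below. The option names here are illustrative only; the code assumes Boost.ProgramOptions is available.

#include <boost/program_options.hpp>
#include <cstdlib>
#include <iostream>
#include <string>

namespace po = boost::program_options;

int main(int argc, const char* argv[])
{
    std::string modelPath;      // bound to --model-path
    double thresholdTime = 0.0; // bound to --threshold-time

    po::options_description desc("Options");
    desc.add_options()
        ("help", "Display usage information")
        ("model-path,m", po::value(&modelPath)->required(), "Path to a model file")
        ("threshold-time,r", po::value<double>(&thresholdTime)->default_value(0.0),
         "Maximum allowed inference time in milliseconds");

    po::variables_map vm;
    try
    {
        // Parse first so --help can be honoured before required() is enforced.
        po::store(po::parse_command_line(argc, argv, desc), vm);

        if (vm.count("help") || argc <= 1)
        {
            std::cout << desc << std::endl;
            return EXIT_SUCCESS;
        }

        // notify() fills the bound variables and throws if a required option is missing.
        po::notify(vm);
    }
    catch (const po::error& e)
    {
        std::cerr << e.what() << std::endl << std::endl;
        std::cerr << desc << std::endl;
        return EXIT_FAILURE;
    }

    std::cout << "model-path: " << modelPath << ", threshold-time: " << thresholdTime << std::endl;
    return EXIT_SUCCESS;
}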