}
int RunCsvTest(const armnnUtils::CsvRow &csvRow,
- const std::shared_ptr<armnn::IRuntime>& runtime)
+ const std::shared_ptr<armnn::IRuntime>& runtime, const bool enableProfiling)
{
std::string modelFormat;
std::string modelPath;
"This parameter is optional, depending on the network.")
("input-tensor-data,d", po::value(&inputTensorDataFilePath),
"Path to a file containing the input data as a flat array separated by whitespace.")
- ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.")
- ("event-based-profiling,e", po::bool_switch()->default_value(false),
- "Enables built in profiler. If unset, defaults to off.");
+ ("output-name,o", po::value(&outputName), "Identifier of the output tensor in the network.");
}
catch (const std::exception& e)
{
boost::trim(inputTensorDataFilePath);
boost::trim(outputName);
- // Get the value of the switch arguments.
- bool enableProfiling = vm["event-based-profiling"].as<bool>();
-
// Get the preferred order of compute devices.
std::vector<armnn::Compute> computeDevices = vm["compute"].as<std::vector<armnn::Compute>>();
// Create runtime
armnn::IRuntime::CreationOptions options;
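+ // Enable GPU profiling in the runtime creation options when profiling is requested.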
+ options.m_EnableGpuProfiling = enableProfiling;
+
std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options));
const std::string executableName("ExecuteNetwork");
for (auto& testCase : testCases)
{
testCase.values.insert(testCase.values.begin(), executableName);
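+ // Forward the shared runtime and the profiling flag to each concurrently launched RunCsvTest.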
- results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime)));
+ results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
+ enableProfiling));
}
// Check results
for (auto& testCase : testCases)
{
testCase.values.insert(testCase.values.begin(), executableName);
- if (RunCsvTest(testCase, runtime) != EXIT_SUCCESS)
+ if (RunCsvTest(testCase, runtime, enableProfiling) != EXIT_SUCCESS)
{
return EXIT_FAILURE;
}