modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
- return std::make_unique<YoloInferenceModel>(modelParams, commonOptions.m_EnableProfiling);
+ return std::make_unique<YoloInferenceModel>(modelParams,
+ commonOptions.m_EnableProfiling,
+ commonOptions.m_DynamicBackendsPath);
});
});
}
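
This three-argument make_unique call (model params, profiling flag, dynamic backends path) is the same edit applied at each of the patch's three model-factory sites; the InferenceModel constructor overload it targets appears further down.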
std::string outputNames;
std::string inputTypes;
std::string outputTypes;
+ std::string dynamicBackendsPath;
double thresholdTime = 0.0;
".prototxt, .tflite, .onnx")
("compute,c", po::value<std::vector<std::string>>()->multitoken(),
backendsMessage.c_str())
+ ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
+ "Path where to load any available dynamic backend from. "
+ "If left empty (the default), dynamic backends will not be used.")
("input-name,i", po::value(&inputNames),
"Identifier of the input tensors in the network separated by comma.")
("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be executed."
"This parameter is optional, depending on the network.")
("input-tensor-data,d", po::value(&inputTensorDataFilePaths),
"Path to files containing the input data as a flat array separated by whitespace. "
- "Several paths can be passed separating them by comma. ")
+ "Several paths can be passed separating them by comma.")
("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
"If unset, defaults to \"float\" for all defined inputs. "
"Accepted values (float, int or qasymm8)")
{
- // Get the preferred order of compute devices. If none are specified, default to using CpuRef
+ // Get the preferred order of compute devices (an empty list if none are specified).
const std::string computeOption("compute");
- std::vector<std::string> computeDevicesAsStrings = CheckOption(vm, computeOption.c_str()) ?
- vm[computeOption].as<std::vector<std::string>>() :
- std::vector<std::string>({ "CpuRef" });
+ std::vector<std::string> computeDevicesAsStrings =
+ CheckOption(vm, computeOption.c_str()) ?
+ vm[computeOption].as<std::vector<std::string>>() :
+ std::vector<std::string>();
std::vector<armnn::BackendId> computeDevices(computeDevicesAsStrings.begin(), computeDevicesAsStrings.end());
// Remove duplicates from the list of compute devices.
RemoveDuplicateDevices(computeDevices);
- // Check that the specified compute devices are valid.
- std::string invalidBackends;
- if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional<std::string&>(invalidBackends)))
- {
- BOOST_LOG_TRIVIAL(fatal) << "The list of preferred devices contains invalid backend IDs: "
- << invalidBackends;
- return EXIT_FAILURE;
- }
-
try
{
CheckOptionDependencies(vm);
return EXIT_FAILURE;
}
- return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames,
+ return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
}
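
Why the upfront validation block was dropped above: a dynamic backend only registers its BackendId once a runtime has been created with a dynamic backends path, so checking the requested devices against the registry before runtime creation would wrongly reject them. A sketch of the equivalent check once the runtime exists, reusing the helper and exception message visible elsewhere in this patch (exact placement assumed, not shown here):

    // Validate requested backends only after the runtime has had a chance
    // to load dynamic backends from m_DynamicBackendsPath.
    std::string invalidBackends;
    if (!CheckRequestedBackendsAreValid(computeDevices,
                                        armnn::Optional<std::string&>(invalidBackends)))
    {
        throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
    }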
std::vector<armnn::TensorShape> m_InputShapes;
std::vector<std::string> m_OutputBindings;
std::vector<armnn::BackendId> m_ComputeDevices;
+ std::string m_DynamicBackendsPath;
size_t m_SubgraphId;
bool m_IsModelBinary;
bool m_VisualizePostOptimizationModel;
bool m_EnableFp16TurboMode;
Params()
- : m_ComputeDevices{"CpuRef"}
+ : m_ComputeDevices{}
, m_SubgraphId(0)
, m_IsModelBinary(true)
, m_VisualizePostOptimizationModel(false)
{
std::string m_ModelDir;
std::vector<std::string> m_ComputeDevices;
+ std::string m_DynamicBackendsPath;
bool m_VisualizePostOptimizationModel;
bool m_EnableFp16TurboMode;
std::string m_Labels;
("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
multitoken(), backendsMessage.c_str())
+ ("dynamic-backends-path,b", po::value(&options.m_DynamicBackendsPath),
+ "Path where to load any available dynamic backend from. "
+ "If left empty (the default), dynamic backends will not be used.")
("labels,l", po::value<std::string>(&options.m_Labels),
"Text file containing one image filename - correct label pair per line, "
"used to test the accuracy of the network.")
InferenceModel(const Params& params,
bool enableProfiling,
+ const std::string& dynamicBackendsPath,
const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
: m_EnableProfiling(enableProfiling)
+ , m_DynamicBackendsPath(dynamicBackendsPath)
{
if (runtime)
{
{
armnn::IRuntime::CreationOptions options;
options.m_EnableGpuProfiling = m_EnableProfiling;
+ options.m_DynamicBackendsPath = m_DynamicBackendsPath;
m_Runtime = std::move(armnn::IRuntime::Create(options));
}
throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
}
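
The runtime-side hook that the new member feeds is armnn::IRuntime::CreationOptions::m_DynamicBackendsPath, set just above. A standalone illustration of that API (the filesystem path is hypothetical):

    #include <armnn/IRuntime.hpp>

    int main()
    {
        armnn::IRuntime::CreationOptions options;
        options.m_EnableGpuProfiling  = false;
        options.m_DynamicBackendsPath = "/tmp/armnn-dynamic-backends"; // hypothetical path

        // Backends discovered under the path are loaded during runtime
        // creation and become addressable by their BackendId afterwards.
        armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
        return 0;
    }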
- armnn::INetworkPtr network =
- CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
+ armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
- armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork *){}};
+ armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
{
ARMNN_SCOPED_HEAP_PROFILING("Optimizing");
std::vector<armnn::BindingPointInfo> m_InputBindings;
std::vector<armnn::BindingPointInfo> m_OutputBindings;
bool m_EnableProfiling;
+ std::string m_DynamicBackendsPath;
template<typename TContainer>
armnn::InputTensors MakeInputTensors(const std::vector<TContainer>& inputDataContainers)
unsigned int m_IterationCount;
std::string m_InferenceTimesFile;
bool m_EnableProfiling;
+ std::string m_DynamicBackendsPath;
InferenceTestOptions()
- : m_IterationCount(0),
- m_EnableProfiling(0)
+ : m_IterationCount(0)
+ , m_EnableProfiling(false)
+ , m_DynamicBackendsPath()
{}
};
modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
- return std::make_unique<InferenceModel>(modelParams, commonOptions.m_EnableProfiling);
+ return std::make_unique<InferenceModel>(modelParams,
+ commonOptions.m_EnableProfiling,
+ commonOptions.m_DynamicBackendsPath);
});
});
}
int MainImpl(const char* modelPath,
bool isModelBinary,
const std::vector<armnn::BackendId>& computeDevices,
+ const std::string& dynamicBackendsPath,
const std::vector<string>& inputNames,
const std::vector<std::unique_ptr<armnn::TensorShape>>& inputTensorShapes,
const std::vector<string>& inputTensorDataFilePaths,
params.m_ModelPath = modelPath;
params.m_IsModelBinary = isModelBinary;
params.m_ComputeDevices = computeDevices;
+ params.m_DynamicBackendsPath = dynamicBackendsPath;
for(const std::string& inputName: inputNames)
{
params.m_SubgraphId = subgraphId;
params.m_EnableFp16TurboMode = enableFp16TurboMode;
- InferenceModel<TParser, TDataType> model(params, enableProfiling, runtime);
+ InferenceModel<TParser, TDataType> model(params, enableProfiling, dynamicBackendsPath, runtime);
for(unsigned int i = 0; i < inputTensorDataFilePaths.size(); ++i)
{
int RunTest(const std::string& format,
const std::string& inputTensorShapesStr,
const vector<armnn::BackendId>& computeDevice,
+ const std::string& dynamicBackendsPath,
const std::string& path,
const std::string& inputNames,
const std::string& inputTensorDataFilePaths,
#if defined(ARMNN_SERIALIZER)
return MainImpl<armnnDeserializer::IDeserializer, float>(
modelPath.c_str(), isModelBinary, computeDevice,
- inputNamesVector, inputTensorShapes,
+ dynamicBackendsPath, inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector, quantizeInput,
outputTypesVector, outputNamesVector, enableProfiling,
enableFp16TurboMode, thresholdTime, subgraphId, runtime);
{
#if defined(ARMNN_CAFFE_PARSER)
return MainImpl<armnnCaffeParser::ICaffeParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ dynamicBackendsPath,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
{
#if defined(ARMNN_ONNX_PARSER)
return MainImpl<armnnOnnxParser::IOnnxParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ dynamicBackendsPath,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
{
#if defined(ARMNN_TF_PARSER)
return MainImpl<armnnTfParser::ITfParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ dynamicBackendsPath,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
return EXIT_FAILURE;
}
return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(modelPath.c_str(), isModelBinary, computeDevice,
+ dynamicBackendsPath,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
quantizeInput, outputTypesVector, outputNamesVector,
std::string outputNames;
std::string inputTypes;
std::string outputTypes;
+ std::string dynamicBackendsPath;
size_t subgraphId = 0;
".tflite, .onnx")
("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
backendsMessage.c_str())
+ ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
+ "Path where to load any available dynamic backend from. "
+ "If left empty (the default), dynamic backends will not be used.")
("input-name,i", po::value(&inputNames), "Identifier of the input tensors in the network separated by comma.")
("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
"executed. Defaults to 0.")
return EXIT_FAILURE;
}
- return RunTest(modelFormat, inputTensorShapes, computeDevices, modelPath, inputNames,
+ return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
enableProfiling, enableFp16TurboMode, thresholdTime, subgraphId);
-}
\ No newline at end of file
+}
modelParams.m_VisualizePostOptimizationModel = modelOptions.m_VisualizePostOptimizationModel;
modelParams.m_EnableFp16TurboMode = modelOptions.m_EnableFp16TurboMode;
- return std::make_unique<Model>(modelParams, commonOptions.m_EnableProfiling);
+ return std::make_unique<Model>(modelParams,
+ commonOptions.m_EnableProfiling,
+ commonOptions.m_DynamicBackendsPath);
});
});
}
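
Taken together, the patch threads one new string from the command line (--dynamic-backends-path / -b in each tool) through InferenceTestOptions, Params, MainImpl and RunTest down to armnn::IRuntime::CreationOptions. An invocation along the lines of ExecuteNetwork -m model.tflite -f tflite-binary -c SampleDynamic -b /path/to/backends (backend name and paths purely illustrative) would then resolve SampleDynamic only if a matching dynamic backend is found under the supplied path.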