template<typename TParser, typename TDataType>
int MainImpl(const char* modelPath,
bool isModelBinary,
- const std::vector<armnn::BackendId>& computeDevice,
+ const std::vector<armnn::BackendId>& computeDevices,
const char* inputName,
const armnn::TensorShape* inputTensorShape,
const char* inputTensorDataFilePath,
typename InferenceModel<TParser, TDataType>::Params params;
params.m_ModelPath = modelPath;
params.m_IsModelBinary = isModelBinary;
- params.m_ComputeDevice = computeDevice;
+ params.m_ComputeDevices = computeDevices;
params.m_InputBindings = { inputName };
params.m_InputShapes = { *inputTensorShape };
params.m_OutputBindings = { outputName };
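// A minimal sketch (not part of the patch) of how a caller might now build the device
// list passed into MainImpl: later entries act as fallbacks for layers the preferred
// backend cannot handle. The GpuAcc/CpuRef choice here is purely illustrative.
std::vector<armnn::BackendId> computeDevices = { armnn::Compute::GpuAcc, armnn::Compute::CpuRef };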
#include <backendsCommon/BackendRegistry.hpp>
+#include <boost/algorithm/string/join.hpp>
#include <boost/exception/exception.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/log/trivial.hpp>
#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>
+#include <algorithm>
+#include <iterator>
#include <fstream>
#include <map>
#include <string>
std::vector<std::string> m_InputBindings;
std::vector<armnn::TensorShape> m_InputShapes;
std::vector<std::string> m_OutputBindings;
- std::vector<armnn::BackendId> m_ComputeDevice;
+ std::vector<armnn::BackendId> m_ComputeDevices;
bool m_EnableProfiling;
size_t m_SubgraphId;
bool m_IsModelBinary;
bool m_EnableFp16TurboMode;
Params()
- : m_ComputeDevice{armnn::Compute::CpuRef}
+ : m_ComputeDevices{"CpuRef"}
, m_EnableProfiling(false)
, m_SubgraphId(0)
, m_IsModelBinary(true)
struct CommandLineOptions
{
std::string m_ModelDir;
- std::vector<armnn::BackendId> m_ComputeDevice;
+ std::vector<std::string> m_ComputeDevices;
bool m_VisualizePostOptimizationModel;
bool m_EnableFp16TurboMode;
+
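+ // Convert the device names parsed from the command line (plain strings) into
+ // armnn::BackendIds; BackendId is constructible from std::string, so std::copy suffices.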
+ std::vector<armnn::BackendId> GetComputeDevicesAsBackendIds()
+ {
+ std::vector<armnn::BackendId> backendIds;
+ std::copy(m_ComputeDevices.begin(), m_ComputeDevices.end(), std::back_inserter(backendIds));
+ return backendIds;
+ }
};
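// Usage sketch (an assumption about the caller, which the patch does not show): after
// parsing, the strings are converted once and handed on wherever BackendIds are needed.
std::vector<armnn::BackendId> backends = options.GetComputeDevicesAsBackendIds();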
static void AddCommandLineOptions(boost::program_options::options_description& desc, CommandLineOptions& options)
{
namespace po = boost::program_options;
- std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc, armnn::Compute::CpuRef};
+ const std::vector<std::string> defaultComputes = { "CpuAcc", "CpuRef" };
const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
                                  + armnn::BackendRegistryInstance().GetBackendIdsAsString();
desc.add_options()
("model-dir,m", po::value<std::string>(&options.m_ModelDir)->required(),
"Path to directory containing model files (.caffemodel/.prototxt/.tflite)")
- ("compute,c", po::value<std::vector<armnn::BackendId>>(&options.m_ComputeDevice)->default_value
- (defaultBackends), backendsMessage.c_str())
+ ("compute,c", po::value<std::vector<std::string>>(&options.m_ComputeDevices)->
+ default_value(defaultComputes, boost::algorithm::join(defaultComputes, ", "))->
+ multitoken(), backendsMessage.c_str())
("visualize-optimized-model,v",
po::value<bool>(&options.m_VisualizePostOptimizationModel)->default_value(false),
"Produce a dot file useful for visualizing the graph post optimization."
}
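// Example invocations (illustrative; the binary name and model path are assumptions):
//   ExecuteNetwork -m /models/mobilenet --compute CpuAcc CpuRef
//   ExecuteNetwork -m /models/mobilenet -c GpuAcc -c CpuRef
// With multitoken(), boost::program_options accepts several space-separated tokens after
// one occurrence of --compute, and repeated -c flags append to the same vector.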
std::string invalidBackends;
- if (!CheckRequestedBackendsAreValid(params.m_ComputeDevice, armnn::Optional<std::string&>(invalidBackends)))
+ if (!CheckRequestedBackendsAreValid(params.m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
{
throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
}
armnn::OptimizerOptions options;
options.m_ReduceFp32ToFp16 = params.m_EnableFp16TurboMode;
- optNet = armnn::Optimize(*network, params.m_ComputeDevice, m_Runtime->GetDeviceSpec(), options);
+ optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
if (!optNet)
{
throw armnn::Exception("Optimize returned nullptr");