This patch adds support for NNStreamer's ML Single API.
On MediaVision, NNFW (the in-house NN runtime) and the Vivante NPU
will be controlled by the inference-engine-mlapi backend
through NNStreamer's ML Single API.
Since more than one backend has to be controlled by the
inference-engine-mlapi backend, this patch introduces a new function,
SetPluginType, which lets the ML Single API know which tensor filter
- NNFW or Vivante NPU - should be used, and adds two backend types,
Vivante and NNFW.
Change-Id: Iccf9fe97f5ec50c5c1bfd7bab48927e0f343f182
Signed-off-by: Inki Dae <inki.dae@samsung.com>
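A minimal caller-side sketch of the intended flow, assuming the public
InferenceEngineCommon API of this repository (the header name, namespace,
function name BindVivanteNpu, and target-device choice below are
illustrative, not part of this patch):

    #include "inference_engine_common_impl.h"

    using namespace InferenceEngineInterface::Common;

    int BindVivanteNpu(void)
    {
        InferenceEngineCommon engine;

        inference_engine_config config = {
            .backend_name = "nnstreamer",              /* loads libinference-engine-nnstreamer.so */
            .backend_type = INFERENCE_BACKEND_VIVANTE, /* forwarded to SetPluginType() */
            .target_devices = INFERENCE_TARGET_CUSTOM  /* assumed NPU target; see inference_target_type_e */
        };

        /* BindBackend() -> InitBackendEngine() -> SetPluginType(backend_type) */
        return engine.BindBackend(&config);
    }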
virtual ~IInferenceEngineCommon() {};
/**
+ * @brief Set a tensor filter plugin type.
+ * @details See #inference_backend_type_e
+ * This callback passes the given tensor filter plugin type - NNFW or VIVANTE - to the NNStreamer backend so that it knows which tensor filter plugin to use.
+ *
+ * @since_tizen 6.0 (Optional)
+ * @param[in] type A plugin type, which should be one of the types enumerated in inference_backend_type_e.
+ */
+ virtual int SetPluginType(const int type = 0) { return type; }
+
+ /**
* @brief Set target devices.
* @details See #inference_target_type_e
* This callback passes the given device types - CPU, GPU, CUSTOM, or a combined one if a backend engine supports hybrid inference - to a backend engine.
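Note that the default SetPluginType() stub above simply returns its
argument, so a backend that does not override it ends up reporting a
non-zero plugin type as an error to the caller. A rough sketch, with
hypothetical class and member names, of how an NNStreamer-based backend
might override it (the actual inference-engine-mlapi implementation is
not part of this patch):

    /* Illustrative only: the class and member names are hypothetical and the
     * other IInferenceEngineCommon methods are omitted; only the
     * SetPluginType() contract comes from this patch. */
    class InferenceMLAPI : public IInferenceEngineCommon {
    public:
        int SetPluginType(const int type) override
        {
            if (type != INFERENCE_BACKEND_NNFW && type != INFERENCE_BACKEND_VIVANTE)
                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;

            /* Remember which tensor filter to request from the ML Single API
             * when the model is loaded later. */
            mPluginType = type;

            return INFERENCE_ENGINE_ERROR_NONE;
        }

    private:
        int mPluginType = 0;
    };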
int DumpProfileToFile(const std::string filename = "dump.txt");
private:
- int InitBackendEngine(const std::string &backend_path);
+ int InitBackendEngine(const std::string &backend_path, int backend_type);
int CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers);
int CheckLayerProperty(inference_engine_layer_property &property);
INFERENCE_BACKEND_OPENCV, /**< OpenCV */
INFERENCE_BACKEND_TFLITE, /**< TensorFlow-Lite */
INFERENCE_BACKEND_ARMNN, /**< ARMNN */
+ INFERENCE_BACKEND_VIVANTE, /**< Vivante */
+ INFERENCE_BACKEND_NNFW, /**< NNFW */
INFERENCE_BACKEND_MAX /**< Backend MAX */
} inference_backend_type_e;
INFERENCE_MODEL_DARKNET, /**< Darknet. *.cfg config file is needed. */
INFERENCE_MODEL_DLDT, /**< DLDT. *.xml config file is needed. */
INFERENCE_MODEL_ONNX, /**< ONNX */
+ INFERENCE_MODEL_VIVANTE, /**< Vivante. A model-specific .so library and a .nb model file are needed. */
INFERENCE_MODEL_MAX
} inference_model_format_e;
* @since_tizen 6.0
*/
typedef struct _inference_engine_config {
- std::string backend_name; /**< a backend name which could be one among supported backends(tflite, opencv, armnn, dldt) */
+ std::string backend_name; /**< a backend name, which should be one of the supported backends (tflite, opencv, armnn, dldt, nnstreamer) */
+ int backend_type; /**< a tensor filter plugin type to be used when the backend is NNStreamer. (Please, refer to inference_backend_type_e) */
int target_devices; /**< which device or devices to be targeted for inference. (Please, refer to inference_target_type_e) */
// TODO.
} inference_engine_config;
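Only the NNStreamer backend consumes the new backend_type member; for every
other backend InitBackendEngine() never forwards it, so existing callers
(including the test updates below) can simply leave it 0. A small sketch,
with hypothetical configuration values:

    /* backend_type is only meaningful when backend_name selects NNStreamer;
     * the second config is a hypothetical Vivante NPU setup. */
    inference_engine_config armnn_config = { "armnn", 0, INFERENCE_TARGET_CPU };
    inference_engine_config npu_config   = { "nnstreamer",
                                             INFERENCE_BACKEND_VIVANTE,
                                             INFERENCE_TARGET_CUSTOM };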
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceEngineCommon::InitBackendEngine(const std::string &backend_path)
+int InferenceEngineCommon::InitBackendEngine(const std::string &backend_path, int backend_type)
{
LOGI("lib: %s", backend_path.c_str());
mBackendModule = dlopen(backend_path.c_str(), RTLD_NOW);
return INFERENCE_ENGINE_ERROR_INTERNAL;
}
+ // If the backend is NNStreamer then tell it which tensor filter plugin type to use.
+ if (backend_type == INFERENCE_BACKEND_NNFW || backend_type == INFERENCE_BACKEND_VIVANTE) {
+ int ret = mBackendHandle->SetPluginType(backend_type);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Failed to set a tensor filter plugin.");
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ }
+ }
+
return INFERENCE_ENGINE_ERROR_NONE;
}
std::string backendLibName = "libinference-engine-" + config->backend_name + ".so";
- int ret = InitBackendEngine(backendLibName);
+ int ret = InitBackendEngine(backendLibName, config->backend_type);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
return ret;
}
std::string backendNameTable[INFERENCE_BACKEND_MAX] = {
[INFERENCE_BACKEND_OPENCV] = "opencv",
[INFERENCE_BACKEND_TFLITE] = "tflite",
- [INFERENCE_BACKEND_ARMNN] = "armnn"
+ [INFERENCE_BACKEND_ARMNN] = "armnn",
+ [INFERENCE_BACKEND_VIVANTE] = "nnstreamer",
+ [INFERENCE_BACKEND_NNFW] = "nnstreamer"
};
std::string backendLibName = "libinference-engine-" + backendNameTable[backend_type] + ".so";
- int ret = InitBackendEngine(backendLibName);
+ int ret = InitBackendEngine(backendLibName, backend_type);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
return ret;
}
std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
inference_engine_config config = {
.backend_name = backend_name,
+ .backend_type = 0,
.target_devices = target_devices
};
std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
inference_engine_config config = {
.backend_name = backend_name,
+ .backend_type = 0,
.target_devices = target_devices
};
std::cout << test_name << " inference test : backend = " << backend_name << ", target device = " << Target_Formats[target_devices] << std::endl;
inference_engine_config config = {
.backend_name = backend_name,
+ .backend_type = 0,
.target_devices = target_devices
};
class InferenceEngineTestCase_G8 : public testing::TestWithParam<ParamType_One_Int> { };
static auto InferenceEngineInit_One_Param = [](InferenceEngineCommon *engine, std::string &backend_name) -> int {
- inference_engine_config config = { backend_name, 0 };
+ inference_engine_config config = { backend_name, 0, 0 };
return engine->BindBackend(&config);
};
static auto InferenceEngineInit_Two_Params = [](InferenceEngineCommon *engine, std::string &backend_name, int &target_devices) -> int {
- inference_engine_config config = { backend_name, target_devices };
+ inference_engine_config config = { backend_name, 0, target_devices };
int ret = engine->BindBackend(&config);
if (ret != INFERENCE_ENGINE_ERROR_NONE)