* run efficiently without modification on Embedded hardware.
* (https://developer.arm.com/ip-products/processors/machine-learning/arm-nn)
* #MV_INFERENCE_BACKEND_MLAPI Samsung-introduced open source ML single API framework of NNStreamer, which
 * runs various NN models via tensor filters of NNStreamer. (Deprecated since 7.0)
* (https://github.com/nnstreamer/nnstreamer)
* #MV_INFERENCE_BACKEND_ONE Samsung-introduced open source inference engine called On-device Neural Engine, which
* performs inference of a given NN model on various devices such as CPU, GPU, DSP and NPU.
MV_INFERENCE_BACKEND_OPENCV, /**< OpenCV */
MV_INFERENCE_BACKEND_TFLITE, /**< TensorFlow-Lite */
MV_INFERENCE_BACKEND_ARMNN, /**< ARMNN (Since 6.0) */
- MV_INFERENCE_BACKEND_MLAPI, /**< ML Single API of NNStreamer (Since 6.0) */
+ MV_INFERENCE_BACKEND_MLAPI, /**< @deprecated ML Single API of NNStreamer (Deprecated since 7.0) */
MV_INFERENCE_BACKEND_ONE, /**< On-device Neural Engine (Since 6.0) */
MV_INFERENCE_BACKEND_NNTRAINER, /**< NNTrainer (Since 7.0) */
MV_INFERENCE_BACKEND_SNPE, /**< SNPE Engine (Since 7.0) */
- MV_INFERENCE_BACKEND_MAX /**< Backend MAX */
+ MV_INFERENCE_BACKEND_MAX /**< @deprecated Backend MAX (Deprecated since 7.0) */
} mv_inference_backend_type_e;
/**
static bool IsConfigFilePathRequired(const int target_device_type, const int backend_type)
{
+ LOGW("DEPRECATION WARNING : MV_INFERENCE_BACKEND_MLAPI type is deprecated and will be removed from next release.");
+
// In case of MV_INFERENCE_TARGET_DEVICE_CUSTOM via MLAPI backend, config file path is required.
return (backend_type == MV_INFERENCE_BACKEND_MLAPI &&
target_device_type & MV_INFERENCE_TARGET_DEVICE_CUSTOM);