mv_machine_learning: introduce engine configuration task API
author Inki Dae <inki.dae@samsung.com>
Mon, 13 Mar 2023 03:07:54 +0000 (12:07 +0900)
committer Kwanghoon Son <k.son@samsung.com>
Wed, 22 Mar 2023 04:10:03 +0000 (13:10 +0900)
[Issue type] : new feature

Introduced inference engine configuration task API, which allows user to
set user desired inference engine and its device type for inference
request. Regarding this, this patch adds below itask interfaces,
- setEngineInfo
  . Set user desired inference engine and its device type.
- getNumberOfEngines
  . Get how many inference engines are available for a given task API.
- getEngineType
  . Get inference engine type to a given index.
- getNumberOfDevices
  . Get how many device types are available for a given inference engine.
- getDeviceType
  . Get device type with a given index.

And as an actual task API implementation, this patch introduces
inference engine configuration API for image classification task group.

Change-Id: I8c8a161ea83e851d120bc04290323f2f15103da8
Signed-off-by: Inki Dae <inki.dae@samsung.com>
28 files changed:
include/mv_image_classification_internal.h
mv_machine_learning/common/include/itask.h
mv_machine_learning/common/include/mv_machine_learning_common.h [new file with mode: 0644]
mv_machine_learning/face_recognition/include/face_recognition_adapter.h
mv_machine_learning/face_recognition/include/facenet_adapter.h
mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp
mv_machine_learning/face_recognition/src/facenet_adapter.cpp
mv_machine_learning/image_classification/include/image_classification.h
mv_machine_learning/image_classification/include/image_classification_adapter.h
mv_machine_learning/image_classification/include/image_classification_type.h
mv_machine_learning/image_classification/include/mv_image_classification_open.h
mv_machine_learning/image_classification/src/image_classification.cpp
mv_machine_learning/image_classification/src/image_classification_adapter.cpp
mv_machine_learning/image_classification/src/mv_image_classification.c
mv_machine_learning/image_classification/src/mv_image_classification_open.cpp
mv_machine_learning/object_detection/include/mobilenet_v1_ssd.h
mv_machine_learning/object_detection/include/mobilenet_v2_ssd.h
mv_machine_learning/object_detection/include/object_detection.h
mv_machine_learning/object_detection/include/object_detection_adapter.h
mv_machine_learning/object_detection/include/object_detection_type.h
mv_machine_learning/object_detection/src/mobilenet_v1_ssd.cpp
mv_machine_learning/object_detection/src/mobilenet_v2_ssd.cpp
mv_machine_learning/object_detection/src/mv_object_detection_open.cpp
mv_machine_learning/object_detection/src/object_detection.cpp
mv_machine_learning/object_detection/src/object_detection_adapter.cpp
mv_machine_learning/object_detection_3d/include/object_detection_3d_adapter.h
mv_machine_learning/object_detection_3d/src/object_detection_3d_adapter.cpp
test/testsuites/machine_learning/image_classification/test_image_classification.cpp

index f71a751..19bb3d3 100644 (file)
@@ -160,6 +160,106 @@ int mv_image_classification_get_label(mv_image_classification_h handle, const ch
 int mv_image_classification_set_model(mv_image_classification_h handle, const char *model_file, const char *meta_file,
                                                                          const char *label_file);
 
+/**
+        * @brief Set user-given inference engine and device types for inference.
+        * @details Use this function to change the inference engine and device types for inference instead of default ones after calling @ref mv_image_classification_create().
+        *
+        * @since_tizen 7.5
+        *
+        * @param[in] handle        The handle to the image classification object.
+        * @param[in] engine_type  A string of inference engine type.
+        * @param[in] device_type   A string of device type.
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+        *
+        * @pre Create an image classification handle by calling @ref mv_image_classification_create()
+        */
+int mv_image_classification_set_engine(mv_image_classification_h handle, const char *engine_type,
+                                                                          const char *device_type);
+
+/**
+        * @brief Get a number of inference engines available for image classification task API.
+        * @details Use this function to get how many inference engines are supported for image classification after calling @ref mv_image_classification_create().
+        *
+        * @since_tizen 7.5
+        *
+        * @param[in] handle         The handle to the image classification object.
+        * @param[out] engine_count  A number of inference engines available for image classification API.
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+        *
+        * @pre Create an image classification handle by calling @ref mv_image_classification_create()
+        */
+int mv_image_classification_get_engine_count(mv_image_classification_h handle, unsigned int *engine_count);
+
+/**
+        * @brief Get engine type to a given inference engine index.
+        * @details Use this function to get inference engine type with a given engine index after calling @ref mv_image_classification_get_engine_count().
+        *
+        * @since_tizen 7.5
+        *
+        * @param[in] handle        The handle to the image classification object.
+        * @param[in] engine_index  An inference engine index for getting the inference engine type.
+        * @param[out] engine_type  A string to inference engine.
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+        *
+        * @pre Get a number of inference engines available for image classification task API by calling @ref mv_image_classification_get_engine_count()
+        */
+int mv_image_classification_get_engine_type(mv_image_classification_h handle, const unsigned int engine_index,
+                                                                                       char **engine_type);
+
+/**
+        * @brief Get a number of device types available to a given inference engine.
+        * @details Use this function to get how many device types are supported for a given inference engine after calling @ref mv_image_classification_create().
+        *
+        * @since_tizen 7.5
+        *
+        * @param[in] handle         The handle to the image classification object.
+        * @param[in] engine_type    An inference engine string.
+        * @param[out] device_count  A number of device types available for a given inference engine.
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+        *
+        * @pre Create an image classification handle by calling @ref mv_image_classification_create()
+        */
+int mv_image_classification_get_device_count(mv_image_classification_h handle, const char *engine_type,
+                                                                                        unsigned int *device_count);
+
+/**
+        * @brief Get device type list available.
+        * @details Use this function to get what device types are supported for current inference engine type after calling @ref mv_image_classification_configure().
+        *
+        * @since_tizen 7.5
+        *
+        * @param[in] handle         The handle to the image classification object.
+        * @param[in] engine_type    An inference engine string.
+        * @param[in] device_index   A device index for getting the device type.
+        * @param[out] device_type   A string to device type.
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+        *
+        * @pre Create an image classification handle by calling @ref mv_image_classification_create()
+        * @pre Configure image classification task by calling @ref mv_image_classification_configure()
+        */
+int mv_image_classification_get_device_type(mv_image_classification_h handle, const char *engine_type,
+                                                                                       const unsigned int device_index, char **device_type);
+
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */
index 940668e..19e3846 100644 (file)
@@ -27,7 +27,14 @@ template<typename T, typename V> class ITask
 public:
        virtual ~ITask() {};
        virtual void create(int type) = 0;
+       virtual void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                         const char *model_name = "") = 0;
+       virtual void setEngineInfo(const char *engine_type, const char *device_type) = 0;
        virtual void configure() = 0;
+       virtual void getNumberOfEngines(unsigned int *number_of_engines) = 0;
+       virtual void getEngineType(unsigned int engine_index, char **engine_type) = 0;
+       virtual void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) = 0;
+       virtual void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) = 0;
        virtual void prepare() = 0;
        virtual void setInput(T &t) = 0;
        virtual void perform() = 0;
diff --git a/mv_machine_learning/common/include/mv_machine_learning_common.h b/mv_machine_learning/common/include/mv_machine_learning_common.h
new file mode 100644 (file)
index 0000000..1491af9
--- /dev/null
@@ -0,0 +1,61 @@
+/**
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MV_MACHINE_LEARNING_COMMON_H__
+#define __MV_MACHINE_LEARNING_COMMON_H__
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include "mv_inference_type.h"
+
+namespace mediavision
+{
+namespace common
+{
+static std::map<std::string, int> gBackendTypeTable = {
+       { "OPENCV", MV_INFERENCE_BACKEND_OPENCV },               { "TFLITE", MV_INFERENCE_BACKEND_TFLITE },
+       { "ARMNN", MV_INFERENCE_BACKEND_ARMNN },                 { "ONE", MV_INFERENCE_BACKEND_ONE },
+       { "NNTRAINER", MV_INFERENCE_BACKEND_NNTRAINER }, { "SNPE", MV_INFERENCE_BACKEND_SNPE }
+};
+
+static std::map<std::string, int> gDeviceTypeTable = { { "CPU", MV_INFERENCE_TARGET_DEVICE_CPU },
+                                                                                                          { "GPU", MV_INFERENCE_TARGET_DEVICE_GPU },
+                                                                                                          { "NPU", MV_INFERENCE_TARGET_DEVICE_CUSTOM } };
+
+static int GetBackendType(std::string backend_type)
+{
+       auto item = gBackendTypeTable.find(backend_type);
+       if (item != gBackendTypeTable.end())
+               return item->second;
+
+       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+}
+
+static int GetDeviceType(std::string device_type)
+{
+       auto item = gDeviceTypeTable.find(device_type);
+       if (item != gDeviceTypeTable.end())
+               return item->second;
+
+       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+}
+
+} // namespace common
+} // namespace mediavision
+
+#endif
\ No newline at end of file
index c63c384..46d6960 100644 (file)
@@ -44,7 +44,14 @@ public:
        }
 
        void create(int type) override;
+       void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                         const char *model_name) override;
+       void setEngineInfo(const char *engine_type, const char *device_type) override;
        void configure() override;
+       void getNumberOfEngines(unsigned int *number_of_engines) override;
+       void getEngineType(unsigned int engine_index, char **engine_type) override;
+       void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
+       void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
        void setInput(T &t) override;
        void perform() override;
index 1ca246e..a5993a4 100644 (file)
@@ -38,8 +38,14 @@ public:
        ~FacenetAdapter();
 
        void create(int type) override;
-
+       void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                         const char *model_name) override;
+       void setEngineInfo(const char *engine_type, const char *device_type) override;
        void configure() override;
+       void getNumberOfEngines(unsigned int *number_of_engines) override;
+       void getEngineType(unsigned int engine_index, char **engine_type) override;
+       void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
+       void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
        void setInput(T &t) override;
        void perform() override;
index bac222d..45128ec 100644 (file)
@@ -40,6 +40,15 @@ template<typename T, typename V> void FaceRecognitionAdapter<T, V>::create(int t
        throw InvalidOperation("Not support yet.");
 }
 
+template<typename T, typename V>
+void FaceRecognitionAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                                               const char *model_name)
+{}
+
+template<typename T, typename V>
+void FaceRecognitionAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+{}
+
 template<typename T, typename V> void FaceRecognitionAdapter<T, V>::configure()
 {
        _config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + string(FACE_RECOGNITION_META_FILE_NAME));
@@ -77,6 +86,21 @@ template<typename T, typename V> void FaceRecognitionAdapter<T, V>::configure()
        _face_recognition->setConfig(config);
 }
 
+template<typename T, typename V> void FaceRecognitionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+{}
+
+template<typename T, typename V>
+void FaceRecognitionAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+{}
+
+template<typename T, typename V>
+void FaceRecognitionAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{}
+
+template<typename T, typename V>
+void FaceRecognitionAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+{}
+
 template<typename T, typename V> void FaceRecognitionAdapter<T, V>::prepare()
 {
        int ret = _face_recognition->initialize();
index dd4ab3b..bf0db2d 100644 (file)
@@ -39,12 +39,35 @@ template<typename T, typename V> void FacenetAdapter<T, V>::create(int type)
        throw InvalidOperation("Not support yet.");
 }
 
+template<typename T, typename V>
+void FacenetAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                               const char *model_name)
+{}
+
+template<typename T, typename V>
+void FacenetAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+{}
+
 template<typename T, typename V> void FacenetAdapter<T, V>::configure()
 {
        _facenet->parseMetaFile();
        _facenet->configure();
 }
 
+template<typename T, typename V> void FacenetAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+{}
+
+template<typename T, typename V> void FacenetAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+{}
+
+template<typename T, typename V>
+void FacenetAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{}
+
+template<typename T, typename V>
+void FacenetAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+{}
+
 template<typename T, typename V> void FacenetAdapter<T, V>::prepare()
 {
        _facenet->prepare();
index f699a20..aa8f245 100644 (file)
@@ -36,6 +36,8 @@ class ImageClassification
 {
 private:
        void loadLabel();
+       void getEngineList();
+       void getDeviceList(const char *engine_type);
 
 protected:
        std::unique_ptr<mediavision::inference::Inference> _inference;
@@ -46,6 +48,8 @@ protected:
        std::string _modelMetaFilePath;
        std::string _modelLabelFilePath;
        std::vector<std::string> _labels;
+       std::vector<std::string> _valid_backends;
+       std::vector<std::string> _valid_devices;
        int _backendType;
        int _targetDeviceType;
 
@@ -57,6 +61,11 @@ public:
        virtual ~ImageClassification() = default;
        void parseMetaFile();
        void setUserModel(std::string model_file, std::string meta_file, std::string label_file);
+       void setEngineInfo(std::string engine_type, std::string device_type);
+       void getNumberOfEngines(unsigned int *number_of_engines);
+       void getEngineType(unsigned int engine_index, char **engine_type);
+       void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices);
+       void getDeviceType(const char *engine_type, const unsigned int device_index, char **device_type);
        void configure();
        void prepare();
        void preprocess(mv_source_h &mv_src);
index 4debd61..04fe6b4 100644 (file)
@@ -38,8 +38,14 @@ public:
        ~ImageClassificationAdapter();
 
        void create(int type) override;
-
+       void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                         const char *model_name) override;
+       void setEngineInfo(const char *engine_type, const char *device_type) override;
        void configure() override;
+       void getNumberOfEngines(unsigned int *number_of_engines) override;
+       void getEngineType(unsigned int engine_index, char **engine_type) override;
+       void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
+       void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
        void setInput(T &t) override;
        void perform() override;
index a4c2f21..38dd401 100644 (file)
@@ -28,9 +28,7 @@ namespace machine_learning
 {
 struct ImageClassificationInput {
        mv_source_h inference_src;
-       std::string model_file;
-       std::string meta_file;
-       std::string label_file;
+       // TODO.
 };
 
 /**
index 6dc5e12..b0ef985 100644 (file)
@@ -161,6 +161,106 @@ int mv_image_classification_get_label_open(mv_image_classification_h handle, con
 int mv_image_classification_set_model_open(mv_image_classification_h handle, const char *model_file,
                                                                                   const char *meta_file, const char *label_file);
 
+/**
+        * @brief Set user-given backend and device types for inference.
+        * @details Use this function to change the backend and device types for inference instead of default ones after calling @ref mv_image_classification_create_open().
+        *
+        * @since_tizen 7.5
+        *
+        * @param[in] handle        The handle to the image classification object.
+        * @param[in] backend_type  A string of backend type.
+        * @param[in] device_type   A string of device type.
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+        *
+        * @pre Create an image classification handle by calling @ref mv_image_classification_create_open()
+        */
+int mv_image_classification_set_engine_open(mv_image_classification_h handle, const char *backend_type,
+                                                                                       const char *device_type);
+
+/**
+        * @brief Get a number of inference engines available for image classification task API.
+        * @details Use this function to get how many inference engines are supported for image classification after calling @ref mv_image_classification_create_open().
+        *
+        * @since_tizen 7.5
+        *
+        * @param[in] handle         The handle to the image classification object.
+        * @param[out] engine_count  A number of inference engines available for image classification API.
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+        *
+        * @pre Create an image classification handle by calling @ref mv_image_classification_create_open()
+        */
+int mv_image_classification_get_engine_count_open(mv_image_classification_h handle, unsigned int *engine_count);
+
+/**
+        * @brief Get engine type to a given inference engine index.
+        * @details Use this function to get inference engine type with a given engine index after calling @ref mv_image_classification_get_engine_count().
+        *
+        * @since_tizen 7.5
+        *
+        * @param[in] handle        The handle to the image classification object.
+        * @param[in] engine_index  An inference engine index for getting the inference engine type.
+        * @param[out] engine_type  A string to inference engine.
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+        *
+        * @pre Get a number of inference engines available for image classification task API by calling @ref mv_image_classification_get_engine_count()
+        */
+int mv_image_classification_get_engine_type_open(mv_image_classification_h handle, const unsigned int engine_index,
+                                                                                                char **engine_type);
+
+/**
+        * @brief Get a number of device types available to a given inference engine.
+        * @details Use this function to get how many device types are supported for a given inference engine after calling @ref mv_image_classification_create_open().
+        *
+        * @since_tizen 7.5
+        *
+        * @param[in] handle         The handle to the image classification object.
+        * @param[in] engine_type    An inference engine string.
+        * @param[out] device_count  A number of device types available for a given inference engine.
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+        *
+        * @pre Create an image classification handle by calling @ref mv_image_classification_create_open()
+        */
+int mv_image_classification_get_device_count_open(mv_image_classification_h handle, const char *engine_type,
+                                                                                                 unsigned int *device_count);
+
+/**
+        * @brief Get device type list available.
+        * @details Use this function to get what device types are supported for current inference engine type after calling @ref mv_image_classification_configure().
+        *
+        * @since_tizen 7.5
+        *
+        * @param[in] handle         The handle to the image classification object.
+        * @param[in] engine_type    An inference engine string.
+        * @param[in] device_index   A device index for getting the device type.
+        * @param[out] device_type   A string to device type.
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+        *
+        * @pre Create an image classification handle by calling @ref mv_image_classification_create_open()
+        * @pre Configure image classification task by calling @ref mv_image_classification_configure_open()
+        */
+int mv_image_classification_get_device_type_open(mv_image_classification_h handle, const char *engine_type,
+                                                                                                const unsigned int device_index, char **device_type);
+
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */
index 7baada0..16c9969 100644 (file)
 #include <algorithm>
 
 #include "machine_learning_exception.h"
+#include "mv_machine_learning_common.h"
 #include "mv_image_classification_config.h"
 #include "image_classification.h"
 
 using namespace std;
 using namespace mediavision::inference;
 using namespace MediaVision::Common;
+using namespace mediavision::common;
 using namespace mediavision::machine_learning::exception;
 
 namespace mediavision
 {
 namespace machine_learning
 {
-ImageClassification::ImageClassification() : _backendType(), _targetDeviceType()
+ImageClassification::ImageClassification()
+               : _backendType(MV_INFERENCE_BACKEND_NONE), _targetDeviceType(MV_INFERENCE_TARGET_DEVICE_NONE)
 {
        _inference = make_unique<Inference>();
        _parser = make_unique<ImageClassificationParser>();
@@ -69,6 +72,26 @@ void ImageClassification::loadLabel()
        readFile.close();
 }
 
+void ImageClassification::getEngineList()
+{
+       for (auto idx = MV_INFERENCE_BACKEND_NONE + 1; idx < MV_INFERENCE_BACKEND_MAX; ++idx) {
+               auto backend = _inference->getSupportedInferenceBackend(idx);
+               // TODO. we need to describe what inference engines are supported by each Task API,
+               //       and based on it, below inference engine types should be checked
+               //       if a given type is supported by this Task API later. As of now, tflite only.
+               if (backend.second == true && backend.first.compare("tflite") == 0)
+                       _valid_backends.push_back(backend.first);
+       }
+}
+
+void ImageClassification::getDeviceList(const char *engine_type)
+{
+       // TODO. add device types available for a given engine type later.
+       //       In default, cpu and gpu only.
+       _valid_devices.push_back("cpu");
+       _valid_devices.push_back("gpu");
+}
+
 void ImageClassification::setUserModel(string model_file, string meta_file, string label_file)
 {
        _modelFilePath = model_file;
@@ -76,17 +99,100 @@ void ImageClassification::setUserModel(string model_file, string meta_file, stri
        _modelLabelFilePath = label_file;
 }
 
+void ImageClassification::setEngineInfo(std::string engine_type, std::string device_type)
+{
+       if (engine_type.empty() || device_type.empty())
+               throw InvalidParameter("Invalid engine info.");
+
+       transform(engine_type.begin(), engine_type.end(), engine_type.begin(), ::toupper);
+       transform(device_type.begin(), device_type.end(), device_type.begin(), ::toupper);
+
+       _backendType = GetBackendType(engine_type);
+       _targetDeviceType = GetDeviceType(device_type);
+
+       LOGI("Engine type : %s => %d, Device type : %s => %d", engine_type.c_str(), GetBackendType(engine_type),
+                device_type.c_str(), GetDeviceType(device_type));
+
+       if (_backendType == MEDIA_VISION_ERROR_INVALID_PARAMETER ||
+               _targetDeviceType == MEDIA_VISION_ERROR_INVALID_PARAMETER)
+               throw InvalidParameter("backend or target device type not found.");
+}
+
+void ImageClassification::getNumberOfEngines(unsigned int *number_of_engines)
+{
+       if (!_valid_backends.empty()) {
+               *number_of_engines = _valid_backends.size();
+               return;
+       }
+
+       getEngineList();
+       *number_of_engines = _valid_backends.size();
+}
+
+void ImageClassification::getEngineType(unsigned int engine_index, char **engine_type)
+{
+       if (!_valid_backends.empty()) {
+               if (_valid_backends.size() <= engine_index)
+                       throw InvalidParameter("Invalid engine index.");
+
+               *engine_type = const_cast<char *>(_valid_backends[engine_index].data());
+               return;
+       }
+
+       getEngineList();
+
+       if (_valid_backends.size() <= engine_index)
+               throw InvalidParameter("Invalid engine index.");
+
+       *engine_type = const_cast<char *>(_valid_backends[engine_index].data());
+}
+
+void ImageClassification::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{
+       if (!_valid_devices.empty()) {
+               *number_of_devices = _valid_devices.size();
+               return;
+       }
+
+       getDeviceList(engine_type);
+       *number_of_devices = _valid_devices.size();
+}
+
+void ImageClassification::getDeviceType(const char *engine_type, const unsigned int device_index, char **device_type)
+{
+       if (!_valid_devices.empty()) {
+               if (_valid_devices.size() <= device_index)
+                       throw InvalidParameter("Invalid device index.");
+
+               *device_type = const_cast<char *>(_valid_devices[device_index].data());
+               return;
+       }
+
+       getDeviceList(engine_type);
+
+       if (_valid_devices.size() <= device_index)
+               throw InvalidParameter("Invalid device index.");
+
+       *device_type = const_cast<char *>(_valid_devices[device_index].data());
+}
+
 void ImageClassification::parseMetaFile()
 {
+       int ret = MEDIA_VISION_ERROR_NONE;
+
        _config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + string(MV_IMAGE_CLASSIFICATION_CONFIG_FILE_NAME));
 
-       int ret = _config->getIntegerAttribute(string(MV_IMAGE_CLASSIFICATION_BACKEND_TYPE), &_backendType);
-       if (ret != MEDIA_VISION_ERROR_NONE)
-               throw InvalidOperation("Fail to get backend engine type.");
+       if (_backendType == MV_INFERENCE_BACKEND_NONE) {
+               ret = _config->getIntegerAttribute(string(MV_IMAGE_CLASSIFICATION_BACKEND_TYPE), &_backendType);
+               if (ret != MEDIA_VISION_ERROR_NONE)
+                       throw InvalidOperation("Fail to get backend engine type.");
+       }
 
-       ret = _config->getIntegerAttribute(string(MV_IMAGE_CLASSIFICATION_TARGET_DEVICE_TYPE), &_targetDeviceType);
-       if (ret != MEDIA_VISION_ERROR_NONE)
-               throw InvalidOperation("Fail to get target device type.");
+       if (_targetDeviceType == MV_INFERENCE_TARGET_DEVICE_NONE) {
+               ret = _config->getIntegerAttribute(string(MV_IMAGE_CLASSIFICATION_TARGET_DEVICE_TYPE), &_targetDeviceType);
+               if (ret != MEDIA_VISION_ERROR_NONE)
+                       throw InvalidOperation("Fail to get target device type.");
+       }
 
        string modelDefaultPath;
 
index 4498fdd..1b474d7 100644 (file)
@@ -42,12 +42,50 @@ template<typename T, typename V> void ImageClassificationAdapter<T, V>::create(i
        throw InvalidOperation("Interface not supported.");
 }
 
+// Store user-provided model, meta and label file paths on the task.
+// NOTE(review): model_name is accepted for interface parity with other task
+// adapters but is not used by image classification — confirm intended.
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file,
+                                                                                                       const char *label_file, const char *model_name)
+{
+       _image_classification->setUserModel(string(model_file), string(meta_file), string(label_file));
+}
+
+// Forward the user-chosen inference engine and device type to the task.
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+{
+       _image_classification->setEngineInfo(string(engine_type), string(device_type));
+}
+
+// Parse the task meta file, then configure the underlying inference engine.
 template<typename T, typename V> void ImageClassificationAdapter<T, V>::configure()
 {
        _image_classification->parseMetaFile();
        _image_classification->configure();
 }
 
+// Query how many inference engines are available for this task.
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+{
+       _image_classification->getNumberOfEngines(number_of_engines);
+}
+
+// Resolve an engine index to its engine-type string.
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+{
+       _image_classification->getEngineType(engine_index, engine_type);
+}
+
+// Query how many device types the given engine supports.
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{
+       _image_classification->getNumberOfDevices(engine_type, number_of_devices);
+}
+
+// Resolve a device index of the given engine to its device-type string.
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index,
+                                                                                                        char **device_type)
+{
+       _image_classification->getDeviceType(engine_type, device_index, device_type);
+}
+
 template<typename T, typename V> void ImageClassificationAdapter<T, V>::prepare()
 {
        _image_classification->prepare();
@@ -56,9 +94,6 @@ template<typename T, typename V> void ImageClassificationAdapter<T, V>::prepare(
 template<typename T, typename V> void ImageClassificationAdapter<T, V>::setInput(T &t)
 {
        _source = t;
-
-       if (!_source.model_file.empty() && !_source.meta_file.empty() && !_source.label_file.empty())
-               _image_classification->setUserModel(_source.model_file, _source.meta_file, _source.label_file);
 }
 
 template<typename T, typename V> void ImageClassificationAdapter<T, V>::perform()
index c312d4b..03014de 100644 (file)
@@ -123,4 +123,90 @@ int mv_image_classification_set_model(mv_image_classification_h handle, const ch
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return ret;
-}
\ No newline at end of file
+}
+
+// Public API: select the inference engine backend and target device to be
+// used for image classification.
+// NOTE(review): reuses the *face* feature check, consistent with the other
+// mv_image_classification entry points visible here — confirm intended.
+int mv_image_classification_set_engine(mv_image_classification_h handle, const char *backend_type,
+                                                                          const char *device_type)
+{
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
+
+       MEDIA_VISION_INSTANCE_CHECK(handle);
+       MEDIA_VISION_NULL_ARG_CHECK(backend_type);
+       MEDIA_VISION_NULL_ARG_CHECK(device_type);
+
+       MEDIA_VISION_FUNCTION_ENTER();
+
+       int ret = mv_image_classification_set_engine_open(handle, backend_type, device_type);
+
+       MEDIA_VISION_FUNCTION_LEAVE();
+
+       return ret;
+}
+
+// Public API: get the number of inference engines available for image
+// classification. Validates arguments then delegates to the _open variant.
+int mv_image_classification_get_engine_count(mv_image_classification_h handle, unsigned int *engine_count)
+{
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
+
+       MEDIA_VISION_INSTANCE_CHECK(handle);
+       MEDIA_VISION_NULL_ARG_CHECK(engine_count);
+
+       MEDIA_VISION_FUNCTION_ENTER();
+
+       int ret = mv_image_classification_get_engine_count_open(handle, engine_count);
+
+       MEDIA_VISION_FUNCTION_LEAVE();
+
+       return ret;
+}
+
+// Public API: get the engine-type string for the engine at @engine_index.
+// Validates arguments then delegates to the _open variant.
+int mv_image_classification_get_engine_type(mv_image_classification_h handle, const unsigned int engine_index,
+                                                                                       char **engine_type)
+{
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
+
+       MEDIA_VISION_INSTANCE_CHECK(handle);
+       MEDIA_VISION_NULL_ARG_CHECK(engine_type);
+
+       MEDIA_VISION_FUNCTION_ENTER();
+
+       int ret = mv_image_classification_get_engine_type_open(handle, engine_index, engine_type);
+
+       MEDIA_VISION_FUNCTION_LEAVE();
+
+       return ret;
+}
+
+// Public API: get the number of device types supported by @engine_type.
+// NOTE(review): engine_type is not NULL-checked here, unlike the sibling
+// get_device_type entry point — consider adding MEDIA_VISION_NULL_ARG_CHECK.
+int mv_image_classification_get_device_count(mv_image_classification_h handle, const char *engine_type,
+                                                                                        unsigned int *device_count)
+{
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
+
+       MEDIA_VISION_INSTANCE_CHECK(handle);
+       MEDIA_VISION_NULL_ARG_CHECK(device_count);
+
+       MEDIA_VISION_FUNCTION_ENTER();
+
+       int ret = mv_image_classification_get_device_count_open(handle, engine_type, device_count);
+
+       MEDIA_VISION_FUNCTION_LEAVE();
+
+       return ret;
+}
+
+// Public API: get the device-type string at @device_index for @engine_type.
+// Validates arguments then delegates to the _open variant.
+int mv_image_classification_get_device_type(mv_image_classification_h handle, const char *engine_type,
+                                                                                       const unsigned int device_index, char **device_type)
+{
+       MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
+
+       MEDIA_VISION_INSTANCE_CHECK(handle);
+       MEDIA_VISION_NULL_ARG_CHECK(engine_type);
+       MEDIA_VISION_NULL_ARG_CHECK(device_type);
+
+       MEDIA_VISION_FUNCTION_ENTER();
+
+       int ret = mv_image_classification_get_device_type_open(handle, engine_type, device_index, device_type);
+
+       MEDIA_VISION_FUNCTION_LEAVE();
+
+       return ret;
+}
index c172641..7790f5c 100644 (file)
@@ -47,13 +47,125 @@ int mv_image_classification_set_model_open(mv_image_classification_h handle, con
                auto context = static_cast<Context *>(handle);
                auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
 
-               ImageClassificationInput input;
+               task->setModelInfo(model_file, meta_file, label_file);
+       } catch (const BaseException &e) {
+               LOGE("%s", e.what());
+               return e.getError();
+       }
 
-               input.model_file = string(model_file);
-               input.meta_file = string(meta_file);
-               input.label_file = string(label_file);
+       LOGD("LEAVE");
 
-               task->setInput(input);
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+// Open implementation: forward the user-chosen engine/device selection to
+// the image classification task. BaseException errors from the task layer
+// are translated into media-vision error codes.
+int mv_image_classification_set_engine_open(mv_image_classification_h handle, const char *backend_type,
+                                                                                       const char *device_type)
+{
+       if (!handle) {
+               LOGE("Handle is NULL.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
+
+       try {
+               auto context = static_cast<Context *>(handle);
+               auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
+
+               task->setEngineInfo(backend_type, device_type);
+       } catch (const BaseException &e) {
+               LOGE("%s", e.what());
+               return e.getError();
+       }
+
+       LOGD("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_image_classification_get_engine_count_open(mv_image_classification_h handle, unsigned int *engine_count)
+{
+       if (!handle) {
+               LOGE("Handle is NULL.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
+
+       try {
+               auto context = static_cast<Context *>(handle);
+               auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
+
+               task->getNumberOfEngines(engine_count);
+               // TODO.
+       } catch (const BaseException &e) {
+               LOGE("%s", e.what());
+               return e.getError();
+       }
+
+       LOGD("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_image_classification_get_engine_type_open(mv_image_classification_h handle, const unsigned int engine_index,
+                                                                                                char **engine_type)
+{
+       if (!handle) {
+               LOGE("Handle is NULL.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
+
+       try {
+               auto context = static_cast<Context *>(handle);
+               auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
+
+               task->getEngineType(engine_index, engine_type);
+               // TODO.
+       } catch (const BaseException &e) {
+               LOGE("%s", e.what());
+               return e.getError();
+       }
+
+       LOGD("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_image_classification_get_device_count_open(mv_image_classification_h handle, const char *engine_type,
+                                                                                                 unsigned int *device_count)
+{
+       if (!handle) {
+               LOGE("Handle is NULL.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
+
+       try {
+               auto context = static_cast<Context *>(handle);
+               auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
+
+               task->getNumberOfDevices(engine_type, device_count);
+               // TODO.
+       } catch (const BaseException &e) {
+               LOGE("%s", e.what());
+               return e.getError();
+       }
+
+       LOGD("LEAVE");
+
+       return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_image_classification_get_device_type_open(mv_image_classification_h handle, const char *engine_type,
+                                                                                                const unsigned int device_index, char **device_type)
+{
+       if (!handle) {
+               LOGE("Handle is NULL.");
+               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+       }
+
+       try {
+               auto context = static_cast<Context *>(handle);
+               auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
+
+               task->getDeviceType(engine_type, device_index, device_type);
+               // TODO.
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
index e22e0bb..ddbe3a9 100644 (file)
@@ -35,7 +35,7 @@ private:
        ObjectDetectionResult _result;
 
 public:
-       MobilenetV1Ssd();
+       MobilenetV1Ssd(ObjectDetectionTaskType task_type);
        ~MobilenetV1Ssd();
 
        ObjectDetectionResult &result() override;
index 830c9a1..3bbed3c 100644 (file)
@@ -41,7 +41,7 @@ private:
        Box decodeBoxWithAnchor(const BoxAnchorParam *boxAnchorParm, Box &box, cv::Rect2f &anchor);
 
 public:
-       MobilenetV2Ssd();
+       MobilenetV2Ssd(ObjectDetectionTaskType task_type);
        ~MobilenetV2Ssd();
 
        ObjectDetectionResult &result() override;
index fe00bb5..b364bfb 100644 (file)
@@ -37,13 +37,14 @@ class ObjectDetection
 private:
        void loadLabel();
 
+       ObjectDetectionTaskType _task_type;
+
 protected:
        std::unique_ptr<mediavision::inference::Inference> _inference;
        std::unique_ptr<MediaVision::Common::EngineConfig> _config;
        std::unique_ptr<MetaParser> _parser;
        std::vector<std::string> _labels;
        Preprocess _preprocess;
-       std::string _modelName;
        std::string _modelFilePath;
        std::string _modelMetaFilePath;
        std::string _modelDefaultPath;
@@ -55,11 +56,9 @@ protected:
        void getOutputTensor(std::string target_name, std::vector<float> &tensor);
 
 public:
-       ObjectDetection();
+       ObjectDetection(ObjectDetectionTaskType task_type);
        virtual ~ObjectDetection() = default;
-       void setUserModel(std::string &model_name, std::string &model_file, std::string &meta_file,
-                                         std::string &label_file);
-       void setTaskType(ObjectDetectionTaskType task_type);
+       void setUserModel(std::string model_file, std::string meta_file, std::string label_file);
        void parseMetaFile();
        void configure();
        void prepare();
index fbf3fdd..edaa54f 100644 (file)
@@ -33,7 +33,10 @@ template<typename T, typename V> class ObjectDetectionAdapter : public mediavisi
 private:
        std::unique_ptr<ObjectDetection> _object_detection;
        T _source;
-       ObjectDetectionTaskType _task_type {};
+       std::string _model_name;
+       std::string _model_file;
+       std::string _meta_file;
+       std::string _label_file;
 
 public:
        ObjectDetectionAdapter();
@@ -41,7 +44,14 @@ public:
 
        void create(int type) override;
 
+       void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                         const char *model_name) override;
+       void setEngineInfo(const char *engine_type, const char *device_type) override;
        void configure() override;
+       void getNumberOfEngines(unsigned int *number_of_engines) override;
+       void getEngineType(unsigned int engine_index, char **engine_type) override;
+       void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
+       void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
        void setInput(T &t) override;
        void perform() override;
index 3238745..96bac8e 100644 (file)
@@ -26,10 +26,7 @@ namespace machine_learning
 {
 struct ObjectDetectionInput {
        mv_source_h inference_src;
-       std::string model_name;
-       std::string model_file;
-       std::string meta_file;
-       std::string label_file;
+       // TODO.
 };
 
 /**
index 60f21fc..51e6e0a 100644 (file)
@@ -31,7 +31,7 @@ namespace mediavision
 {
 namespace machine_learning
 {
-MobilenetV1Ssd::MobilenetV1Ssd() : _result()
+MobilenetV1Ssd::MobilenetV1Ssd(ObjectDetectionTaskType task_type) : ObjectDetection(task_type), _result()
 {}
 
 MobilenetV1Ssd::~MobilenetV1Ssd()
index e8dac60..4196ff9 100644 (file)
@@ -32,7 +32,7 @@ namespace mediavision
 {
 namespace machine_learning
 {
-MobilenetV2Ssd::MobilenetV2Ssd() : _result()
+MobilenetV2Ssd::MobilenetV2Ssd(ObjectDetectionTaskType task_type) : ObjectDetection(task_type), _result()
 {}
 
 MobilenetV2Ssd::~MobilenetV2Ssd()
index 384d67e..311d6b1 100644 (file)
@@ -27,6 +27,7 @@
 #include <string>
 #include <algorithm>
 #include <mutex>
+#include <iostream>
 
 using namespace std;
 using namespace mediavision::inference;
@@ -99,14 +100,7 @@ int mv_object_detection_set_model_open(mv_object_detection_h handle, const char
                auto context = static_cast<Context *>(handle);
                auto task = static_cast<ObjectDetectionTask *>(context->__tasks.at("object_detection"));
 
-               ObjectDetectionInput input;
-
-               input.model_name = string(model_name);
-               input.model_file = string(model_file);
-               input.meta_file = string(meta_file);
-               input.label_file = string(label_file);
-
-               task->setInput(input);
+               task->setModelInfo(model_file, meta_file, label_file, model_name);
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
index bfe1f1a..d26ed63 100644 (file)
@@ -33,15 +33,15 @@ namespace mediavision
 {
 namespace machine_learning
 {
-ObjectDetection::ObjectDetection() : _backendType(), _targetDeviceType()
+ObjectDetection::ObjectDetection(ObjectDetectionTaskType task_type)
+               : _task_type(task_type), _backendType(), _targetDeviceType()
 {
        _inference = make_unique<Inference>();
        _parser = make_unique<ObjectDetectionParser>();
 }
 
-void ObjectDetection::setUserModel(string &model_name, string &model_file, string &meta_file, string &label_file)
+void ObjectDetection::setUserModel(string model_file, string meta_file, string label_file)
 {
-       _modelName = model_name;
        _modelFilePath = model_file;
        _modelMetaFilePath = meta_file;
        _modelLabelFilePath = label_file;
@@ -70,11 +70,6 @@ void ObjectDetection::loadLabel()
        readFile.close();
 }
 
-void ObjectDetection::setTaskType(ObjectDetectionTaskType task_type)
-{
-       _parser->setTaskType(static_cast<int>(task_type));
-}
-
 void ObjectDetection::parseMetaFile()
 {
        _config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + string(MV_OBJECT_DETECTION_META_FILE_NAME));
@@ -115,6 +110,7 @@ void ObjectDetection::parseMetaFile()
        _modelMetaFilePath = _modelDefaultPath + _modelMetaFilePath;
        LOGI("meta file path = %s", _modelMetaFilePath.c_str());
 
+       _parser->setTaskType(static_cast<int>(_task_type));
        _parser->load(_modelMetaFilePath);
 
        if (_modelLabelFilePath.empty()) {
index df3535f..be1a73d 100644 (file)
@@ -34,12 +34,12 @@ template<typename T, typename V> ObjectDetectionAdapter<T, V>::~ObjectDetectionA
 
 template<typename T, typename V> void ObjectDetectionAdapter<T, V>::create(int type)
 {
-       if (!_source.model_name.empty()) {
-               transform(_source.model_name.begin(), _source.model_name.end(), _source.model_name.begin(), ::toupper);
+       if (!_model_name.empty()) {
+               transform(_model_name.begin(), _model_name.end(), _model_name.begin(), ::toupper);
 
-               if (_source.model_name == string("MOBILENET_V1_SSD"))
+               if (_model_name == string("MOBILENET_V1_SSD"))
                        type = static_cast<int>(ObjectDetectionTaskType::MOBILENET_V1_SSD);
-               else if (_source.model_name == string("MOBILENET_V2_SSD"))
+               else if (_model_name == string("MOBILENET_V2_SSD"))
                        type = static_cast<int>(ObjectDetectionTaskType::MOBILENET_V2_SSD);
                // TODO.
                else
@@ -48,29 +48,57 @@ template<typename T, typename V> void ObjectDetectionAdapter<T, V>::create(int t
 
        switch (static_cast<ObjectDetectionTaskType>(type)) {
        case ObjectDetectionTaskType::MOBILENET_V1_SSD:
-               _object_detection = make_unique<MobilenetV1Ssd>();
+               _object_detection = make_unique<MobilenetV1Ssd>(static_cast<ObjectDetectionTaskType>(type));
                break;
        case ObjectDetectionTaskType::MOBILENET_V2_SSD:
-               _object_detection = make_unique<MobilenetV2Ssd>();
+               _object_detection = make_unique<MobilenetV2Ssd>(static_cast<ObjectDetectionTaskType>(type));
                break;
        default:
                throw InvalidParameter("Invalid object detection task type.");
        }
 
-       _task_type = static_cast<ObjectDetectionTaskType>(type);
+       _object_detection->setUserModel(_model_file, _meta_file, _label_file);
 }
 
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::configure()
+template<typename T, typename V>
+void ObjectDetectionAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                                               const char *model_name)
 {
-       if (!_source.model_name.empty() && !_source.model_file.empty() && !_source.meta_file.empty() &&
-               !_source.label_file.empty())
-               _object_detection->setUserModel(_source.model_name, _source.model_file, _source.meta_file, _source.label_file);
+       _model_name = string(model_name);
+
+       _model_file = string(model_file);
+       _meta_file = string(meta_file);
+       _label_file = string(label_file);
+
+       if (_model_file.empty() && _meta_file.empty() && _label_file.empty())
+               throw InvalidParameter("Model info not invalid.");
+}
 
-       _object_detection->setTaskType(_task_type);
+// NOTE(review): no-op placeholder — engine/device selection is not yet
+// supported for object detection; the request is silently dropped.
+template<typename T, typename V>
+void ObjectDetectionAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+{}
+
+// Parse the task meta file, then configure the underlying inference engine.
+template<typename T, typename V> void ObjectDetectionAdapter<T, V>::configure()
+{
        _object_detection->parseMetaFile();
        _object_detection->configure();
 }
 
+// NOTE(review): placeholder — leaves *number_of_engines untouched, so callers
+// must not rely on the output; consider throwing InvalidOperation instead.
+template<typename T, typename V> void ObjectDetectionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+{}
+
+// NOTE(review): placeholder — *engine_type is left untouched; see above.
+template<typename T, typename V>
+void ObjectDetectionAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+{}
+
+// NOTE(review): placeholder — *number_of_devices is left untouched; see above.
+template<typename T, typename V>
+void ObjectDetectionAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{}
+
+// NOTE(review): placeholder — *device_type is left untouched; see above.
+template<typename T, typename V>
+void ObjectDetectionAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+{}
+
 template<typename T, typename V> void ObjectDetectionAdapter<T, V>::prepare()
 {
        _object_detection->prepare();
index e5ffa0b..b80de8b 100644 (file)
@@ -38,7 +38,13 @@ public:
        ~ObjectDetection3dAdapter();
 
        void create(int type) override;
-
+       void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                         const char *model_name) override;
+       void setEngineInfo(const char *engine_type, const char *device_type) override;
+       void getNumberOfEngines(unsigned int *number_of_engines) override;
+       void getEngineType(unsigned int engine_index, char **engine_type) override;
+       void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
+       void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void configure() override;
        void prepare() override;
        void setInput(T &t) override;
index 898c200..f0a14b7 100644 (file)
@@ -43,12 +43,38 @@ template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::create(int
        }
 }
 
+// NOTE(review): no-op placeholder — user model selection is not yet
+// supported for 3D object detection; the request is silently dropped.
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                                                 const char *model_name)
+{}
+
+// NOTE(review): no-op placeholder — engine/device selection is not yet
+// supported for 3D object detection.
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+{}
+
 template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::configure()
 {
        _object_detection_3d->parseMetaFile();
        _object_detection_3d->configure();
 }
 
+// NOTE(review): placeholder — *number_of_engines is left untouched, so
+// callers must not rely on the output.
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+{}
+
+// NOTE(review): placeholder — *engine_type is left untouched; see above.
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+{}
+
+// NOTE(review): placeholder — *number_of_devices is left untouched; see above.
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{}
+
+// NOTE(review): placeholder — *device_type is left untouched; see above.
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index,
+                                                                                                  char **device_type)
+{}
+
 template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::prepare()
 {
        _object_detection_3d->prepare();
index 0cc0a43..72ca193 100644 (file)
@@ -37,6 +37,52 @@ struct model_info {
        string label_file;
 };
 
+TEST(ImageClassificationTest, GettingAvailableInferenceEnginesInfoShouldBeOk)
+{
+       mv_image_classification_h handle;
+
+       int ret = mv_image_classification_create(&handle);
+       ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+       unsigned int engine_count = 0;
+
+       ret = mv_image_classification_get_engine_count(handle, &engine_count);
+       ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+       cout << "Engine count = " << engine_count << endl;
+       ASSERT_GE(engine_count, 1);
+
+       for (unsigned int engine_idx = 0; engine_idx < engine_count; ++engine_idx) {
+               char *engine_type = nullptr;
+
+               ret = mv_image_classification_get_engine_type(handle, engine_idx, &engine_type);
+               ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+               cout << "Engine type : " << engine_type << endl;
+
+               unsigned int device_count = 0;
+
+               ret = mv_image_classification_get_device_count(handle, engine_type, &device_count);
+               ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+               cout << "Device count = " << device_count << endl;
+
+               ASSERT_GE(engine_count, 1);
+
+               for (unsigned int device_idx = 0; device_idx < device_count; ++device_idx) {
+                       char *device_type = nullptr;
+
+                       ret = mv_image_classification_get_device_type(handle, engine_type, device_idx, &device_type);
+                       ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+                       cout << "Device type : " << device_type << endl;
+               }
+       }
+
+       ret = mv_image_classification_destroy(handle);
+       ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+}
+
 TEST(ImageClassificationTest, InferenceShouldBeOk)
 {
        mv_image_classification_h handle;
@@ -70,6 +116,7 @@ TEST(ImageClassificationTest, InferenceShouldBeOk)
 
                mv_image_classification_set_model(handle, model.model_file.c_str(), model.meta_file.c_str(),
                                                                                  model.label_file.c_str());
+               mv_image_classification_set_engine(handle, "tflite", "cpu");
 
                ret = mv_image_classification_configure(handle);
                ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);