mv_machine_learning: drop the use of template type from itask
author    Inki Dae <inki.dae@samsung.com>
Tue, 5 Dec 2023 08:53:32 +0000 (17:53 +0900)
committer Kwanghoon Son <k.son@samsung.com>
Wed, 27 Dec 2023 03:22:49 +0000 (12:22 +0900)
[Issue type]: code cleanup

Drop the use of template types from ITask and its child classes. By
introducing the common input and output structures, InputBaseType and
OutputBaseType, we have finally broken out of the template jail.
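
For reference, the de-templated interface now looks roughly as follows.
This is a condensed sketch: the engine/device query methods are omitted,
and the member layout of InputBaseType and OutputBaseType is inferred
from how the adapter diffs below use them, not copied verbatim from
MachineLearningType.h.

    #include <mv_common.h>  // mv_source_h

    // Common I/O base types (sketch; the real ones live in MachineLearningType.h).
    struct InputBaseType {
        InputBaseType(mv_source_h src = nullptr) : inference_src(src) {}
        mv_source_h inference_src {};  // consumed by perform()
    };

    struct OutputBaseType {
        unsigned long frame_number {};  // assumed: hoisted out of per-task results
    };

    // itask.h after this patch: one non-template interface for every task.
    class ITask {
    public:
        virtual ~ITask() {};
        virtual void configure() = 0;
        virtual void prepare() = 0;
        virtual void setInput(InputBaseType &input) = 0;
        virtual void perform() = 0;
        virtual void performAsync(InputBaseType &input) = 0;
        virtual OutputBaseType &getOutput() = 0;
        virtual OutputBaseType &getOutputCache() = 0;
    };

Adapters keep an InputBaseType member and downcast to their concrete
type where task-specific fields are needed, e.g.
static_cast<FaceRecognitionInput &>(_source) in
FaceRecognitionAdapter::perform().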

This patch also updates the task groups that do not yet use the
MachineLearningNative module.
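
Call sites now split into two patterns, condensed here from the
mv_image_classification.cpp and mv_face_recognition.cpp diffs below:

    // Task groups already on the MachineLearningNative layer hand over a
    // plain ITask pointer; the template instantiation at the call site is gone.
    machine_learning_native_create(TASK_NAME, new ImageClassificationAdapter(), out_handle);

    // Not-yet-converted task groups keep managing Context directly, but its
    // __tasks map now holds ITask * instead of void *.
    Context *context = new Context();
    context->__tasks.insert(make_pair("face_recognition", new FaceRecognitionAdapter()));
    context->__tasks.insert(make_pair("facenet", new FacenetAdapter()));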

Change-Id: I96abdd94dc715e3f24717e14ad92533c9451f861
Signed-off-by: Inki Dae <inki.dae@samsung.com>
37 files changed:
mv_machine_learning/common/include/MachineLearningNative.h
mv_machine_learning/common/include/context.h
mv_machine_learning/common/include/itask.h
mv_machine_learning/common/src/MachineLearningNative.cpp
mv_machine_learning/face_recognition/include/face_recognition.h
mv_machine_learning/face_recognition/include/face_recognition_adapter.h
mv_machine_learning/face_recognition/include/facenet.h
mv_machine_learning/face_recognition/include/facenet_adapter.h
mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp
mv_machine_learning/face_recognition/src/facenet_adapter.cpp
mv_machine_learning/face_recognition/src/mv_face_recognition.cpp
mv_machine_learning/image_classification/include/image_classification_adapter.h
mv_machine_learning/image_classification/include/image_classification_type.h
mv_machine_learning/image_classification/src/image_classification_adapter.cpp
mv_machine_learning/image_classification/src/mv_image_classification.cpp
mv_machine_learning/image_segmentation/include/image_segmentation_type.h
mv_machine_learning/image_segmentation/include/selfie_segmentation_adapter.h
mv_machine_learning/image_segmentation/src/mv_selfie_segmentation.cpp
mv_machine_learning/image_segmentation/src/selfie_segmentation_adapter.cpp
mv_machine_learning/landmark_detection/include/facial_landmark_adapter.h
mv_machine_learning/landmark_detection/include/landmark_detection_type.h
mv_machine_learning/landmark_detection/include/pose_landmark_adapter.h
mv_machine_learning/landmark_detection/src/facial_landmark_adapter.cpp
mv_machine_learning/landmark_detection/src/mv_facial_landmark.cpp
mv_machine_learning/landmark_detection/src/mv_pose_landmark.cpp
mv_machine_learning/landmark_detection/src/pose_landmark_adapter.cpp
mv_machine_learning/object_detection/include/face_detection_adapter.h
mv_machine_learning/object_detection/include/object_detection_adapter.h
mv_machine_learning/object_detection/include/object_detection_type.h
mv_machine_learning/object_detection/src/face_detection_adapter.cpp
mv_machine_learning/object_detection/src/mv_face_detection.cpp
mv_machine_learning/object_detection/src/mv_object_detection.cpp
mv_machine_learning/object_detection/src/object_detection_adapter.cpp
mv_machine_learning/object_detection_3d/include/object_detection_3d_adapter.h
mv_machine_learning/object_detection_3d/include/object_detection_3d_type.h
mv_machine_learning/object_detection_3d/src/mv_object_detection_3d.cpp
mv_machine_learning/object_detection_3d/src/object_detection_3d_adapter.cpp

mv_machine_learning/common/include/MachineLearningNative.h
index ced1b00..67d21cf 100644 (file)
@@ -30,15 +30,14 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V>
-void machine_learning_native_create(const std::string &task_name, mediavision::common::ITask<T, V> *task,
-                                                                       void **handle);
+void machine_learning_native_create(const std::string &task_name, mediavision::common::ITask *task, void **handle);
 void machine_learning_native_destory(void *handle, const std::string &task_name);
 void machine_learning_native_configure(void *handle, const std::string &task_name);
 void machine_learning_native_prepare(void *handle, const std::string &task_name);
 void machine_learning_native_inference(void *handle, const std::string &task_name, InputBaseType &input);
 void machine_learning_native_inference_async(void *handle, const std::string &task_name, InputBaseType &input);
 OutputBaseType &machine_learning_native_get_result(void *handle, const std::string &task_name);
+OutputBaseType &machine_learning_native_get_result_cache(void *handle, const std::string &task_name);
 void machine_learning_native_set_model(void *handle, const std::string &task_name, const char *model_file,
                                                                           const char *meta_file, const char *label_file, const char *model_name = "");
 void machine_learning_native_set_engine(void *handle, const std::string &task_name, const char *backend_type,
mv_machine_learning/common/include/context.h
index e989f8a..f2b7b3d 100644 (file)
@@ -32,7 +32,7 @@ public:
        ~Context()
        {}
 
-       std::map<std::string, void *> __tasks;
+       std::map<std::string, ITask *> __tasks;
 };
 } // namespace
 } // namespace
mv_machine_learning/common/include/itask.h
index 99723b6..ea4f9fb 100644 (file)
 #ifndef __ITASK_H__
 #define __ITASK_H__
 
+#include "MachineLearningType.h"
+
 namespace mediavision
 {
 namespace common
 {
-// T : parameter type, V : return type
-template<typename T, typename V> class ITask
+class ITask
 {
 public:
        virtual ~ITask() {};
@@ -35,11 +36,11 @@ public:
        virtual void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) = 0;
        virtual void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) = 0;
        virtual void prepare() = 0;
-       virtual void setInput(T &t) = 0;
+       virtual void setInput(mediavision::machine_learning::InputBaseType &input) = 0;
        virtual void perform() = 0;
-       virtual void performAsync(T &t) = 0;
-       virtual V &getOutput() = 0;
-       virtual V &getOutputCache() = 0;
+       virtual void performAsync(mediavision::machine_learning::InputBaseType &input) = 0;
+       virtual mediavision::machine_learning::OutputBaseType &getOutput() = 0;
+       virtual mediavision::machine_learning::OutputBaseType &getOutputCache() = 0;
 };
 } // namespace
 } // namespace
mv_machine_learning/common/src/MachineLearningNative.cpp
index 24cb5b0..5f3ad8d 100644 (file)
@@ -24,21 +24,18 @@ using namespace mediavision::common;
 using namespace mediavision::machine_learning;
 using namespace mediavision::machine_learning::exception;
 
-using MachineLearningTask = ITask<InputBaseType, OutputBaseType>;
-
 namespace mediavision
 {
 namespace machine_learning
 {
-inline MachineLearningTask *get_task(void *handle, const std::string &name)
+inline ITask *get_task(void *handle, const std::string &name)
 {
        auto context = static_cast<Context *>(handle);
 
-       return static_cast<MachineLearningTask *>(context->__tasks.at(name));
+       return context->__tasks.at(name);
 }
 
-template<typename T, typename V>
-void machine_learning_native_create(const string &task_name, ITask<T, V> *task, void **handle)
+void machine_learning_native_create(const string &task_name, ITask *task, void **handle)
 {
        Context *context = new Context();
 
@@ -46,15 +43,12 @@ void machine_learning_native_create(const string &task_name, ITask<T, V> *task,
        *handle = static_cast<void *>(context);
 }
 
-template void machine_learning_native_create<InputBaseType, OutputBaseType>(const string &task_name,
-                                                                                                                                                       MachineLearningTask *task, void **handle);
-
 void machine_learning_native_destory(void *handle, const string &task_name)
 {
        auto context = static_cast<Context *>(handle);
 
        for (auto &m : context->__tasks)
-               delete static_cast<MachineLearningTask *>(m.second);
+               delete m.second;
 
        delete context;
 }
@@ -95,6 +89,13 @@ OutputBaseType &machine_learning_native_get_result(void *handle, const string &t
        return task->getOutput();
 }
 
+OutputBaseType &machine_learning_native_get_result_cache(void *handle, const string &task_name)
+{
+       auto task = get_task(handle, task_name);
+
+       return task->getOutputCache();
+}
+
 void machine_learning_native_set_model(void *handle, const string &task_name, const char *model_file,
                                                                           const char *meta_file, const char *label_file, const char *model_name)
 {
mv_machine_learning/face_recognition/include/face_recognition.h
index f8a493e..1220925 100644 (file)
@@ -24,6 +24,7 @@
 #include "training_engine_error.h"
 #include "training_engine_common_impl.h"
 #include "inference_engine_common_impl.h"
+#include "MachineLearningType.h"
 #include "Inference.h"
 #include "label_manager.h"
 #include "feature_vector_manager.h"
@@ -41,8 +42,10 @@ enum class WorkingStatus { NONE, INITIALIZED, REGISTERED, INFERENCED, DELETED };
 enum class RequestMode { REGISTER, INFERENCE, DELETE };
 }
 
-struct FaceRecognitionInput {
-       face_recognition::RequestMode mode;
+struct FaceRecognitionInput : public InputBaseType {
+       FaceRecognitionInput(mv_source_h src = nullptr) : InputBaseType(src)
+       {}
+       face_recognition::RequestMode mode {};
        std::vector<std::vector<float> > inputs;
        std::vector<std::string> labels;
 };
@@ -52,7 +55,7 @@ struct FaceRecognitionInput {
  * @details Contains face recognition result such as label, label index, raw data,
  *          and raw data count.
  */
-struct FaceRecognitionResult {
+struct FaceRecognitionResult : public OutputBaseType {
        unsigned int label_idx {}; /**< label index of label file. */
        std::vector<float> raw_data; /**< raw data to each label. */
        std::vector<std::string> labels;
mv_machine_learning/face_recognition/include/face_recognition_adapter.h
index 69c2854..c2132c0 100644 (file)
@@ -27,11 +27,11 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> class FaceRecognitionAdapter : public mediavision::common::ITask<T, V>
+class FaceRecognitionAdapter : public mediavision::common::ITask
 {
 private:
        std::unique_ptr<FaceRecognition> _face_recognition;
-       T _source {};
+       InputBaseType _source;
        std::unique_ptr<MediaVision::Common::EngineConfig> _config;
 
 public:
@@ -52,11 +52,11 @@ public:
        void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
        void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
-       void setInput(T &t) override;
+       void setInput(InputBaseType &input) override;
        void perform() override;
-       void performAsync(T &t) override;
-       V &getOutput() override;
-       V &getOutputCache() override;
+       void performAsync(InputBaseType &input) override;
+       OutputBaseType &getOutput() override;
+       OutputBaseType &getOutputCache() override;
 };
 
 } // machine_learning
mv_machine_learning/face_recognition/include/facenet.h
index 52059e7..d5780ca 100644 (file)
 #include "facenet_parser.h"
 #include "face_recognition_type.h"
 #include "machine_learning_preprocess.h"
+#include "MachineLearningType.h"
 
 namespace mediavision
 {
 namespace machine_learning
 {
-struct FacenetInput {
-       std::vector<mv_source_h> inputs;
+struct FacenetInput : public InputBaseType {
+       FacenetInput(mv_source_h src = nullptr) : InputBaseType(src)
+       {}
 };
 
-struct FacenetOutput {
+struct FacenetOutput : public OutputBaseType {
        std::vector<std::vector<float> > outputs;
 };
 
mv_machine_learning/face_recognition/include/facenet_adapter.h
index 94cb4cd..cd67241 100644 (file)
 
 #include "EngineConfig.h"
 #include "itask.h"
+#include "MachineLearningType.h"
 #include "facenet.h"
 
 namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> class FacenetAdapter : public mediavision::common::ITask<T, V>
+class FacenetAdapter : public mediavision::common::ITask
 {
 private:
        std::unique_ptr<Facenet> _facenet;
-       T _source;
+       InputBaseType _source;
 
 public:
        FacenetAdapter();
@@ -46,11 +47,11 @@ public:
        void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
        void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
-       void setInput(T &t) override;
+       void setInput(InputBaseType &input) override;
        void perform() override;
-       void performAsync(T &t) override;
-       V &getOutput() override;
-       V &getOutputCache() override;
+       void performAsync(InputBaseType &input) override;
+       OutputBaseType &getOutput() override;
+       OutputBaseType &getOutputCache() override;
 };
 
 } // machine_learning
mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp
index 57516ea..61d37ac 100644 (file)
@@ -27,24 +27,22 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> FaceRecognitionAdapter<T, V>::FaceRecognitionAdapter()
+FaceRecognitionAdapter::FaceRecognitionAdapter()
 {
        _face_recognition = make_unique<FaceRecognition>();
 }
 
-template<typename T, typename V> FaceRecognitionAdapter<T, V>::~FaceRecognitionAdapter()
+FaceRecognitionAdapter::~FaceRecognitionAdapter()
 {}
 
-template<typename T, typename V>
-void FaceRecognitionAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
-                                                                                               const char *model_name)
+void FaceRecognitionAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                                 const char *model_name)
 {}
 
-template<typename T, typename V>
-void FaceRecognitionAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void FaceRecognitionAdapter::setEngineInfo(const char *engine_type, const char *device_type)
 {}
 
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::configure()
+void FaceRecognitionAdapter::configure()
 {
        _config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + string(FACE_RECOGNITION_META_FILE_NAME));
 
@@ -81,41 +79,40 @@ template<typename T, typename V> void FaceRecognitionAdapter<T, V>::configure()
        _face_recognition->setConfig(config);
 }
 
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void FaceRecognitionAdapter::getNumberOfEngines(unsigned int *number_of_engines)
 {}
 
-template<typename T, typename V>
-void FaceRecognitionAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void FaceRecognitionAdapter::getEngineType(unsigned int engine_index, char **engine_type)
 {}
 
-template<typename T, typename V>
-void FaceRecognitionAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void FaceRecognitionAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
 {}
 
-template<typename T, typename V>
-void FaceRecognitionAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void FaceRecognitionAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
 {}
 
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::prepare()
+void FaceRecognitionAdapter::prepare()
 {
        int ret = _face_recognition->initialize();
        if (ret != MEDIA_VISION_ERROR_NONE)
                throw InvalidOperation("Fail to initialize face recognition.");
 }
 
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::setInput(T &t)
+void FaceRecognitionAdapter::setInput(InputBaseType &input)
 {
-       _source = t;
+       _source = input;
 }
 
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::perform()
+void FaceRecognitionAdapter::perform()
 {
-       if (_source.mode == RequestMode::REGISTER) {
-               if (_source.inputs.size() != _source.labels.size())
+       FaceRecognitionInput &source = static_cast<FaceRecognitionInput &>(_source);
+
+       if (source.mode == RequestMode::REGISTER) {
+               if (source.inputs.size() != source.labels.size())
                        throw InvalidParameter("The number of inputs and labels are not matched.");
 
-               for (size_t idx = 0; idx < _source.inputs.size(); ++idx) {
-                       int ret = _face_recognition->registerNewFace(_source.inputs[idx], _source.labels[idx]);
+               for (size_t idx = 0; idx < source.inputs.size(); ++idx) {
+                       int ret = _face_recognition->registerNewFace(source.inputs[idx], source.labels[idx]);
                        if (ret != MEDIA_VISION_ERROR_NONE)
                                throw InvalidOperation("Fail to register new face.");
                }
@@ -123,9 +120,9 @@ template<typename T, typename V> void FaceRecognitionAdapter<T, V>::perform()
                return;
        }
 
-       if (_source.mode == RequestMode::INFERENCE) {
+       if (source.mode == RequestMode::INFERENCE) {
                // _source.inputs.size should be 1.
-               int ret = _face_recognition->recognizeFace(_source.inputs[0]);
+               int ret = _face_recognition->recognizeFace(source.inputs[0]);
                if (ret == MEDIA_VISION_ERROR_NO_DATA)
                        throw NoData("Label not found.");
 
@@ -135,8 +132,8 @@ template<typename T, typename V> void FaceRecognitionAdapter<T, V>::perform()
                return;
        }
 
-       if (_source.mode == RequestMode::DELETE) {
-               for (auto &l : _source.labels) {
+       if (source.mode == RequestMode::DELETE) {
+               for (auto &l : source.labels) {
                        int ret = _face_recognition->deleteLabel(l);
                        if (ret != MEDIA_VISION_ERROR_NONE)
                                throw InvalidOperation("Fail to unregister a given label.");
@@ -146,21 +143,20 @@ template<typename T, typename V> void FaceRecognitionAdapter<T, V>::perform()
        }
 }
 
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::performAsync(T &t)
+void FaceRecognitionAdapter::performAsync(InputBaseType &input)
 {
        throw InvalidOperation("Not support yet.");
 }
 
-template<typename T, typename V> V &FaceRecognitionAdapter<T, V>::getOutput()
+OutputBaseType &FaceRecognitionAdapter::getOutput()
 {
        return _face_recognition->result();
 }
 
-template<typename T, typename V> V &FaceRecognitionAdapter<T, V>::getOutputCache()
+OutputBaseType &FaceRecognitionAdapter::getOutputCache()
 {
        throw InvalidOperation("Not support yet.");
 }
 
-template class FaceRecognitionAdapter<FaceRecognitionInput, FaceRecognitionResult>;
 }
 }
\ No newline at end of file
mv_machine_learning/face_recognition/src/facenet_adapter.cpp
index 82de240..6b726c4 100644 (file)
@@ -26,79 +26,75 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> FacenetAdapter<T, V>::FacenetAdapter() : _source()
+FacenetAdapter::FacenetAdapter() : _source()
 {
        _facenet = make_unique<Facenet>();
 }
 
-template<typename T, typename V> FacenetAdapter<T, V>::~FacenetAdapter()
+FacenetAdapter::~FacenetAdapter()
 {}
 
-template<typename T, typename V>
-void FacenetAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
-                                                                               const char *model_name)
+void FacenetAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                 const char *model_name)
 {}
 
-template<typename T, typename V>
-void FacenetAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void FacenetAdapter::setEngineInfo(const char *engine_type, const char *device_type)
 {}
 
-template<typename T, typename V> void FacenetAdapter<T, V>::configure()
+void FacenetAdapter::configure()
 {
        _facenet->parseMetaFile();
        _facenet->configure();
 }
 
-template<typename T, typename V> void FacenetAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void FacenetAdapter::getNumberOfEngines(unsigned int *number_of_engines)
 {}
 
-template<typename T, typename V> void FacenetAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void FacenetAdapter::getEngineType(unsigned int engine_index, char **engine_type)
 {}
 
-template<typename T, typename V>
-void FacenetAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void FacenetAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
 {}
 
-template<typename T, typename V>
-void FacenetAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void FacenetAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
 {}
 
-template<typename T, typename V> void FacenetAdapter<T, V>::prepare()
+void FacenetAdapter::prepare()
 {
        _facenet->prepare();
 }
 
-template<typename T, typename V> void FacenetAdapter<T, V>::setInput(T &t)
+void FacenetAdapter::setInput(InputBaseType &input)
 {
-       _source = t;
+       _source = input;
 }
 
-template<typename T, typename V> void FacenetAdapter<T, V>::perform()
+void FacenetAdapter::perform()
 {
+       FacenetInput &source = static_cast<FacenetInput &>(_source);
        shared_ptr<MetaInfo> metaInfo = _facenet->getInputMetaInfo();
        if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8)
-               _facenet->perform<unsigned char>(_source.inputs[0], metaInfo);
+               _facenet->perform<unsigned char>(source.inference_src, metaInfo);
        else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32)
-               _facenet->perform<float>(_source.inputs[0], metaInfo);
+               _facenet->perform<float>(source.inference_src, metaInfo);
        else
                throw InvalidOperation("Invalid model data type.");
 }
 
-template<typename T, typename V> void FacenetAdapter<T, V>::performAsync(T &t)
+void FacenetAdapter::performAsync(InputBaseType &input)
 {
        throw InvalidOperation("Not support yet.");
 }
 
-template<typename T, typename V> V &FacenetAdapter<T, V>::getOutput()
+OutputBaseType &FacenetAdapter::getOutput()
 {
        return _facenet->result();
 }
 
-template<typename T, typename V> V &FacenetAdapter<T, V>::getOutputCache()
+OutputBaseType &FacenetAdapter::getOutputCache()
 {
        throw InvalidOperation("Not support yet.");
 }
 
-template class FacenetAdapter<FacenetInput, FacenetOutput>;
 }
 }
\ No newline at end of file
mv_machine_learning/face_recognition/src/mv_face_recognition.cpp
index b98a9fb..399d2f4 100644 (file)
@@ -33,8 +33,6 @@ using namespace mediavision::common;
 using namespace mediavision::machine_learning;
 using namespace mediavision::machine_learning::face_recognition;
 using namespace mediavision::machine_learning::exception;
-using FaceRecognitionTask = ITask<FaceRecognitionInput, FaceRecognitionResult>;
-using FacenetTask = ITask<FacenetInput, FacenetOutput>;
 
 static mutex g_face_recognition_mutex;
 
@@ -52,52 +50,27 @@ int mv_face_recognition_create(mv_face_recognition_h *out_handle)
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       Context *context = new (nothrow) Context();
-       if (!context) {
-               LOGE("Fail to allocate a context.");
-               return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
-       }
-
-       FaceRecognitionTask *face_recognition_task = new (nothrow)
-                       FaceRecognitionAdapter<FaceRecognitionInput, FaceRecognitionResult>();
-       if (!face_recognition_task) {
-               delete context;
-               LOGE("Fail to allocate a task.");
-               return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
-       }
+       Context *context = nullptr;
+       ITask *face_recognition_task = nullptr;
+       ITask *facenet_task = nullptr;
 
-       FacenetTask *facenet_task = new (nothrow) FacenetAdapter<FacenetInput, FacenetOutput>();
-       if (!facenet_task) {
-               delete face_recognition_task;
-               delete context;
-               LOGE("Fail to allocate a task.");
-               return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
-       }
+       try {
+               context = new Context();
+               face_recognition_task = new FaceRecognitionAdapter();
+               facenet_task = new FacenetAdapter();
+               context->__tasks.insert(make_pair("face_recognition", face_recognition_task));
+               context->__tasks.insert(make_pair("facenet", facenet_task));
 
-       pair<map<string, void *>::iterator, bool> result;
+               *out_handle = static_cast<mv_face_recognition_h>(context);
 
-       result = context->__tasks.insert(pair<string, void *>("face_recognition", face_recognition_task));
-       if (!result.second) {
-               delete facenet_task;
+               LOGD("face recognition handle [%p] has been created", *out_handle);
+       } catch (const BaseException &e) {
                delete face_recognition_task;
-               delete context;
-               LOGE("Fail to register a new task. Same task already exists.");
-               return MEDIA_VISION_ERROR_INVALID_OPERATION;
-       }
-
-       result = context->__tasks.insert(pair<string, void *>("facenet", facenet_task));
-       if (!result.second) {
                delete facenet_task;
-               delete face_recognition_task;
                delete context;
-               LOGE("Fail to register a new task. Same task already exists.");
-               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+               return e.getError();
        }
 
-       *out_handle = static_cast<mv_face_recognition_h>(context);
-
-       LOGD("face recognition handle [%p] has been created", *out_handle);
-
        MEDIA_VISION_FUNCTION_LEAVE();
 
        return MEDIA_VISION_ERROR_NONE;
@@ -114,18 +87,14 @@ int mv_face_recognition_destroy(mv_face_recognition_h handle)
        MEDIA_VISION_FUNCTION_ENTER();
 
        Context *context = static_cast<Context *>(handle);
-       map<string, void *>::iterator iter;
+       map<string, ITask *>::iterator iter;
 
        for (iter = context->__tasks.begin(); iter != context->__tasks.end(); ++iter) {
-               if (iter->first.compare("face_recognition") == 0) {
-                       auto face_recognition_task = static_cast<FaceRecognitionTask *>(iter->second);
-                       delete face_recognition_task;
-               }
-
-               if (iter->first.compare("facenet") == 0) {
-                       auto facenet_task = static_cast<FacenetTask *>(iter->second);
-                       delete facenet_task;
-               }
+               if (iter->first.compare("face_recognition") == 0)
+                       delete iter->second;
+
+               if (iter->first.compare("facenet") == 0)
+                       delete iter->second;
        }
 
        delete context;
@@ -149,8 +118,8 @@ int mv_face_recognition_prepare(mv_face_recognition_h handle)
 
        try {
                Context *context = static_cast<Context *>(handle);
-               auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
-               auto facenet_task = static_cast<FacenetTask *>(context->__tasks["facenet"]);
+               auto face_recognition_task = context->__tasks["face_recognition"];
+               auto facenet_task = context->__tasks["facenet"];
 
                face_recognition_task->configure();
                facenet_task->configure();
@@ -180,15 +149,17 @@ int mv_face_recognition_register(mv_face_recognition_h handle, mv_source_h sourc
 
        try {
                Context *context = static_cast<Context *>(handle);
-               auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
-               auto facenet_task = static_cast<FacenetTask *>(context->__tasks["facenet"]);
-               FacenetInput facenet_input = { { source } };
+               auto face_recognition_task = context->__tasks["face_recognition"];
+               auto facenet_task = context->__tasks["facenet"];
+               FacenetInput facenet_input(source);
 
                facenet_task->setInput(facenet_input);
                facenet_task->perform();
 
-               FacenetOutput &facenet_output = facenet_task->getOutput();
-               FaceRecognitionInput face_recognition_input = { .mode = RequestMode::REGISTER };
+               auto &facenet_output = static_cast<FacenetOutput &>(facenet_task->getOutput());
+               FaceRecognitionInput face_recognition_input;
+
+               face_recognition_input.mode = RequestMode::REGISTER;
 
                face_recognition_input.inputs.push_back(facenet_output.outputs[0]);
                face_recognition_input.labels.push_back(label);
@@ -218,8 +189,10 @@ int mv_face_recognition_unregister(mv_face_recognition_h handle, const char *lab
 
        try {
                Context *context = static_cast<Context *>(handle);
-               auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
-               FaceRecognitionInput input = { RequestMode::DELETE };
+               auto face_recognition_task = context->__tasks["face_recognition"];
+               FaceRecognitionInput input;
+
+               input.mode = RequestMode::DELETE;
 
                input.labels.clear();
                input.labels.push_back(label);
@@ -248,15 +221,17 @@ int mv_face_recognition_inference(mv_face_recognition_h handle, mv_source_h sour
 
        try {
                Context *context = static_cast<Context *>(handle);
-               auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
-               auto facenet_task = static_cast<FacenetTask *>(context->__tasks["facenet"]);
-               FacenetInput facenet_input = { { source } };
+               auto face_recognition_task = context->__tasks["face_recognition"];
+               auto facenet_task = context->__tasks["facenet"];
+               FacenetInput facenet_input(source);
 
                facenet_task->setInput(facenet_input);
                facenet_task->perform();
-               FacenetOutput &facenet_output = facenet_task->getOutput();
 
-               FaceRecognitionInput face_recognition_input = { RequestMode::INFERENCE };
+               auto &facenet_output = static_cast<FacenetOutput &>(facenet_task->getOutput());
+               FaceRecognitionInput face_recognition_input;
+
+               face_recognition_input.mode = RequestMode::INFERENCE;
 
                face_recognition_input.inputs = facenet_output.outputs;
                face_recognition_task->setInput(face_recognition_input);
@@ -284,9 +259,10 @@ int mv_face_recognition_get_label(mv_face_recognition_h handle, const char **out
 
        try {
                Context *context = static_cast<Context *>(handle);
-               auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
+               auto face_recognition_task = context->__tasks["face_recognition"];
+               auto &result = static_cast<FaceRecognitionResult &>(face_recognition_task->getOutput());
 
-               *out_label = face_recognition_task->getOutput().label.c_str();
+               *out_label = result.label.c_str();
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
@@ -312,10 +288,11 @@ int mv_face_recognition_get_confidence(mv_face_recognition_h handle, const float
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
+               auto face_recognition_task = context->__tasks["face_recognition"];
+               auto &result = static_cast<FaceRecognitionResult &>(face_recognition_task->getOutput());
 
-               *confidences = face_recognition_task->getOutput().raw_data.data();
-               *num_of_confidences = face_recognition_task->getOutput().raw_data.size();
+               *confidences = result.raw_data.data();
+               *num_of_confidences = result.raw_data.size();
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
@@ -339,12 +316,13 @@ int mv_face_recognition_get_label_with_index(mv_face_recognition_h handle, const
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
+               auto face_recognition_task = context->__tasks["face_recognition"];
+               auto &result = static_cast<FaceRecognitionResult &>(face_recognition_task->getOutput());
 
-               if (static_cast<size_t>(index) >= face_recognition_task->getOutput().labels.size())
+               if (static_cast<size_t>(index) >= result.labels.size())
                        throw InvalidParameter("A given index is out of boundary.");
 
-               *label = face_recognition_task->getOutput().labels[index].c_str();
+               *label = result.labels[index].c_str();
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
mv_machine_learning/image_classification/include/image_classification_adapter.h
index d2f24d0..5179777 100644 (file)
@@ -29,12 +29,12 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> class ImageClassificationAdapter : public mediavision::common::ITask<T, V>
+class ImageClassificationAdapter : public mediavision::common::ITask
 {
 private:
        std::unique_ptr<IImageClassification> _image_classification;
        std::shared_ptr<MachineLearningConfig> _config;
-       T _source;
+       InputBaseType _source;
        const std::string _config_file_name = "image_classification.json";
 
        void create();
@@ -52,11 +52,11 @@ public:
        void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
        void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
-       void setInput(T &t) override;
+       void setInput(InputBaseType &input) override;
        void perform() override;
-       void performAsync(T &t) override;
-       V &getOutput() override;
-       V &getOutputCache() override;
+       void performAsync(InputBaseType &input) override;
+       OutputBaseType &getOutput() override;
+       OutputBaseType &getOutputCache() override;
 };
 
 } // machine_learning
mv_machine_learning/image_classification/include/image_classification_type.h
index eb27c32..9b9b9f7 100644 (file)
@@ -28,7 +28,7 @@ namespace mediavision
 namespace machine_learning
 {
 struct ImageClassificationInput : public InputBaseType {
-       ImageClassificationInput(mv_source_h src = NULL) : InputBaseType(src)
+       ImageClassificationInput(mv_source_h src = nullptr) : InputBaseType(src)
        {}
 };
 
mv_machine_learning/image_classification/src/image_classification_adapter.cpp
index 1e4f748..a0d2a77 100644 (file)
@@ -29,7 +29,7 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> ImageClassificationAdapter<T, V>::ImageClassificationAdapter() : _source()
+ImageClassificationAdapter::ImageClassificationAdapter() : _source()
 {
        _config = make_shared<MachineLearningConfig>();
        _config->parseConfigFile(_config_file_name);
@@ -37,12 +37,12 @@ template<typename T, typename V> ImageClassificationAdapter<T, V>::ImageClassifi
        create();
 }
 
-template<typename T, typename V> ImageClassificationAdapter<T, V>::~ImageClassificationAdapter()
+ImageClassificationAdapter::~ImageClassificationAdapter()
 {
        _image_classification->preDestroy();
 }
 
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::create()
+void ImageClassificationAdapter::create()
 {
        _config->loadMetaFile(make_unique<ImageClassificationParser>());
        mv_inference_data_type_e dataType = _config->getInputMetaMap().begin()->second->dataType;
@@ -59,9 +59,8 @@ template<typename T, typename V> void ImageClassificationAdapter<T, V>::create()
        }
 }
 
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file,
-                                                                                                       const char *label_file, const char *model_name)
+void ImageClassificationAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                                         const char *model_name)
 {
        _config->setUserModel(model_file, meta_file, label_file);
        create();
@@ -72,72 +71,65 @@ void ImageClassificationAdapter<T, V>::setModelInfo(const char *model_file, cons
        }
 }
 
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void ImageClassificationAdapter::setEngineInfo(const char *engine_type, const char *device_type)
 {
        _image_classification->setEngineInfo(string(engine_type), string(device_type));
 }
 
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::configure()
+void ImageClassificationAdapter::configure()
 {
        _image_classification->configure();
 }
 
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void ImageClassificationAdapter::getNumberOfEngines(unsigned int *number_of_engines)
 {
        _image_classification->getNumberOfEngines(number_of_engines);
 }
 
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void ImageClassificationAdapter::getEngineType(unsigned int engine_index, char **engine_type)
 {
        _image_classification->getEngineType(engine_index, engine_type);
 }
 
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void ImageClassificationAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
 {
        _image_classification->getNumberOfDevices(engine_type, number_of_devices);
 }
 
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index,
-                                                                                                        char **device_type)
+void ImageClassificationAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
 {
        _image_classification->getDeviceType(engine_type, device_index, device_type);
 }
 
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::prepare()
+void ImageClassificationAdapter::prepare()
 {
        _image_classification->prepare();
 }
 
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::setInput(T &t)
+void ImageClassificationAdapter::setInput(InputBaseType &input)
 {
-       _source = t;
+       _source = input;
 }
 
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::perform()
+void ImageClassificationAdapter::perform()
 {
        _image_classification->perform(_source.inference_src);
 }
 
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::performAsync(T &t)
+void ImageClassificationAdapter::performAsync(InputBaseType &input)
 {
-       _image_classification->performAsync(static_cast<ImageClassificationInput &>(t));
+       _image_classification->performAsync(static_cast<ImageClassificationInput &>(input));
 }
 
-template<typename T, typename V> V &ImageClassificationAdapter<T, V>::getOutput()
+OutputBaseType &ImageClassificationAdapter::getOutput()
 {
-       return static_cast<V &>(_image_classification->getOutput());
+       return _image_classification->getOutput();
 }
 
-template<typename T, typename V> V &ImageClassificationAdapter<T, V>::getOutputCache()
+OutputBaseType &ImageClassificationAdapter::getOutputCache()
 {
        throw InvalidOperation("Not support yet.");
 }
 
-template class ImageClassificationAdapter<InputBaseType, OutputBaseType>;
 }
 }
mv_machine_learning/image_classification/src/mv_image_classification.cpp
index 8b0bb86..1134e73 100644 (file)
@@ -37,7 +37,6 @@ using namespace mediavision::common;
 using namespace mediavision::machine_learning;
 using namespace MediaVision::Common;
 using namespace mediavision::machine_learning::exception;
-using ImageClassificationTask = ImageClassificationAdapter<InputBaseType, OutputBaseType>;
 
 static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image" };
 static const size_t num_keys = sizeof(feature_keys) / sizeof(char *);
@@ -50,8 +49,7 @@ int mv_image_classification_create(mv_image_classification_h *out_handle)
        MEDIA_VISION_FUNCTION_ENTER();
 
        try {
-               machine_learning_native_create<InputBaseType, OutputBaseType>(TASK_NAME, new ImageClassificationTask(),
-                                                                                                                                         out_handle);
+               machine_learning_native_create(TASK_NAME, new ImageClassificationAdapter(), out_handle);
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
mv_machine_learning/image_segmentation/include/image_segmentation_type.h
index 3d9493e..e6a459c 100644 (file)
 
 #include <mv_common.h>
 #include <mv_inference_type.h>
+#include "MachineLearningType.h"
 
 namespace mediavision
 {
 namespace machine_learning
 {
-struct ImageSegmentationInput {
-       mv_source_h inference_src {};
+struct ImageSegmentationInput : public InputBaseType {
+       ImageSegmentationInput(mv_source_h src = nullptr) : InputBaseType(src)
+       {}
 };
 
 /**
  * @brief The object detection result structure.
  * @details Contains object detection result.
  */
-struct ImageSegmentationResult {
-       unsigned long frame_number {};
+struct ImageSegmentationResult : public OutputBaseType {
        unsigned int width {};
        unsigned int height {};
        unsigned int pixel_size {};
mv_machine_learning/image_segmentation/include/selfie_segmentation_adapter.h
index 3984d5a..26665bd 100644 (file)
@@ -28,12 +28,12 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> class ImageSegmentationAdapter : public mediavision::common::ITask<T, V>
+class ImageSegmentationAdapter : public mediavision::common::ITask
 {
 private:
        std::unique_ptr<IImageSegmentation> _selfie_segmentation;
        std::shared_ptr<MachineLearningConfig> _config;
-       T _source;
+       InputBaseType _source;
        const std::string _config_file_name = "selfie_segmentation.json";
        const std::string _plugin_config_file_name = "selfie_segmentation_plugin.json";
 
@@ -54,11 +54,11 @@ public:
        void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
        void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
-       void setInput(T &t) override;
+       void setInput(InputBaseType &input) override;
        void perform() override;
-       void performAsync(T &t) override;
-       V &getOutput() override;
-       V &getOutputCache() override;
+       void performAsync(InputBaseType &input) override;
+       OutputBaseType &getOutput() override;
+       OutputBaseType &getOutputCache() override;
 };
 
 } // machine_learning
mv_machine_learning/image_segmentation/src/mv_selfie_segmentation.cpp
index cae6d81..e66651c 100644 (file)
@@ -37,7 +37,6 @@ using namespace mediavision::common;
 using namespace mediavision::machine_learning;
 using namespace MediaVision::Common;
 using namespace mediavision::machine_learning::exception;
-using ImageSegmentationTask = ITask<ImageSegmentationInput, ImageSegmentationResult>;
 
 static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
                                                                          "http://tizen.org/feature/vision.inference.face" };
@@ -51,11 +50,11 @@ int mv_selfie_segmentation_create(mv_selfie_segmentation_h *handle)
        MEDIA_VISION_FUNCTION_ENTER();
 
        Context *context = nullptr;
-       ImageSegmentationTask *task = nullptr;
+       ITask *task = nullptr;
 
        try {
                context = new Context();
-               task = new ImageSegmentationAdapter<ImageSegmentationInput, ImageSegmentationResult>();
+               task = new ImageSegmentationAdapter();
                context->__tasks.insert(make_pair("selfie_segmentation", task));
                *handle = static_cast<mv_selfie_segmentation_h>(context);
        } catch (const BaseException &e) {
@@ -79,7 +78,7 @@ int mv_selfie_segmentation_destroy(mv_selfie_segmentation_h handle)
        auto context = static_cast<Context *>(handle);
 
        for (auto &m : context->__tasks)
-               delete static_cast<ImageSegmentationTask *>(m.second);
+               delete m.second;
 
        delete context;
 
@@ -99,7 +98,7 @@ int mv_selfie_segmentation_set_model(mv_selfie_segmentation_h handle, const char
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+               auto task = context->__tasks.at("selfie_segmentation");
 
                task->setModelInfo(model_file, meta_file, label_file, model_name);
        } catch (const BaseException &e) {
@@ -125,7 +124,7 @@ int mv_selfie_segmentation_set_engine(mv_selfie_segmentation_h handle, const cha
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+               auto task = context->__tasks.at("selfie_segmentation");
 
                task->setEngineInfo(backend_type, device_type);
        } catch (const BaseException &e) {
@@ -149,7 +148,7 @@ int mv_selfie_segmentation_get_engine_count(mv_selfie_segmentation_h handle, uns
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+               auto task = context->__tasks.at("selfie_segmentation");
 
                task->getNumberOfEngines(engine_count);
                // TODO.
@@ -175,7 +174,7 @@ int mv_selfie_segmentation_get_engine_type(mv_selfie_segmentation_h handle, cons
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+               auto task = context->__tasks.at("selfie_segmentation");
 
                task->getEngineType(engine_index, engine_type);
                // TODO.
@@ -201,7 +200,7 @@ int mv_selfie_segmentation_get_device_count(mv_selfie_segmentation_h handle, con
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+               auto task = context->__tasks.at("selfie_segmentation");
 
                task->getNumberOfDevices(engine_type, device_count);
                // TODO.
@@ -228,7 +227,7 @@ int mv_selfie_segmentation_get_device_type(mv_selfie_segmentation_h handle, cons
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+               auto task = context->__tasks.at("selfie_segmentation");
 
                task->getDeviceType(engine_type, device_index, device_type);
                // TODO.
@@ -251,7 +250,7 @@ int mv_selfie_segmentation_configure(mv_selfie_segmentation_h handle)
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+               auto task = context->__tasks.at("selfie_segmentation");
 
                task->configure();
        } catch (const BaseException &e) {
@@ -273,7 +272,7 @@ int mv_selfie_segmentation_prepare(mv_selfie_segmentation_h handle)
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+               auto task = context->__tasks.at("selfie_segmentation");
 
                task->prepare();
        } catch (const BaseException &e) {
@@ -296,7 +295,7 @@ int mv_selfie_segmentation_inference(mv_selfie_segmentation_h handle, mv_source_
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+               auto task = context->__tasks.at("selfie_segmentation");
 
                ImageSegmentationInput input = { .inference_src = source };
 
@@ -322,7 +321,7 @@ int mv_selfie_segmentation_inference_async(mv_selfie_segmentation_h handle, mv_s
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+               auto task = context->__tasks.at("selfie_segmentation");
 
                ImageSegmentationInput input = { source };
 
@@ -351,9 +350,9 @@ int mv_selfie_segmentation_get_result(mv_selfie_segmentation_h handle, unsigned
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+               auto task = context->__tasks.at("selfie_segmentation");
 
-               ImageSegmentationResult &result = task->getOutput();
+               auto &result = static_cast<ImageSegmentationResult &>(task->getOutput());
                *width = result.width;
                *height = result.height;
                *pixel_size = result.pixel_size;
mv_machine_learning/image_segmentation/src/selfie_segmentation_adapter.cpp
index 5a178a2..37d0570 100644 (file)
@@ -28,7 +28,7 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> ImageSegmentationAdapter<T, V>::ImageSegmentationAdapter() : _source()
+ImageSegmentationAdapter::ImageSegmentationAdapter() : _source()
 {
        _config = make_shared<MachineLearningConfig>();
 
@@ -41,19 +41,17 @@ template<typename T, typename V> ImageSegmentationAdapter<T, V>::ImageSegmentati
        create(_config->getDefaultModelName());
 }
 
-template<typename T, typename V> ImageSegmentationAdapter<T, V>::~ImageSegmentationAdapter()
+ImageSegmentationAdapter::~ImageSegmentationAdapter()
 {
        _selfie_segmentation->preDestroy();
 }
 
-template<typename T, typename V>
-template<typename U>
-void ImageSegmentationAdapter<T, V>::create(ImageSegmentationTaskType task_type)
+template<typename U> void ImageSegmentationAdapter::create(ImageSegmentationTaskType task_type)
 {
        // TODO. add switch-case statement here for Mediavision own task types.
 }
 
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::create(std::string model_name)
+void ImageSegmentationAdapter::create(std::string model_name)
 {
        if (model_name.empty())
                model_name = _config->getDefaultModelName();
@@ -82,8 +80,7 @@ template<typename T, typename V> void ImageSegmentationAdapter<T, V>::create(std
        }
 }
 
-template<typename T, typename V>
-ImageSegmentationTaskType ImageSegmentationAdapter<T, V>::convertToTaskType(string model_name)
+ImageSegmentationTaskType ImageSegmentationAdapter::convertToTaskType(string model_name)
 {
        if (model_name.empty())
                throw InvalidParameter("model name is empty.");
@@ -96,9 +93,8 @@ ImageSegmentationTaskType ImageSegmentationAdapter<T, V>::convertToTaskType(stri
        throw InvalidParameter("Invalid selfie segmentation model name.");
 }
 
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
-                                                                                                 const char *model_name)
+void ImageSegmentationAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                                       const char *model_name)
 {
        try {
                _config->setUserModel(model_file, meta_file, label_file);
@@ -115,72 +111,65 @@ void ImageSegmentationAdapter<T, V>::setModelInfo(const char *model_file, const
        _selfie_segmentation->setUserModel(model_file, meta_file, label_file);
 }
 
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void ImageSegmentationAdapter::setEngineInfo(const char *engine_type, const char *device_type)
 {
        _selfie_segmentation->setEngineInfo(string(engine_type), string(device_type));
 }
 
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::configure()
+void ImageSegmentationAdapter::configure()
 {
        _selfie_segmentation->configure();
 }
 
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void ImageSegmentationAdapter::getNumberOfEngines(unsigned int *number_of_engines)
 {
        _selfie_segmentation->getNumberOfEngines(number_of_engines);
 }
 
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void ImageSegmentationAdapter::getEngineType(unsigned int engine_index, char **engine_type)
 {
        _selfie_segmentation->getEngineType(engine_index, engine_type);
 }
 
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void ImageSegmentationAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
 {
        _selfie_segmentation->getNumberOfDevices(engine_type, number_of_devices);
 }
 
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index,
-                                                                                                  char **device_type)
+void ImageSegmentationAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
 {
        _selfie_segmentation->getDeviceType(engine_type, device_index, device_type);
 }
 
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::prepare()
+void ImageSegmentationAdapter::prepare()
 {
        _selfie_segmentation->prepare();
 }
 
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::setInput(T &t)
+void ImageSegmentationAdapter::setInput(InputBaseType &input)
 {
-       _source = t;
+       _source = input;
 }
 
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::perform()
+void ImageSegmentationAdapter::perform()
 {
        _selfie_segmentation->perform(_source.inference_src);
 }
 
-template<typename T, typename V> V &ImageSegmentationAdapter<T, V>::getOutput()
+OutputBaseType &ImageSegmentationAdapter::getOutput()
 {
        return _selfie_segmentation->getOutput();
 }
 
-template<typename T, typename V> V &ImageSegmentationAdapter<T, V>::getOutputCache()
+OutputBaseType &ImageSegmentationAdapter::getOutputCache()
 {
        return _selfie_segmentation->getOutputCache();
 }
 
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::performAsync(T &t)
+void ImageSegmentationAdapter::performAsync(InputBaseType &input)
 {
-       _selfie_segmentation->performAsync(t);
+       _selfie_segmentation->performAsync(static_cast<ImageSegmentationInput &>(input));
 }
 
-template class ImageSegmentationAdapter<ImageSegmentationInput, ImageSegmentationResult>;
 }
 }
\ No newline at end of file
mv_machine_learning/landmark_detection/include/facial_landmark_adapter.h
index b4b0f13..2fd3668 100644 (file)
@@ -29,12 +29,12 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> class FacialLandmarkAdapter : public mediavision::common::ITask<T, V>
+class FacialLandmarkAdapter : public mediavision::common::ITask
 {
 private:
        std::unique_ptr<ILandmarkDetection> _landmark_detection;
        std::shared_ptr<MachineLearningConfig> _config;
-       T _source;
+       InputBaseType _source;
        const std::string _config_file_name = "facial_landmark.json";
 
        void create(const std::string &model_name);
@@ -54,11 +54,11 @@ public:
        void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
        void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
-       void setInput(T &t) override;
+       void setInput(InputBaseType &input) override;
        void perform() override;
-       void performAsync(T &t) override;
-       V &getOutput() override;
-       V &getOutputCache() override;
+       void performAsync(InputBaseType &input) override;
+       OutputBaseType &getOutput() override;
+       OutputBaseType &getOutputCache() override;
 };
 
 } // machine_learning
index f2355a9..f131029 100644 (file)
@@ -28,7 +28,7 @@ namespace mediavision
 namespace machine_learning
 {
 struct LandmarkDetectionInput : public InputBaseType {
-       LandmarkDetectionInput(mv_source_h src = NULL) : InputBaseType(src)
+       LandmarkDetectionInput(mv_source_h src = nullptr) : InputBaseType(src)
        {}
 };
 
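The per-task input structs are now thin wrappers that only forward the source handle into the common base. A hedged sketch of the shared base types, inferred from their usage in this patch (the InputBaseType(src) constructor, the _source.inference_src reads, and results returned as OutputBaseType &); the real MachineLearningType.h may carry additional members:

// Inferred shape of the common base types; details marked "assumed" are
// not confirmed by the hunks in this patch.
#include <mv_common.h>

namespace mediavision
{
namespace machine_learning
{
struct InputBaseType {
	InputBaseType(mv_source_h src = nullptr) : inference_src(src)
	{}
	virtual ~InputBaseType() = default; // assumed in this sketch

	mv_source_h inference_src;
};

struct OutputBaseType {
	virtual ~OutputBaseType() = default; // assumed in this sketch
};
} // machine_learning
} // mediavision

Note that each adapter stores InputBaseType _source by value, so assigning a derived input slices it down to the base part. That is harmless here because the synchronous perform() path only reads inference_src, while performAsync() forwards the caller's reference and downcasts it unsliced.
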
index 717ae58..eb53cd7 100644 (file)
@@ -29,12 +29,12 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> class PoseLandmarkAdapter : public mediavision::common::ITask<T, V>
+class PoseLandmarkAdapter : public mediavision::common::ITask
 {
 private:
        std::unique_ptr<ILandmarkDetection> _landmark_detection;
        std::shared_ptr<MachineLearningConfig> _config;
-       T _source;
+       InputBaseType _source;
        const std::string _config_file_name = "pose_landmark.json";
 
        void create(const std::string &model_name);
@@ -54,11 +54,11 @@ public:
        void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
        void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
-       void setInput(T &t) override;
+       void setInput(InputBaseType &input) override;
        void perform() override;
-       void performAsync(T &t) override;
-       V &getOutput() override;
-       V &getOutputCache() override;
+       void performAsync(InputBaseType &input) override;
+       OutputBaseType &getOutput() override;
+       OutputBaseType &getOutputCache() override;
 };
 
 } // machine_learning
index acebc95..35e8fdb 100644 (file)
@@ -27,7 +27,7 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> FacialLandmarkAdapter<T, V>::FacialLandmarkAdapter() : _source()
+FacialLandmarkAdapter::FacialLandmarkAdapter() : _source()
 {
        _config = make_shared<MachineLearningConfig>();
        _config->parseConfigFile(_config_file_name);
@@ -35,14 +35,12 @@ template<typename T, typename V> FacialLandmarkAdapter<T, V>::FacialLandmarkAdap
        create(_config->getDefaultModelName());
 }
 
-template<typename T, typename V> FacialLandmarkAdapter<T, V>::~FacialLandmarkAdapter()
+FacialLandmarkAdapter::~FacialLandmarkAdapter()
 {
        _landmark_detection->preDestroy();
 }
 
-template<typename T, typename V>
-template<typename U>
-void FacialLandmarkAdapter<T, V>::create(LandmarkDetectionTaskType task_type)
+template<typename U> void FacialLandmarkAdapter::create(LandmarkDetectionTaskType task_type)
 {
        switch (task_type) {
        case LandmarkDetectionTaskType::FLD_TWEAK_CNN:
@@ -53,7 +51,7 @@ void FacialLandmarkAdapter<T, V>::create(LandmarkDetectionTaskType task_type)
        }
 }
 
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::create(const string &model_name)
+void FacialLandmarkAdapter::create(const string &model_name)
 {
        LandmarkDetectionTaskType task_type = convertToTaskType(model_name);
        _config->loadMetaFile(make_unique<LandmarkDetectionParser>(static_cast<int>(task_type)));
@@ -71,8 +69,7 @@ template<typename T, typename V> void FacialLandmarkAdapter<T, V>::create(const
        }
 }
 
-template<typename T, typename V>
-LandmarkDetectionTaskType FacialLandmarkAdapter<T, V>::convertToTaskType(string model_name)
+LandmarkDetectionTaskType FacialLandmarkAdapter::convertToTaskType(string model_name)
 {
        if (model_name.empty())
                throw InvalidParameter("model name is empty.");
@@ -86,9 +83,8 @@ LandmarkDetectionTaskType FacialLandmarkAdapter<T, V>::convertToTaskType(string
        throw InvalidParameter("Invalid facial detection model name.");
 }
 
-template<typename T, typename V>
-void FacialLandmarkAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
-                                                                                          const char *model_name)
+void FacialLandmarkAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                                const char *model_name)
 {
        try {
                _config->setUserModel(model_file, meta_file, label_file);
@@ -103,70 +99,65 @@ void FacialLandmarkAdapter<T, V>::setModelInfo(const char *model_file, const cha
        }
 }
 
-template<typename T, typename V>
-void FacialLandmarkAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void FacialLandmarkAdapter::setEngineInfo(const char *engine_type, const char *device_type)
 {
        _landmark_detection->setEngineInfo(string(engine_type), string(device_type));
 }
 
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::configure()
+void FacialLandmarkAdapter::configure()
 {
        _landmark_detection->configure();
 }
 
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void FacialLandmarkAdapter::getNumberOfEngines(unsigned int *number_of_engines)
 {
        _landmark_detection->getNumberOfEngines(number_of_engines);
 }
 
-template<typename T, typename V>
-void FacialLandmarkAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void FacialLandmarkAdapter::getEngineType(unsigned int engine_index, char **engine_type)
 {
        _landmark_detection->getEngineType(engine_index, engine_type);
 }
 
-template<typename T, typename V>
-void FacialLandmarkAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void FacialLandmarkAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
 {
        _landmark_detection->getNumberOfDevices(engine_type, number_of_devices);
 }
 
-template<typename T, typename V>
-void FacialLandmarkAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void FacialLandmarkAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
 {
        _landmark_detection->getDeviceType(engine_type, device_index, device_type);
 }
 
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::prepare()
+void FacialLandmarkAdapter::prepare()
 {
        _landmark_detection->prepare();
 }
 
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::setInput(T &t)
+void FacialLandmarkAdapter::setInput(InputBaseType &input)
 {
-       _source = t;
+       _source = input;
 }
 
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::perform()
+void FacialLandmarkAdapter::perform()
 {
        _landmark_detection->perform(_source.inference_src);
 }
 
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::performAsync(T &t)
+void FacialLandmarkAdapter::performAsync(InputBaseType &input)
 {
-       _landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(t));
+       _landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(input));
 }
 
-template<typename T, typename V> V &FacialLandmarkAdapter<T, V>::getOutput()
+OutputBaseType &FacialLandmarkAdapter::getOutput()
 {
        return _landmark_detection->getOutput();
 }
 
-template<typename T, typename V> V &FacialLandmarkAdapter<T, V>::getOutputCache()
+OutputBaseType &FacialLandmarkAdapter::getOutputCache()
 {
        throw InvalidOperation("Not support yet.");
 }
 
-template class FacialLandmarkAdapter<InputBaseType, OutputBaseType>;
 }
 }
\ No newline at end of file
index b7398f8..cca0757 100644 (file)
@@ -37,7 +37,6 @@ using namespace mediavision::common;
 using namespace mediavision::machine_learning;
 using namespace MediaVision::Common;
 using namespace mediavision::machine_learning::exception;
-using LandmarkDetectionTask = FacialLandmarkAdapter<InputBaseType, OutputBaseType>;
 
 static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
                                                                          "http://tizen.org/feature/vision.inference.face" };
@@ -51,7 +50,7 @@ int mv_facial_landmark_create(mv_facial_landmark_h *handle)
        MEDIA_VISION_FUNCTION_ENTER();
 
        try {
-               machine_learning_native_create<InputBaseType, OutputBaseType>(TASK_NAME, new LandmarkDetectionTask(), handle);
+               machine_learning_native_create(TASK_NAME, new FacialLandmarkAdapter(), handle);
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
index f9be39f..c899e03 100644 (file)
@@ -37,7 +37,6 @@ using namespace mediavision::common;
 using namespace mediavision::machine_learning;
 using namespace MediaVision::Common;
 using namespace mediavision::machine_learning::exception;
-using LandmarkDetectionTask = PoseLandmarkAdapter<InputBaseType, OutputBaseType>;
 
 static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
                                                                          "http://tizen.org/feature/vision.inference.face" };
@@ -51,7 +50,7 @@ int mv_pose_landmark_create(mv_pose_landmark_h *handle)
        MEDIA_VISION_FUNCTION_ENTER();
 
        try {
-               machine_learning_native_create<InputBaseType, OutputBaseType>(TASK_NAME, new LandmarkDetectionTask(), handle);
+               machine_learning_native_create(TASK_NAME, new PoseLandmarkAdapter(), handle);
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
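
Both landmark C APIs now hand a plain ITask pointer to the native layer. Below is a minimal sketch of the create path, assuming the Context bookkeeping that the object-detection-3d hunks later in this patch use directly; the actual MachineLearningNative.cpp may add validation and failure cleanup.

// Hedged sketch only: inferred from the call sites and the Context::__tasks
// usage elsewhere in this patch, not copied from MachineLearningNative.cpp.
#include <string>
#include <utility>

void machine_learning_native_create(const std::string &task_name, mediavision::common::ITask *task, void **handle)
{
	auto *context = new mediavision::common::Context();

	// One heterogeneous task map is enough now that every adapter is
	// reachable through the common ITask base.
	context->__tasks.insert(std::make_pair(task_name, task));
	*handle = static_cast<void *>(context);
}
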
index 486bcdd..ed4e2bb 100644 (file)
@@ -27,7 +27,7 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> PoseLandmarkAdapter<T, V>::PoseLandmarkAdapter() : _source()
+PoseLandmarkAdapter::PoseLandmarkAdapter() : _source()
 {
        _config = make_shared<MachineLearningConfig>();
        _config->parseConfigFile(_config_file_name);
@@ -35,14 +35,12 @@ template<typename T, typename V> PoseLandmarkAdapter<T, V>::PoseLandmarkAdapter(
        create(_config->getDefaultModelName());
 }
 
-template<typename T, typename V> PoseLandmarkAdapter<T, V>::~PoseLandmarkAdapter()
+PoseLandmarkAdapter::~PoseLandmarkAdapter()
 {
        _landmark_detection->preDestroy();
 }
 
-template<typename T, typename V>
-template<typename U>
-void PoseLandmarkAdapter<T, V>::create(LandmarkDetectionTaskType task_type)
+template<typename U> void PoseLandmarkAdapter::create(LandmarkDetectionTaskType task_type)
 {
        switch (task_type) {
        case LandmarkDetectionTaskType::PLD_CPM:
@@ -53,7 +51,7 @@ void PoseLandmarkAdapter<T, V>::create(LandmarkDetectionTaskType task_type)
        }
 }
 
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::create(const string &model_name)
+void PoseLandmarkAdapter::create(const string &model_name)
 {
        LandmarkDetectionTaskType task_type = convertToTaskType(model_name);
        _config->loadMetaFile(make_unique<LandmarkDetectionParser>(static_cast<int>(task_type)));
@@ -71,8 +69,7 @@ template<typename T, typename V> void PoseLandmarkAdapter<T, V>::create(const st
        }
 }
 
-template<typename T, typename V>
-LandmarkDetectionTaskType PoseLandmarkAdapter<T, V>::convertToTaskType(string model_name)
+LandmarkDetectionTaskType PoseLandmarkAdapter::convertToTaskType(string model_name)
 {
        if (model_name.empty())
                throw InvalidParameter("model name is empty.");
@@ -85,9 +82,8 @@ LandmarkDetectionTaskType PoseLandmarkAdapter<T, V>::convertToTaskType(string mo
        throw InvalidParameter("Invalid pose landmark model name.");
 }
 
-template<typename T, typename V>
-void PoseLandmarkAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
-                                                                                        const char *model_name)
+void PoseLandmarkAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                          const char *model_name)
 {
        try {
                _config->setUserModel(model_file, meta_file, label_file);
@@ -102,70 +98,65 @@ void PoseLandmarkAdapter<T, V>::setModelInfo(const char *model_file, const char
        }
 }
 
-template<typename T, typename V>
-void PoseLandmarkAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void PoseLandmarkAdapter::setEngineInfo(const char *engine_type, const char *device_type)
 {
        _landmark_detection->setEngineInfo(string(engine_type), string(device_type));
 }
 
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::configure()
+void PoseLandmarkAdapter::configure()
 {
        _landmark_detection->configure();
 }
 
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void PoseLandmarkAdapter::getNumberOfEngines(unsigned int *number_of_engines)
 {
        _landmark_detection->getNumberOfEngines(number_of_engines);
 }
 
-template<typename T, typename V>
-void PoseLandmarkAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void PoseLandmarkAdapter::getEngineType(unsigned int engine_index, char **engine_type)
 {
        _landmark_detection->getEngineType(engine_index, engine_type);
 }
 
-template<typename T, typename V>
-void PoseLandmarkAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void PoseLandmarkAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
 {
        _landmark_detection->getNumberOfDevices(engine_type, number_of_devices);
 }
 
-template<typename T, typename V>
-void PoseLandmarkAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void PoseLandmarkAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
 {
        _landmark_detection->getDeviceType(engine_type, device_index, device_type);
 }
 
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::prepare()
+void PoseLandmarkAdapter::prepare()
 {
        _landmark_detection->prepare();
 }
 
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::setInput(T &t)
+void PoseLandmarkAdapter::setInput(InputBaseType &input)
 {
-       _source = t;
+       _source = input;
 }
 
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::perform()
+void PoseLandmarkAdapter::perform()
 {
        _landmark_detection->perform(_source.inference_src);
 }
 
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::performAsync(T &t)
+void PoseLandmarkAdapter::performAsync(InputBaseType &input)
 {
-       _landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(t));
+       _landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(input));
 }
 
-template<typename T, typename V> V &PoseLandmarkAdapter<T, V>::getOutput()
+OutputBaseType &PoseLandmarkAdapter::getOutput()
 {
        return _landmark_detection->getOutput();
 }
 
-template<typename T, typename V> V &PoseLandmarkAdapter<T, V>::getOutputCache()
+OutputBaseType &PoseLandmarkAdapter::getOutputCache()
 {
        throw InvalidOperation("Not support yet.");
 }
 
-template class PoseLandmarkAdapter<InputBaseType, OutputBaseType>;
 }
 }
index 47a690a..2f33db7 100644 (file)
@@ -28,12 +28,12 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> class FaceDetectionAdapter : public mediavision::common::ITask<T, V>
+class FaceDetectionAdapter : public mediavision::common::ITask
 {
 private:
        std::unique_ptr<IObjectDetection> _object_detection;
        std::shared_ptr<MachineLearningConfig> _config;
-       T _source;
+       InputBaseType _source;
        const std::string _config_file_name = "face_detection.json";
        const std::string _plugin_config_file_name = "face_detection_plugin.json";
 
@@ -54,11 +54,11 @@ public:
        void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
        void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
-       void setInput(T &t) override;
+       void setInput(InputBaseType &input) override;
        void perform() override;
-       void performAsync(T &t) override;
-       V &getOutput() override;
-       V &getOutputCache() override;
+       void performAsync(InputBaseType &input) override;
+       OutputBaseType &getOutput() override;
+       OutputBaseType &getOutputCache() override;
 };
 
 } // machine_learning
index 53e0fa6..fbea1ef 100644 (file)
@@ -29,12 +29,12 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> class ObjectDetectionAdapter : public mediavision::common::ITask<T, V>
+class ObjectDetectionAdapter : public mediavision::common::ITask
 {
 private:
        std::unique_ptr<IObjectDetection> _object_detection;
        std::shared_ptr<MachineLearningConfig> _config;
-       T _source;
+       InputBaseType _source;
        const std::string _config_file_name = "object_detection.json";
        const std::string _plugin_config_file_name = "object_detection_plugin.json";
 
@@ -55,11 +55,11 @@ public:
        void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
        void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void prepare() override;
-       void setInput(T &t) override;
+       void setInput(InputBaseType &input) override;
        void perform() override;
-       void performAsync(T &t) override;
-       V &getOutput() override;
-       V &getOutputCache() override;
+       void performAsync(InputBaseType &input) override;
+       OutputBaseType &getOutput() override;
+       OutputBaseType &getOutputCache() override;
 };
 
 } // machine_learning
index a653b25..467374b 100644 (file)
@@ -28,7 +28,7 @@ namespace mediavision
 namespace machine_learning
 {
 struct ObjectDetectionInput : public InputBaseType {
-       ObjectDetectionInput(mv_source_h src = NULL) : InputBaseType(src)
+       ObjectDetectionInput(mv_source_h src = nullptr) : InputBaseType(src)
        {}
 };
 
index d50a5fb..1ecae44 100644 (file)
@@ -28,7 +28,7 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> FaceDetectionAdapter<T, V>::FaceDetectionAdapter() : _source()
+FaceDetectionAdapter::FaceDetectionAdapter() : _source()
 {
        _config = make_shared<MachineLearningConfig>();
 
@@ -41,14 +41,12 @@ template<typename T, typename V> FaceDetectionAdapter<T, V>::FaceDetectionAdapte
        create(_config->getDefaultModelName());
 }
 
-template<typename T, typename V> FaceDetectionAdapter<T, V>::~FaceDetectionAdapter()
+FaceDetectionAdapter::~FaceDetectionAdapter()
 {
        _object_detection->preDestroy();
 }
 
-template<typename T, typename V>
-template<typename U>
-void FaceDetectionAdapter<T, V>::create(ObjectDetectionTaskType task_type)
+template<typename U> void FaceDetectionAdapter::create(ObjectDetectionTaskType task_type)
 {
        switch (task_type) {
        case ObjectDetectionTaskType::FD_MOBILENET_V1_SSD:
@@ -60,7 +58,7 @@ void FaceDetectionAdapter<T, V>::create(ObjectDetectionTaskType task_type)
        // TODO.
 }
 
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::create(string model_name)
+void FaceDetectionAdapter::create(string model_name)
 {
        if (model_name.empty())
                model_name = _config->getDefaultModelName();
@@ -89,8 +87,7 @@ template<typename T, typename V> void FaceDetectionAdapter<T, V>::create(string
        }
 }
 
-template<typename T, typename V>
-ObjectDetectionTaskType FaceDetectionAdapter<T, V>::convertToTaskType(string model_name)
+ObjectDetectionTaskType FaceDetectionAdapter::convertToTaskType(string model_name)
 {
        if (model_name.empty())
                throw InvalidParameter("model name is empty.");
@@ -108,9 +105,8 @@ ObjectDetectionTaskType FaceDetectionAdapter<T, V>::convertToTaskType(string mod
        throw InvalidParameter("Invalid face detection model name.");
 }
 
-template<typename T, typename V>
-void FaceDetectionAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
-                                                                                         const char *model_name)
+void FaceDetectionAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                               const char *model_name)
 {
        try {
                _config->setUserModel(model_file, meta_file, label_file);
@@ -125,70 +121,65 @@ void FaceDetectionAdapter<T, V>::setModelInfo(const char *model_file, const char
        }
 }
 
-template<typename T, typename V>
-void FaceDetectionAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void FaceDetectionAdapter::setEngineInfo(const char *engine_type, const char *device_type)
 {
        _object_detection->setEngineInfo(string(engine_type), string(device_type));
 }
 
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::configure()
+void FaceDetectionAdapter::configure()
 {
        _object_detection->configure();
 }
 
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void FaceDetectionAdapter::getNumberOfEngines(unsigned int *number_of_engines)
 {
        _object_detection->getNumberOfEngines(number_of_engines);
 }
 
-template<typename T, typename V>
-void FaceDetectionAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void FaceDetectionAdapter::getEngineType(unsigned int engine_index, char **engine_type)
 {
        _object_detection->getEngineType(engine_index, engine_type);
 }
 
-template<typename T, typename V>
-void FaceDetectionAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void FaceDetectionAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
 {
        _object_detection->getNumberOfDevices(engine_type, number_of_devices);
 }
 
-template<typename T, typename V>
-void FaceDetectionAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void FaceDetectionAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
 {
        _object_detection->getDeviceType(engine_type, device_index, device_type);
 }
 
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::prepare()
+void FaceDetectionAdapter::prepare()
 {
        _object_detection->prepare();
 }
 
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::setInput(T &t)
+void FaceDetectionAdapter::setInput(InputBaseType &input)
 {
-       _source = t;
+       _source = input;
 }
 
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::perform()
+void FaceDetectionAdapter::perform()
 {
        _object_detection->perform(_source.inference_src);
 }
 
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::performAsync(T &t)
+void FaceDetectionAdapter::performAsync(InputBaseType &input)
 {
-       _object_detection->performAsync(static_cast<ObjectDetectionInput &>(t));
+       _object_detection->performAsync(static_cast<ObjectDetectionInput &>(input));
 }
 
-template<typename T, typename V> V &FaceDetectionAdapter<T, V>::getOutput()
+OutputBaseType &FaceDetectionAdapter::getOutput()
 {
        return _object_detection->getOutput();
 }
 
-template<typename T, typename V> V &FaceDetectionAdapter<T, V>::getOutputCache()
+OutputBaseType &FaceDetectionAdapter::getOutputCache()
 {
        return _object_detection->getOutputCache();
 }
 
-template class FaceDetectionAdapter<InputBaseType, OutputBaseType>;
 }
 }
index 41a254c..821ba2f 100644 (file)
@@ -39,7 +39,6 @@ using namespace mediavision::common;
 using namespace mediavision::machine_learning;
 using namespace MediaVision::Common;
 using namespace mediavision::machine_learning::exception;
-using FaceDetectionTask = FaceDetectionAdapter<InputBaseType, OutputBaseType>;
 
 static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
                                                                          "http://tizen.org/feature/vision.inference.face" };
@@ -53,7 +52,7 @@ int mv_face_detection_create(mv_face_detection_h *handle)
        MEDIA_VISION_FUNCTION_ENTER();
 
        try {
-               machine_learning_native_create<InputBaseType, OutputBaseType>(TASK_NAME, new FaceDetectionTask(), handle);
+               machine_learning_native_create(TASK_NAME, new FaceDetectionAdapter(), handle);
        } catch (const BaseException &e) {
                return e.getError();
        }
@@ -336,10 +335,8 @@ int mv_face_detection_get_label(mv_face_detection_h handle, const unsigned int i
        MEDIA_VISION_FUNCTION_ENTER();
 
        try {
-               auto context = static_cast<Context *>(handle);
-               auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
-
-               auto &result = static_cast<ObjectDetectionResult &>(task->getOutputCache());
+               auto &result =
+                               static_cast<ObjectDetectionResult &>(machine_learning_native_get_result_cache(handle, TASK_NAME));
 
                if (result.number_of_objects <= index)
                        throw InvalidParameter("Invalid index range.");
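
mv_face_detection_get_label() no longer digs into the Context itself; it asks the native layer for the cached result. A plausible shape for that helper, inferred purely from its call sites in this patch:

// Hedged sketch of machine_learning_native_get_result_cache(); the real
// definition lives in MachineLearningNative.cpp and may differ.
#include <string>

mediavision::machine_learning::OutputBaseType &machine_learning_native_get_result_cache(void *handle,
																						 const std::string &task_name)
{
	auto *context = static_cast<mediavision::common::Context *>(handle);

	// getOutputCache() hands back the last computed result without running
	// inference again, which is what the label getters rely on.
	return context->__tasks.at(task_name)->getOutputCache();
}
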
index e336583..35050af 100644 (file)
@@ -39,7 +39,6 @@ using namespace mediavision::common;
 using namespace mediavision::machine_learning;
 using namespace MediaVision::Common;
 using namespace mediavision::machine_learning::exception;
-using ObjectDetectionTask = ObjectDetectionAdapter<InputBaseType, OutputBaseType>;
 
 static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
                                                                          "http://tizen.org/feature/vision.inference.face" };
@@ -53,7 +52,7 @@ int mv_object_detection_create(mv_object_detection_h *handle)
        MEDIA_VISION_FUNCTION_ENTER();
 
        try {
-               machine_learning_native_create<InputBaseType, OutputBaseType>(TASK_NAME, new ObjectDetectionTask(), handle);
+               machine_learning_native_create(TASK_NAME, new ObjectDetectionAdapter(), handle);
        } catch (const BaseException &e) {
                return e.getError();
        }
@@ -334,7 +333,8 @@ int mv_object_detection_get_label(mv_object_detection_h handle, const unsigned i
        MEDIA_VISION_FUNCTION_ENTER();
 
        try {
-               auto &result = static_cast<ObjectDetectionResult &>(machine_learning_native_get_result(handle, TASK_NAME));
+               auto &result =
+                               static_cast<ObjectDetectionResult &>(machine_learning_native_get_result_cache(handle, TASK_NAME));
 
                if (result.number_of_objects <= index)
                        throw InvalidParameter("Invalid index range.");
index cce24f4..6a9dcf0 100644 (file)
@@ -28,7 +28,7 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> ObjectDetectionAdapter<T, V>::ObjectDetectionAdapter() : _source()
+ObjectDetectionAdapter::ObjectDetectionAdapter() : _source()
 {
        _config = make_shared<MachineLearningConfig>();
 
@@ -41,14 +41,12 @@ template<typename T, typename V> ObjectDetectionAdapter<T, V>::ObjectDetectionAd
        create(_config->getDefaultModelName());
 }
 
-template<typename T, typename V> ObjectDetectionAdapter<T, V>::~ObjectDetectionAdapter()
+ObjectDetectionAdapter::~ObjectDetectionAdapter()
 {
        _object_detection->preDestroy();
 }
 
-template<typename T, typename V>
-template<typename U>
-void ObjectDetectionAdapter<T, V>::create(ObjectDetectionTaskType task_type)
+template<typename U> void ObjectDetectionAdapter::create(ObjectDetectionTaskType task_type)
 {
        switch (task_type) {
        case ObjectDetectionTaskType::MOBILENET_V1_SSD:
@@ -63,7 +61,7 @@ void ObjectDetectionAdapter<T, V>::create(ObjectDetectionTaskType task_type)
        // TODO.
 }
 
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::create(string model_name)
+void ObjectDetectionAdapter::create(string model_name)
 {
        if (model_name.empty())
                model_name = _config->getDefaultModelName();
@@ -92,8 +90,7 @@ template<typename T, typename V> void ObjectDetectionAdapter<T, V>::create(strin
        }
 }
 
-template<typename T, typename V>
-ObjectDetectionTaskType ObjectDetectionAdapter<T, V>::convertToTaskType(string model_name)
+ObjectDetectionTaskType ObjectDetectionAdapter::convertToTaskType(string model_name)
 {
        if (model_name.empty())
                throw InvalidParameter("model name is empty.");
@@ -113,9 +110,8 @@ ObjectDetectionTaskType ObjectDetectionAdapter<T, V>::convertToTaskType(string m
        throw InvalidParameter("Invalid object detection model name.");
 }
 
-template<typename T, typename V>
-void ObjectDetectionAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
-                                                                                               const char *model_name)
+void ObjectDetectionAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                                 const char *model_name)
 {
        try {
                _config->setUserModel(model_file, meta_file, label_file);
@@ -130,70 +126,65 @@ void ObjectDetectionAdapter<T, V>::setModelInfo(const char *model_file, const ch
        }
 }
 
-template<typename T, typename V>
-void ObjectDetectionAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void ObjectDetectionAdapter::setEngineInfo(const char *engine_type, const char *device_type)
 {
        _object_detection->setEngineInfo(string(engine_type), string(device_type));
 }
 
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::configure()
+void ObjectDetectionAdapter::configure()
 {
        _object_detection->configure();
 }
 
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void ObjectDetectionAdapter::getNumberOfEngines(unsigned int *number_of_engines)
 {
        _object_detection->getNumberOfEngines(number_of_engines);
 }
 
-template<typename T, typename V>
-void ObjectDetectionAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void ObjectDetectionAdapter::getEngineType(unsigned int engine_index, char **engine_type)
 {
        _object_detection->getEngineType(engine_index, engine_type);
 }
 
-template<typename T, typename V>
-void ObjectDetectionAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void ObjectDetectionAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
 {
        _object_detection->getNumberOfDevices(engine_type, number_of_devices);
 }
 
-template<typename T, typename V>
-void ObjectDetectionAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void ObjectDetectionAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
 {
        _object_detection->getDeviceType(engine_type, device_index, device_type);
 }
 
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::prepare()
+void ObjectDetectionAdapter::prepare()
 {
        _object_detection->prepare();
 }
 
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::setInput(T &t)
+void ObjectDetectionAdapter::setInput(InputBaseType &input)
 {
-       _source = t;
+       _source = input;
 }
 
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::perform()
+void ObjectDetectionAdapter::perform()
 {
        _object_detection->perform(_source.inference_src);
 }
 
-template<typename T, typename V> V &ObjectDetectionAdapter<T, V>::getOutput()
+OutputBaseType &ObjectDetectionAdapter::getOutput()
 {
        return _object_detection->getOutput();
 }
 
-template<typename T, typename V> V &ObjectDetectionAdapter<T, V>::getOutputCache()
+OutputBaseType &ObjectDetectionAdapter::getOutputCache()
 {
        return _object_detection->getOutputCache();
 }
 
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::performAsync(T &t)
+void ObjectDetectionAdapter::performAsync(InputBaseType &input)
 {
-       _object_detection->performAsync(static_cast<ObjectDetectionInput &>(t));
+       _object_detection->performAsync(static_cast<ObjectDetectionInput &>(input));
 }
 
-template class ObjectDetectionAdapter<InputBaseType, OutputBaseType>;
 }
 }
\ No newline at end of file
index f9aa9e9..a9963f4 100644 (file)
@@ -29,12 +29,12 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> class ObjectDetection3dAdapter : public mediavision::common::ITask<T, V>
+class ObjectDetection3dAdapter : public mediavision::common::ITask
 {
 private:
        std::unique_ptr<IObjectDetection3d> _object_detection_3d;
        std::shared_ptr<MachineLearningConfig> _config;
-       T _source;
+       InputBaseType _source;
        const std::string _config_file_name = "object_detection_3d.json";
 
        void create(const std::string &model_name);
@@ -54,11 +54,11 @@ public:
        void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
        void configure() override;
        void prepare() override;
-       void setInput(T &t) override;
+       void setInput(InputBaseType &input) override;
        void perform() override;
-       void performAsync(T &t) override;
-       V &getOutput() override;
-       V &getOutputCache() override;
+       void performAsync(InputBaseType &input) override;
+       OutputBaseType &getOutput() override;
+       OutputBaseType &getOutputCache() override;
 };
 
 } // machine_learning
index cbe8664..bd86601 100644 (file)
@@ -19,13 +19,15 @@
 
 #include <mv_common.h>
 #include <mv_inference_type.h>
+#include "MachineLearningType.h"
 
 namespace mediavision
 {
 namespace machine_learning
 {
-struct ObjectDetection3dInput {
-       mv_source_h inference_src;
+struct ObjectDetection3dInput : public InputBaseType {
+       ObjectDetection3dInput(mv_source_h src = nullptr) : InputBaseType(src)
+       {}
 };
 
 struct EdgeIndex {
@@ -37,7 +39,7 @@ struct EdgeIndex {
  * @brief The object detection 3d result structure.
  * @details Contains object detection 3d result.
  */
-struct ObjectDetection3dResult {
+struct ObjectDetection3dResult : public OutputBaseType {
        unsigned int probability {};
        unsigned int number_of_points {};
        std::vector<unsigned int> x_vec;
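
With ObjectDetection3dResult derived from OutputBaseType, getOutput() can return it through the base reference and callers downcast at the boundary, exactly as the mv_object_detection_3d.cpp hunks below do. An illustrative helper showing the pattern; the function name is invented and is not part of the patch:

// Illustrative only: mirrors the static_cast pattern used in
// mv_object_detection_3d.cpp below.
unsigned int get_point_count(mediavision::common::ITask *task)
{
	using mediavision::machine_learning::ObjectDetection3dResult;

	// getOutput() now returns OutputBaseType&; a caller that knows which
	// adapter it created downcasts to the concrete result type.
	auto &result = static_cast<ObjectDetection3dResult &>(task->getOutput());

	return result.number_of_points;
}
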
index cd02449..25c74e6 100644 (file)
@@ -35,7 +35,6 @@ using namespace mediavision::common;
 using namespace mediavision::machine_learning;
 using namespace MediaVision::Common;
 using namespace mediavision::machine_learning::exception;
-using ObjectDetection3dTask = ITask<ObjectDetection3dInput, ObjectDetection3dResult>;
 
 static mutex g_object_detection_3d_mutex;
 static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
@@ -51,11 +50,11 @@ int mv_object_detection_3d_create(mv_object_detection_3d_h *handle)
        MEDIA_VISION_FUNCTION_ENTER();
 
        Context *context = nullptr;
-       ObjectDetection3dTask *task = nullptr;
+       ITask *task = nullptr;
 
        try {
                context = new Context();
-               task = new ObjectDetection3dAdapter<ObjectDetection3dInput, ObjectDetection3dResult>();
+               task = new ObjectDetection3dAdapter();
                context->__tasks.insert(make_pair("object_detection_3d", task));
                *handle = static_cast<mv_object_detection_3d_h>(context);
        } catch (const BaseException &e) {
@@ -82,7 +81,7 @@ int mv_object_detection_3d_destroy(mv_object_detection_3d_h handle)
        auto context = static_cast<Context *>(handle);
 
        for (auto &m : context->__tasks)
-               delete static_cast<ObjectDetection3dTask *>(m.second);
+               delete m.second;
 
        delete context;
 
@@ -108,7 +107,7 @@ int mv_object_detection_3d_set_model(mv_object_detection_3d_h handle, const char
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
                task->setModelInfo(model_file, meta_file, label_file, model_name);
        } catch (const BaseException &e) {
@@ -136,7 +135,7 @@ int mv_object_detection_3d_set_engine(mv_object_detection_3d_h handle, const cha
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
                task->setEngineInfo(backend_type, device_type);
        } catch (const BaseException &e) {
@@ -162,7 +161,7 @@ int mv_object_detection_3d_get_engine_count(mv_object_detection_3d_h handle, uns
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
                task->getNumberOfEngines(engine_count);
                // TODO.
@@ -190,7 +189,7 @@ int mv_object_detection_3d_get_engine_type(mv_object_detection_3d_h handle, cons
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
                task->getEngineType(engine_index, engine_type);
                // TODO.
@@ -218,7 +217,7 @@ int mv_object_detection_3d_get_device_count(mv_object_detection_3d_h handle, con
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
                task->getNumberOfDevices(engine_type, device_count);
                // TODO.
@@ -248,7 +247,7 @@ int mv_object_detection_3d_get_device_type(mv_object_detection_3d_h handle, cons
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
                task->getDeviceType(engine_type, device_index, device_type);
                // TODO.
@@ -274,7 +273,7 @@ int mv_object_detection_3d_configure(mv_object_detection_3d_h handle)
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
                task->configure();
        } catch (const BaseException &e) {
@@ -299,7 +298,7 @@ int mv_object_detection_3d_prepare(mv_object_detection_3d_h handle)
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
                task->prepare();
        } catch (const BaseException &e) {
@@ -325,7 +324,7 @@ int mv_object_detection_3d_inference(mv_object_detection_3d_h handle, mv_source_
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
                ObjectDetection3dInput input = { source };
 
@@ -354,9 +353,9 @@ int mv_object_detection_3d_get_probability(mv_object_detection_3d_h handle, unsi
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
-               ObjectDetection3dResult &result = task->getOutput();
+               auto &result = static_cast<ObjectDetection3dResult &>(task->getOutput());
 
                *out_probability = result.probability;
        } catch (const BaseException &e) {
@@ -382,9 +381,9 @@ int mv_object_detection_3d_get_num_of_points(mv_object_detection_3d_h handle, un
 
        try {
                auto context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
-               ObjectDetection3dResult &result = task->getOutput();
+               auto &result = static_cast<ObjectDetection3dResult &>(task->getOutput());
 
                *out_num_of_points = result.number_of_points;
        } catch (const BaseException &e) {
@@ -411,9 +410,9 @@ int mv_object_detection_3d_get_points(mv_object_detection_3d_h handle, unsigned
 
        try {
                Context *context = static_cast<Context *>(handle);
-               auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+               auto task = context->__tasks.at("object_detection_3d");
 
-               ObjectDetection3dResult &result = task->getOutput();
+               auto &result = static_cast<ObjectDetection3dResult &>(task->getOutput());
 
                *out_x = result.x_vec.data();
                *out_y = result.y_vec.data();
index bd548ee..6912231 100644 (file)
@@ -26,7 +26,7 @@ namespace mediavision
 {
 namespace machine_learning
 {
-template<typename T, typename V> ObjectDetection3dAdapter<T, V>::ObjectDetection3dAdapter() : _source()
+ObjectDetection3dAdapter::ObjectDetection3dAdapter() : _source()
 {
        _config = make_shared<MachineLearningConfig>();
        _config->parseConfigFile(_config_file_name);
@@ -34,12 +34,10 @@ template<typename T, typename V> ObjectDetection3dAdapter<T, V>::ObjectDetection
        create(_config->getDefaultModelName());
 }
 
-template<typename T, typename V> ObjectDetection3dAdapter<T, V>::~ObjectDetection3dAdapter()
+ObjectDetection3dAdapter::~ObjectDetection3dAdapter()
 {}
 
-template<typename T, typename V>
-template<typename U>
-void ObjectDetection3dAdapter<T, V>::create(ObjectDetection3dTaskType task_type)
+template<typename U> void ObjectDetection3dAdapter::create(ObjectDetection3dTaskType task_type)
 {
        switch (task_type) {
        case ObjectDetection3dTaskType::OBJECTRON:
@@ -50,7 +48,7 @@ void ObjectDetection3dAdapter<T, V>::create(ObjectDetection3dTaskType task_type)
        }
 }
 
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::create(const string &model_name)
+void ObjectDetection3dAdapter::create(const string &model_name)
 {
        ObjectDetection3dTaskType task_type = convertToTaskType(model_name);
        _config->loadMetaFile(make_unique<ObjectDetection3dParser>(static_cast<int>(task_type)));
@@ -68,8 +66,7 @@ template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::create(con
        }
 }
 
-template<typename T, typename V>
-ObjectDetection3dTaskType ObjectDetection3dAdapter<T, V>::convertToTaskType(string model_name)
+ObjectDetection3dTaskType ObjectDetection3dAdapter::convertToTaskType(string model_name)
 {
        if (model_name.empty())
                throw InvalidParameter("model name is empty.");
@@ -82,9 +79,8 @@ ObjectDetection3dTaskType ObjectDetection3dAdapter<T, V>::convertToTaskType(stri
        throw InvalidParameter("Invalid object detection 3d model name.");
 }
 
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
-                                                                                                 const char *model_name)
+void ObjectDetection3dAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+                                                                                       const char *model_name)
 {
        try {
                _config->setUserModel(model_file, meta_file, label_file);
@@ -97,73 +93,66 @@ void ObjectDetection3dAdapter<T, V>::setModelInfo(const char *model_file, const
                throw InvalidParameter("Model info not invalid.");
 }
 
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void ObjectDetection3dAdapter::setEngineInfo(const char *engine_type, const char *device_type)
 {
        _object_detection_3d->setEngineInfo(string(engine_type), string(device_type));
 }
 
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::configure()
+void ObjectDetection3dAdapter::configure()
 {
        _object_detection_3d->configure();
 }
 
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void ObjectDetection3dAdapter::getNumberOfEngines(unsigned int *number_of_engines)
 {
        _object_detection_3d->getNumberOfEngines(number_of_engines);
 }
 
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void ObjectDetection3dAdapter::getEngineType(unsigned int engine_index, char **engine_type)
 {
        _object_detection_3d->getEngineType(engine_index, engine_type);
 }
 
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void ObjectDetection3dAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
 {
        _object_detection_3d->getNumberOfDevices(engine_type, number_of_devices);
 }
 
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index,
-                                                                                                  char **device_type)
+void ObjectDetection3dAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
 {
        _object_detection_3d->getDeviceType(engine_type, device_index, device_type);
 }
 
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::prepare()
+void ObjectDetection3dAdapter::prepare()
 {
        _object_detection_3d->prepare();
 }
 
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::setInput(T &t)
+void ObjectDetection3dAdapter::setInput(InputBaseType &input)
 {
-       _source = t;
+       _source = input;
 }
 
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::perform()
+void ObjectDetection3dAdapter::perform()
 {
        shared_ptr<MetaInfo> metaInfo = _object_detection_3d->getInputMetaInfo();
        _object_detection_3d->perform(_source.inference_src, metaInfo);
 }
 
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::performAsync(T &t)
+void ObjectDetection3dAdapter::performAsync(InputBaseType &input)
 {
        throw InvalidOperation("Not support yet.");
 }
 
-template<typename T, typename V> V &ObjectDetection3dAdapter<T, V>::getOutput()
+OutputBaseType &ObjectDetection3dAdapter::getOutput()
 {
        return _object_detection_3d->result();
 }
 
-template<typename T, typename V> V &ObjectDetection3dAdapter<T, V>::getOutputCache()
+OutputBaseType &ObjectDetection3dAdapter::getOutputCache()
 {
        throw InvalidOperation("Not support yet.");
 }
 
-template class ObjectDetection3dAdapter<ObjectDetection3dInput, ObjectDetection3dResult>;
 }
 }
\ No newline at end of file