{
namespace machine_learning
{
-template<typename T, typename V>
-void machine_learning_native_create(const std::string &task_name, mediavision::common::ITask<T, V> *task,
- void **handle);
+void machine_learning_native_create(const std::string &task_name, mediavision::common::ITask *task, void **handle);
void machine_learning_native_destory(void *handle, const std::string &task_name);
void machine_learning_native_configure(void *handle, const std::string &task_name);
void machine_learning_native_prepare(void *handle, const std::string &task_name);
void machine_learning_native_inference(void *handle, const std::string &task_name, InputBaseType &input);
void machine_learning_native_inference_async(void *handle, const std::string &task_name, InputBaseType &input);
OutputBaseType &machine_learning_native_get_result(void *handle, const std::string &task_name);
+OutputBaseType &machine_learning_native_get_result_cache(void *handle, const std::string &task_name);
void machine_learning_native_set_model(void *handle, const std::string &task_name, const char *model_file,
const char *meta_file, const char *label_file, const char *model_name = "");
void machine_learning_native_set_engine(void *handle, const std::string &task_name, const char *backend_type,
~Context()
{}
- std::map<std::string, void *> __tasks;
+ std::map<std::string, ITask *> __tasks;
};
} // namespace
} // namespace
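Note: a condensed usage sketch of the detemplatized native entry points. This is a hypothetical caller, not part of the patch; the "image_classification" task name and the ImageClassificationAdapter/ImageClassificationInput types are placeholders borrowed from the wrappers further below, model/engine setup via machine_learning_native_set_model()/set_engine() is elided, and the try/catch of BaseException the real wrappers add around every call is omitted.

void *handle = nullptr;

machine_learning_native_create("image_classification", new ImageClassificationAdapter(), &handle);
machine_learning_native_configure(handle, "image_classification");
machine_learning_native_prepare(handle, "image_classification");

ImageClassificationInput input(source); // 'source' is a given mv_source_h, now carried by InputBaseType
machine_learning_native_inference(handle, "image_classification", input);

OutputBaseType &result = machine_learning_native_get_result(handle, "image_classification");
// concrete result fields are reached by downcasting OutputBaseType, as the mv_* wrappers below do

machine_learning_native_destory(handle, "image_classification");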
#ifndef __ITASK_H__
#define __ITASK_H__
+#include "MachineLearningType.h"
+
namespace mediavision
{
namespace common
{
-// T : parameter type, V : return type
-template<typename T, typename V> class ITask
+class ITask
{
public:
virtual ~ITask() {};
virtual void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) = 0;
virtual void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) = 0;
virtual void prepare() = 0;
- virtual void setInput(T &t) = 0;
+ virtual void setInput(mediavision::machine_learning::InputBaseType &input) = 0;
virtual void perform() = 0;
- virtual void performAsync(T &t) = 0;
- virtual V &getOutput() = 0;
- virtual V &getOutputCache() = 0;
+ virtual void performAsync(mediavision::machine_learning::InputBaseType &input) = 0;
+ virtual mediavision::machine_learning::OutputBaseType &getOutput() = 0;
+ virtual mediavision::machine_learning::OutputBaseType &getOutputCache() = 0;
};
} // namespace
} // namespace
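Note: the detemplatized ITask relies on the shared base types from "MachineLearningType.h", which the patch includes but does not show. A minimal sketch of what that header is assumed to provide, inferred from how the adapters below use it (the field and constructor shapes are deduced, not copied from the real header):

struct InputBaseType {
	InputBaseType(mv_source_h src = nullptr) : inference_src(src)
	{}

	mv_source_h inference_src {}; // frame handle each adapter's perform() reads
};

struct OutputBaseType {
	unsigned long frame_number {}; // assumed to live here now, since ImageSegmentationResult drops its own copy below
};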
using namespace mediavision::machine_learning;
using namespace mediavision::machine_learning::exception;
-using MachineLearningTask = ITask<InputBaseType, OutputBaseType>;
-
namespace mediavision
{
namespace machine_learning
{
-inline MachineLearningTask *get_task(void *handle, const std::string &name)
+inline ITask *get_task(void *handle, const std::string &name)
{
auto context = static_cast<Context *>(handle);
- return static_cast<MachineLearningTask *>(context->__tasks.at(name));
+ return context->__tasks.at(name);
}
-template<typename T, typename V>
-void machine_learning_native_create(const string &task_name, ITask<T, V> *task, void **handle)
+void machine_learning_native_create(const string &task_name, ITask *task, void **handle)
{
Context *context = new Context();
*handle = static_cast<void *>(context);
}
-template void machine_learning_native_create<InputBaseType, OutputBaseType>(const string &task_name,
- MachineLearningTask *task, void **handle);
-
void machine_learning_native_destory(void *handle, const string &task_name)
{
auto context = static_cast<Context *>(handle);
for (auto &m : context->__tasks)
- delete static_cast<MachineLearningTask *>(m.second);
+ delete m.second;
delete context;
}
return task->getOutput();
}
+OutputBaseType &machine_learning_native_get_result_cache(void *handle, const string &task_name)
+{
+ auto task = get_task(handle, task_name);
+
+ return task->getOutputCache();
+}
+
void machine_learning_native_set_model(void *handle, const string &task_name, const char *model_file,
const char *meta_file, const char *label_file, const char *model_name)
{
#include "training_engine_error.h"
#include "training_engine_common_impl.h"
#include "inference_engine_common_impl.h"
+#include "MachineLearningType.h"
#include "Inference.h"
#include "label_manager.h"
#include "feature_vector_manager.h"
enum class RequestMode { REGISTER, INFERENCE, DELETE };
}
-struct FaceRecognitionInput {
- face_recognition::RequestMode mode;
+struct FaceRecognitionInput : public InputBaseType {
+ FaceRecognitionInput(mv_source_h src = nullptr) : InputBaseType(src)
+ {}
+ face_recognition::RequestMode mode {};
std::vector<std::vector<float> > inputs;
std::vector<std::string> labels;
};
* @details Contains face recognition result such as label, label index, raw data,
* and raw data count.
*/
-struct FaceRecognitionResult {
+struct FaceRecognitionResult : public OutputBaseType {
unsigned int label_idx {}; /**< label index of label file. */
std::vector<float> raw_data; /**< raw data to each label. */
std::vector<std::string> labels;
{
namespace machine_learning
{
-template<typename T, typename V> class FaceRecognitionAdapter : public mediavision::common::ITask<T, V>
+class FaceRecognitionAdapter : public mediavision::common::ITask
{
private:
std::unique_ptr<FaceRecognition> _face_recognition;
- T _source {};
+ InputBaseType _source;
std::unique_ptr<MediaVision::Common::EngineConfig> _config;
public:
void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
- void setInput(T &t) override;
+ void setInput(InputBaseType &input) override;
void perform() override;
- void performAsync(T &t) override;
- V &getOutput() override;
- V &getOutputCache() override;
+ void performAsync(InputBaseType &input) override;
+ OutputBaseType &getOutput() override;
+ OutputBaseType &getOutputCache() override;
};
} // machine_learning
#include "facenet_parser.h"
#include "face_recognition_type.h"
#include "machine_learning_preprocess.h"
+#include "MachineLearningType.h"
namespace mediavision
{
namespace machine_learning
{
-struct FacenetInput {
- std::vector<mv_source_h> inputs;
+struct FacenetInput : public InputBaseType {
+ FacenetInput(mv_source_h src = nullptr) : InputBaseType(src)
+ {}
};
-struct FacenetOutput {
+struct FacenetOutput : public OutputBaseType {
std::vector<std::vector<float> > outputs;
};
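Note: with the templates gone, the facenet-to-face-recognition handoff that the mv_face_recognition wrappers below implement reads roughly as follows (condensed, hypothetical sketch; both tasks are plain ITask pointers fetched from Context::__tasks, the unqualified RequestMode mirrors the wrappers' using-directives, and error handling is omitted):

FacenetInput facenet_input(source); // the mv_source_h travels in the inherited inference_src
facenet_task->setInput(facenet_input);
facenet_task->perform();
auto &facenet_output = static_cast<FacenetOutput &>(facenet_task->getOutput());

FaceRecognitionInput face_recognition_input;
face_recognition_input.mode = RequestMode::REGISTER;
face_recognition_input.inputs.push_back(facenet_output.outputs[0]);
face_recognition_input.labels.push_back(label);
face_recognition_task->setInput(face_recognition_input);
face_recognition_task->perform();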
#include "EngineConfig.h"
#include "itask.h"
+#include "MachineLearningType.h"
#include "facenet.h"
namespace mediavision
{
namespace machine_learning
{
-template<typename T, typename V> class FacenetAdapter : public mediavision::common::ITask<T, V>
+class FacenetAdapter : public mediavision::common::ITask
{
private:
std::unique_ptr<Facenet> _facenet;
- T _source;
+ InputBaseType _source;
public:
FacenetAdapter();
void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
- void setInput(T &t) override;
+ void setInput(InputBaseType &input) override;
void perform() override;
- void performAsync(T &t) override;
- V &getOutput() override;
- V &getOutputCache() override;
+ void performAsync(InputBaseType &input) override;
+ OutputBaseType &getOutput() override;
+ OutputBaseType &getOutputCache() override;
};
} // machine_learning
{
namespace machine_learning
{
-template<typename T, typename V> FaceRecognitionAdapter<T, V>::FaceRecognitionAdapter()
+FaceRecognitionAdapter::FaceRecognitionAdapter()
{
_face_recognition = make_unique<FaceRecognition>();
}
-template<typename T, typename V> FaceRecognitionAdapter<T, V>::~FaceRecognitionAdapter()
+FaceRecognitionAdapter::~FaceRecognitionAdapter()
{}
-template<typename T, typename V>
-void FaceRecognitionAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
- const char *model_name)
+void FaceRecognitionAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name)
{}
-template<typename T, typename V>
-void FaceRecognitionAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void FaceRecognitionAdapter::setEngineInfo(const char *engine_type, const char *device_type)
{}
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::configure()
+void FaceRecognitionAdapter::configure()
{
_config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + string(FACE_RECOGNITION_META_FILE_NAME));
_face_recognition->setConfig(config);
}
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void FaceRecognitionAdapter::getNumberOfEngines(unsigned int *number_of_engines)
{}
-template<typename T, typename V>
-void FaceRecognitionAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void FaceRecognitionAdapter::getEngineType(unsigned int engine_index, char **engine_type)
{}
-template<typename T, typename V>
-void FaceRecognitionAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void FaceRecognitionAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
{}
-template<typename T, typename V>
-void FaceRecognitionAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void FaceRecognitionAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
{}
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::prepare()
+void FaceRecognitionAdapter::prepare()
{
int ret = _face_recognition->initialize();
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to initialize face recognition.");
}
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::setInput(T &t)
+void FaceRecognitionAdapter::setInput(InputBaseType &input)
{
- _source = t;
+ _source = input;
}
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::perform()
+void FaceRecognitionAdapter::perform()
{
- if (_source.mode == RequestMode::REGISTER) {
- if (_source.inputs.size() != _source.labels.size())
+ FaceRecognitionInput &source = static_cast<FaceRecognitionInput &>(_source);
+
+ if (source.mode == RequestMode::REGISTER) {
+ if (source.inputs.size() != source.labels.size())
throw InvalidParameter("The number of inputs and labels are not matched.");
- for (size_t idx = 0; idx < _source.inputs.size(); ++idx) {
- int ret = _face_recognition->registerNewFace(_source.inputs[idx], _source.labels[idx]);
+ for (size_t idx = 0; idx < source.inputs.size(); ++idx) {
+ int ret = _face_recognition->registerNewFace(source.inputs[idx], source.labels[idx]);
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to register new face.");
}
return;
}
- if (_source.mode == RequestMode::INFERENCE) {
+ if (source.mode == RequestMode::INFERENCE) {
- // _source.inputs.size should be 1.
+ // source.inputs.size() should be 1.
- int ret = _face_recognition->recognizeFace(_source.inputs[0]);
+ int ret = _face_recognition->recognizeFace(source.inputs[0]);
if (ret == MEDIA_VISION_ERROR_NO_DATA)
throw NoData("Label not found.");
return;
}
- if (_source.mode == RequestMode::DELETE) {
- for (auto &l : _source.labels) {
+ if (source.mode == RequestMode::DELETE) {
+ for (auto &l : source.labels) {
int ret = _face_recognition->deleteLabel(l);
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to unregister a given label.");
}
}
-template<typename T, typename V> void FaceRecognitionAdapter<T, V>::performAsync(T &t)
+void FaceRecognitionAdapter::performAsync(InputBaseType &input)
{
throw InvalidOperation("Not support yet.");
}
-template<typename T, typename V> V &FaceRecognitionAdapter<T, V>::getOutput()
+OutputBaseType &FaceRecognitionAdapter::getOutput()
{
return _face_recognition->result();
}
-template<typename T, typename V> V &FaceRecognitionAdapter<T, V>::getOutputCache()
+OutputBaseType &FaceRecognitionAdapter::getOutputCache()
{
throw InvalidOperation("Not support yet.");
}
-template class FaceRecognitionAdapter<FaceRecognitionInput, FaceRecognitionResult>;
}
}
\ No newline at end of file
{
namespace machine_learning
{
-template<typename T, typename V> FacenetAdapter<T, V>::FacenetAdapter() : _source()
+FacenetAdapter::FacenetAdapter() : _source()
{
_facenet = make_unique<Facenet>();
}
-template<typename T, typename V> FacenetAdapter<T, V>::~FacenetAdapter()
+FacenetAdapter::~FacenetAdapter()
{}
-template<typename T, typename V>
-void FacenetAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
- const char *model_name)
+void FacenetAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name)
{}
-template<typename T, typename V>
-void FacenetAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void FacenetAdapter::setEngineInfo(const char *engine_type, const char *device_type)
{}
-template<typename T, typename V> void FacenetAdapter<T, V>::configure()
+void FacenetAdapter::configure()
{
_facenet->parseMetaFile();
_facenet->configure();
}
-template<typename T, typename V> void FacenetAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void FacenetAdapter::getNumberOfEngines(unsigned int *number_of_engines)
{}
-template<typename T, typename V> void FacenetAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void FacenetAdapter::getEngineType(unsigned int engine_index, char **engine_type)
{}
-template<typename T, typename V>
-void FacenetAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void FacenetAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
{}
-template<typename T, typename V>
-void FacenetAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void FacenetAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
{}
-template<typename T, typename V> void FacenetAdapter<T, V>::prepare()
+void FacenetAdapter::prepare()
{
_facenet->prepare();
}
-template<typename T, typename V> void FacenetAdapter<T, V>::setInput(T &t)
+void FacenetAdapter::setInput(InputBaseType &input)
{
- _source = t;
+ _source = input;
}
-template<typename T, typename V> void FacenetAdapter<T, V>::perform()
+void FacenetAdapter::perform()
{
+ FacenetInput &source = static_cast<FacenetInput &>(_source);
shared_ptr<MetaInfo> metaInfo = _facenet->getInputMetaInfo();
if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8)
- _facenet->perform<unsigned char>(_source.inputs[0], metaInfo);
+ _facenet->perform<unsigned char>(source.inference_src, metaInfo);
else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32)
- _facenet->perform<float>(_source.inputs[0], metaInfo);
+ _facenet->perform<float>(source.inference_src, metaInfo);
else
throw InvalidOperation("Invalid model data type.");
}
-template<typename T, typename V> void FacenetAdapter<T, V>::performAsync(T &t)
+void FacenetAdapter::performAsync(InputBaseType &input)
{
throw InvalidOperation("Not support yet.");
}
-template<typename T, typename V> V &FacenetAdapter<T, V>::getOutput()
+OutputBaseType &FacenetAdapter::getOutput()
{
return _facenet->result();
}
-template<typename T, typename V> V &FacenetAdapter<T, V>::getOutputCache()
+OutputBaseType &FacenetAdapter::getOutputCache()
{
throw InvalidOperation("Not support yet.");
}
-template class FacenetAdapter<FacenetInput, FacenetOutput>;
}
}
\ No newline at end of file
using namespace mediavision::machine_learning;
using namespace mediavision::machine_learning::face_recognition;
using namespace mediavision::machine_learning::exception;
-using FaceRecognitionTask = ITask<FaceRecognitionInput, FaceRecognitionResult>;
-using FacenetTask = ITask<FacenetInput, FacenetOutput>;
static mutex g_face_recognition_mutex;
MEDIA_VISION_FUNCTION_ENTER();
- Context *context = new (nothrow) Context();
- if (!context) {
- LOGE("Fail to allocate a context.");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
-
- FaceRecognitionTask *face_recognition_task = new (nothrow)
- FaceRecognitionAdapter<FaceRecognitionInput, FaceRecognitionResult>();
- if (!face_recognition_task) {
- delete context;
- LOGE("Fail to allocate a task.");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ Context *context = nullptr;
+ ITask *face_recognition_task = nullptr;
+ ITask *facenet_task = nullptr;
- FacenetTask *facenet_task = new (nothrow) FacenetAdapter<FacenetInput, FacenetOutput>();
- if (!facenet_task) {
- delete face_recognition_task;
- delete context;
- LOGE("Fail to allocate a task.");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
+ try {
+ context = new Context();
+ face_recognition_task = new FaceRecognitionAdapter();
+ facenet_task = new FacenetAdapter();
+ context->__tasks.insert(make_pair("face_recognition", face_recognition_task));
+ context->__tasks.insert(make_pair("facenet", facenet_task));
- pair<map<string, void *>::iterator, bool> result;
+ *out_handle = static_cast<mv_face_recognition_h>(context);
- result = context->__tasks.insert(pair<string, void *>("face_recognition", face_recognition_task));
- if (!result.second) {
- delete facenet_task;
+ LOGD("face recognition handle [%p] has been created", *out_handle);
+ } catch (const BaseException &e) {
delete face_recognition_task;
- delete context;
- LOGE("Fail to register a new task. Same task already exists.");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
-
- result = context->__tasks.insert(pair<string, void *>("facenet", facenet_task));
- if (!result.second) {
delete facenet_task;
- delete face_recognition_task;
delete context;
- LOGE("Fail to register a new task. Same task already exists.");
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ return e.getError();
}
- *out_handle = static_cast<mv_face_recognition_h>(context);
-
- LOGD("face recognition handle [%p] has been created", *out_handle);
-
MEDIA_VISION_FUNCTION_LEAVE();
return MEDIA_VISION_ERROR_NONE;
MEDIA_VISION_FUNCTION_ENTER();
Context *context = static_cast<Context *>(handle);
- map<string, void *>::iterator iter;
+ map<string, ITask *>::iterator iter;
for (iter = context->__tasks.begin(); iter != context->__tasks.end(); ++iter) {
- if (iter->first.compare("face_recognition") == 0) {
- auto face_recognition_task = static_cast<FaceRecognitionTask *>(iter->second);
- delete face_recognition_task;
- }
-
- if (iter->first.compare("facenet") == 0) {
- auto facenet_task = static_cast<FacenetTask *>(iter->second);
- delete facenet_task;
- }
+ if (iter->first.compare("face_recognition") == 0)
+ delete iter->second;
+
+ if (iter->first.compare("facenet") == 0)
+ delete iter->second;
}
delete context;
try {
Context *context = static_cast<Context *>(handle);
- auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
- auto facenet_task = static_cast<FacenetTask *>(context->__tasks["facenet"]);
+ auto face_recognition_task = context->__tasks["face_recognition"];
+ auto facenet_task = context->__tasks["facenet"];
face_recognition_task->configure();
facenet_task->configure();
try {
Context *context = static_cast<Context *>(handle);
- auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
- auto facenet_task = static_cast<FacenetTask *>(context->__tasks["facenet"]);
- FacenetInput facenet_input = { { source } };
+ auto face_recognition_task = context->__tasks["face_recognition"];
+ auto facenet_task = context->__tasks["facenet"];
+ FacenetInput facenet_input(source);
facenet_task->setInput(facenet_input);
facenet_task->perform();
- FacenetOutput &facenet_output = facenet_task->getOutput();
- FaceRecognitionInput face_recognition_input = { .mode = RequestMode::REGISTER };
+ auto &facenet_output = static_cast<FacenetOutput &>(facenet_task->getOutput());
+ FaceRecognitionInput face_recognition_input;
+
+ face_recognition_input.mode = RequestMode::REGISTER;
face_recognition_input.inputs.push_back(facenet_output.outputs[0]);
face_recognition_input.labels.push_back(label);
try {
Context *context = static_cast<Context *>(handle);
- auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
- FaceRecognitionInput input = { RequestMode::DELETE };
+ auto face_recognition_task = context->__tasks["face_recognition"];
+ FaceRecognitionInput input;
+
+ input.mode = RequestMode::DELETE;
input.labels.clear();
input.labels.push_back(label);
try {
Context *context = static_cast<Context *>(handle);
- auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
- auto facenet_task = static_cast<FacenetTask *>(context->__tasks["facenet"]);
- FacenetInput facenet_input = { { source } };
+ auto face_recognition_task = context->__tasks["face_recognition"];
+ auto facenet_task = context->__tasks["facenet"];
+ FacenetInput facenet_input(source);
facenet_task->setInput(facenet_input);
facenet_task->perform();
- FacenetOutput &facenet_output = facenet_task->getOutput();
- FaceRecognitionInput face_recognition_input = { RequestMode::INFERENCE };
+ auto &facenet_output = static_cast<FacenetOutput &>(facenet_task->getOutput());
+ FaceRecognitionInput face_recognition_input;
+
+ face_recognition_input.mode = RequestMode::INFERENCE;
face_recognition_input.inputs = facenet_output.outputs;
face_recognition_task->setInput(face_recognition_input);
try {
Context *context = static_cast<Context *>(handle);
- auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
+ auto face_recognition_task = context->__tasks["face_recognition"];
+ auto &result = static_cast<FaceRecognitionResult &>(face_recognition_task->getOutput());
- *out_label = face_recognition_task->getOutput().label.c_str();
+ *out_label = result.label.c_str();
} catch (const BaseException &e) {
LOGE("%s", e.what());
return e.getError();
try {
auto context = static_cast<Context *>(handle);
- auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
+ auto face_recognition_task = context->__tasks["face_recognition"];
+ auto &result = static_cast<FaceRecognitionResult &>(face_recognition_task->getOutput());
- *confidences = face_recognition_task->getOutput().raw_data.data();
- *num_of_confidences = face_recognition_task->getOutput().raw_data.size();
+ *confidences = result.raw_data.data();
+ *num_of_confidences = result.raw_data.size();
} catch (const BaseException &e) {
LOGE("%s", e.what());
return e.getError();
try {
auto context = static_cast<Context *>(handle);
- auto face_recognition_task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
+ auto face_recognition_task = context->__tasks["face_recognition"];
+ auto &result = static_cast<FaceRecognitionResult &>(face_recognition_task->getOutput());
- if (static_cast<size_t>(index) >= face_recognition_task->getOutput().labels.size())
+ if (static_cast<size_t>(index) >= result.labels.size())
throw InvalidParameter("A given index is out of boundary.");
- *label = face_recognition_task->getOutput().labels[index].c_str();
+ *label = result.labels[index].c_str();
} catch (const BaseException &e) {
LOGE("%s", e.what());
return e.getError();
{
namespace machine_learning
{
-template<typename T, typename V> class ImageClassificationAdapter : public mediavision::common::ITask<T, V>
+class ImageClassificationAdapter : public mediavision::common::ITask
{
private:
std::unique_ptr<IImageClassification> _image_classification;
std::shared_ptr<MachineLearningConfig> _config;
- T _source;
+ InputBaseType _source;
const std::string _config_file_name = "image_classification.json";
void create();
void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
- void setInput(T &t) override;
+ void setInput(InputBaseType &input) override;
void perform() override;
- void performAsync(T &t) override;
- V &getOutput() override;
- V &getOutputCache() override;
+ void performAsync(InputBaseType &input) override;
+ OutputBaseType &getOutput() override;
+ OutputBaseType &getOutputCache() override;
};
} // machine_learning
namespace machine_learning
{
struct ImageClassificationInput : public InputBaseType {
- ImageClassificationInput(mv_source_h src = NULL) : InputBaseType(src)
+ ImageClassificationInput(mv_source_h src = nullptr) : InputBaseType(src)
{}
};
{
namespace machine_learning
{
-template<typename T, typename V> ImageClassificationAdapter<T, V>::ImageClassificationAdapter() : _source()
+ImageClassificationAdapter::ImageClassificationAdapter() : _source()
{
_config = make_shared<MachineLearningConfig>();
_config->parseConfigFile(_config_file_name);
create();
}
-template<typename T, typename V> ImageClassificationAdapter<T, V>::~ImageClassificationAdapter()
+ImageClassificationAdapter::~ImageClassificationAdapter()
{
_image_classification->preDestroy();
}
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::create()
+void ImageClassificationAdapter::create()
{
_config->loadMetaFile(make_unique<ImageClassificationParser>());
mv_inference_data_type_e dataType = _config->getInputMetaMap().begin()->second->dataType;
}
}
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file,
- const char *label_file, const char *model_name)
+void ImageClassificationAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name)
{
_config->setUserModel(model_file, meta_file, label_file);
create();
}
}
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void ImageClassificationAdapter::setEngineInfo(const char *engine_type, const char *device_type)
{
_image_classification->setEngineInfo(string(engine_type), string(device_type));
}
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::configure()
+void ImageClassificationAdapter::configure()
{
_image_classification->configure();
}
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void ImageClassificationAdapter::getNumberOfEngines(unsigned int *number_of_engines)
{
_image_classification->getNumberOfEngines(number_of_engines);
}
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void ImageClassificationAdapter::getEngineType(unsigned int engine_index, char **engine_type)
{
_image_classification->getEngineType(engine_index, engine_type);
}
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void ImageClassificationAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
{
_image_classification->getNumberOfDevices(engine_type, number_of_devices);
}
-template<typename T, typename V>
-void ImageClassificationAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index,
- char **device_type)
+void ImageClassificationAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
{
_image_classification->getDeviceType(engine_type, device_index, device_type);
}
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::prepare()
+void ImageClassificationAdapter::prepare()
{
_image_classification->prepare();
}
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::setInput(T &t)
+void ImageClassificationAdapter::setInput(InputBaseType &input)
{
- _source = t;
+ _source = input;
}
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::perform()
+void ImageClassificationAdapter::perform()
{
_image_classification->perform(_source.inference_src);
}
-template<typename T, typename V> void ImageClassificationAdapter<T, V>::performAsync(T &t)
+void ImageClassificationAdapter::performAsync(InputBaseType &input)
{
- _image_classification->performAsync(static_cast<ImageClassificationInput &>(t));
+ _image_classification->performAsync(static_cast<ImageClassificationInput &>(input));
}
-template<typename T, typename V> V &ImageClassificationAdapter<T, V>::getOutput()
+OutputBaseType &ImageClassificationAdapter::getOutput()
{
- return static_cast<V &>(_image_classification->getOutput());
+ return _image_classification->getOutput();
}
-template<typename T, typename V> V &ImageClassificationAdapter<T, V>::getOutputCache()
+OutputBaseType &ImageClassificationAdapter::getOutputCache()
{
throw InvalidOperation("Not support yet.");
}
-template class ImageClassificationAdapter<InputBaseType, OutputBaseType>;
}
}
using namespace mediavision::machine_learning;
using namespace MediaVision::Common;
using namespace mediavision::machine_learning::exception;
-using ImageClassificationTask = ImageClassificationAdapter<InputBaseType, OutputBaseType>;
static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image" };
static const size_t num_keys = sizeof(feature_keys) / sizeof(char *);
MEDIA_VISION_FUNCTION_ENTER();
try {
- machine_learning_native_create<InputBaseType, OutputBaseType>(TASK_NAME, new ImageClassificationTask(),
- out_handle);
+ machine_learning_native_create(TASK_NAME, new ImageClassificationAdapter(), out_handle);
} catch (const BaseException &e) {
LOGE("%s", e.what());
return e.getError();
#include <mv_common.h>
#include <mv_inference_type.h>
+#include "MachineLearningType.h"
namespace mediavision
{
namespace machine_learning
{
-struct ImageSegmentationInput {
- mv_source_h inference_src {};
+struct ImageSegmentationInput : public InputBaseType {
+ ImageSegmentationInput(mv_source_h src = nullptr) : InputBaseType(src)
+ {}
};
/**
-* @brief The object detection result structure.
-* @details Contains object detection result.
+* @brief The image segmentation result structure.
+* @details Contains image segmentation result.
*/
-struct ImageSegmentationResult {
- unsigned long frame_number {};
+struct ImageSegmentationResult : public OutputBaseType {
unsigned int width {};
unsigned int height {};
unsigned int pixel_size {};
{
namespace machine_learning
{
-template<typename T, typename V> class ImageSegmentationAdapter : public mediavision::common::ITask<T, V>
+class ImageSegmentationAdapter : public mediavision::common::ITask
{
private:
std::unique_ptr<IImageSegmentation> _selfie_segmentation;
std::shared_ptr<MachineLearningConfig> _config;
- T _source;
+ InputBaseType _source;
const std::string _config_file_name = "selfie_segmentation.json";
const std::string _plugin_config_file_name = "selfie_segmentation_plugin.json";
void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
- void setInput(T &t) override;
+ void setInput(InputBaseType &input) override;
void perform() override;
- void performAsync(T &t) override;
- V &getOutput() override;
- V &getOutputCache() override;
+ void performAsync(InputBaseType &input) override;
+ OutputBaseType &getOutput() override;
+ OutputBaseType &getOutputCache() override;
};
} // machine_learning
using namespace mediavision::machine_learning;
using namespace MediaVision::Common;
using namespace mediavision::machine_learning::exception;
-using ImageSegmentationTask = ITask<ImageSegmentationInput, ImageSegmentationResult>;
static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
"http://tizen.org/feature/vision.inference.face" };
MEDIA_VISION_FUNCTION_ENTER();
Context *context = nullptr;
- ImageSegmentationTask *task = nullptr;
+ ITask *task = nullptr;
try {
context = new Context();
- task = new ImageSegmentationAdapter<ImageSegmentationInput, ImageSegmentationResult>();
+ task = new ImageSegmentationAdapter();
context->__tasks.insert(make_pair("selfie_segmentation", task));
*handle = static_cast<mv_selfie_segmentation_h>(context);
} catch (const BaseException &e) {
auto context = static_cast<Context *>(handle);
for (auto &m : context->__tasks)
- delete static_cast<ImageSegmentationTask *>(m.second);
+ delete m.second;
delete context;
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+ auto task = context->__tasks.at("selfie_segmentation");
task->setModelInfo(model_file, meta_file, label_file, model_name);
} catch (const BaseException &e) {
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+ auto task = context->__tasks.at("selfie_segmentation");
task->setEngineInfo(backend_type, device_type);
} catch (const BaseException &e) {
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+ auto task = context->__tasks.at("selfie_segmentation");
task->getNumberOfEngines(engine_count);
// TODO.
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+ auto task = context->__tasks.at("selfie_segmentation");
task->getEngineType(engine_index, engine_type);
// TODO.
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+ auto task = context->__tasks.at("selfie_segmentation");
task->getNumberOfDevices(engine_type, device_count);
// TODO.
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+ auto task = context->__tasks.at("selfie_segmentation");
task->getDeviceType(engine_type, device_index, device_type);
// TODO.
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+ auto task = context->__tasks.at("selfie_segmentation");
task->configure();
} catch (const BaseException &e) {
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+ auto task = context->__tasks.at("selfie_segmentation");
task->prepare();
} catch (const BaseException &e) {
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+ auto task = context->__tasks.at("selfie_segmentation");
- ImageSegmentationInput input = { .inference_src = source };
+ ImageSegmentationInput input(source);
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+ auto task = context->__tasks.at("selfie_segmentation");
ImageSegmentationInput input = { source };
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ImageSegmentationTask *>(context->__tasks.at("selfie_segmentation"));
+ auto task = context->__tasks.at("selfie_segmentation");
- ImageSegmentationResult &result = task->getOutput();
+ auto &result = static_cast<ImageSegmentationResult &>(task->getOutput());
*width = result.width;
*height = result.height;
*pixel_size = result.pixel_size;
{
namespace machine_learning
{
-template<typename T, typename V> ImageSegmentationAdapter<T, V>::ImageSegmentationAdapter() : _source()
+ImageSegmentationAdapter::ImageSegmentationAdapter() : _source()
{
_config = make_shared<MachineLearningConfig>();
create(_config->getDefaultModelName());
}
-template<typename T, typename V> ImageSegmentationAdapter<T, V>::~ImageSegmentationAdapter()
+ImageSegmentationAdapter::~ImageSegmentationAdapter()
{
_selfie_segmentation->preDestroy();
}
-template<typename T, typename V>
-template<typename U>
-void ImageSegmentationAdapter<T, V>::create(ImageSegmentationTaskType task_type)
+template<typename U> void ImageSegmentationAdapter::create(ImageSegmentationTaskType task_type)
{
// TODO. add switch-case statement here for Mediavision own task types.
}
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::create(std::string model_name)
+void ImageSegmentationAdapter::create(std::string model_name)
{
if (model_name.empty())
model_name = _config->getDefaultModelName();
}
}
-template<typename T, typename V>
-ImageSegmentationTaskType ImageSegmentationAdapter<T, V>::convertToTaskType(string model_name)
+ImageSegmentationTaskType ImageSegmentationAdapter::convertToTaskType(string model_name)
{
if (model_name.empty())
throw InvalidParameter("model name is empty.");
throw InvalidParameter("Invalid selfie segmentation model name.");
}
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
- const char *model_name)
+void ImageSegmentationAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name)
{
try {
_config->setUserModel(model_file, meta_file, label_file);
_selfie_segmentation->setUserModel(model_file, meta_file, label_file);
}
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void ImageSegmentationAdapter::setEngineInfo(const char *engine_type, const char *device_type)
{
_selfie_segmentation->setEngineInfo(string(engine_type), string(device_type));
}
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::configure()
+void ImageSegmentationAdapter::configure()
{
_selfie_segmentation->configure();
}
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void ImageSegmentationAdapter::getNumberOfEngines(unsigned int *number_of_engines)
{
_selfie_segmentation->getNumberOfEngines(number_of_engines);
}
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void ImageSegmentationAdapter::getEngineType(unsigned int engine_index, char **engine_type)
{
_selfie_segmentation->getEngineType(engine_index, engine_type);
}
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void ImageSegmentationAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
{
_selfie_segmentation->getNumberOfDevices(engine_type, number_of_devices);
}
-template<typename T, typename V>
-void ImageSegmentationAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index,
- char **device_type)
+void ImageSegmentationAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
{
_selfie_segmentation->getDeviceType(engine_type, device_index, device_type);
}
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::prepare()
+void ImageSegmentationAdapter::prepare()
{
_selfie_segmentation->prepare();
}
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::setInput(T &t)
+void ImageSegmentationAdapter::setInput(InputBaseType &input)
{
- _source = t;
+ _source = input;
}
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::perform()
+void ImageSegmentationAdapter::perform()
{
_selfie_segmentation->perform(_source.inference_src);
}
-template<typename T, typename V> V &ImageSegmentationAdapter<T, V>::getOutput()
+OutputBaseType &ImageSegmentationAdapter::getOutput()
{
return _selfie_segmentation->getOutput();
}
-template<typename T, typename V> V &ImageSegmentationAdapter<T, V>::getOutputCache()
+OutputBaseType &ImageSegmentationAdapter::getOutputCache()
{
return _selfie_segmentation->getOutputCache();
}
-template<typename T, typename V> void ImageSegmentationAdapter<T, V>::performAsync(T &t)
+void ImageSegmentationAdapter::performAsync(InputBaseType &input)
{
- _selfie_segmentation->performAsync(t);
+ _selfie_segmentation->performAsync(static_cast<ImageSegmentationInput &>(input));
}
-template class ImageSegmentationAdapter<ImageSegmentationInput, ImageSegmentationResult>;
}
}
\ No newline at end of file
{
namespace machine_learning
{
-template<typename T, typename V> class FacialLandmarkAdapter : public mediavision::common::ITask<T, V>
+class FacialLandmarkAdapter : public mediavision::common::ITask
{
private:
std::unique_ptr<ILandmarkDetection> _landmark_detection;
std::shared_ptr<MachineLearningConfig> _config;
- T _source;
+ InputBaseType _source;
const std::string _config_file_name = "facial_landmark.json";
void create(const std::string &model_name);
void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
- void setInput(T &t) override;
+ void setInput(InputBaseType &input) override;
void perform() override;
- void performAsync(T &t) override;
- V &getOutput() override;
- V &getOutputCache() override;
+ void performAsync(InputBaseType &input) override;
+ OutputBaseType &getOutput() override;
+ OutputBaseType &getOutputCache() override;
};
} // machine_learning
namespace machine_learning
{
struct LandmarkDetectionInput : public InputBaseType {
- LandmarkDetectionInput(mv_source_h src = NULL) : InputBaseType(src)
+ LandmarkDetectionInput(mv_source_h src = nullptr) : InputBaseType(src)
{}
};
{
namespace machine_learning
{
-template<typename T, typename V> class PoseLandmarkAdapter : public mediavision::common::ITask<T, V>
+class PoseLandmarkAdapter : public mediavision::common::ITask
{
private:
std::unique_ptr<ILandmarkDetection> _landmark_detection;
std::shared_ptr<MachineLearningConfig> _config;
- T _source;
+ InputBaseType _source;
const std::string _config_file_name = "pose_landmark.json";
void create(const std::string &model_name);
void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
- void setInput(T &t) override;
+ void setInput(InputBaseType &input) override;
void perform() override;
- void performAsync(T &t) override;
- V &getOutput() override;
- V &getOutputCache() override;
+ void performAsync(InputBaseType &input) override;
+ OutputBaseType &getOutput() override;
+ OutputBaseType &getOutputCache() override;
};
} // machine_learning
{
namespace machine_learning
{
-template<typename T, typename V> FacialLandmarkAdapter<T, V>::FacialLandmarkAdapter() : _source()
+FacialLandmarkAdapter::FacialLandmarkAdapter() : _source()
{
_config = make_shared<MachineLearningConfig>();
_config->parseConfigFile(_config_file_name);
create(_config->getDefaultModelName());
}
-template<typename T, typename V> FacialLandmarkAdapter<T, V>::~FacialLandmarkAdapter()
+FacialLandmarkAdapter::~FacialLandmarkAdapter()
{
_landmark_detection->preDestroy();
}
-template<typename T, typename V>
-template<typename U>
-void FacialLandmarkAdapter<T, V>::create(LandmarkDetectionTaskType task_type)
+template<typename U> void FacialLandmarkAdapter::create(LandmarkDetectionTaskType task_type)
{
switch (task_type) {
case LandmarkDetectionTaskType::FLD_TWEAK_CNN:
}
}
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::create(const string &model_name)
+void FacialLandmarkAdapter::create(const string &model_name)
{
LandmarkDetectionTaskType task_type = convertToTaskType(model_name);
_config->loadMetaFile(make_unique<LandmarkDetectionParser>(static_cast<int>(task_type)));
}
}
-template<typename T, typename V>
-LandmarkDetectionTaskType FacialLandmarkAdapter<T, V>::convertToTaskType(string model_name)
+LandmarkDetectionTaskType FacialLandmarkAdapter::convertToTaskType(string model_name)
{
if (model_name.empty())
throw InvalidParameter("model name is empty.");
throw InvalidParameter("Invalid facial detection model name.");
}
-template<typename T, typename V>
-void FacialLandmarkAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
- const char *model_name)
+void FacialLandmarkAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name)
{
try {
_config->setUserModel(model_file, meta_file, label_file);
}
}
-template<typename T, typename V>
-void FacialLandmarkAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void FacialLandmarkAdapter::setEngineInfo(const char *engine_type, const char *device_type)
{
_landmark_detection->setEngineInfo(string(engine_type), string(device_type));
}
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::configure()
+void FacialLandmarkAdapter::configure()
{
_landmark_detection->configure();
}
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void FacialLandmarkAdapter::getNumberOfEngines(unsigned int *number_of_engines)
{
_landmark_detection->getNumberOfEngines(number_of_engines);
}
-template<typename T, typename V>
-void FacialLandmarkAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void FacialLandmarkAdapter::getEngineType(unsigned int engine_index, char **engine_type)
{
_landmark_detection->getEngineType(engine_index, engine_type);
}
-template<typename T, typename V>
-void FacialLandmarkAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void FacialLandmarkAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
{
_landmark_detection->getNumberOfDevices(engine_type, number_of_devices);
}
-template<typename T, typename V>
-void FacialLandmarkAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void FacialLandmarkAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
{
_landmark_detection->getDeviceType(engine_type, device_index, device_type);
}
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::prepare()
+void FacialLandmarkAdapter::prepare()
{
_landmark_detection->prepare();
}
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::setInput(T &t)
+void FacialLandmarkAdapter::setInput(InputBaseType &input)
{
- _source = t;
+ _source = input;
}
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::perform()
+void FacialLandmarkAdapter::perform()
{
_landmark_detection->perform(_source.inference_src);
}
-template<typename T, typename V> void FacialLandmarkAdapter<T, V>::performAsync(T &t)
+void FacialLandmarkAdapter::performAsync(InputBaseType &input)
{
- _landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(t));
+ _landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(input));
}
-template<typename T, typename V> V &FacialLandmarkAdapter<T, V>::getOutput()
+OutputBaseType &FacialLandmarkAdapter::getOutput()
{
return _landmark_detection->getOutput();
}
-template<typename T, typename V> V &FacialLandmarkAdapter<T, V>::getOutputCache()
+OutputBaseType &FacialLandmarkAdapter::getOutputCache()
{
throw InvalidOperation("Not support yet.");
}
-template class FacialLandmarkAdapter<InputBaseType, OutputBaseType>;
}
}
\ No newline at end of file
using namespace mediavision::machine_learning;
using namespace MediaVision::Common;
using namespace mediavision::machine_learning::exception;
-using LandmarkDetectionTask = FacialLandmarkAdapter<InputBaseType, OutputBaseType>;
static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
"http://tizen.org/feature/vision.inference.face" };
MEDIA_VISION_FUNCTION_ENTER();
try {
- machine_learning_native_create<InputBaseType, OutputBaseType>(TASK_NAME, new LandmarkDetectionTask(), handle);
+ machine_learning_native_create(TASK_NAME, new FacialLandmarkAdapter(), handle);
} catch (const BaseException &e) {
LOGE("%s", e.what());
return e.getError();
using namespace mediavision::machine_learning;
using namespace MediaVision::Common;
using namespace mediavision::machine_learning::exception;
-using LandmarkDetectionTask = PoseLandmarkAdapter<InputBaseType, OutputBaseType>;
static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
"http://tizen.org/feature/vision.inference.face" };
MEDIA_VISION_FUNCTION_ENTER();
try {
- machine_learning_native_create<InputBaseType, OutputBaseType>(TASK_NAME, new LandmarkDetectionTask(), handle);
+ machine_learning_native_create(TASK_NAME, new PoseLandmarkAdapter(), handle);
} catch (const BaseException &e) {
LOGE("%s", e.what());
return e.getError();
{
namespace machine_learning
{
-template<typename T, typename V> PoseLandmarkAdapter<T, V>::PoseLandmarkAdapter() : _source()
+PoseLandmarkAdapter::PoseLandmarkAdapter() : _source()
{
_config = make_shared<MachineLearningConfig>();
_config->parseConfigFile(_config_file_name);
create(_config->getDefaultModelName());
}
-template<typename T, typename V> PoseLandmarkAdapter<T, V>::~PoseLandmarkAdapter()
+PoseLandmarkAdapter::~PoseLandmarkAdapter()
{
_landmark_detection->preDestroy();
}
-template<typename T, typename V>
-template<typename U>
-void PoseLandmarkAdapter<T, V>::create(LandmarkDetectionTaskType task_type)
+template<typename U> void PoseLandmarkAdapter::create(LandmarkDetectionTaskType task_type)
{
switch (task_type) {
case LandmarkDetectionTaskType::PLD_CPM:
}
}
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::create(const string &model_name)
+void PoseLandmarkAdapter::create(const string &model_name)
{
LandmarkDetectionTaskType task_type = convertToTaskType(model_name);
_config->loadMetaFile(make_unique<LandmarkDetectionParser>(static_cast<int>(task_type)));
}
}
-template<typename T, typename V>
-LandmarkDetectionTaskType PoseLandmarkAdapter<T, V>::convertToTaskType(string model_name)
+LandmarkDetectionTaskType PoseLandmarkAdapter::convertToTaskType(string model_name)
{
if (model_name.empty())
throw InvalidParameter("model name is empty.");
throw InvalidParameter("Invalid pose landmark model name.");
}
-template<typename T, typename V>
-void PoseLandmarkAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
- const char *model_name)
+void PoseLandmarkAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name)
{
try {
_config->setUserModel(model_file, meta_file, label_file);
}
}
-template<typename T, typename V>
-void PoseLandmarkAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void PoseLandmarkAdapter::setEngineInfo(const char *engine_type, const char *device_type)
{
_landmark_detection->setEngineInfo(string(engine_type), string(device_type));
}
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::configure()
+void PoseLandmarkAdapter::configure()
{
_landmark_detection->configure();
}
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void PoseLandmarkAdapter::getNumberOfEngines(unsigned int *number_of_engines)
{
_landmark_detection->getNumberOfEngines(number_of_engines);
}
-template<typename T, typename V>
-void PoseLandmarkAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void PoseLandmarkAdapter::getEngineType(unsigned int engine_index, char **engine_type)
{
_landmark_detection->getEngineType(engine_index, engine_type);
}
-template<typename T, typename V>
-void PoseLandmarkAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void PoseLandmarkAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
{
_landmark_detection->getNumberOfDevices(engine_type, number_of_devices);
}
-template<typename T, typename V>
-void PoseLandmarkAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void PoseLandmarkAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
{
_landmark_detection->getDeviceType(engine_type, device_index, device_type);
}
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::prepare()
+void PoseLandmarkAdapter::prepare()
{
_landmark_detection->prepare();
}
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::setInput(T &t)
+void PoseLandmarkAdapter::setInput(InputBaseType &input)
{
- _source = t;
+ _source = input;
}
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::perform()
+void PoseLandmarkAdapter::perform()
{
_landmark_detection->perform(_source.inference_src);
}
-template<typename T, typename V> void PoseLandmarkAdapter<T, V>::performAsync(T &t)
+void PoseLandmarkAdapter::performAsync(InputBaseType &input)
{
- _landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(t));
+ _landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(input));
}
-template<typename T, typename V> V &PoseLandmarkAdapter<T, V>::getOutput()
+OutputBaseType &PoseLandmarkAdapter::getOutput()
{
return _landmark_detection->getOutput();
}
-template<typename T, typename V> V &PoseLandmarkAdapter<T, V>::getOutputCache()
+OutputBaseType &PoseLandmarkAdapter::getOutputCache()
{
throw InvalidOperation("Not support yet.");
}
-template class PoseLandmarkAdapter<InputBaseType, OutputBaseType>;
}
}
{
namespace machine_learning
{
-template<typename T, typename V> class FaceDetectionAdapter : public mediavision::common::ITask<T, V>
+class FaceDetectionAdapter : public mediavision::common::ITask
{
private:
std::unique_ptr<IObjectDetection> _object_detection;
std::shared_ptr<MachineLearningConfig> _config;
- T _source;
+ InputBaseType _source;
const std::string _config_file_name = "face_detection.json";
const std::string _plugin_config_file_name = "face_detection_plugin.json";
void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
- void setInput(T &t) override;
+ void setInput(InputBaseType &input) override;
void perform() override;
- void performAsync(T &t) override;
- V &getOutput() override;
- V &getOutputCache() override;
+ void performAsync(InputBaseType &input) override;
+ OutputBaseType &getOutput() override;
+ OutputBaseType &getOutputCache() override;
};
} // machine_learning
{
namespace machine_learning
{
-template<typename T, typename V> class ObjectDetectionAdapter : public mediavision::common::ITask<T, V>
+class ObjectDetectionAdapter : public mediavision::common::ITask
{
private:
std::unique_ptr<IObjectDetection> _object_detection;
std::shared_ptr<MachineLearningConfig> _config;
- T _source;
+ InputBaseType _source;
const std::string _config_file_name = "object_detection.json";
const std::string _plugin_config_file_name = "object_detection_plugin.json";
void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
- void setInput(T &t) override;
+ void setInput(InputBaseType &input) override;
void perform() override;
- void performAsync(T &t) override;
- V &getOutput() override;
- V &getOutputCache() override;
+ void performAsync(InputBaseType &input) override;
+ OutputBaseType &getOutput() override;
+ OutputBaseType &getOutputCache() override;
};
} // machine_learning
namespace machine_learning
{
struct ObjectDetectionInput : public InputBaseType {
- ObjectDetectionInput(mv_source_h src = NULL) : InputBaseType(src)
+ ObjectDetectionInput(mv_source_h src = nullptr) : InputBaseType(src)
{}
};
{
namespace machine_learning
{
-template<typename T, typename V> FaceDetectionAdapter<T, V>::FaceDetectionAdapter() : _source()
+FaceDetectionAdapter::FaceDetectionAdapter() : _source()
{
_config = make_shared<MachineLearningConfig>();
create(_config->getDefaultModelName());
}
-template<typename T, typename V> FaceDetectionAdapter<T, V>::~FaceDetectionAdapter()
+FaceDetectionAdapter::~FaceDetectionAdapter()
{
_object_detection->preDestroy();
}
-template<typename T, typename V>
-template<typename U>
-void FaceDetectionAdapter<T, V>::create(ObjectDetectionTaskType task_type)
+template<typename U> void FaceDetectionAdapter::create(ObjectDetectionTaskType task_type)
{
switch (task_type) {
case ObjectDetectionTaskType::FD_MOBILENET_V1_SSD:
// TODO.
}
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::create(string model_name)
+void FaceDetectionAdapter::create(string model_name)
{
if (model_name.empty())
model_name = _config->getDefaultModelName();
}
}
-template<typename T, typename V>
-ObjectDetectionTaskType FaceDetectionAdapter<T, V>::convertToTaskType(string model_name)
+ObjectDetectionTaskType FaceDetectionAdapter::convertToTaskType(string model_name)
{
if (model_name.empty())
throw InvalidParameter("model name is empty.");
throw InvalidParameter("Invalid face detection model name.");
}
-template<typename T, typename V>
-void FaceDetectionAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
- const char *model_name)
+void FaceDetectionAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name)
{
try {
_config->setUserModel(model_file, meta_file, label_file);
}
}
-template<typename T, typename V>
-void FaceDetectionAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void FaceDetectionAdapter::setEngineInfo(const char *engine_type, const char *device_type)
{
_object_detection->setEngineInfo(string(engine_type), string(device_type));
}
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::configure()
+void FaceDetectionAdapter::configure()
{
_object_detection->configure();
}
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void FaceDetectionAdapter::getNumberOfEngines(unsigned int *number_of_engines)
{
_object_detection->getNumberOfEngines(number_of_engines);
}
-template<typename T, typename V>
-void FaceDetectionAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void FaceDetectionAdapter::getEngineType(unsigned int engine_index, char **engine_type)
{
_object_detection->getEngineType(engine_index, engine_type);
}
-template<typename T, typename V>
-void FaceDetectionAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void FaceDetectionAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
{
_object_detection->getNumberOfDevices(engine_type, number_of_devices);
}
-template<typename T, typename V>
-void FaceDetectionAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void FaceDetectionAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
{
_object_detection->getDeviceType(engine_type, device_index, device_type);
}
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::prepare()
+void FaceDetectionAdapter::prepare()
{
_object_detection->prepare();
}
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::setInput(T &t)
+void FaceDetectionAdapter::setInput(InputBaseType &input)
{
- _source = t;
+ _source = input;
}
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::perform()
+void FaceDetectionAdapter::perform()
{
_object_detection->perform(_source.inference_src);
}
-template<typename T, typename V> void FaceDetectionAdapter<T, V>::performAsync(T &t)
+void FaceDetectionAdapter::performAsync(InputBaseType &input)
{
- _object_detection->performAsync(static_cast<ObjectDetectionInput &>(t));
+ _object_detection->performAsync(static_cast<ObjectDetectionInput &>(input));
}
-template<typename T, typename V> V &FaceDetectionAdapter<T, V>::getOutput()
+OutputBaseType &FaceDetectionAdapter::getOutput()
{
return _object_detection->getOutput();
}
-template<typename T, typename V> V &FaceDetectionAdapter<T, V>::getOutputCache()
+OutputBaseType &FaceDetectionAdapter::getOutputCache()
{
return _object_detection->getOutputCache();
}
-template class FaceDetectionAdapter<InputBaseType, OutputBaseType>;
}
}
using namespace mediavision::machine_learning;
using namespace MediaVision::Common;
using namespace mediavision::machine_learning::exception;
-using FaceDetectionTask = FaceDetectionAdapter<InputBaseType, OutputBaseType>;
static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
"http://tizen.org/feature/vision.inference.face" };
MEDIA_VISION_FUNCTION_ENTER();
try {
- machine_learning_native_create<InputBaseType, OutputBaseType>(TASK_NAME, new FaceDetectionTask(), handle);
+ machine_learning_native_create(TASK_NAME, new FaceDetectionAdapter(), handle);
} catch (const BaseException &e) {
return e.getError();
}
MEDIA_VISION_FUNCTION_ENTER();
try {
- auto context = static_cast<Context *>(handle);
- auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
-
- auto &result = static_cast<ObjectDetectionResult &>(task->getOutputCache());
+ auto &result =
+ static_cast<ObjectDetectionResult &>(machine_learning_native_get_result_cache(handle, TASK_NAME));
if (result.number_of_objects <= index)
throw InvalidParameter("Invalid index range.");
using namespace mediavision::machine_learning;
using namespace MediaVision::Common;
using namespace mediavision::machine_learning::exception;
-using ObjectDetectionTask = ObjectDetectionAdapter<InputBaseType, OutputBaseType>;
static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
"http://tizen.org/feature/vision.inference.face" };
MEDIA_VISION_FUNCTION_ENTER();
try {
- machine_learning_native_create<InputBaseType, OutputBaseType>(TASK_NAME, new ObjectDetectionTask(), handle);
+ machine_learning_native_create(TASK_NAME, new ObjectDetectionAdapter(), handle);
} catch (const BaseException &e) {
return e.getError();
}
MEDIA_VISION_FUNCTION_ENTER();
try {
- auto &result = static_cast<ObjectDetectionResult &>(machine_learning_native_get_result(handle, TASK_NAME));
+ auto &result =
+ static_cast<ObjectDetectionResult &>(machine_learning_native_get_result_cache(handle, TASK_NAME));
if (result.number_of_objects <= index)
throw InvalidParameter("Invalid index range.");
{
namespace machine_learning
{
-template<typename T, typename V> ObjectDetectionAdapter<T, V>::ObjectDetectionAdapter() : _source()
+ObjectDetectionAdapter::ObjectDetectionAdapter() : _source()
{
_config = make_shared<MachineLearningConfig>();
create(_config->getDefaultModelName());
}
-template<typename T, typename V> ObjectDetectionAdapter<T, V>::~ObjectDetectionAdapter()
+ObjectDetectionAdapter::~ObjectDetectionAdapter()
{
_object_detection->preDestroy();
}
-template<typename T, typename V>
-template<typename U>
-void ObjectDetectionAdapter<T, V>::create(ObjectDetectionTaskType task_type)
+template<typename U> void ObjectDetectionAdapter::create(ObjectDetectionTaskType task_type)
{
switch (task_type) {
case ObjectDetectionTaskType::MOBILENET_V1_SSD:
// TODO.
}
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::create(string model_name)
+void ObjectDetectionAdapter::create(string model_name)
{
if (model_name.empty())
model_name = _config->getDefaultModelName();
}
}
-template<typename T, typename V>
-ObjectDetectionTaskType ObjectDetectionAdapter<T, V>::convertToTaskType(string model_name)
+ObjectDetectionTaskType ObjectDetectionAdapter::convertToTaskType(string model_name)
{
if (model_name.empty())
throw InvalidParameter("model name is empty.");
throw InvalidParameter("Invalid object detection model name.");
}
-template<typename T, typename V>
-void ObjectDetectionAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
- const char *model_name)
+void ObjectDetectionAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name)
{
try {
_config->setUserModel(model_file, meta_file, label_file);
}
}
-template<typename T, typename V>
-void ObjectDetectionAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void ObjectDetectionAdapter::setEngineInfo(const char *engine_type, const char *device_type)
{
_object_detection->setEngineInfo(string(engine_type), string(device_type));
}
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::configure()
+void ObjectDetectionAdapter::configure()
{
_object_detection->configure();
}
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void ObjectDetectionAdapter::getNumberOfEngines(unsigned int *number_of_engines)
{
_object_detection->getNumberOfEngines(number_of_engines);
}
-template<typename T, typename V>
-void ObjectDetectionAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void ObjectDetectionAdapter::getEngineType(unsigned int engine_index, char **engine_type)
{
_object_detection->getEngineType(engine_index, engine_type);
}
-template<typename T, typename V>
-void ObjectDetectionAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void ObjectDetectionAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
{
_object_detection->getNumberOfDevices(engine_type, number_of_devices);
}
-template<typename T, typename V>
-void ObjectDetectionAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+void ObjectDetectionAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
{
_object_detection->getDeviceType(engine_type, device_index, device_type);
}
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::prepare()
+void ObjectDetectionAdapter::prepare()
{
_object_detection->prepare();
}
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::setInput(T &t)
+void ObjectDetectionAdapter::setInput(InputBaseType &input)
{
- _source = t;
+ _source = input;
}
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::perform()
+void ObjectDetectionAdapter::perform()
{
_object_detection->perform(_source.inference_src);
}
-template<typename T, typename V> V &ObjectDetectionAdapter<T, V>::getOutput()
+OutputBaseType &ObjectDetectionAdapter::getOutput()
{
return _object_detection->getOutput();
}
-template<typename T, typename V> V &ObjectDetectionAdapter<T, V>::getOutputCache()
+OutputBaseType &ObjectDetectionAdapter::getOutputCache()
{
return _object_detection->getOutputCache();
}
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::performAsync(T &t)
+void ObjectDetectionAdapter::performAsync(InputBaseType &input)
{
- _object_detection->performAsync(static_cast<ObjectDetectionInput &>(t));
+ _object_detection->performAsync(static_cast<ObjectDetectionInput &>(input));
}
-template class ObjectDetectionAdapter<InputBaseType, OutputBaseType>;
}
}
\ No newline at end of file
{
namespace machine_learning
{
-template<typename T, typename V> class ObjectDetection3dAdapter : public mediavision::common::ITask<T, V>
+class ObjectDetection3dAdapter : public mediavision::common::ITask
{
private:
std::unique_ptr<IObjectDetection3d> _object_detection_3d;
std::shared_ptr<MachineLearningConfig> _config;
- T _source;
+ InputBaseType _source;
const std::string _config_file_name = "object_detection_3d.json";
void create(const std::string &model_name);
void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void configure() override;
void prepare() override;
- void setInput(T &t) override;
+ void setInput(InputBaseType &input) override;
void perform() override;
- void performAsync(T &t) override;
- V &getOutput() override;
- V &getOutputCache() override;
+ void performAsync(InputBaseType &input) override;
+ OutputBaseType &getOutput() override;
+ OutputBaseType &getOutputCache() override;
};
} // machine_learning
#include <mv_common.h>
#include <mv_inference_type.h>
+#include "MachineLearningType.h"
namespace mediavision
{
namespace machine_learning
{
-struct ObjectDetection3dInput {
- mv_source_h inference_src;
+struct ObjectDetection3dInput : public InputBaseType {
+ ObjectDetection3dInput(mv_source_h src = nullptr) : InputBaseType(src)
+ {}
};
struct EdgeIndex {
* @brief The object detection 3d result structure.
* @details Contains object detection 3d result.
*/
-struct ObjectDetection3dResult {
+struct ObjectDetection3dResult : public OutputBaseType {
unsigned int probability {};
unsigned int number_of_points {};
std::vector<unsigned int> x_vec;
using namespace mediavision::machine_learning;
using namespace MediaVision::Common;
using namespace mediavision::machine_learning::exception;
-using ObjectDetection3dTask = ITask<ObjectDetection3dInput, ObjectDetection3dResult>;
static mutex g_object_detection_3d_mutex;
static const char *feature_keys[] = { "http://tizen.org/feature/vision.inference.image",
MEDIA_VISION_FUNCTION_ENTER();
Context *context = nullptr;
- ObjectDetection3dTask *task = nullptr;
+ ITask *task = nullptr;
try {
context = new Context();
- task = new ObjectDetection3dAdapter<ObjectDetection3dInput, ObjectDetection3dResult>();
+ task = new ObjectDetection3dAdapter();
context->__tasks.insert(make_pair("object_detection_3d", task));
*handle = static_cast<mv_object_detection_3d_h>(context);
} catch (const BaseException &e) {
auto context = static_cast<Context *>(handle);
for (auto &m : context->__tasks)
- delete static_cast<ObjectDetection3dTask *>(m.second);
+ delete m.second;
delete context;
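Storing ITask * instead of void * in Context::__tasks is what lets the destroy path above simply `delete m.second;`: the pointer is deleted through the interface and ITask's virtual destructor dispatches to the concrete adapter, with no per-task cast. A minimal sketch of why the virtual destructor matters here (stand-in types again):

#include <cstdio>
#include <map>
#include <string>
#include <utility>

class ITask {
public:
    virtual ~ITask() = default; // without this, deleting through ITask * would be undefined
};

class ToyAdapter : public ITask {
public:
    ~ToyAdapter() override { std::printf("adapter cleaned up\n"); }
};

int main()
{
    std::map<std::string, ITask *> tasks;
    tasks.insert(std::make_pair("object_detection_3d", new ToyAdapter()));

    // Mirrors the destroy path: delete directly through the interface pointer.
    for (auto &m : tasks)
        delete m.second; // virtual dispatch reaches ToyAdapter::~ToyAdapter
    return 0;
}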
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
task->setModelInfo(model_file, meta_file, label_file, model_name);
} catch (const BaseException &e) {
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
task->setEngineInfo(backend_type, device_type);
} catch (const BaseException &e) {
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
task->getNumberOfEngines(engine_count);
// TODO.
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
task->getEngineType(engine_index, engine_type);
// TODO.
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
task->getNumberOfDevices(engine_type, device_count);
// TODO.
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
task->getDeviceType(engine_type, device_index, device_type);
// TODO.
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
task->configure();
} catch (const BaseException &e) {
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
task->prepare();
} catch (const BaseException &e) {
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
ObjectDetection3dInput input = { source };
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
- ObjectDetection3dResult &result = task->getOutput();
+ auto &result = static_cast<ObjectDetection3dResult &>(task->getOutput());
*out_probability = result.probability;
} catch (const BaseException &e) {
try {
auto context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
- ObjectDetection3dResult &result = task->getOutput();
+ auto &result = static_cast<ObjectDetection3dResult &>(task->getOutput());
*out_num_of_points = result.number_of_points;
} catch (const BaseException &e) {
try {
Context *context = static_cast<Context *>(handle);
- auto task = static_cast<ObjectDetection3dTask *>(context->__tasks.at("object_detection_3d"));
+ auto task = context->__tasks.at("object_detection_3d");
- ObjectDetection3dResult &result = task->getOutput();
+ auto &result = static_cast<ObjectDetection3dResult &>(task->getOutput());
*out_x = result.x_vec.data();
*out_y = result.y_vec.data();
{
namespace machine_learning
{
-template<typename T, typename V> ObjectDetection3dAdapter<T, V>::ObjectDetection3dAdapter() : _source()
+ObjectDetection3dAdapter::ObjectDetection3dAdapter() : _source()
{
_config = make_shared<MachineLearningConfig>();
_config->parseConfigFile(_config_file_name);
create(_config->getDefaultModelName());
}
-template<typename T, typename V> ObjectDetection3dAdapter<T, V>::~ObjectDetection3dAdapter()
+ObjectDetection3dAdapter::~ObjectDetection3dAdapter()
{}
-template<typename T, typename V>
-template<typename U>
-void ObjectDetection3dAdapter<T, V>::create(ObjectDetection3dTaskType task_type)
+template<typename U> void ObjectDetection3dAdapter::create(ObjectDetection3dTaskType task_type)
{
switch (task_type) {
case ObjectDetection3dTaskType::OBJECTRON:
}
}
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::create(const string &model_name)
+void ObjectDetection3dAdapter::create(const string &model_name)
{
ObjectDetection3dTaskType task_type = convertToTaskType(model_name);
_config->loadMetaFile(make_unique<ObjectDetection3dParser>(static_cast<int>(task_type)));
}
}
-template<typename T, typename V>
-ObjectDetection3dTaskType ObjectDetection3dAdapter<T, V>::convertToTaskType(string model_name)
+ObjectDetection3dTaskType ObjectDetection3dAdapter::convertToTaskType(string model_name)
{
if (model_name.empty())
throw InvalidParameter("model name is empty.");
throw InvalidParameter("Invalid object detection 3d model name.");
}
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
- const char *model_name)
+void ObjectDetection3dAdapter::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name)
{
try {
_config->setUserModel(model_file, meta_file, label_file);
throw InvalidParameter("Model info not invalid.");
}
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+void ObjectDetection3dAdapter::setEngineInfo(const char *engine_type, const char *device_type)
{
_object_detection_3d->setEngineInfo(string(engine_type), string(device_type));
}
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::configure()
+void ObjectDetection3dAdapter::configure()
{
_object_detection_3d->configure();
}
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+void ObjectDetection3dAdapter::getNumberOfEngines(unsigned int *number_of_engines)
{
_object_detection_3d->getNumberOfEngines(number_of_engines);
}
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+void ObjectDetection3dAdapter::getEngineType(unsigned int engine_index, char **engine_type)
{
_object_detection_3d->getEngineType(engine_index, engine_type);
}
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+void ObjectDetection3dAdapter::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
{
_object_detection_3d->getNumberOfDevices(engine_type, number_of_devices);
}
-template<typename T, typename V>
-void ObjectDetection3dAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index,
- char **device_type)
+void ObjectDetection3dAdapter::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
{
_object_detection_3d->getDeviceType(engine_type, device_index, device_type);
}
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::prepare()
+void ObjectDetection3dAdapter::prepare()
{
_object_detection_3d->prepare();
}
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::setInput(T &t)
+void ObjectDetection3dAdapter::setInput(InputBaseType &input)
{
- _source = t;
+ _source = input;
}
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::perform()
+void ObjectDetection3dAdapter::perform()
{
shared_ptr<MetaInfo> metaInfo = _object_detection_3d->getInputMetaInfo();
_object_detection_3d->perform(_source.inference_src, metaInfo);
}
-template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::performAsync(T &t)
+void ObjectDetection3dAdapter::performAsync(InputBaseType &input)
{
throw InvalidOperation("Not support yet.");
}
-template<typename T, typename V> V &ObjectDetection3dAdapter<T, V>::getOutput()
+OutputBaseType &ObjectDetection3dAdapter::getOutput()
{
return _object_detection_3d->result();
}
-template<typename T, typename V> V &ObjectDetection3dAdapter<T, V>::getOutputCache()
+OutputBaseType &ObjectDetection3dAdapter::getOutputCache()
{
throw InvalidOperation("Not support yet.");
}
-template class ObjectDetection3dAdapter<ObjectDetection3dInput, ObjectDetection3dResult>;
}
}
\ No newline at end of file
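Taken together, the hunks above reduce every task to the same non-generic flow: build a concrete adapter, register it in the Context under its task name, drive it through the ITask interface, and narrow only the result. A hedged end-to-end sketch of that flow for the 3D object detection path, using only calls that appear in this patch; error handling, the mutex, and the surrounding C API signatures are omitted, and the helper name and model file paths are placeholders:

// Illustrative sketch only; assumes the mediavision headers touched by this patch
// (the adapter, Context, and type definitions) are on the include path.
#include <mv_common.h>
#include <utility>

using namespace mediavision::machine_learning;

void run_object_detection_3d(mv_source_h source) // hypothetical helper for illustration
{
    Context *context = new Context();
    mediavision::common::ITask *task = new ObjectDetection3dAdapter();

    context->__tasks.insert(std::make_pair("object_detection_3d", task));

    task->setModelInfo("model.tflite", "model_meta.json", "label.txt", ""); // placeholder files
    task->configure();
    task->prepare();

    ObjectDetection3dInput input = { source };
    task->setInput(input);
    task->perform();

    auto &result = static_cast<ObjectDetection3dResult &>(task->getOutput());
    (void) result; // result.probability, result.number_of_points, result.x_vec, ...

    for (auto &m : context->__tasks)
        delete m.second;
    delete context;
}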