// LandmarkDetection declaration: this hunk reshapes the class API so adapters
// no longer drive meta-file parsing or tensor-type dispatch themselves.
template<typename T>
void preprocess(mv_source_h &mv_src, std::shared_ptr<MetaInfo> metaInfo, std::vector<T> &inputVector);
- template<typename T> void inference(std::vector<std::vector<T> > &inputVectors);
+ // Moved out of the public section: meta-info lookup and the typed
+ // perform<T>() become internals driven by the public perform() below.
+ std::shared_ptr<MetaInfo> getInputMetaInfo();
+ template<typename T> void perform(mv_source_h &mv_src, std::shared_ptr<MetaInfo> metaInfo);
LandmarkDetectionTaskType _task_type;
void getOutputNames(std::vector<std::string> &names);
void getOutputTensor(std::string target_name, std::vector<float> &tensor);
+ // parseMetaFile() is no longer public; configure(configFile) calls it.
+ void parseMetaFile(const std::string &meta_file_name);
+
+ template<typename T> void inference(std::vector<std::vector<T> > &inputVectors);
+ // result() stays the task-specific hook; callers use getOutput() instead.
+ virtual LandmarkDetectionResult &result() = 0;
public:
LandmarkDetection(LandmarkDetectionTaskType task_type);
void getEngineType(unsigned int engine_index, char **engine_type);
void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices);
void getDeviceType(const char *engine_type, const unsigned int device_index, char **device_type);
- std::shared_ptr<MetaInfo> getInputMetaInfo();
- void parseMetaFile(const char *meta_file_name);
- void configure();
+ // configure() now takes the meta file name (std::string instead of the old
+ // const char* parseMetaFile parameter) and parses it internally.
+ void configure(const std::string &configFile);
void prepare();
- template<typename T> void perform(mv_source_h &mv_src, std::shared_ptr<MetaInfo> metaInfo);
- virtual LandmarkDetectionResult &result() = 0;
+ // New simplified public entry points for adapters.
+ void perform(mv_source_h &mv_src);
+ LandmarkDetectionResult &getOutput();
};
} // machine_learning
// FacialLandmarkAdapter::configure(): the old two-step sequence
// (parseMetaFile() then configure()) collapses into one configure(meta_file)
// call now that LandmarkDetection parses the meta file itself.
template<typename T, typename V> void FacialLandmarkAdapter<T, V>::configure()
{
- _landmark_detection->parseMetaFile("facial_landmark.json");
- _landmark_detection->configure();
+ _landmark_detection->configure("facial_landmark.json");
}
template<typename T, typename V> void FacialLandmarkAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
// FacialLandmarkAdapter::perform(): the uint8/float32 data-type dispatch
// moves down into LandmarkDetection::perform(), so the adapter just forwards
// the inference source. Removes logic duplicated across adapters.
template<typename T, typename V> void FacialLandmarkAdapter<T, V>::perform()
{
- shared_ptr<MetaInfo> metaInfo = _landmark_detection->getInputMetaInfo();
- if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8)
- _landmark_detection->perform<unsigned char>(_source.inference_src, metaInfo);
- else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32)
- _landmark_detection->perform<float>(_source.inference_src, metaInfo);
- else
- throw InvalidOperation("Invalid model data type.");
+ _landmark_detection->perform(_source.inference_src);
}
template<typename T, typename V> void FacialLandmarkAdapter<T, V>::performAsync(T &t)
// FacialLandmarkAdapter::getOutput(): switch to the new public getOutput()
// wrapper instead of calling the (now non-public) virtual result() directly.
template<typename T, typename V> V &FacialLandmarkAdapter<T, V>::getOutput()
{
- return _landmark_detection->result();
+ return _landmark_detection->getOutput();
}
template<typename T, typename V> V &FacialLandmarkAdapter<T, V>::getOutputCache()
readFile.close();
}
-void LandmarkDetection::parseMetaFile(const char *meta_file_name)
+// Signature change: take const std::string& instead of const char*, which
+// drops the string() wrap at the concatenation below and matches the new
+// configure(const std::string&) caller. (Middle of the body is elided here.)
+void LandmarkDetection::parseMetaFile(const string &meta_file_name)
{
- _config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + string(meta_file_name));
+ _config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + meta_file_name);
int ret = _config->getIntegerAttribute(string(MV_LANDMARK_DETECTION_BACKEND_TYPE), &_backendType);
if (ret != MEDIA_VISION_ERROR_NONE)
loadLabel();
}
-void LandmarkDetection::configure()
+// configure() now owns meta-file parsing: callers pass the config file name
+// and the adapters' separate parseMetaFile() call disappears.
+void LandmarkDetection::configure(const string &configFile)
{
+ parseMetaFile(configFile);
+
int ret = _inference->bind(_backendType, _targetDeviceType);
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to bind a backend engine.");
// NOTE(review): the two lines below appear to belong to a different
// (template) function in the full file — hunk context elided in this excerpt.
inference<T>(inputVectors);
}
+// New public entry point: read the model input data type from the meta info
+// and dispatch to the typed perform<T>() — logic formerly duplicated in each
+// adapter's perform(). Unknown data types raise InvalidOperation.
+void LandmarkDetection::perform(mv_source_h &mv_src)
+{
+ shared_ptr<MetaInfo> metaInfo = getInputMetaInfo();
+
+ if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8)
+ perform<unsigned char>(mv_src, metaInfo);
+ else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32)
+ perform<float>(mv_src, metaInfo);
+ else
+ throw InvalidOperation("Invalid model data type.");
+}
+
+// Public accessor wrapping the task-specific pure-virtual result(), so
+// adapters no longer touch result() directly.
+LandmarkDetectionResult &LandmarkDetection::getOutput()
+{
+ return result();
+}
+
void LandmarkDetection::getOutputNames(vector<string> &names)
{
TensorBuffer &tensor_buffer_obj = _inference->getOutputTensorBuffer();
// PoseLandmarkAdapter::configure(): same collapse as the facial adapter —
// one configure(meta_file) call replaces parseMetaFile() + configure().
template<typename T, typename V> void PoseLandmarkAdapter<T, V>::configure()
{
- _landmark_detection->parseMetaFile("pose_landmark.json");
- _landmark_detection->configure();
+ _landmark_detection->configure("pose_landmark.json");
}
template<typename T, typename V> void PoseLandmarkAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
// PoseLandmarkAdapter::perform(): data-type dispatch is delegated to
// LandmarkDetection::perform(), mirroring the facial adapter change.
template<typename T, typename V> void PoseLandmarkAdapter<T, V>::perform()
{
- shared_ptr<MetaInfo> metaInfo = _landmark_detection->getInputMetaInfo();
- if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8)
- _landmark_detection->perform<unsigned char>(_source.inference_src, metaInfo);
- else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32)
- _landmark_detection->perform<float>(_source.inference_src, metaInfo);
- else
- throw InvalidOperation("Invalid model data type.");
+ _landmark_detection->perform(_source.inference_src);
}
template<typename T, typename V> void PoseLandmarkAdapter<T, V>::performAsync(T &t)
// PoseLandmarkAdapter::getOutput(): use the public getOutput() wrapper
// rather than the now non-public virtual result().
template<typename T, typename V> V &PoseLandmarkAdapter<T, V>::getOutput()
{
- return _landmark_detection->result();
+ return _landmark_detection->getOutput();
}
template<typename T, typename V> V &PoseLandmarkAdapter<T, V>::getOutputCache()