#include "inference_engine_common_impl.h"
#include "Inference.h"
#include "label_manager.h"
-#include "face_net_info.h"
+#include "backbone_model_info.h"
#include "simple_shot.h"
#include "data_augment_default.h"
#include "data_augment_flip.h"
class FaceRecognition {
private:
+ unsigned int _status;
+ std::unique_ptr<mediavision::inference::Inference> _internal;
+ std::unique_ptr<mediavision::inference::Inference> _backbone;
+ std::unique_ptr<IBackboneModelInfo> _backbone_model_info;
+ std::unique_ptr<TrainingModel> _training_model;
+ std::unique_ptr<LabelManager> _label_manager;
FaceRecognitionConfig _config;
- std::vector<std::unique_ptr<DataAugment>> _data_augments;
mv_face_recognition_result_s _result;
+ std::vector<std::unique_ptr<DataAugment>> _data_augments;
+
// FYI. This function should be called every time a new face is registered.
void ImportLabel();
std::vector<model_layer_info>& GetBackboneInputLayerInfo();
int GetVecFromMvSource(mv_source_h img_src, std::vector<float>& out_vec);
-protected:
- unsigned int _status;
- std::unique_ptr<mediavision::inference::Inference> _internal;
- std::unique_ptr<mediavision::inference::Inference> _backbone;
- std::unique_ptr<FaceNetInfo> _face_net_info;
- std::unique_ptr<TrainingModel> _training_model;
- std::unique_ptr<LabelManager> _label_manager;
-
public:
FaceRecognition();
~ FaceRecognition();
#include "face_recognition.h"
#include "nntrainer_fvm.h"
#include "nntrainer_dsm.h"
+#include "face_net_info.h"
#include "file_util.h"
using namespace std;
{
FaceRecognition::FaceRecognition() :
- _status(NONE), _internal(), _backbone(), _face_net_info(), _training_model(), _label_manager()
+ _status(NONE), _internal(), _backbone(), _backbone_model_info(), _training_model(), _label_manager(), _config(), _result()
{
_data_augments.push_back(std::make_unique<DataAugmentDefault>());
/* Add other data argument classes. */
std::vector<model_layer_info>& FaceRecognition::GetBackboneInputLayerInfo()
{
-	return _face_net_info->GetInputLayerInfo();
+	// Delegate to the backbone-model abstraction so this accessor no longer
+	// depends on the concrete FaceNetInfo type. Returns a reference to the
+	// model's input layer metadata (layer names + tensor info) owned by
+	// _backbone_model_info; callers must not outlive that member.
+	return _backbone_model_info->GetInputLayerInfo();
}
int FaceRecognition::GetVecFromMvSource(mv_source_h img_src, std::vector<float>& out_vec)
int FaceRecognition::Initialize()
{
- _face_net_info = make_unique<FaceNetInfo>(_config.backbone_model_file_path);
+ _backbone_model_info = make_unique<FaceNetInfo>(_config.backbone_model_file_path);
- if (_face_net_info->GetInputLayerInfo().empty() || _face_net_info->GetInputLayerInfo().size() > 1) {
+ if (_backbone_model_info->GetInputLayerInfo().empty() || _backbone_model_info->GetInputLayerInfo().size() > 1) {
LOGE("Invalid input layer size - input layer size should be 1.");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- if (_face_net_info->GetOutputLayerInfo().empty() || _face_net_info->GetOutputLayerInfo().size() > 1) {
+ if (_backbone_model_info->GetOutputLayerInfo().empty() || _backbone_model_info->GetOutputLayerInfo().size() > 1) {
LOGE("Invalid output layer size - output layer size should be 1.");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
vector<string> input_layer_names, output_layer_names;
vector<inference_engine_tensor_info> input_tensor_info, output_tensor_info;
- for (auto& input : _face_net_info->GetInputLayerInfo()) {
+ for (auto& input : _backbone_model_info->GetInputLayerInfo()) {
input_layer_names.push_back(input.layer_name);
input_tensor_info.push_back(input.tensor_info);
}
- for (auto& output : _face_net_info->GetOutputLayerInfo()) {
+ for (auto& output : _backbone_model_info->GetOutputLayerInfo()) {
output_layer_names.push_back(output.layer_name);
output_tensor_info.push_back(output.tensor_info);
}
_backbone->ConfigureInputInfo(width, height, 1, ch, 127.5f, 127.5f, MV_INFERENCE_DATA_FLOAT32, input_layer_names);
_backbone->ConfigureOutputInfo(output_layer_names, output_tensor_info);
- _backbone->ConfigureModelFiles("", _face_net_info->GetModelFilePath(), "");
+ _backbone->ConfigureModelFiles("", _backbone_model_info->GetModelFilePath(), "");
ret = _backbone->Load();
if (ret != MEDIA_VISION_ERROR_NONE)
int FaceRecognition::RegisterNewFace(mv_source_h img_src, string label_name)
{
- vector<model_layer_info>& output_layer_info = _face_net_info->GetOutputLayerInfo();
+ vector<model_layer_info>& output_layer_info = _backbone_model_info->GetOutputLayerInfo();
if (_status < INITIALIZED) {
LOGE("Initialization not ready yet. (%u)", _status);
// Import label data from a label file.
ImportLabel();
- if (_face_net_info->GetInputLayerInfo().empty() || _face_net_info->GetInputLayerInfo().size() > 1) {
+ if (_backbone_model_info->GetInputLayerInfo().empty() || _backbone_model_info->GetInputLayerInfo().size() > 1) {
LOGE("Invalid input layer size - input layer size should be 1.");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- if (_face_net_info->GetOutputLayerInfo().empty() || _face_net_info->GetOutputLayerInfo().size() > 1) {
+ if (_backbone_model_info->GetOutputLayerInfo().empty() || _backbone_model_info->GetOutputLayerInfo().size() > 1) {
LOGE("Invalid output layer size - output layer size should be 1.");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
}
// Get output layer info for facenet model.
- vector<model_layer_info>& output_layer_info = _face_net_info->GetOutputLayerInfo();
+ vector<model_layer_info>& output_layer_info = _backbone_model_info->GetOutputLayerInfo();
// Get output tensor buffer to the output layer.
inference_engine_tensor_buffer *backbone_output_buffer = _backbone->GetOutputTensorBuffer(output_layer_info[0].layer_name);
if (!backbone_output_buffer) {