void WriteHeader(size_t feature_size, size_t one_hot_table_size, unsigned int data_set_cnt) override;
void ReadHeader(FeaVecHeader& header) override;
void WriteFeatureVec(std::vector<float>& feature_vec, const int max_label, const int label_index) override;
+ void Remove() override;
};
#endif
\ No newline at end of file
class SimpleShot : public TrainingModel {
private:
TrainingEngineBackendInfo _engine_info;
-
+private:
+ void SaveModel(const std::string file_path);
+ void RemoveModel(const std::string file_path);
public:
SimpleShot(const mv_inference_backend_type_e backend_type = MV_INFERENCE_BACKEND_NNTRAINER,
const mv_inference_target_device_e target_type = MV_INFERENCE_TARGET_DEVICE_CPU,
// Configure layers for SimpleShot learning.
void ConfigureModel(int num_of_class);
- void SaveModel(const std::string file_path);
-
TrainingEngineBackendInfo& GetTrainingEngineInfo() { return _engine_info; }
};
{
"name" : "MV_FACE_RECOGNITION_BACKBONE_MODEL_FILE_PATH",
"type" : "string",
- "value" : "/usr/share/capi-media-vision/models/FR/backbone/tflite/facenet.tflite"
+ "value" : "/home/owner/media/res/face_recognition/backbone/facenet.tflite"
},
{
"name" : "MV_FACE_RECOGNITION_DEFAULT_PATH",
"type" : "string",
- "value" : "/usr/share/capi-media-vision/models/FR/training/"
+ "value" : "/home/owner/media/res/face_recognition/training/"
},
{
"name" : "MV_FACE_RECOGNITION_DECISION_THRESHOLD",
remove(fvm->GetFileName().c_str());
rename(fvm_new->GetFileName().c_str(), fvm->GetFileName().c_str());
- _training_model->ConfigureModel(label_cnt);
+ if (data_set_cnt == 0) {
+ _training_model->RemoveModel();
+ fvm->Remove();
+ _label_manager->Remove();
- unique_ptr<DataSetManager> new_data_set;
+ LOGD("No training data so removed all relevant files.");
+ } else {
+ _training_model->ConfigureModel(label_cnt);
- UpdateDataSet(new_data_set);
- _training_model->ApplyDataSet(new_data_set);
- _training_model->Compile();
- _training_model->Train();
+ unique_ptr<DataSetManager> new_data_set;
+
+ UpdateDataSet(new_data_set);
+ _training_model->ApplyDataSet(new_data_set);
+ _training_model->Compile();
+ _training_model->Train();
+ }
_status = DELETED;
} catch (const BaseException& e) {
outFile.write((char *)&oneHotTable, sizeof(float));
}
}
+
+void NNTrainerFVM::Remove()
+{
+	// Delete the feature vector file from disk. Best-effort: the return
+	// value of ::remove is ignored, so a missing file is not an error.
+	::remove(_feature_vector_file.c_str());
+}
\ No newline at end of file
if (ret != TRAINING_ENGINE_ERROR_NONE)
throw InvalidOperation("Fail to save a model.");
}
+
+void SimpleShot::RemoveModel(const string file_path)
+{
+	// Derive the weight-file path by replacing the extension with ".bin".
+	// Use rfind so a '.' inside a directory component (e.g. a dotted user
+	// or package name) does not truncate the path at the wrong place.
+	string bin_file_path = file_path.substr(0, file_path.rfind('.')) + ".bin";
+
+	// Delete both files from disk. Best-effort: ::remove's return value is
+	// ignored, so already-missing files are not treated as errors.
+	::remove(bin_file_path.c_str());
+	::remove(file_path.c_str());
+}
\ No newline at end of file
virtual void WriteHeader(size_t feature_size, size_t one_hot_table_size, unsigned int data_set_cnt) = 0;
virtual void ReadHeader(FeaVecHeader& header) = 0;
virtual void WriteFeatureVec(std::vector<float>& feature_vec, const int max_label, const int label_index) = 0;
+ virtual void Remove() = 0;
static constexpr unsigned int feature_vector_signature = 0xFEA09841;
};
size_t GetMaxLabel(const std::string label_file);
size_t GetMaxLabel();
std::string GetLabelFromAnswer(const std::vector<float>& result);
+ void Remove();
};
#endif
} TrainingEngineBackendInfo;
class TrainingModel {
+private:
+ virtual void SaveModel(const std::string file_path) = 0;
+ virtual void RemoveModel(const std::string file_path) = 0;
protected:
std::unique_ptr<TrainingEngineInterface::Common::TrainingEngineCommon> _training;
std::unique_ptr<training_engine_model> _model;
std::unique_ptr<training_engine_dataset> _data_set;
std::string _internal_model_file;
-
public:
TrainingModel(const mv_inference_backend_type_e backend_type = MV_INFERENCE_BACKEND_NNTRAINER,
const mv_inference_target_device_e target_type = MV_INFERENCE_TARGET_DEVICE_CPU,
void ClearDataSet(std::unique_ptr<DataSetManager>& data_set);
void Compile();
void Train();
+ void RemoveModel();
virtual void ConfigureModel(int num_of_class) = 0;
virtual TrainingEngineBackendInfo& GetTrainingEngineInfo() = 0;
- virtual void SaveModel(const std::string file_path) = 0;
};
#endif
\ No newline at end of file
return answer_label;
}
+
+void LabelManager::Remove()
+{
+	// Delete the label file from disk. Best-effort: the return value of
+	// ::remove is ignored, so a missing file is not an error.
+	::remove(_label_file.c_str());
+}
\ No newline at end of file
// Save model file.
SaveModel(_internal_model_file);
+}
+
+void TrainingModel::RemoveModel()
+{
+	// Delegate to the backend-specific RemoveModel(file_path) override,
+	// targeting the internally managed model file.
+	RemoveModel(_internal_model_file);
}
\ No newline at end of file
Name: capi-media-vision
Summary: Media Vision library for Tizen Native API
-Version: 0.23.9
+Version: 0.23.10
Release: 0
Group: Multimedia/Framework
License: Apache-2.0 and BSD-3-Clause
#include "ImageHelper.h"
#include "mv_face_recognition.h"
-#define TRAIN_LIST_FILE "/usr/share/capi-media-vision/models/FR/training/train_list.txt"
-#define TEST_LIST_FILE "/usr/share/capi-media-vision/models/FR/training/test_list.txt"
+#define TRAIN_LIST_FILE "/home/owner/media/res/face_recognition/res/measurement/train_list.txt"
+#define TEST_LIST_FILE "/home/owner/media/res/face_recognition/res/measurement/test_list.txt"
+#define TRAINING_IMAGE_PATH "/home/owner/media/res/face_recognition/res/measurement/train/"
+#define TEST_IMAGE_PATH "/home/owner/media/res/face_recognition/res/measurement/test/"
#define MAX_TRAINING_CLASS 20
#define SHOT_PER_CLASS 5
train_file >> filename >> label >> index;
- const string image_path = string("/usr/share/capi-media-vision/models/FR/training/train/") + filename;
+ const string image_path = string(TRAINING_IMAGE_PATH) + filename;
cout << "training " << image_path << " file" << " with " << label << " train cnt = " << train_cnt << endl;
if (filename.empty() || label.empty() || index.empty())
break;
- string image_path = string("/usr/share/capi-media-vision/models/FR/training/test/") + filename;
+ string image_path = string(TEST_IMAGE_PATH) + filename;
cout << "inferencing " << image_path << " file" << " with " << label << " index = " << test_cnt++ << endl;
#include "ImageHelper.h"
#include "mv_face_recognition.h"
+#define TRAINING_IMAGE_PATH "/home/owner/media/res/face_recognition/res/test/training/"
+#define TEST_IMAGE_PATH "/home/owner/media/res/face_recognition/res/test/test/"
+
using namespace testing;
using namespace std;
using namespace MediaVision::Common;
+// Smoke test: a face recognition handle can be created and destroyed
+// without error, with no prepare/train cycle in between.
+TEST(FaceRecognitionTest, FaceRecognitionSimpleTest)
+{
+	mv_face_recognition_h handle;
+
+	int ret = mv_face_recognition_create(&handle);
+	ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+	ret = mv_face_recognition_destroy(handle);
+	ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+}
+
TEST(FaceRecognitionTest, FaceRecognitionClassShouldBeOk)
{
mv_face_recognition_h handle;
ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
for (auto& image : training_images) {
- const string image_path = string("/usr/share/capi-media-vision/face-recognition/images/training/") + image.first;
+ const string image_path = string(TRAINING_IMAGE_PATH) + image.first;
mv_source_h mv_source = NULL;
int ret = mv_create_source(&mv_source);
unsigned int correct_cnt = 0;
for (auto& image : test_images) {
- const string image_path = string("/usr/share/capi-media-vision/face-recognition/images/test/") + image.first;
+ const string image_path = string(TEST_IMAGE_PATH) + image.first;
mv_source_h mv_source = NULL;
int ret = mv_create_source(&mv_source);
auto& answer = answers[label_idx++];
for (auto& image : training_images) {
- const string image_path = string("/usr/share/capi-media-vision/face-recognition/images/training/") + image.first;
+ const string image_path = string(TRAINING_IMAGE_PATH) + image.first;
mv_source_h mv_source = NULL;
ret = mv_create_source(&mv_source);
ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
}
-
if (!label.empty()) {
ret = mv_face_recognition_unregister(handle, label.c_str());
ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
unsigned int correct_cnt = 0;
for (auto& image : test_images) {
- const string image_path = string("/usr/share/capi-media-vision/face-recognition/images/test/") + image.first;
+ const string image_path = string(TEST_IMAGE_PATH) + image.first;
mv_source_h mv_source = NULL;
ret = mv_create_source(&mv_source);
ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
}
+// Register every training image, then unregister all known labels.
+// Exercises the cleanup path taken when the data set becomes empty
+// (model, feature vector, and label files are removed).
+TEST(FaceRecognitionTest, RemoveAllLabelsShouldBeOk)
+{
+	// NOTE(review): this list must stay in sync with the labels used by
+	// `training_images`, which is defined elsewhere in this file — confirm.
+	vector<string> labels = { "3448", "2929", "7779" };
+	mv_face_recognition_h handle;
+
+	int ret = mv_face_recognition_create(&handle);
+	ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+	ret = mv_face_recognition_prepare(handle);
+	ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+	// Register every training image with its label.
+	for (auto& image : training_images) {
+		const string image_path = string(TRAINING_IMAGE_PATH) + image.first;
+		mv_source_h mv_source = NULL;
+		ret = mv_create_source(&mv_source);
+		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+		ret = ImageHelper::loadImageToSource(image_path.c_str(), mv_source);
+		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+		ret = mv_face_recognition_register(handle, mv_source, image.second.c_str());
+		ASSERT_EQ(ret, 0);
+
+		ret = mv_destroy_source(mv_source);
+		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+	}
+
+	// Unregister every label; the last removal empties the data set.
+	for (auto& label : labels) {
+		ret = mv_face_recognition_unregister(handle, label.c_str());
+		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+	}
+
+	ret = mv_face_recognition_destroy(handle);
+	ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+}
+
int main(int argc, char **argv)
{
InitGoogleTest(&argc, argv);