mv_machine_learning: use reference and vector
author Inki Dae <inki.dae@samsung.com>
Mon, 26 Sep 2022 07:56:43 +0000 (16:56 +0900)
committer Inki Dae <inki.dae@samsung.com>
Mon, 7 Nov 2022 03:12:39 +0000 (12:12 +0900)
[Issue type] : code clean

Use reference instead of pointer when returning the result to user
and replace the array type of members of object_detection_3d_result_s structure
with vector type.

Change-Id: Ieed3a2d087e617af604f7a42d57d7a78eab0d116
Signed-off-by: Inki Dae <inki.dae@samsung.com>
14 files changed:
mv_machine_learning/common/include/itask.h
mv_machine_learning/face_recognition/include/face_recognition.h
mv_machine_learning/face_recognition/include/face_recognition_adapter.h
mv_machine_learning/face_recognition/src/face_recognition.cpp
mv_machine_learning/face_recognition/src/face_recognition_adapter.cpp
mv_machine_learning/face_recognition/src/mv_face_recognition_open.cpp
mv_machine_learning/object_detection/include/object_detection.h
mv_machine_learning/object_detection/include/object_detection_adapter.h
mv_machine_learning/object_detection/include/object_detection_type.h
mv_machine_learning/object_detection/include/objectron.h
mv_machine_learning/object_detection/meta/object_detection_3d.json
mv_machine_learning/object_detection/src/mv_object_detection_3d_open.cpp
mv_machine_learning/object_detection/src/object_detection_adapter.cpp
mv_machine_learning/object_detection/src/objectron.cpp

index 420e050..ef5f9b2 100644 (file)
@@ -31,7 +31,7 @@ public:
        virtual void prepare() = 0;
        virtual void setInput(T &t) = 0;
        virtual void perform() = 0;
-       virtual V* getOutput() = 0;
+       virtual V& getOutput() = 0;
 };
 } // namespace
 } // namespace
index 18c28bb..19da884 100644 (file)
@@ -134,7 +134,7 @@ public:
        int RecognizeFace(mv_source_h img_src);
        int DeleteLabel(std::string label_name);
        int GetLabel(const char **out_label);
-       mv_face_recognition_result_s* GetResult();
+       mv_face_recognition_result_s& GetResult();
 };
 
 } // machine_learning
index 253c395..ffc3a03 100644 (file)
@@ -81,7 +81,7 @@ public:
        void prepare() override;
        void setInput(T &t) override;
        void perform() override;
-       V* getOutput() override;
+       V& getOutput() override;
 };
 
 } // machine_learning
index 6e71086..7ef00e7 100644 (file)
@@ -663,7 +663,7 @@ int FaceRecognition::GetLabel(const char **out_label)
        return MEDIA_VISION_ERROR_NONE;
 }
 
-mv_face_recognition_result_s* FaceRecognition::GetResult()
+mv_face_recognition_result_s& FaceRecognition::GetResult()
 {
        if (_status != INFERENCED)
                throw InvalidOperation("Inference not completed yet.");
@@ -678,7 +678,7 @@ mv_face_recognition_result_s* FaceRecognition::GetResult()
                throw e;
        }
 
-       return &_result;
+       return _result;
 }
 
 } // machine_learning
index b018b84..1a1f9d9 100644 (file)
@@ -127,7 +127,7 @@ template<typename T, typename V> void FaceRecognitionAdapter<T, V>::perform()
        }
 }
 
-template<typename T, typename V> V* FaceRecognitionAdapter<T, V>::getOutput()
+template<typename T, typename V> V& FaceRecognitionAdapter<T, V>::getOutput()
 {
        return _face_recognition->GetResult();
 }
index c5c7713..5d305f3 100644 (file)
@@ -213,7 +213,7 @@ int mv_face_recognition_get_label_open(mv_face_recognition_h handle, const char
                Context *context = static_cast<Context *>(handle);
                auto task = static_cast<FaceRecognitionTask *>(context->__tasks["face_recognition"]);
 
-               *out_label = task->getOutput()->label.c_str();
+               *out_label = task->getOutput().label.c_str();
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
index bbdbe7c..bf9e94b 100644 (file)
@@ -47,7 +47,7 @@ public:
        void configure();
        void prepare();
        void inference(mv_source_h source);
-       virtual object_detection_3d_result_s* getResult() = 0;
+       virtual object_detection_3d_result_s& getResult() = 0;
 };
 
 } // machine_learning
index dcf136c..a84de91 100644 (file)
@@ -45,7 +45,7 @@ public:
        void prepare() override;
        void setInput(T &t) override;
        void perform() override;
-       V* getOutput() override;
+       V& getOutput() override;
 };
 
 } // machine_learning
index 6694387..a91e442 100644 (file)
@@ -41,22 +41,10 @@ typedef struct {
 struct object_detection_3d_result_s {
        unsigned int probability;
        unsigned int number_of_points;
-       unsigned int *x_points;
-       unsigned int *y_points;
+       std::vector<unsigned int> x_vec;
+       std::vector<unsigned int> y_vec;
        unsigned int number_of_edges;
-       edge_index_s *edge_indexes;
-
-       ~object_detection_3d_result_s()
-       {
-               if (x_points)
-                       delete []x_points;
-
-               if (y_points)
-                       delete []y_points;
-
-               if (edge_indexes)
-                       delete []edge_indexes;
-       }
+       std::vector<edge_index_s> edge_index_vec;
 };
 
 typedef enum {
index 3761e0d..bfe3f9f 100644 (file)
@@ -30,15 +30,13 @@ namespace machine_learning
 class Objectron : public ObjectDetection
 {
 private:
-       std::unique_ptr<object_detection_3d_result_s> _result;
-       int _maxPoints;
-       int _maxEdges;
+       object_detection_3d_result_s _result;
 
 public:
        Objectron();
        ~Objectron();
        void parseMetaFile() override;
-       object_detection_3d_result_s* getResult() override;
+       object_detection_3d_result_s& getResult() override;
 };
 
 } // machine_learning
index 057abb2..1b4821d 100644 (file)
@@ -7,16 +7,6 @@
                        "value" : "cup"
                },
                {
-            "name" : "MAX_NUM_OF_POINTS",
-                       "type" : "integer",
-                       "value" : 9
-               },
-               {
-            "name" : "MAX_NUM_OF_EDGES",
-                       "type" : "integer",
-                       "value" : 12
-               },
-               {
             "name"  : "MODEL_FILE_PATH",
             "type"  : "string",
             "value" : "/home/owner/media/res/object_detection_3d/tflite/object_detection_3d_cup.tflite"
index 38ae681..7d360d0 100644 (file)
@@ -177,9 +177,9 @@ int mv_object_detection_3d_get_probability_open(mv_object_detection_3d_h handle,
                auto context = static_cast<Context *>(handle);
                auto task = static_cast<ObjectDetectionTask *>(context->__tasks["objectron"]);
 
-               object_detection_3d_result_s *result = task->getOutput();
+               object_detection_3d_result_s &result = task->getOutput();
 
-               *out_probability = result->probability;
+               *out_probability = result.probability;
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
@@ -203,9 +203,9 @@ int mv_object_detection_3d_get_num_of_points_open(mv_object_detection_3d_h handl
                auto context = static_cast<Context *>(handle);
                auto task = static_cast<ObjectDetectionTask *>(context->__tasks["objectron"]);
 
-               auto result = task->getOutput();
+               object_detection_3d_result_s& result = task->getOutput();
 
-               *out_num_of_points = result->number_of_points;
+               *out_num_of_points = result.number_of_points;
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
@@ -229,10 +229,10 @@ int mv_object_detection_3d_get_points_open(mv_object_detection_3d_h handle, unsi
                Context *context = static_cast<Context *>(handle);
                auto task = static_cast<ObjectDetectionTask *>(context->__tasks["objectron"]);
 
-               auto result = task->getOutput();
+               object_detection_3d_result_s& result = task->getOutput();
 
-               *out_x = result->x_points;
-               *out_y = result->y_points;
+               *out_x = result.x_vec.data();
+               *out_y = result.y_vec.data();
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
index b1b9ce7..7d95585 100644 (file)
@@ -80,7 +80,7 @@ template<typename T, typename V> void ObjectDetectionAdapter<T, V>::perform()
        }
 }
 
-template<typename T, typename V> V* ObjectDetectionAdapter<T, V>::getOutput()
+template<typename T, typename V> V& ObjectDetectionAdapter<T, V>::getOutput()
 {
        return _object_detection->getResult();
 }
index fa67444..ba4bde4 100644 (file)
@@ -32,10 +32,9 @@ namespace mediavision
 namespace machine_learning
 {
 
-Objectron::Objectron()
+Objectron::Objectron() : _result()
 {
        _inference = make_unique<Inference>();
-       _result = make_unique<object_detection_3d_result_s>();
 }
 
 Objectron::~Objectron()
@@ -62,16 +61,6 @@ void Objectron::parseMetaFile()
        if (ret != MEDIA_VISION_ERROR_NONE)
                throw InvalidOperation("Fail to get target device type.");
 
-       ret = _config->getIntegerAttribute(string(MV_OBJECT_DETECTION_3D_MAX_NUM_OF_POINTS),
-                                                                         &_maxPoints);
-       if (ret != MEDIA_VISION_ERROR_NONE)
-               throw InvalidOperation("Fail to get maximum number of points.");
-
-       ret = _config->getIntegerAttribute(string(MV_OBJECT_DETECTION_3D_MAX_NUM_OF_EDGES),
-                                                                         &_maxEdges);
-       if (ret != MEDIA_VISION_ERROR_NONE)
-               throw InvalidOperation("Fail to get maximum number of edges.");
-
        ret = _config->getStringAttribute(MV_OBJECT_DETECTION_3D_MODEL_FILE_PATH, &_modelFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE)
                throw InvalidOperation("Fail to get model file path");
@@ -89,32 +78,9 @@ void Objectron::parseMetaFile()
        ret = _inference->ParseMetadata(_modelMetaFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE)
                throw InvalidOperation("Fail to ParseMetadata");
-
-       if (!_result->x_points) {
-               _result->x_points = new (nothrow) unsigned int[_maxPoints];
-               if (!_result->x_points)
-                       throw InvalidOperation("Fail to allocate x edges");
-       }
-
-       if (!_result->y_points) {
-               _result->y_points = new (nothrow) unsigned int[_maxPoints];
-               if (!_result->y_points) {
-                       delete []_result->x_points;
-                       throw InvalidOperation("Fail to allocate y edges");
-               }
-       }
-
-       if (!_result->edge_indexes) {
-               _result->edge_indexes = new (nothrow) edge_index_s[_maxEdges];
-               if (!_result->edge_indexes) {
-                       delete []_result->y_points;
-                       delete []_result->x_points;
-                       throw InvalidOperation("Fail to allocate edge indexes");
-               }
-       }
 }
 
-object_detection_3d_result_s* Objectron::getResult()
+object_detection_3d_result_s& Objectron::getResult()
 {
        TensorBuffer& tensor_buffer_obj = _inference->GetOutputTensorBuffer();
        IETensorBuffer &ie_tensor_buffer = tensor_buffer_obj.getIETensorBuffer();
@@ -136,17 +102,18 @@ object_detection_3d_result_s* Objectron::getResult()
        if (output_size != 18)
                throw InvalidOperation("Invalid number of points. Number of points should be 18.");
 
-       unsigned int result_idx = 0;
-
        float x_scale = static_cast<float>(_inference->getSourceWidth()) / static_cast<float>(_inference->getInputWidth());
        float y_scale = static_cast<float>(_inference->getSourceHeight()) / static_cast<float>(_inference->getInputHeight());
 
+       _result.x_vec.clear();
+       _result.y_vec.clear();
+
        for (unsigned int idx = 0; idx < output_size; idx += 2) {
-               _result->x_points[result_idx] = static_cast<int>(keypoints[idx] * x_scale);
-               _result->y_points[result_idx++] = static_cast<int>(keypoints[idx + 1] * y_scale);
+               _result.x_vec.push_back(static_cast<int>(keypoints[idx] * x_scale));
+               _result.y_vec.push_back(static_cast<int>(keypoints[idx + 1] * y_scale));
        }
 
-       _result->number_of_points = output_size / 2;
+       _result.number_of_points = output_size / 2;
 
        string& identity_layer = output_layer_names[0];
 
@@ -155,23 +122,20 @@ object_detection_3d_result_s* Objectron::getResult()
                throw InvalidOperation("Fail to get tensor buffer.");
 
        auto *prob = reinterpret_cast<float *>(tensor_buffer->buffer);
-       _result->probability = static_cast<unsigned int>(prob[0] * 100);
 
-       _result->number_of_edges = _maxEdges;
+       _result.probability = static_cast<unsigned int>(prob[0] * 100);
 
-       unsigned int edges[_maxEdges][2] = {
+       std::vector<edge_index_s> defaultEdges {
                {2, 3}, {4, 5}, {6, 7}, {8, 9},
                {2, 4}, {3, 5}, {6, 8}, {7, 9},
                {2, 6}, {3, 7}, {4, 8}, {5, 9}
        };
 
-       for (auto idx = 0; idx < _maxEdges; ++idx) {
-               _result->edge_indexes[idx].start = edges[idx][0];
-               _result->edge_indexes[idx].end = edges[idx][1];
-       }
+       _result.edge_index_vec = defaultEdges;
+       _result.number_of_edges = defaultEdges.size();
 
-       return _result.get();
+       return _result;
 }
 
 }
-}
\ No newline at end of file
+}