Change members of inference_engine_layer_property structure
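
Replace the parallel layer_names/tensor_infos members with a single map keyed
by layer name, and change mDesignated_inputs/mDesignated_outputs from
std::vector<std::string> to std::map<std::string, int> so each designated
layer name maps to its tensor index in the ML Single API info handles.

A minimal sketch of the layout this patch assumes (only `layers`, `data_type`
and `size` appear in this diff; the remaining tensor info members are omitted):

    struct inference_engine_layer_property {
            // layer name -> tensor info, replacing the old
            // layer_names + tensor_infos vector pair
            std::map<std::string, inference_engine_tensor_info> layers;
    };

    // backend side: layer name -> tensor index passed to
    // ml_tensors_data_get_tensor_data() / ml_tensors_info_get_tensor_type()
    std::map<std::string, int> mDesignated_inputs;
    std::map<std::string, int> mDesignated_outputs;
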
[platform/core/multimedia/inference-engine-mlapi.git] / src / inference_engine_mlapi.cpp
index c11a337..213bc8d 100644 (file)
@@ -55,10 +55,10 @@ namespace MLAPIImpl
        InferenceMLAPI::~InferenceMLAPI()
        {
                mDesignated_inputs.clear();
-               std::vector<std::string>().swap(mDesignated_inputs);
+               std::map<std::string, int>().swap(mDesignated_inputs);
 
                mDesignated_outputs.clear();
-               std::vector<std::string>().swap(mDesignated_outputs);
+               std::map<std::string, int>().swap(mDesignated_outputs);
 
                ml_single_close(mSingle);
 
@@ -94,7 +94,7 @@ namespace MLAPIImpl
                }
 
                mPluginType = type;
-
+               LOGI("backend type = %d", type);
                LOGI("LEAVE");
 
                return INFERENCE_ENGINE_ERROR_NONE;
@@ -232,7 +232,7 @@ namespace MLAPIImpl
        }
 
        int InferenceMLAPI::GetInputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
                LOGI("ENTER");
 
@@ -244,16 +244,7 @@ namespace MLAPIImpl
 
                buffers.clear();
 
-               int ret;
-               unsigned int cnt;
-
-               ret = ml_tensors_info_get_count(mInputInfoHandle, &cnt);
-               if (ret != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
-                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-               }
-
-               LOGI("input tensor count = %u", cnt);
+               int ret = INFERENCE_ENGINE_ERROR_NONE;
 
                // TODO. Below is test code, should we allocate new buffer for every inference?
                if (mInputDataHandle == NULL) {
@@ -265,11 +256,11 @@ namespace MLAPIImpl
                }
 
                // TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()
-               for (unsigned int i = 0; i < cnt; ++i) {
+               for (auto& input : mDesignated_inputs) {
                        inference_engine_tensor_buffer in_buffer;
                        ml_tensor_type_e in_type;
 
-                       ret = ml_tensors_data_get_tensor_data(mInputDataHandle, i, &in_buffer.buffer, &in_buffer.size);
+                       ret = ml_tensors_data_get_tensor_data(mInputDataHandle, input.second, &in_buffer.buffer, &in_buffer.size);
                        if (ret != ML_ERROR_NONE) {
                                LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -277,7 +268,7 @@ namespace MLAPIImpl
 
                        LOGE("buffer = %p, size = %zu\n", in_buffer.buffer, in_buffer.size);
 
-                       ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, i, &in_type);
+                       ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, input.second, &in_type);
                        if (ret != ML_ERROR_NONE) {
                                LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -293,7 +284,7 @@ namespace MLAPIImpl
                        in_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
                        in_buffer.owner_is_backend = 1;
 
-                       buffers.push_back(in_buffer);
+                       buffers.insert(std::make_pair(input.first, in_buffer));
                }
 
                LOGI("LEAVE");
@@ -302,7 +293,7 @@ namespace MLAPIImpl
        }
 
        int InferenceMLAPI::GetOutputTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
                LOGI("ENTER");
 
@@ -314,16 +305,7 @@ namespace MLAPIImpl
 
                buffers.clear();
 
-               int ret;
-               unsigned int cnt;
-
-               ret = ml_tensors_info_get_count(mOutputInfoHandle, &cnt);
-               if (ret != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
-                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-               }
-
-               LOGI("output tensor count = %u", cnt);
+               int ret = INFERENCE_ENGINE_ERROR_NONE;
 
                // TODO. Below is test code, should we allocate new buffer for every inference?
                if (mOutputDataHandle == NULL) {
@@ -335,11 +317,11 @@ namespace MLAPIImpl
                }
 
                // TODO. Cache tensor info and reduce function call in UpdateTensorsInfo()
-               for (unsigned int i = 0; i < cnt; ++i) {
+               for (auto& output : mDesignated_outputs) {
                        inference_engine_tensor_buffer out_buffer;
                        ml_tensor_type_e out_type;
 
-                       ret = ml_tensors_data_get_tensor_data(mOutputDataHandle, i, &out_buffer.buffer, &out_buffer.size);
+                       ret = ml_tensors_data_get_tensor_data(mOutputDataHandle, output.second, &out_buffer.buffer, &out_buffer.size);
                        if (ret != ML_ERROR_NONE) {
                                LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -347,7 +329,7 @@ namespace MLAPIImpl
 
                        LOGE("buffer = %p, size = %zu\n", out_buffer.buffer, out_buffer.size);
 
-                       ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, i, &out_type);
+                       ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
                        if (ret != ML_ERROR_NONE) {
                                LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -363,7 +345,7 @@ namespace MLAPIImpl
                        out_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
                        out_buffer.owner_is_backend = 1;
 
-                       buffers.push_back(out_buffer);
+                       buffers.insert(std::make_pair(output.first, out_buffer));
                }
 
                LOGI("LEAVE");
@@ -377,25 +359,15 @@ namespace MLAPIImpl
                LOGI("ENTER");
 
                // TODO. Need to check if model file loading is done.
-               int ret;
-               unsigned int cnt;
+               int ret = INFERENCE_ENGINE_ERROR_NONE;
 
-               ret = ml_tensors_info_get_count(mInputInfoHandle, &cnt);
-               if (ret != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
-                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-               }
-
-               LOGI("input tensor count = %u", cnt);
-
-               for (unsigned int i = 0; i < cnt; ++i) {
+               for (auto& input : mDesignated_inputs) {
                        inference_engine_tensor_info tensor_info;
                        ml_tensor_type_e in_type;
                        ml_tensor_dimension in_dim;
-                       char *in_name = NULL;
                        size_t in_size = 1;
 
-                       ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, i, &in_type);
+                       ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, input.second, &in_type);
                        if (ret != ML_ERROR_NONE) {
                                LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
                                         ret);
@@ -409,7 +381,7 @@ namespace MLAPIImpl
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                        }
 
-                       ret = ml_tensors_info_get_tensor_dimension(mInputInfoHandle, i, in_dim);
+                       ret = ml_tensors_info_get_tensor_dimension(mInputInfoHandle, input.second, in_dim);
                        if (ret != ML_ERROR_NONE) {
                                LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).",
                                         ret);
@@ -425,25 +397,16 @@ namespace MLAPIImpl
 
                        LOGI("input tensor size = %zu", in_size);
 
-                       ret = ml_tensors_info_get_tensor_name(mInputInfoHandle, i, &in_name);
-                       if (ret != ML_ERROR_NONE) {
-                               LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
-                                        ret);
-                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-                       }
-
-                       LOGI("input tensor name = %s", in_name);
+                       LOGI("input tensor name = %s", input.first.c_str());
 
                        tensor_info.data_type = static_cast<inference_tensor_data_type_e>(type);
                        tensor_info.size = in_size;
 
-                       property.tensor_infos.push_back(tensor_info);
+                       property.layers.insert(std::make_pair(input.first, tensor_info));
 
                        // TODO. Compare tensor info from engine to one from a given property.
                }
 
-               property.layer_names = mInputProperty.layer_names;
-
                LOGI("LEAVE");
 
                return INFERENCE_ENGINE_ERROR_NONE;
@@ -455,25 +418,15 @@ namespace MLAPIImpl
                LOGI("ENTER");
 
                // TODO. Need to check if model file loading is done.
-               int ret;
-               unsigned int cnt;
+               int ret = INFERENCE_ENGINE_ERROR_NONE;
 
-               ret = ml_tensors_info_get_count(mOutputInfoHandle, &cnt);
-               if (ret != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
-                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-               }
-
-               LOGI("output tensor count = %u", cnt);
-
-               for (unsigned int i = 0; i < cnt; ++i) {
+               for (auto& output : mDesignated_outputs) {
                        inference_engine_tensor_info tensor_info;
                        ml_tensor_type_e out_type;
                        unsigned int out_dim[ML_TENSOR_RANK_LIMIT];
-                       char *out_name = NULL;
                        size_t out_size = 1;
 
-                       ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, i, &out_type);
+                       ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
                        if (ret != ML_ERROR_NONE) {
                                LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
                                         ret);
@@ -487,7 +440,7 @@ namespace MLAPIImpl
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                        }
 
-                       ret = ml_tensors_info_get_tensor_dimension(mOutputInfoHandle, i, out_dim);
+                       ret = ml_tensors_info_get_tensor_dimension(mOutputInfoHandle, output.second, out_dim);
                        if (ret != ML_ERROR_NONE) {
                                LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).",
                                         ret);
@@ -518,25 +471,16 @@ namespace MLAPIImpl
 
                        LOGI("output tensor size = %zu", out_size);
 
-                       ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, i, &out_name);
-                       if (ret != ML_ERROR_NONE) {
-                               LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
-                                        ret);
-                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-                       }
-
-                       LOGI("output tensor name = %s", out_name);
+                       LOGI("output tensor name = %s", output.first.c_str());
 
                        tensor_info.data_type = static_cast<inference_tensor_data_type_e>(type);
                        tensor_info.size = out_size;
 
-                       property.tensor_infos.push_back(tensor_info);
+                       property.layers.insert(std::make_pair(output.first, tensor_info));
 
                        // TODO. Compare tensor info from engine to one from a given property.
                }
 
-               property.layer_names = mOutputProperty.layer_names;
-
                LOGI("LEAVE");
 
                return INFERENCE_ENGINE_ERROR_NONE;
@@ -547,20 +491,17 @@ namespace MLAPIImpl
        {
                LOGI("ENTER");
 
-               std::vector<std::string>::iterator iter;
-               for (iter = property.layer_names.begin();
-                        iter != property.layer_names.end(); iter++) {
-                       std::string name = *iter;
-                       LOGI("input layer name = %s", name.c_str());
+               for (auto& layer : property.layers) {
+                       LOGI("input layer name = %s", layer.first.c_str());
                }
 
                mDesignated_inputs.clear();
-               std::vector<std::string>().swap(mDesignated_inputs);
+               std::map<std::string, int>().swap(mDesignated_inputs);
 
                // TODO. Request input property information to a given ML Single API of nnstreamer backend,
                // and set it instead of user-given one,
                // Call UpdateTensorsInfo() after requesting input info.
-               mDesignated_inputs = property.layer_names;
+
                mInputProperty = property;
 
                LOGI("LEAVE");
@@ -573,20 +514,17 @@ namespace MLAPIImpl
        {
                LOGI("ENTER");
 
-               std::vector<std::string>::iterator iter;
-               for (iter = property.layer_names.begin();
-                        iter != property.layer_names.end(); iter++) {
-                       std::string name = *iter;
-                       LOGI("output layer name = %s", name.c_str());
+               for (auto& layer : property.layers) {
+                       LOGI("output layer name = %s", layer.first.c_str());
                }
 
                mDesignated_outputs.clear();
-               std::vector<std::string>().swap(mDesignated_outputs);
+               std::map<std::string, int>().swap(mDesignated_outputs);
 
                // TODO. Request output property information to a given ML Single API of nnstreamer backend,
                // and set it instead of user-given one,
                // Call UpdateTensorsInfo() after requesting output info.
-               mDesignated_outputs = property.layer_names;
+
                mOutputProperty = property;
 
                LOGI("LEAVE");
@@ -617,8 +555,8 @@ namespace MLAPIImpl
        }
 
        int InferenceMLAPI::CheckTensorBuffers(
-                       std::vector<inference_engine_tensor_buffer> &input_buffers,
-                       std::vector<inference_engine_tensor_buffer> &output_buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                       std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
        {
                LOGI("ENTER");
 
@@ -677,19 +615,75 @@ namespace MLAPIImpl
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
+               unsigned int cnt = 0;
+               ret = ml_tensors_info_get_count(mInputInfoHandle, &cnt);
+               if (ret != ML_ERROR_NONE || cnt == 0) {
+                       LOGE("Failed to request ml_tensors_info_get_count(%d) with cnt %u.", ret, cnt);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               LOGI("input tensor count = %u", cnt);
+               mDesignated_inputs.clear();
+               std::map<std::string, int>().swap(mDesignated_inputs);
+               for (unsigned int index = 0; index < cnt; ++index) {
+                       char *in_name = NULL;
+                       ret = ml_tensors_info_get_tensor_name(mInputInfoHandle, index, &in_name);
+                       if (ret != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+                                        ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       if (in_name == NULL)
+                               continue;
+
+                       LOGI("index:%u with name %s", index, in_name);
+                       mDesignated_inputs.insert(std::make_pair(std::string(in_name), index));
+                       free(in_name);
+               }
+
                ret = ml_single_get_output_info(mSingle, &mOutputInfoHandle);
                if (ret != ML_ERROR_NONE) {
                        LOGE("Failed to request ml_single_get_output_info(%d).", ret);
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
+               cnt = 0;
+               ret = ml_tensors_info_get_count(mOutputInfoHandle, &cnt);
+               if (ret != ML_ERROR_NONE || cnt == 0) {
+                       LOGE("Failed to request ml_tensors_info_get_count(%d) with cnt %u.", ret, cnt);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               LOGI("output tensor count = %u", cnt);
+               mDesignated_outputs.clear();
+               std::map<std::string, int>().swap(mDesignated_outputs);
+               for (unsigned int index = 0; index < cnt; ++index) {
+                       char *out_name = NULL;
+                       ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, index, &out_name);
+                       if (ret != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+                                        ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       if (out_name == NULL)
+                               continue;
+
+                       LOGI("index:%u with name %s", index, out_name);
+                       mDesignated_outputs.insert(std::make_pair(std::string(out_name), index));
+                       free(out_name);
+               }
+
                LOGI("LEAVE");
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
        int InferenceMLAPI::Run(
-                       std::vector<inference_engine_tensor_buffer> &input_buffers,
-                       std::vector<inference_engine_tensor_buffer> &output_buffers)
+                       std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+                       std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
        {
                LOGI("ENTER");