Fix coding style based on Tizen SE C++ Coding Rule 60/235360/1
author    Inki Dae <inki.dae@samsung.com>
Thu, 4 Jun 2020 05:47:00 +0000 (14:47 +0900)
committer Inki Dae <inki.dae@samsung.com>
Thu, 4 Jun 2020 05:47:00 +0000 (14:47 +0900)
Tizen SE C++ Coding Rule:
https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=160925159

Change-Id: I1ae54a3676dc9cc0e06d4322eb612ceb07d7626c
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_mlapi.cpp
src/inference_engine_mlapi_private.h

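Judging from the diff below, the change is purely mechanical: namespace opening braces move to their own line, everything inside the namespaces gains one level of tab indentation, constructor initializer lists wrap with a leading ':' / ',' on each continuation line, and long statements wrap near 80 columns. A minimal sketch of the resulting layout (placeholder class and member names, not code from this tree):

    #include <string>
    #include <vector>

    namespace InferenceEngineImpl
    {
    namespace MLAPIImpl
    {
    	// Declarations inside the namespaces are indented by one tab.
    	class StyleExample
    	{
    	public:
    		// Initializer lists put ':' and each ',' at the start of the
    		// continuation line, as the constructor in the diff does.
    		StyleExample()
    				: mInputs()
    				, mOutputs()
    		{
    		}

    	private:
    		std::vector<std::string> mInputs;
    		std::vector<std::string> mOutputs;
    	};
    } /* MLAPIImpl */
    } /* InferenceEngineImpl */
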
src/inference_engine_mlapi.cpp
index d115a52..bde2e92 100644
 #include <time.h>
 #include <queue>
 
-namespace InferenceEngineImpl {
-namespace MLAPIImpl {
-
-InferenceMLAPI::InferenceMLAPI(void) :
-       mPluginType(),
-       mTargetDevice(),
-       mSingle(),
-       mDesignated_inputs(),
-       mDesignated_outputs(),
-       mInputProperty(),
-       mOutputProperty(),
-       mInputTensorBuffer(),
-       mOutputTensorBuffer(),
-       mInputTensorInfo(),
-       mOutputTensorInfo()
+namespace InferenceEngineImpl
 {
-    LOGI("ENTER");
-
-    LOGI("LEAVE");
-}
-
-InferenceMLAPI::~InferenceMLAPI()
-{
-    mDesignated_inputs.clear();
-    std::vector<std::string>().swap(mDesignated_inputs);
-
-    mDesignated_outputs.clear();
-    std::vector<std::string>().swap(mDesignated_outputs);
-}
-
-int InferenceMLAPI::SetPrivateData(void *data)
+namespace MLAPIImpl
 {
-    LOGI("ENTER");
-
-       inference_backend_type_e type = *(static_cast<inference_backend_type_e *>(data));
-
-       if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_MLAPI != type) {
-               LOGE("Invalid backend type.");
-               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+       InferenceMLAPI::InferenceMLAPI(void)
+                       : mPluginType()
+                       , mTargetDevice()
+                       , mSingle()
+                       , mDesignated_inputs()
+                       , mDesignated_outputs()
+                       , mInputProperty()
+                       , mOutputProperty()
+                       , mInputTensorBuffer()
+                       , mOutputTensorBuffer()
+                       , mInputTensorInfo()
+                       , mOutputTensorInfo()
+       {
+               LOGI("ENTER");
+
+               LOGI("LEAVE");
        }
 
-       mPluginType = type;
+       InferenceMLAPI::~InferenceMLAPI()
+       {
+               mDesignated_inputs.clear();
+               std::vector<std::string>().swap(mDesignated_inputs);
 
-    LOGI("LEAVE");
+               mDesignated_outputs.clear();
+               std::vector<std::string>().swap(mDesignated_outputs);
+       }
 
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+       int InferenceMLAPI::SetPrivateData(void *data)
+       {
+               LOGI("ENTER");
 
-int InferenceMLAPI::SetTargetDevices(int types)
-{
-    LOGI("ENTER");
+               inference_backend_type_e type =
+                               *(static_cast<inference_backend_type_e *>(data));
 
+               if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_MLAPI != type) {
+                       LOGE("Invalid backend type.");
+                       return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+               }
 
-    LOGI("Inference targets are, ");
-    if (types & INFERENCE_TARGET_CPU) {
-               mTargetDevice |= INFERENCE_TARGET_CPU;
-        LOGI("CPU");
-    }
+               mPluginType = type;
 
-    if (types & INFERENCE_TARGET_GPU) {
-               mTargetDevice |= INFERENCE_TARGET_GPU;
-        LOGI("GPU");
-    }
+               LOGI("LEAVE");
 
-       if (types & INFERENCE_TARGET_CUSTOM) {
-               mTargetDevice |= INFERENCE_TARGET_CUSTOM;
-               LOGI("NPU");
+               return INFERENCE_ENGINE_ERROR_NONE;
        }
 
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+       int InferenceMLAPI::SetTargetDevices(int types)
+       {
+               LOGI("ENTER");
 
-int InferenceMLAPI::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
-{
-    LOGI("ENTER");
-
-       // ML Single API of MLAPI requires model_paths rule like below,
-       // "so library file path,nb model file path" or vice versa.
-       std::string model_str(model_paths[0] + "," + model_paths[1]);
-
-       LOGI("Model name = %s", model_str.c_str());
-
-       // TODO. Set NNFW backend type and HW type properly.
-
-       ml_nnfw_type_e nnfw_type;
-       ml_nnfw_hw_e nnfw_hw;
-
-       switch (mPluginType) {
-       case INFERENCE_BACKEND_MLAPI:
-               // For now, backend type is MLAPI and target device type is CUSTOM then
-               // we will use Vivante NPU.
-               // TODO. other NPU should be considered later. I.e., SRNPU.
-               if ((mTargetDevice & INFERENCE_TARGET_CUSTOM) == INFERENCE_TARGET_CUSTOM) {
-                       nnfw_type = ML_NNFW_TYPE_VIVANTE;
-                       nnfw_hw = ML_NNFW_HW_ANY;
-                       LOGI("Vivante tensor filter will be used.");
-               } else {
-                       LOGE("Invalid target device type.");
-                       return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
-               }
-               break;
-       case INFERENCE_BACKEND_NNFW:
-               nnfw_type = ML_NNFW_TYPE_NNFW;
-               if (mTargetDevice == INFERENCE_TARGET_CPU) {
-                       nnfw_hw = ML_NNFW_HW_CPU_NEON;
-                       LOGI("Target device is NEON.");
-               } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
-                       nnfw_hw = ML_NNFW_HW_GPU;
-                       LOGI("Target device is GPU");
-               } else {
-                       LOGE("Invalid inference target device type.");
-                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               LOGI("Inference targets are, ");
+               if (types & INFERENCE_TARGET_CPU) {
+                       mTargetDevice |= INFERENCE_TARGET_CPU;
+                       LOGI("CPU");
                }
-               LOGI("NNFW tensor filter will be used.");
-               break;
-       // TODO.
-       default:
-               LOGE("Invalid plugin type.");
-               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-       }
 
-       int ret = ml_single_open(&mSingle, model_str.c_str(), NULL, NULL, nnfw_type, nnfw_hw);
-       if (ret != ML_ERROR_NONE) {
-               LOGE("Failed to request ml_single_open(%d).", ret);
-               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-       }
+               if (types & INFERENCE_TARGET_GPU) {
+                       mTargetDevice |= INFERENCE_TARGET_GPU;
+                       LOGI("GPU");
+               }
 
-    LOGI("LEAVE");
+               if (types & INFERENCE_TARGET_CUSTOM) {
+                       mTargetDevice |= INFERENCE_TARGET_CUSTOM;
+                       LOGI("NPU");
+               }
 
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+               LOGI("LEAVE");
 
-int InferenceMLAPI::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
-{
-    LOGI("ENTER");
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
 
-       // TODO. Implement this function according to a given ML Single API backend properly.
+       int InferenceMLAPI::Load(std::vector<std::string> model_paths,
+                                                        inference_model_format_e model_format)
+       {
+               LOGI("ENTER");
+
+               // ML Single API of MLAPI requires model_paths rule like below,
+               // "so library file path,nb model file path" or vice versa.
+               std::string model_str(model_paths[0] + "," + model_paths[1]);
+
+               LOGI("Model name = %s", model_str.c_str());
+
+               // TODO. Set NNFW backend type and HW type properly.
+
+               ml_nnfw_type_e nnfw_type;
+               ml_nnfw_hw_e nnfw_hw;
+
+               switch (mPluginType) {
+               case INFERENCE_BACKEND_MLAPI:
+                       // For now, backend type is MLAPI and target device type is CUSTOM then
+                       // we will use Vivante NPU.
+                       // TODO. other NPU should be considered later. I.e., SRNPU.
+                       if ((mTargetDevice & INFERENCE_TARGET_CUSTOM) ==
+                               INFERENCE_TARGET_CUSTOM) {
+                               nnfw_type = ML_NNFW_TYPE_VIVANTE;
+                               nnfw_hw = ML_NNFW_HW_ANY;
+                               LOGI("Vivante tensor filter will be used.");
+                       } else {
+                               LOGE("Invalid target device type.");
+                               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+                       }
+                       break;
+               case INFERENCE_BACKEND_NNFW:
+                       nnfw_type = ML_NNFW_TYPE_NNFW;
+                       if (mTargetDevice == INFERENCE_TARGET_CPU) {
+                               nnfw_hw = ML_NNFW_HW_CPU_NEON;
+                               LOGI("Target device is NEON.");
+                       } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
+                               nnfw_hw = ML_NNFW_HW_GPU;
+                               LOGI("Target device is GPU");
+                       } else {
+                               LOGE("Invalid inference target device type.");
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       }
+                       LOGI("NNFW tensor filter will be used.");
+                       break;
+               // TODO.
+               default:
+                       LOGE("Invalid plugin type.");
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
 
-    LOGI("LEAVE");
+               int ret = ml_single_open(&mSingle, model_str.c_str(), NULL, NULL,
+                                                                nnfw_type, nnfw_hw);
+               if (ret != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_single_open(%d).", ret);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
 
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+               LOGI("LEAVE");
 
-int InferenceMLAPI::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
-{
-    LOGI("ENTER");
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
 
-       // Output tensor buffers will be allocated by a backend plugin of ML Single API of nnstreamer
-       // So add a null tensor buffer object. This buffer will be updated at Run callback.
+       int InferenceMLAPI::GetInputTensorBuffers(
+                       std::vector<inference_engine_tensor_buffer> &buffers)
+       {
+               LOGI("ENTER");
 
-       // Caution. This tensor buffer will be checked by upper framework to verify if
-       //                      the tensor buffer object is valid or not, so fill dummy data to the tensor buffer.
+               // TODO. Implement this function according to a given ML Single API backend properly.
 
-       // TODO. Consider multiple output tensors.
+               LOGI("LEAVE");
 
-       inference_engine_tensor_buffer tensor_buf = { 0, };
-       tensor_buf.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
-       tensor_buf.buffer = (void *)1;
-       tensor_buf.size = 1;
-       tensor_buf.owner_is_backend = 1;
-       buffers.push_back(tensor_buf);
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
 
-    LOGI("LEAVE");
+       int InferenceMLAPI::GetOutputTensorBuffers(
+                       std::vector<inference_engine_tensor_buffer> &buffers)
+       {
+               LOGI("ENTER");
 
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+               // Output tensor buffers will be allocated by a backend plugin of ML Single API of nnstreamer
+               // So add a null tensor buffer object. This buffer will be updated at Run callback.
 
+               // Caution. This tensor buffer will be checked by upper framework to verify if
+               //                      the tensor buffer object is valid or not, so fill dummy data to the tensor buffer.
 
-int InferenceMLAPI::GetInputLayerProperty(inference_engine_layer_property &property)
-{
-    LOGI("ENTER");
+               // TODO. Consider multiple output tensors.
 
-       ml_tensors_info_h in_info = NULL;
+               inference_engine_tensor_buffer tensor_buf = {
+                       0,
+               };
+               tensor_buf.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
+               tensor_buf.buffer = (void *) 1;
+               tensor_buf.size = 1;
+               tensor_buf.owner_is_backend = 1;
+               buffers.push_back(tensor_buf);
 
-    // TODO. Need to check if model file loading is done.
+               LOGI("LEAVE");
 
-       int ret = ml_single_get_input_info(mSingle, &in_info);
-       if (ret != ML_ERROR_NONE) {
-               LOGE("Failed to request ml_single_get_input_info(%d).", ret);
-               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               return INFERENCE_ENGINE_ERROR_NONE;
        }
 
-       unsigned int cnt;
-       ret = ml_tensors_info_get_count(in_info, &cnt);
-       if (ret != ML_ERROR_NONE) {
-               LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
-               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-       }
+       int InferenceMLAPI::GetInputLayerProperty(
+                       inference_engine_layer_property &property)
+       {
+               LOGI("ENTER");
 
-       LOGI("input tensor count = %u", cnt);
+               ml_tensors_info_h in_info = NULL;
 
-       for (unsigned int i = 0; i < cnt; ++i) {
-               ml_tensor_type_e in_type;
-               unsigned int in_dim;
-               char *in_name = NULL;
-               size_t in_size;
+               // TODO. Need to check if model file loading is done.
 
-               ret = ml_tensors_info_get_tensor_type(in_info, i, &in_type);
+               int ret = ml_single_get_input_info(mSingle, &in_info);
                if (ret != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
+                       LOGE("Failed to request ml_single_get_input_info(%d).", ret);
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
-               LOGI("input tensor type = %d", in_type);
-
-               ret = ml_tensors_info_get_tensor_dimension(in_info, i, &in_dim);
+               unsigned int cnt;
+               ret = ml_tensors_info_get_count(in_info, &cnt);
                if (ret != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).", ret);
+                       LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
-               LOGI("input tensor dimension = %u", in_dim);
+               LOGI("input tensor count = %u", cnt);
 
-               ret = ml_tensors_info_get_tensor_name(in_info, i, &in_name);
-               if (ret != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).", ret);
-                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-               }
+               for (unsigned int i = 0; i < cnt; ++i) {
+                       ml_tensor_type_e in_type;
+                       unsigned int in_dim;
+                       char *in_name = NULL;
+                       size_t in_size;
 
-               LOGI("input tensor name = %s", in_name);
+                       ret = ml_tensors_info_get_tensor_type(in_info, i, &in_type);
+                       if (ret != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
+                                        ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
 
-               ret = ml_tensors_info_get_tensor_size(in_info, i, &in_size);
-               if (ret != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_info_get_tensor_size(%d).", ret);
-                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-               }
+                       LOGI("input tensor type = %d", in_type);
 
-               LOGI("input tensor size = %u", in_size);
+                       ret = ml_tensors_info_get_tensor_dimension(in_info, i, &in_dim);
+                       if (ret != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).",
+                                        ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
 
-               // TODO. Compare tensor info from engine to one from a given property.
-       }
+                       LOGI("input tensor dimension = %u", in_dim);
 
-       property.layer_names = mInputProperty.layer_names;
+                       ret = ml_tensors_info_get_tensor_name(in_info, i, &in_name);
+                       if (ret != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+                                        ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
 
-       std::vector<inference_engine_tensor_info>::iterator iter;
-       for (iter = mInputProperty.tensor_infos.begin(); iter != mInputProperty.tensor_infos.end(); iter++) {
-               inference_engine_tensor_info tensor_info = *iter;
-               property.tensor_infos.push_back(tensor_info);
-       }
+                       LOGI("input tensor name = %s", in_name);
 
-    LOGI("LEAVE");
+                       ret = ml_tensors_info_get_tensor_size(in_info, i, &in_size);
+                       if (ret != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_info_get_tensor_size(%d).",
+                                        ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
 
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+                       LOGI("input tensor size = %u", in_size);
 
-int InferenceMLAPI::GetOutputLayerProperty(inference_engine_layer_property &property)
-{
-    LOGI("ENTER");
+                       // TODO. Compare tensor info from engine to one from a given property.
+               }
 
-       property.layer_names = mOutputProperty.layer_names;
+               property.layer_names = mInputProperty.layer_names;
 
-       inference_engine_tensor_info tensor_info;
+               std::vector<inference_engine_tensor_info>::iterator iter;
+               for (iter = mInputProperty.tensor_infos.begin();
+                        iter != mInputProperty.tensor_infos.end(); iter++) {
+                       inference_engine_tensor_info tensor_info = *iter;
+                       property.tensor_infos.push_back(tensor_info);
+               }
 
-       // TODO. Set tensor info from a given ML Single API of nnstreamer backend instead of fixed one.
+               LOGI("LEAVE");
 
-       tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
-       tensor_info.shape = { 1, 1001 };
-       tensor_info.size = 1001;
-       property.tensor_infos.push_back(tensor_info);
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
 
-    LOGI("LEAVE");
+       int InferenceMLAPI::GetOutputLayerProperty(
+                       inference_engine_layer_property &property)
+       {
+               LOGI("ENTER");
 
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+               property.layer_names = mOutputProperty.layer_names;
 
-int InferenceMLAPI::SetInputLayerProperty(inference_engine_layer_property &property)
-{
-    LOGI("ENTER");
+               inference_engine_tensor_info tensor_info;
 
-    std::vector<std::string>::iterator iter;
-    for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
-        std::string name = *iter;
-        LOGI("input layer name = %s", name.c_str());
-    }
+               // TODO. Set tensor info from a given ML Single API of nnstreamer backend instead of fixed one.
 
-    mDesignated_inputs.clear();
-    std::vector<std::string>().swap(mDesignated_inputs);
+               tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
+               tensor_info.shape = { 1, 1001 };
+               tensor_info.size = 1001;
+               property.tensor_infos.push_back(tensor_info);
 
-       // TODO. Request input property information to a given ML Single API of nnstreamer backend,
-       // and set it instead of user-given one,
+               LOGI("LEAVE");
 
-    mDesignated_inputs = property.layer_names;
-       mInputProperty = property;
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
 
-    LOGI("LEAVE");
+       int InferenceMLAPI::SetInputLayerProperty(
+                       inference_engine_layer_property &property)
+       {
+               LOGI("ENTER");
 
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+               std::vector<std::string>::iterator iter;
+               for (iter = property.layer_names.begin();
+                        iter != property.layer_names.end(); iter++) {
+                       std::string name = *iter;
+                       LOGI("input layer name = %s", name.c_str());
+               }
 
-int InferenceMLAPI::SetOutputLayerProperty(inference_engine_layer_property &property)
-{
-    LOGI("ENTER");
+               mDesignated_inputs.clear();
+               std::vector<std::string>().swap(mDesignated_inputs);
 
-    std::vector<std::string>::iterator iter;
-    for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
-        std::string name = *iter;
-        LOGI("output layer name = %s", name.c_str());
-    }
+               // TODO. Request input property information to a given ML Single API of nnstreamer backend,
+               // and set it instead of user-given one,
 
-    mDesignated_outputs.clear();
-    std::vector<std::string>().swap(mDesignated_outputs);
+               mDesignated_inputs = property.layer_names;
+               mInputProperty = property;
 
-       // TODO. Request output property information to a given ML Single API of nnstreamer backend,
-       // and set it instead of user-given one,
+               LOGI("LEAVE");
 
-    mDesignated_outputs = property.layer_names;
-       mOutputProperty = property;
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
 
-    LOGI("LEAVE");
+       int InferenceMLAPI::SetOutputLayerProperty(
+                       inference_engine_layer_property &property)
+       {
+               LOGI("ENTER");
 
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+               std::vector<std::string>::iterator iter;
+               for (iter = property.layer_names.begin();
+                        iter != property.layer_names.end(); iter++) {
+                       std::string name = *iter;
+                       LOGI("output layer name = %s", name.c_str());
+               }
 
-int InferenceMLAPI::GetBackendCapacity(inference_engine_capacity *capacity)
-{
-    LOGI("ENTER");
+               mDesignated_outputs.clear();
+               std::vector<std::string>().swap(mDesignated_outputs);
 
-    if (capacity == NULL) {
-        LOGE("Bad pointer.");
-        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
-    }
+               // TODO. Request output property information to a given ML Single API of nnstreamer backend,
+               // and set it instead of user-given one,
 
-       // TODO. flag supported accel device types according to a given ML Single API of nnstreamer backend.
-    capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM;
+               mDesignated_outputs = property.layer_names;
+               mOutputProperty = property;
 
-    LOGI("LEAVE");
+               LOGI("LEAVE");
 
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
 
-int InferenceMLAPI::CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                                                                                       std::vector<inference_engine_tensor_buffer> &output_buffers)
-{
-    LOGI("ENTER");
+       int InferenceMLAPI::GetBackendCapacity(inference_engine_capacity *capacity)
+       {
+               LOGI("ENTER");
 
-    LOGI("LEAVE");
+               if (capacity == NULL) {
+                       LOGE("Bad pointer.");
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
 
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+               // TODO. flag supported accel device types according to a given ML Single API of nnstreamer backend.
+               capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM;
 
-int InferenceMLAPI::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                        std::vector<inference_engine_tensor_buffer> &output_buffers)
-{
-    LOGI("ENTER");
+               LOGI("LEAVE");
 
-    // Make sure to check if tensor buffer count and binding info one are same.
-    int err = CheckTensorBuffers(input_buffers, output_buffers);
-    if (err != INFERENCE_ENGINE_ERROR_NONE) {
-        return err;
-    }
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
 
-       ml_tensors_info_h in_info = NULL;
+       int InferenceMLAPI::CheckTensorBuffers(
+                       std::vector<inference_engine_tensor_buffer> &input_buffers,
+                       std::vector<inference_engine_tensor_buffer> &output_buffers)
+       {
+               LOGI("ENTER");
 
-       err = ml_single_get_input_info(mSingle, &in_info);
-       if (err != ML_ERROR_NONE) {
-               LOGE("Failed to request ml_single_get_input_info(%d).", err);
-               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-       }
+               LOGI("LEAVE");
 
-       ml_tensors_data_h input_data = NULL;
-       err = ml_tensors_data_create(in_info, &input_data);
-       if (err != ML_ERROR_NONE) {
-               LOGE("Failed to request ml_tensors_data_create(%d).", err);
-               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               return INFERENCE_ENGINE_ERROR_NONE;
        }
 
-       unsigned int cnt;
-       err = ml_tensors_info_get_count(in_info, &cnt);
-       if (err != ML_ERROR_NONE) {
-               LOGE("Failed to request ml_tensors_info_get_count(%d).", err);
-               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-       }
+       int InferenceMLAPI::Run(
+                       std::vector<inference_engine_tensor_buffer> &input_buffers,
+                       std::vector<inference_engine_tensor_buffer> &output_buffers)
+       {
+               LOGI("ENTER");
+
+               // Make sure to check if tensor buffer count and binding info one are same.
+               int err = CheckTensorBuffers(input_buffers, output_buffers);
+               if (err != INFERENCE_ENGINE_ERROR_NONE) {
+                       return err;
+               }
 
-       for (unsigned int i = 0; i < cnt; ++i) {
-               LOGI("index(%d) : buffer = %p, size = %u\n", i, input_buffers[i].buffer, input_buffers[i].size);
-               err = ml_tensors_data_set_tensor_data(input_data, i, input_buffers[i].buffer, input_buffers[i].size);
+               ml_tensors_info_h in_info = NULL;
+
+               err = ml_single_get_input_info(mSingle, &in_info);
                if (err != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_data_set_tensor_data(%d).", err);
+                       LOGE("Failed to request ml_single_get_input_info(%d).", err);
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
-       }
 
-       ml_tensors_data_h output_data = NULL;
-       err = ml_single_invoke(mSingle, input_data, &output_data);
-       if (err != ML_ERROR_NONE) {
-               LOGE("Failed to request ml_single_invoke(%d).", err);
-               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-       }
+               ml_tensors_data_h input_data = NULL;
+               err = ml_tensors_data_create(in_info, &input_data);
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_tensors_data_create(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
 
-       // TODO. Consider multiple output tensors.
+               unsigned int cnt;
+               err = ml_tensors_info_get_count(in_info, &cnt);
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_tensors_info_get_count(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
 
-       err = ml_tensors_data_get_tensor_data(output_data, 0, (void **)&output_buffers[0].buffer, &output_buffers[0].size);
-       if (err != ML_ERROR_NONE) {
-               LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", err);
-               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-       }
+               for (unsigned int i = 0; i < cnt; ++i) {
+                       LOGI("index(%d) : buffer = %p, size = %u\n", i,
+                                input_buffers[i].buffer, input_buffers[i].size);
+                       err = ml_tensors_data_set_tensor_data(input_data, i,
+                                                                                                 input_buffers[i].buffer,
+                                                                                                 input_buffers[i].size);
+                       if (err != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_data_set_tensor_data(%d).",
+                                        err);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+               }
+
+               ml_tensors_data_h output_data = NULL;
+               err = ml_single_invoke(mSingle, input_data, &output_data);
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_single_invoke(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               // TODO. Consider multiple output tensors.
 
-       LOGI("Output tensor = %u", output_buffers[0].size);
+               err = ml_tensors_data_get_tensor_data(
+                               output_data, 0, (void **) &output_buffers[0].buffer,
+                               &output_buffers[0].size);
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
 
-    LOGI("LEAVE");
+               LOGI("Output tensor = %u", output_buffers[0].size);
 
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
+               LOGI("LEAVE");
 
-extern "C"
-{
-class IInferenceEngineCommon* EngineCommonInit(void)
-{
-    LOGI("ENTER");
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
 
-    InferenceMLAPI *engine = new InferenceMLAPI();
+       extern "C"
+       {
+               class IInferenceEngineCommon *EngineCommonInit(void)
+               {
+                       LOGI("ENTER");
 
-    LOGI("LEAVE");
+                       InferenceMLAPI *engine = new InferenceMLAPI();
 
-    return engine;
-}
+                       LOGI("LEAVE");
 
-void EngineCommonDestroy(class IInferenceEngineCommon *engine)
-{
-    LOGI("ENTER");
+                       return engine;
+               }
+
+               void EngineCommonDestroy(class IInferenceEngineCommon *engine)
+               {
+                       LOGI("ENTER");
 
-    delete engine;
+                       delete engine;
 
-    LOGI("LEAVE");
-}
-}
+                       LOGI("LEAVE");
+               }
+       }
 } /* MLAPIImpl */
 } /* InferenceEngineImpl */
src/inference_engine_mlapi_private.h
index a79ff91..91b1ab2 100644
 
 using namespace InferenceEngineInterface::Common;
 
-namespace InferenceEngineImpl {
-namespace MLAPIImpl {
-
-class InferenceMLAPI : public IInferenceEngineCommon {
-public:
-    InferenceMLAPI();
-    ~InferenceMLAPI();
-
-    int SetPrivateData(void *data) override;
-
-    int SetTargetDevices(int types) override;
-
-    int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) override;
-
-    int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
-
-    int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
-
-    int GetInputLayerProperty(inference_engine_layer_property &property) override;
-
-    int GetOutputLayerProperty(inference_engine_layer_property &property) override;
-
-    int SetInputLayerProperty(inference_engine_layer_property &property) override;
-
-    int SetOutputLayerProperty(inference_engine_layer_property &property) override;
-
-    int GetBackendCapacity(inference_engine_capacity *capacity) override;
-
-    int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
-            std::vector<inference_engine_tensor_buffer> &output_buffers) override;
-
-private:
-       int CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
-                                                       std::vector<inference_engine_tensor_buffer> &output_buffers);
-
-       int mPluginType;
-       int mTargetDevice;
-       ml_single_h mSingle;
-    std::vector<std::string> mDesignated_inputs;
-    std::vector<std::string> mDesignated_outputs;
-       inference_engine_layer_property mInputProperty;
-       inference_engine_layer_property mOutputProperty;
-       std::vector<inference_engine_tensor_buffer> mInputTensorBuffer;
-       std::vector<inference_engine_tensor_buffer> mOutputTensorBuffer;
-       std::vector<inference_engine_tensor_info> mInputTensorInfo;
-       std::vector<inference_engine_tensor_info> mOutputTensorInfo;
-};
+namespace InferenceEngineImpl
+{
+namespace MLAPIImpl
+{
+       class InferenceMLAPI : public IInferenceEngineCommon
+       {
+       public:
+               InferenceMLAPI();
+               ~InferenceMLAPI();
+
+               int SetPrivateData(void *data) override;
+
+               int SetTargetDevices(int types) override;
+
+               int Load(std::vector<std::string> model_paths,
+                                inference_model_format_e model_format) override;
+
+               int GetInputTensorBuffers(
+                               std::vector<inference_engine_tensor_buffer> &buffers) override;
+
+               int GetOutputTensorBuffers(
+                               std::vector<inference_engine_tensor_buffer> &buffers) override;
+
+               int GetInputLayerProperty(
+                               inference_engine_layer_property &property) override;
+
+               int GetOutputLayerProperty(
+                               inference_engine_layer_property &property) override;
+
+               int SetInputLayerProperty(
+                               inference_engine_layer_property &property) override;
+
+               int SetOutputLayerProperty(
+                               inference_engine_layer_property &property) override;
+
+               int GetBackendCapacity(inference_engine_capacity *capacity) override;
+
+               int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+                               std::vector<inference_engine_tensor_buffer> &output_buffers)
+                               override;
+
+       private:
+               int CheckTensorBuffers(
+                               std::vector<inference_engine_tensor_buffer> &input_buffers,
+                               std::vector<inference_engine_tensor_buffer> &output_buffers);
+
+               int mPluginType;
+               int mTargetDevice;
+               ml_single_h mSingle;
+               std::vector<std::string> mDesignated_inputs;
+               std::vector<std::string> mDesignated_outputs;
+               inference_engine_layer_property mInputProperty;
+               inference_engine_layer_property mOutputProperty;
+               std::vector<inference_engine_tensor_buffer> mInputTensorBuffer;
+               std::vector<inference_engine_tensor_buffer> mOutputTensorBuffer;
+               std::vector<inference_engine_tensor_info> mInputTensorInfo;
+               std::vector<inference_engine_tensor_info> mOutputTensorInfo;
+       };
 
 } /* MLAPIImpl */
 } /* InferenceEngineImpl */