From: Inki Dae
Date: Tue, 2 Jun 2020 09:36:54 +0000 (+0900)
Subject: Change postfix of file name to "mlapi"
X-Git-Tag: submit/tizen/20200626.050805~8
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8ee8ca35a1072d9397e5a43ed932795eab33265d;p=platform%2Fcore%2Fmultimedia%2Finference-engine-mlapi.git

Change postfix of file name to "mlapi"

Signed-off-by: Inki Dae
---

diff --git a/CMakeLists.txt b/CMakeLists.txt
index d7aab7a..010a06f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,6 +1,6 @@
 CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
 
-SET(fw_name "inference-engine-nnstreamer")
+SET(fw_name "inference-engine-mlapi")
 
 PROJECT(${fw_name})
 
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
new file mode 100644
index 0000000..4a418ec
--- /dev/null
+++ b/src/inference_engine_mlapi.cpp
@@ -0,0 +1,453 @@
+/**
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include "inference_engine_mlapi_private.h"
+
+#include
+#include
+#include
+#include
+#include
+
+namespace InferenceEngineImpl {
+namespace MLAPIImpl {
+
+InferenceMLAPI::InferenceMLAPI(void) :
+    mPluginType(),
+    mTargetDevice(),
+    mSingle(),
+    mDesignated_inputs(),
+    mDesignated_outputs(),
+    mInputProperty(),
+    mOutputProperty(),
+    mInputTensorBuffer(),
+    mOutputTensorBuffer(),
+    mInputTensorInfo(),
+    mOutputTensorInfo()
+{
+    LOGI("ENTER");
+
+    LOGI("LEAVE");
+}
+
+InferenceMLAPI::~InferenceMLAPI()
+{
+    mDesignated_inputs.clear();
+    std::vector<std::string>().swap(mDesignated_inputs);
+
+    mDesignated_outputs.clear();
+    std::vector<std::string>().swap(mDesignated_outputs);
+}
+
+int InferenceMLAPI::SetPluginType(int type)
+{
+    LOGI("ENTER");
+
+    if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_MLAPI != type) {
+        LOGE("Invalid backend type.");
+        return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+    }
+
+    mPluginType = type;
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::SetTargetDevices(int types)
+{
+    LOGI("ENTER");
+
+    LOGI("Inference targets are, ");
+    if (types & INFERENCE_TARGET_CPU) {
+        mTargetDevice |= INFERENCE_TARGET_CPU;
+        LOGI("CPU");
+    }
+
+    if (types & INFERENCE_TARGET_GPU) {
+        mTargetDevice |= INFERENCE_TARGET_GPU;
+        LOGI("GPU");
+    }
+
+    if (types & INFERENCE_TARGET_CUSTOM) {
+        mTargetDevice |= INFERENCE_TARGET_CUSTOM;
+        LOGI("NPU");
+    }
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
+{
+    LOGI("ENTER");
+
+    // The ML Single API requires a model_paths rule like below:
+    // "so library file path,nb model file path" or vice versa.
+    std::string model_str(model_paths[0] + "," + model_paths[1]);
+
+    LOGI("Model name = %s", model_str.c_str());
+
+    // TODO. Set NNFW backend type and HW type properly.
+
+    ml_nnfw_type_e nnfw_type;
+    ml_nnfw_hw_e nnfw_hw;
+
+    switch (mPluginType) {
+    case INFERENCE_BACKEND_MLAPI:
+        // For now, if the backend type is MLAPI and the target device type is CUSTOM
+        // then we will use the Vivante NPU.
+        // TODO. Other NPUs should be considered later, e.g., SRNPU.
+        if ((mTargetDevice & INFERENCE_TARGET_CUSTOM) == INFERENCE_TARGET_CUSTOM) {
+            nnfw_type = ML_NNFW_TYPE_VIVANTE;
+            nnfw_hw = ML_NNFW_HW_ANY;
+            LOGI("Vivante tensor filter will be used.");
+        } else {
+            LOGE("Invalid target device type.");
+            return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+        }
+        break;
+    case INFERENCE_BACKEND_NNFW:
+        nnfw_type = ML_NNFW_TYPE_NNFW;
+        if (mTargetDevice == INFERENCE_TARGET_CPU) {
+            nnfw_hw = ML_NNFW_HW_CPU_NEON;
+            LOGI("Target device is NEON.");
+        } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
+            nnfw_hw = ML_NNFW_HW_GPU;
+            LOGI("Target device is GPU.");
+        } else {
+            LOGE("Invalid inference target device type.");
+            return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+        }
+        LOGI("NNFW tensor filter will be used.");
+        break;
+    // TODO.
+    default:
+        LOGE("Invalid plugin type.");
+        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+    }
+
+    int ret = ml_single_open(&mSingle, model_str.c_str(), NULL, NULL, nnfw_type, nnfw_hw);
+    if (ret != ML_ERROR_NONE) {
+        LOGE("Failed to request ml_single_open(%d).", ret);
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
+{
+    LOGI("ENTER");
+
+    // TODO. Implement this function properly according to a given ML Single API backend.
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
+{
+    LOGI("ENTER");
+
+    // Output tensor buffers will be allocated by a backend plugin of the ML Single API
+    // of nnstreamer, so add a null tensor buffer object. This buffer will be updated at Run callback.
+
+    // Caution: this tensor buffer will be checked by the upper framework to verify whether
+    // the tensor buffer object is valid or not, so fill dummy data into the tensor buffer.
+
+    // TODO. Consider multiple output tensors.
+
+    inference_engine_tensor_buffer tensor_buf = { 0, };
+    tensor_buf.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
+    tensor_buf.buffer = (void *)1;
+    tensor_buf.size = 1;
+    tensor_buf.owner_is_backend = 1;
+    buffers.push_back(tensor_buf);
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::GetInputLayerProperty(inference_engine_layer_property &property)
+{
+    LOGI("ENTER");
+
+    ml_tensors_info_h in_info = NULL;
+
+    // TODO. Need to check if model file loading is done.
+
+    int ret = ml_single_get_input_info(mSingle, &in_info);
+    if (ret != ML_ERROR_NONE) {
+        LOGE("Failed to request ml_single_get_input_info(%d).", ret);
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    unsigned int cnt;
+    ret = ml_tensors_info_get_count(in_info, &cnt);
+    if (ret != ML_ERROR_NONE) {
+        LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    LOGI("input tensor count = %u", cnt);
+
+    for (unsigned int i = 0; i < cnt; ++i) {
+        ml_tensor_type_e in_type;
+        unsigned int in_dim;
+        char *in_name = NULL;
+        size_t in_size;
+
+        ret = ml_tensors_info_get_tensor_type(in_info, i, &in_type);
+        if (ret != ML_ERROR_NONE) {
+            LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
+            return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+        }
+
+        LOGI("input tensor type = %d", in_type);
+
+        ret = ml_tensors_info_get_tensor_dimension(in_info, i, &in_dim);
+        if (ret != ML_ERROR_NONE) {
+            LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).", ret);
+            return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+        }
+
+        LOGI("input tensor dimension = %u", in_dim);
+
+        ret = ml_tensors_info_get_tensor_name(in_info, i, &in_name);
+        if (ret != ML_ERROR_NONE) {
+            LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).", ret);
+            return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+        }
+
+        LOGI("input tensor name = %s", in_name);
+
+        ret = ml_tensors_info_get_tensor_size(in_info, i, &in_size);
+        if (ret != ML_ERROR_NONE) {
+            LOGE("Failed to request ml_tensors_info_get_tensor_size(%d).", ret);
+            return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+        }
+
+        LOGI("input tensor size = %zu", in_size);
+
+        // TODO. Compare tensor info from the engine with that from a given property.
+    }
+
+    property.layer_names = mInputProperty.layer_names;
+
+    std::vector<inference_engine_tensor_info>::iterator iter;
+    for (iter = mInputProperty.tensor_infos.begin(); iter != mInputProperty.tensor_infos.end(); iter++) {
+        inference_engine_tensor_info tensor_info = *iter;
+        property.tensor_infos.push_back(tensor_info);
+    }
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::GetOutputLayerProperty(inference_engine_layer_property &property)
+{
+    LOGI("ENTER");
+
+    property.layer_names = mOutputProperty.layer_names;
+
+    inference_engine_tensor_info tensor_info;
+
+    // TODO. Set tensor info from a given ML Single API of nnstreamer backend instead of a fixed one.
+
+    tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
+    tensor_info.shape = { 1, 1001 };
+    tensor_info.size = 1001;
+    property.tensor_infos.push_back(tensor_info);
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::SetInputLayerProperty(inference_engine_layer_property &property)
+{
+    LOGI("ENTER");
+
+    std::vector<std::string>::iterator iter;
+    for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
+        std::string name = *iter;
+        LOGI("input layer name = %s", name.c_str());
+    }
+
+    mDesignated_inputs.clear();
+    std::vector<std::string>().swap(mDesignated_inputs);
+
+    // TODO. Request input property information from a given ML Single API of nnstreamer backend,
+    // and set it instead of the user-given one.
+
+    mDesignated_inputs = property.layer_names;
+    mInputProperty = property;
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::SetOutputLayerProperty(inference_engine_layer_property &property)
+{
+    LOGI("ENTER");
+
+    std::vector<std::string>::iterator iter;
+    for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
+        std::string name = *iter;
+        LOGI("output layer name = %s", name.c_str());
+    }
+
+    mDesignated_outputs.clear();
+    std::vector<std::string>().swap(mDesignated_outputs);
+
+    // TODO. Request output property information from a given ML Single API of nnstreamer backend,
+    // and set it instead of the user-given one.
+
+    mDesignated_outputs = property.layer_names;
+    mOutputProperty = property;
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::GetBackendCapacity(inference_engine_capacity *capacity)
+{
+    LOGI("ENTER");
+
+    if (capacity == NULL) {
+        LOGE("Bad pointer.");
+        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+    }
+
+    // TODO. Flag supported accel device types according to a given ML Single API of nnstreamer backend.
+    capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM;
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
+        std::vector<inference_engine_tensor_buffer> &output_buffers)
+{
+    LOGI("ENTER");
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+        std::vector<inference_engine_tensor_buffer> &output_buffers)
+{
+    LOGI("ENTER");
+
+    // Make sure that the tensor buffer count and the binding info count are the same.
+    int err = CheckTensorBuffers(input_buffers, output_buffers);
+    if (err != INFERENCE_ENGINE_ERROR_NONE) {
+        return err;
+    }
+
+    ml_tensors_info_h in_info = NULL;
+
+    err = ml_single_get_input_info(mSingle, &in_info);
+    if (err != ML_ERROR_NONE) {
+        LOGE("Failed to request ml_single_get_input_info(%d).", err);
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    ml_tensors_data_h input_data = NULL;
+    err = ml_tensors_data_create(in_info, &input_data);
+    if (err != ML_ERROR_NONE) {
+        LOGE("Failed to request ml_tensors_data_create(%d).", err);
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    unsigned int cnt;
+    err = ml_tensors_info_get_count(in_info, &cnt);
+    if (err != ML_ERROR_NONE) {
+        LOGE("Failed to request ml_tensors_info_get_count(%d).", err);
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    for (unsigned int i = 0; i < cnt; ++i) {
+        LOGI("index(%u) : buffer = %p, size = %zu\n", i, input_buffers[i].buffer, input_buffers[i].size);
+        err = ml_tensors_data_set_tensor_data(input_data, i, input_buffers[i].buffer, input_buffers[i].size);
+        if (err != ML_ERROR_NONE) {
+            LOGE("Failed to request ml_tensors_data_set_tensor_data(%d).", err);
+            return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+        }
+    }
+
+    ml_tensors_data_h output_data = NULL;
+    err = ml_single_invoke(mSingle, input_data, &output_data);
+    if (err != ML_ERROR_NONE) {
+        LOGE("Failed to request ml_single_invoke(%d).", err);
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    // TODO. Consider multiple output tensors.
+
+    err = ml_tensors_data_get_tensor_data(output_data, 0, (void **)&output_buffers[0].buffer, &output_buffers[0].size);
+    if (err != ML_ERROR_NONE) {
+        LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", err);
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    LOGI("Output tensor size = %zu", output_buffers[0].size);
+
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+extern "C"
+{
+class IInferenceEngineCommon* EngineCommonInit(void)
+{
+    LOGI("ENTER");
+
+    InferenceMLAPI *engine = new InferenceMLAPI();
+
+    LOGI("LEAVE");
+
+    return engine;
+}
+
+void EngineCommonDestroy(class IInferenceEngineCommon *engine)
+{
+    LOGI("ENTER");
+
+    delete engine;
+
+    LOGI("LEAVE");
+}
+}
+} /* MLAPIImpl */
+} /* InferenceEngineImpl */
diff --git a/src/inference_engine_mlapi_private.h b/src/inference_engine_mlapi_private.h
new file mode 100644
index 0000000..d695f43
--- /dev/null
+++ b/src/inference_engine_mlapi_private.h
@@ -0,0 +1,85 @@
+/**
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__
+#define __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__
+
+#include
+#include
+
+#include
+#include
+
+#ifdef LOG_TAG
+#undef LOG_TAG
+#endif
+
+#define LOG_TAG "INFERENCE_ENGINE_MLAPI"
+
+using namespace InferenceEngineInterface::Common;
+
+namespace InferenceEngineImpl {
+namespace MLAPIImpl {
+
+class InferenceMLAPI : public IInferenceEngineCommon {
+public:
+    InferenceMLAPI();
+    ~InferenceMLAPI();
+
+    int SetPluginType(int type) override;
+
+    int SetTargetDevices(int types) override;
+
+    int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) override;
+
+    int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
+
+    int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
+
+    int GetInputLayerProperty(inference_engine_layer_property &property) override;
+
+    int GetOutputLayerProperty(inference_engine_layer_property &property) override;
+
+    int SetInputLayerProperty(inference_engine_layer_property &property) override;
+
+    int SetOutputLayerProperty(inference_engine_layer_property &property) override;
+
+    int GetBackendCapacity(inference_engine_capacity *capacity) override;
+
+    int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+            std::vector<inference_engine_tensor_buffer> &output_buffers) override;
+
+private:
+    int CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
+            std::vector<inference_engine_tensor_buffer> &output_buffers);
+
+    int mPluginType;
+    int mTargetDevice;
+    ml_single_h mSingle;
+    std::vector<std::string> mDesignated_inputs;
+    std::vector<std::string> mDesignated_outputs;
+    inference_engine_layer_property mInputProperty;
+    inference_engine_layer_property mOutputProperty;
+    std::vector<inference_engine_tensor_buffer> mInputTensorBuffer;
+    std::vector<inference_engine_tensor_buffer> mOutputTensorBuffer;
+    std::vector<inference_engine_tensor_info> mInputTensorInfo;
+    std::vector<inference_engine_tensor_info> mOutputTensorInfo;
+};
+
+} /* MLAPIImpl */
+} /* InferenceEngineImpl */
+
+#endif /* __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__ */
diff --git a/src/inference_engine_nnstreamer.cpp b/src/inference_engine_nnstreamer.cpp
deleted file mode 100644
index ddfb784..0000000
--- a/src/inference_engine_nnstreamer.cpp
+++ /dev/null
diff --git a/src/inference_engine_nnstreamer_private.h b/src/inference_engine_nnstreamer_private.h
deleted file mode 100644
index d695f43..0000000
--- a/src/inference_engine_nnstreamer_private.h
+++ /dev/null
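Note: the following is an illustrative sketch, not part of the patch, of how a caller could drive the same ML Single API path that InferenceMLAPI::Load() and Run() use above: build the comma-joined "so path,nb path" string, open the model with ml_single_open(), and invoke it once. The file paths, the dummy input contents, and the choice of the Vivante filter type are assumptions for illustration; error handling is reduced to early returns.

    // mlapi_single_sketch.cpp -- illustrative only; paths below are hypothetical.
    #include <cstdio>
    #include <string>
    #include <vector>
    #include <nnstreamer-single.h>   // ML Single API header; exact include path may differ by packaging

    int run_once(const std::string &so_path, const std::string &nb_path)
    {
        // Same "so library file path,nb model file path" rule that InferenceMLAPI::Load() builds.
        std::string model_str = so_path + "," + nb_path;

        ml_single_h single = nullptr;
        if (ml_single_open(&single, model_str.c_str(), NULL, NULL,
                           ML_NNFW_TYPE_VIVANTE, ML_NNFW_HW_ANY) != ML_ERROR_NONE)
            return -1;

        // Query the input tensor size and feed a zero-filled dummy buffer.
        ml_tensors_info_h in_info = nullptr;
        ml_single_get_input_info(single, &in_info);

        size_t in_size = 0;
        ml_tensors_info_get_tensor_size(in_info, 0, &in_size);
        std::vector<char> input(in_size, 0);

        ml_tensors_data_h in_data = nullptr, out_data = nullptr;
        ml_tensors_data_create(in_info, &in_data);
        ml_tensors_data_set_tensor_data(in_data, 0, input.data(), input.size());

        if (ml_single_invoke(single, in_data, &out_data) == ML_ERROR_NONE) {
            void *out_buf = nullptr;
            size_t out_size = 0;
            ml_tensors_data_get_tensor_data(out_data, 0, &out_buf, &out_size);
            printf("output tensor size = %zu bytes\n", out_size);
            ml_tensors_data_destroy(out_data);
        }

        ml_tensors_data_destroy(in_data);
        ml_tensors_info_destroy(in_info);
        ml_single_close(single);
        return 0;
    }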
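The CMakeLists.txt change above also renames the built backend library to libinference-engine-mlapi.so. Below is a rough sketch of how a client of the inference-engine-common interface might resolve such a backend at run time through the exported EngineCommonInit()/EngineCommonDestroy() entry points; the dlopen-based lookup and the library path are assumptions about the common layer, not something defined in this patch.

    // backend_load_sketch.cpp -- loading mechanism is assumed, only the symbol names come from the patch.
    #include <dlfcn.h>
    #include <cstdio>

    class IInferenceEngineCommon;   // opaque here; defined by inference-engine-interface

    typedef IInferenceEngineCommon *(*init_t)(void);
    typedef void (*deinit_t)(IInferenceEngineCommon *);

    int main()
    {
        // Library name follows SET(fw_name "inference-engine-mlapi") in CMakeLists.txt.
        void *handle = dlopen("libinference-engine-mlapi.so", RTLD_LAZY);
        if (!handle) {
            fprintf(stderr, "dlopen failed: %s\n", dlerror());
            return 1;
        }

        init_t init = reinterpret_cast<init_t>(dlsym(handle, "EngineCommonInit"));
        deinit_t deinit = reinterpret_cast<deinit_t>(dlsym(handle, "EngineCommonDestroy"));
        if (init && deinit) {
            IInferenceEngineCommon *backend = init();   // -> new InferenceMLAPI()
            // ... SetTargetDevices()/Load()/Run() would be called here through the interface ...
            deinit(backend);
        }

        dlclose(handle);
        return 0;
    }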