CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
-SET(fw_name "inference-engine-nnstreamer")
+SET(fw_name "inference-engine-mlapi")
PROJECT(${fw_name})
--- /dev/null
+/**
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inference_engine_error.h>
+#include "inference_engine_mlapi_private.h"
+
+#include <fstream>
+#include <iostream>
+#include <unistd.h>
+#include <time.h>
+#include <queue>
+
+namespace InferenceEngineImpl {
+namespace MLAPIImpl {
+
+InferenceMLAPI::InferenceMLAPI(void) :
+ mPluginType(),
+ mTargetDevice(),
+ mSingle(),
+ mDesignated_inputs(),
+ mDesignated_outputs(),
+ mInputProperty(),
+ mOutputProperty(),
+ mInputTensorBuffer(),
+ mOutputTensorBuffer(),
+ mInputTensorInfo(),
+ mOutputTensorInfo()
+{
+ LOGI("ENTER");
+
+ LOGI("LEAVE");
+}
+
+InferenceMLAPI::~InferenceMLAPI()
+{
+ mDesignated_inputs.clear();
+ std::vector<std::string>().swap(mDesignated_inputs);
+
+ mDesignated_outputs.clear();
+ std::vector<std::string>().swap(mDesignated_outputs);
+}
+
+int InferenceMLAPI::SetPluginType(int type)
+{
+ LOGI("ENTER");
+
+ if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_MLAPI != type) {
+ LOGE("Invalid backend type.");
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ }
+
+ mPluginType = type;
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::SetTargetDevices(int types)
+{
+ LOGI("ENTER");
+
+	LOGI("Inference targets are:");
+ if (types & INFERENCE_TARGET_CPU) {
+ mTargetDevice |= INFERENCE_TARGET_CPU;
+ LOGI("CPU");
+ }
+
+ if (types & INFERENCE_TARGET_GPU) {
+ mTargetDevice |= INFERENCE_TARGET_GPU;
+ LOGI("GPU");
+ }
+
+ if (types & INFERENCE_TARGET_CUSTOM) {
+ mTargetDevice |= INFERENCE_TARGET_CUSTOM;
+ LOGI("NPU");
+ }
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
+{
+ LOGI("ENTER");
+
+	// The ML Single API expects the model path as a single comma-separated string,
+	// e.g. "<so library file path>,<nb model file path>" or vice versa.
+	std::string model_str(model_paths[0] + "," + model_paths[1]);
+
+ LOGI("Model name = %s", model_str.c_str());
+
+ // TODO. Set NNFW backend type and HW type properly.
+
+ ml_nnfw_type_e nnfw_type;
+ ml_nnfw_hw_e nnfw_hw;
+
+ switch (mPluginType) {
+ case INFERENCE_BACKEND_MLAPI:
+	// For now, if the backend type is MLAPI and the target device type is CUSTOM,
+	// then the Vivante NPU is used.
+	// TODO. Other NPUs (e.g., SRNPU) should be considered later.
+ if ((mTargetDevice & INFERENCE_TARGET_CUSTOM) == INFERENCE_TARGET_CUSTOM) {
+ nnfw_type = ML_NNFW_TYPE_VIVANTE;
+ nnfw_hw = ML_NNFW_HW_ANY;
+ LOGI("Vivante tensor filter will be used.");
+ } else {
+ LOGE("Invalid target device type.");
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ }
+ break;
+ case INFERENCE_BACKEND_NNFW:
+ nnfw_type = ML_NNFW_TYPE_NNFW;
+ if (mTargetDevice == INFERENCE_TARGET_CPU) {
+ nnfw_hw = ML_NNFW_HW_CPU_NEON;
+ LOGI("Target device is NEON.");
+ } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
+ nnfw_hw = ML_NNFW_HW_GPU;
+ LOGI("Target device is GPU");
+ } else {
+ LOGE("Invalid inference target device type.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+ LOGI("NNFW tensor filter will be used.");
+ break;
+ // TODO.
+ default:
+ LOGE("Invalid plugin type.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = ml_single_open(&mSingle, model_str.c_str(), NULL, NULL, nnfw_type, nnfw_hw);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_single_open(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
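+
+	// Note: ml_single_open() above is given NULL for the input/output info
+	// arguments, which lets it derive the tensor information from the model itself.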
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
+{
+ LOGI("ENTER");
+
+	// TODO. Implement this function properly for the given ML Single API backend.
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
+{
+ LOGI("ENTER");
+
+	// Output tensor buffers are allocated by the backend plugin of the ML Single API
+	// (nnstreamer), so add a placeholder tensor buffer object here. It will be updated in Run().
+
+	// Caution: the upper framework checks this tensor buffer to verify that the
+	// object is valid, so fill it with dummy data.
+
+ // TODO. Consider multiple output tensors.
+
+ inference_engine_tensor_buffer tensor_buf = { 0, };
+ tensor_buf.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
+ tensor_buf.buffer = (void *)1;
+ tensor_buf.size = 1;
+ tensor_buf.owner_is_backend = 1;
+ buffers.push_back(tensor_buf);
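+
+	// Note: Run() later overwrites this placeholder's buffer and size with the
+	// tensor memory obtained from ml_tensors_data_get_tensor_data().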
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+
+int InferenceMLAPI::GetInputLayerProperty(inference_engine_layer_property &property)
+{
+ LOGI("ENTER");
+
+ ml_tensors_info_h in_info = NULL;
+
+ // TODO. Need to check if model file loading is done.
+
+ int ret = ml_single_get_input_info(mSingle, &in_info);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_single_get_input_info(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ unsigned int cnt;
+ ret = ml_tensors_info_get_count(in_info, &cnt);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("input tensor count = %u", cnt);
+
+ for (unsigned int i = 0; i < cnt; ++i) {
+ ml_tensor_type_e in_type;
+		ml_tensor_dimension in_dim = { 0, };
+ char *in_name = NULL;
+ size_t in_size;
+
+ ret = ml_tensors_info_get_tensor_type(in_info, i, &in_type);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("input tensor type = %d", in_type);
+
+		ret = ml_tensors_info_get_tensor_dimension(in_info, i, in_dim);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("input tensor dimension = %u", in_dim);
+
+ ret = ml_tensors_info_get_tensor_name(in_info, i, &in_name);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("input tensor name = %s", in_name);
+
+ ret = ml_tensors_info_get_tensor_size(in_info, i, &in_size);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_size(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("input tensor size = %u", in_size);
+
+ // TODO. Compare tensor info from engine to one from a given property.
+ }
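+
+	// The TODO inside the loop above would compare in_type, in_dim and in_size
+	// against the corresponding entry of mInputProperty.tensor_infos; it is left
+	// out here until both sides agree on the same size/shape conventions.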
+
+ property.layer_names = mInputProperty.layer_names;
+
+ std::vector<inference_engine_tensor_info>::iterator iter;
+ for (iter = mInputProperty.tensor_infos.begin(); iter != mInputProperty.tensor_infos.end(); iter++) {
+ inference_engine_tensor_info tensor_info = *iter;
+ property.tensor_infos.push_back(tensor_info);
+ }
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::GetOutputLayerProperty(inference_engine_layer_property &property)
+{
+ LOGI("ENTER");
+
+ property.layer_names = mOutputProperty.layer_names;
+
+ inference_engine_tensor_info tensor_info;
+
+	// TODO. Get the tensor info from the given ML Single API (nnstreamer) backend instead of using a fixed one.
+
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
+ tensor_info.shape = { 1, 1001 };
+ tensor_info.size = 1001;
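+	// The fixed 1 x 1001 FLOAT16 shape above corresponds to a typical image
+	// classification output (1000 classes plus background); other models would
+	// need the real info, e.g. via ml_single_get_output_info().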
+ property.tensor_infos.push_back(tensor_info);
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::SetInputLayerProperty(inference_engine_layer_property &property)
+{
+ LOGI("ENTER");
+
+ std::vector<std::string>::iterator iter;
+ for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
+ std::string name = *iter;
+ LOGI("input layer name = %s", name.c_str());
+ }
+
+ mDesignated_inputs.clear();
+ std::vector<std::string>().swap(mDesignated_inputs);
+
+	// TODO. Request the input property information from the given ML Single API
+	// (nnstreamer) backend and use it instead of the user-given one.
+
+ mDesignated_inputs = property.layer_names;
+ mInputProperty = property;
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::SetOutputLayerProperty(inference_engine_layer_property &property)
+{
+ LOGI("ENTER");
+
+ std::vector<std::string>::iterator iter;
+ for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
+ std::string name = *iter;
+ LOGI("output layer name = %s", name.c_str());
+ }
+
+ mDesignated_outputs.clear();
+ std::vector<std::string>().swap(mDesignated_outputs);
+
+	// TODO. Request the output property information from the given ML Single API
+	// (nnstreamer) backend and use it instead of the user-given one.
+
+ mDesignated_outputs = property.layer_names;
+ mOutputProperty = property;
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::GetBackendCapacity(inference_engine_capacity *capacity)
+{
+ LOGI("ENTER");
+
+ if (capacity == NULL) {
+ LOGE("Bad pointer.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+	// TODO. Flag the supported accelerator device types according to the given ML Single API (nnstreamer) backend.
+ capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM;
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers)
+{
+ LOGI("ENTER");
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceMLAPI::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers)
+{
+ LOGI("ENTER");
+
+	// Make sure the tensor buffer count matches the one from the binding info.
+ int err = CheckTensorBuffers(input_buffers, output_buffers);
+ if (err != INFERENCE_ENGINE_ERROR_NONE) {
+ return err;
+ }
+
+ ml_tensors_info_h in_info = NULL;
+
+ err = ml_single_get_input_info(mSingle, &in_info);
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_single_get_input_info(%d).", err);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ ml_tensors_data_h input_data = NULL;
+ err = ml_tensors_data_create(in_info, &input_data);
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_data_create(%d).", err);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ unsigned int cnt;
+ err = ml_tensors_info_get_count(in_info, &cnt);
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_count(%d).", err);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
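+	// Caution: CheckTensorBuffers() is still a stub, so nothing yet guarantees
+	// that input_buffers holds at least cnt entries before the loop below indexes it.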
+ for (unsigned int i = 0; i < cnt; ++i) {
+ LOGI("index(%d) : buffer = %p, size = %u\n", i, input_buffers[i].buffer, input_buffers[i].size);
+ err = ml_tensors_data_set_tensor_data(input_data, i, input_buffers[i].buffer, input_buffers[i].size);
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_data_set_tensor_data(%d).", err);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+ }
+
+ ml_tensors_data_h output_data = NULL;
+ err = ml_single_invoke(mSingle, input_data, &output_data);
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_single_invoke(%d).", err);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+	// TODO. Consider multiple output tensors.
+
+ err = ml_tensors_data_get_tensor_data(output_data, 0, (void **)&output_buffers[0].buffer, &output_buffers[0].size);
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", err);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
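+
+	// The pointer returned above references memory owned by the output_data
+	// handle, which is why the output tensor buffer is flagged with
+	// owner_is_backend in GetOutputTensorBuffers().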
+
+ LOGI("Output tensor = %u", output_buffers[0].size);
+
+ LOGI("LEAVE");
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+extern "C"
+{
+class IInferenceEngineCommon* EngineCommonInit(void)
+{
+ LOGI("ENTER");
+
+ InferenceMLAPI *engine = new InferenceMLAPI();
+
+ LOGI("LEAVE");
+
+ return engine;
+}
+
+void EngineCommonDestroy(class IInferenceEngineCommon *engine)
+{
+ LOGI("ENTER");
+
+ delete engine;
+
+ LOGI("LEAVE");
+}
+}
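+
+// Hypothetical usage sketch (not part of this backend): the inference engine
+// common layer is expected to dlopen() the built plugin and resolve the two
+// C symbols above, roughly like:
+//   void *handle = dlopen("libinference-engine-mlapi.so", RTLD_NOW);
+//   auto init = reinterpret_cast<IInferenceEngineCommon *(*)(void)>(
+//           dlsym(handle, "EngineCommonInit"));
+//   IInferenceEngineCommon *backend = init();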
+} /* MLAPIImpl */
+} /* InferenceEngineImpl */
--- /dev/null
+/**
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INFERENCE_ENGINE_MLAPI_PRIVATE_H__
+#define __INFERENCE_ENGINE_MLAPI_PRIVATE_H__
+
+#include <inference_engine_common.h>
+#include <nnstreamer-single.h>
+
+#include <memory>
+#include <dlog.h>
+
+#ifdef LOG_TAG
+#undef LOG_TAG
+#endif
+
+#define LOG_TAG "INFERENCE_ENGINE_MLAPI"
+
+using namespace InferenceEngineInterface::Common;
+
+namespace InferenceEngineImpl {
+namespace MLAPIImpl {
+
+class InferenceMLAPI : public IInferenceEngineCommon {
+public:
+ InferenceMLAPI();
+ ~InferenceMLAPI();
+
+ int SetPluginType(int type) override;
+
+ int SetTargetDevices(int types) override;
+
+ int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) override;
+
+ int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
+
+ int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
+
+ int GetInputLayerProperty(inference_engine_layer_property &property) override;
+
+ int GetOutputLayerProperty(inference_engine_layer_property &property) override;
+
+ int SetInputLayerProperty(inference_engine_layer_property &property) override;
+
+ int SetOutputLayerProperty(inference_engine_layer_property &property) override;
+
+ int GetBackendCapacity(inference_engine_capacity *capacity) override;
+
+ int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers) override;
+
+private:
+ int CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers);
+
+ int mPluginType;
+ int mTargetDevice;
+ ml_single_h mSingle;
+ std::vector<std::string> mDesignated_inputs;
+ std::vector<std::string> mDesignated_outputs;
+ inference_engine_layer_property mInputProperty;
+ inference_engine_layer_property mOutputProperty;
+ std::vector<inference_engine_tensor_buffer> mInputTensorBuffer;
+ std::vector<inference_engine_tensor_buffer> mOutputTensorBuffer;
+ std::vector<inference_engine_tensor_info> mInputTensorInfo;
+ std::vector<inference_engine_tensor_info> mOutputTensorInfo;
+};
+
+} /* MLAPIImpl */
+} /* InferenceEngineImpl */
+
+#endif /* __INFERENCE_ENGINE_MLAPI_PRIVATE_H__ */
+++ /dev/null
-/**
- * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <inference_engine_error.h>
-#include "inference_engine_nnstreamer_private.h"
-
-#include <fstream>
-#include <iostream>
-#include <unistd.h>
-#include <time.h>
-#include <queue>
-
-namespace InferenceEngineImpl {
-namespace MLAPIImpl {
-
-InferenceMLAPI::InferenceMLAPI(void) :
- mPluginType(),
- mTargetDevice(),
- mSingle(),
- mDesignated_inputs(),
- mDesignated_outputs(),
- mInputProperty(),
- mOutputProperty(),
- mInputTensorBuffer(),
- mOutputTensorBuffer(),
- mInputTensorInfo(),
- mOutputTensorInfo()
-{
- LOGI("ENTER");
-
- LOGI("LEAVE");
-}
-
-InferenceMLAPI::~InferenceMLAPI()
-{
- mDesignated_inputs.clear();
- std::vector<std::string>().swap(mDesignated_inputs);
-
- mDesignated_outputs.clear();
- std::vector<std::string>().swap(mDesignated_outputs);
-}
-
-int InferenceMLAPI::SetPluginType(int type)
-{
- LOGI("ENTER");
-
- if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_MLAPI != type) {
- LOGE("Invalid backend type.");
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
- }
-
- mPluginType = type;
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceMLAPI::SetTargetDevices(int types)
-{
- LOGI("ENTER");
-
-
- LOGI("Inference targets are, ");
- if (types & INFERENCE_TARGET_CPU) {
- mTargetDevice |= INFERENCE_TARGET_CPU;
- LOGI("CPU");
- }
-
- if (types & INFERENCE_TARGET_GPU) {
- mTargetDevice |= INFERENCE_TARGET_GPU;
- LOGI("GPU");
- }
-
- if (types & INFERENCE_TARGET_CUSTOM) {
- mTargetDevice |= INFERENCE_TARGET_CUSTOM;
- LOGI("NPU");
- }
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceMLAPI::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
-{
- LOGI("ENTER");
-
- // ML Single API of MLAPI requires model_paths rule like below,
- // "so library file path,nb model file path" or vise versa.
- std::string model_str(model_paths[0] + "," + model_paths[1]);
-
- LOGI("Model name = %s", model_str.c_str());
-
- // TODO. Set NNFW backend type and HW type properly.
-
- ml_nnfw_type_e nnfw_type;
- ml_nnfw_hw_e nnfw_hw;
-
- switch (mPluginType) {
- case INFERENCE_BACKEND_MLAPI:
- // For now, backend type is MLAPI and target device type is CUSTOM then
- // we will use Vivante NPU.
- // TODO. other NPU should be considered later. I.e., SRNPU.
- if ((mTargetDevice & INFERENCE_TARGET_CUSTOM) == INFERENCE_TARGET_CUSTOM) {
- nnfw_type = ML_NNFW_TYPE_VIVANTE;
- nnfw_hw = ML_NNFW_HW_ANY;
- LOGI("Vivante tensor filter will be used.");
- } else {
- LOGE("Invalid target device type.");
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
- }
- break;
- case INFERENCE_BACKEND_NNFW:
- nnfw_type = ML_NNFW_TYPE_NNFW;
- if (mTargetDevice == INFERENCE_TARGET_CPU) {
- nnfw_hw = ML_NNFW_HW_CPU_NEON;
- LOGI("Target device is NEON.");
- } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
- nnfw_hw = ML_NNFW_HW_GPU;
- LOGI("Target device is GPU");
- } else {
- LOGE("Invalid inference target device type.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
- LOGI("NNFW tensor filter will be used.");
- break;
- // TODO.
- default:
- LOGE("Invalid plugin type.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
-
- int ret = ml_single_open(&mSingle, model_str.c_str(), NULL, NULL, nnfw_type, nnfw_hw);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_single_open(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceMLAPI::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
-{
- LOGI("ENTER");
-
- // TODO. Implement this function according to a given nnstreamer backend properly.
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceMLAPI::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
-{
- LOGI("ENTER");
-
- // Output tensor buffers will be allocated by a backend plugin of nnstreamer
- // So add a null tensor buffer object. This buffer will be updated at Run callback.
-
- // Caution. this tensor buffer will be checked by upper framework to verity if
- // the tensor buffer object is valid or not so fill dummy data to the tensor buffer.
-
- // TODO. Consider multiple output tensors.
-
- inference_engine_tensor_buffer tensor_buf = { 0, };
- tensor_buf.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
- tensor_buf.buffer = (void *)1;
- tensor_buf.size = 1;
- tensor_buf.owner_is_backend = 1;
- buffers.push_back(tensor_buf);
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-
-int InferenceMLAPI::GetInputLayerProperty(inference_engine_layer_property &property)
-{
- LOGI("ENTER");
-
- ml_tensors_info_h in_info = NULL;
-
- // TODO. Need to check if model file loading is done.
-
- int ret = ml_single_get_input_info(mSingle, &in_info);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_single_get_input_info(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- unsigned int cnt;
- ret = ml_tensors_info_get_count(in_info, &cnt);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("input tensor count = %u", cnt);
-
- for (unsigned int i = 0; i < cnt; ++i) {
- ml_tensor_type_e in_type;
- unsigned int in_dim;
- char *in_name = NULL;
- size_t in_size;
-
- ret = ml_tensors_info_get_tensor_type(in_info, i, &in_type);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("input tensor type = %d", in_type);
-
- ret = ml_tensors_info_get_tensor_dimension(in_info, i, &in_dim);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("input tensor dimension = %u", in_dim);
-
- ret = ml_tensors_info_get_tensor_name(in_info, i, &in_name);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("input tensor name = %s", in_name);
-
- ret = ml_tensors_info_get_tensor_size(in_info, i, &in_size);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_tensor_size(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("input tensor size = %u", in_size);
-
- // TODO. Compare tensor info from engine to one from a given property.
- }
-
- property.layer_names = mInputProperty.layer_names;
-
- std::vector<inference_engine_tensor_info>::iterator iter;
- for (iter = mInputProperty.tensor_infos.begin(); iter != mInputProperty.tensor_infos.end(); iter++) {
- inference_engine_tensor_info tensor_info = *iter;
- property.tensor_infos.push_back(tensor_info);
- }
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceMLAPI::GetOutputLayerProperty(inference_engine_layer_property &property)
-{
- LOGI("ENTER");
-
- property.layer_names = mOutputProperty.layer_names;
-
- inference_engine_tensor_info tensor_info;
-
- // TODO. Set tensor info from a given nnstreamer backend instead of fixed one.
-
- tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
- tensor_info.shape = { 1, 1001 };
- tensor_info.size = 1001;
- property.tensor_infos.push_back(tensor_info);
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceMLAPI::SetInputLayerProperty(inference_engine_layer_property &property)
-{
- LOGI("ENTER");
-
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("input layer name = %s", name.c_str());
- }
-
- mDesignated_inputs.clear();
- std::vector<std::string>().swap(mDesignated_inputs);
-
- // TODO. Request input property information to a given nnstreamer backend,
- // and set it instead of user-given one,
-
- mDesignated_inputs = property.layer_names;
- mInputProperty = property;
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceMLAPI::SetOutputLayerProperty(inference_engine_layer_property &property)
-{
- LOGI("ENTER");
-
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("output layer name = %s", name.c_str());
- }
-
- mDesignated_outputs.clear();
- std::vector<std::string>().swap(mDesignated_outputs);
-
- // TODO. Request output property information to a given nnstreamer backend,
- // and set it instead of user-given one,
-
- mDesignated_outputs = property.layer_names;
- mOutputProperty = property;
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceMLAPI::GetBackendCapacity(inference_engine_capacity *capacity)
-{
- LOGI("ENTER");
-
- if (capacity == NULL) {
- LOGE("Bad pointer.");
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
- }
-
- // TODO. flag supported accel device types according to a given nnstreamer backend.
- capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM;
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceMLAPI::CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
-{
- LOGI("ENTER");
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceMLAPI::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
-{
- LOGI("ENTER");
-
- // Make sure to check if tensor buffer count and binding info one are same.
- int err = CheckTensorBuffers(input_buffers, output_buffers);
- if (err != INFERENCE_ENGINE_ERROR_NONE) {
- return err;
- }
-
- ml_tensors_info_h in_info = NULL;
-
- err = ml_single_get_input_info(mSingle, &in_info);
- if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_single_get_input_info(%d).", err);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- ml_tensors_data_h input_data = NULL;
- err = ml_tensors_data_create(in_info, &input_data);
- if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_data_create(%d).", err);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- unsigned int cnt;
- err = ml_tensors_info_get_count(in_info, &cnt);
- if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_count(%d).", err);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- for (unsigned int i = 0; i < cnt; ++i) {
- LOGI("index(%d) : buffer = %p, size = %u\n", i, input_buffers[i].buffer, input_buffers[i].size);
- err = ml_tensors_data_set_tensor_data(input_data, i, input_buffers[i].buffer, input_buffers[i].size);
- if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_data_set_tensor_data(%d).", err);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
- }
-
- ml_tensors_data_h output_data = NULL;
- err = ml_single_invoke(mSingle, input_data, &output_data);
- if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_single_invoke(%d).", err);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- // TODO. Consider mutiple output tensors.
-
- err = ml_tensors_data_get_tensor_data(output_data, 0, (void **)&output_buffers[0].buffer, &output_buffers[0].size);
- if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", err);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("Output tensor = %u", output_buffers[0].size);
-
- LOGI("LEAVE");
-
- return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-extern "C"
-{
-class IInferenceEngineCommon* EngineCommonInit(void)
-{
- LOGI("ENTER");
-
- InferenceMLAPI *engine = new InferenceMLAPI();
-
- LOGI("LEAVE");
-
- return engine;
-}
-
-void EngineCommonDestroy(class IInferenceEngineCommon *engine)
-{
- LOGI("ENTER");
-
- delete engine;
-
- LOGI("LEAVE");
-}
-}
-} /* MLAPIImpl */
-} /* InferenceEngineImpl */
+++ /dev/null
-/**
- * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__
-#define __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__
-
-#include <inference_engine_common.h>
-#include <nnstreamer-single.h>
-
-#include <memory>
-#include <dlog.h>
-
-#ifdef LOG_TAG
-#undef LOG_TAG
-#endif
-
-#define LOG_TAG "INFERENCE_ENGINE_MLAPI"
-
-using namespace InferenceEngineInterface::Common;
-
-namespace InferenceEngineImpl {
-namespace MLAPIImpl {
-
-class InferenceMLAPI : public IInferenceEngineCommon {
-public:
- InferenceMLAPI();
- ~InferenceMLAPI();
-
- int SetPluginType(int type) override;
-
- int SetTargetDevices(int types) override;
-
- int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) override;
-
- int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
-
- int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
-
- int GetInputLayerProperty(inference_engine_layer_property &property) override;
-
- int GetOutputLayerProperty(inference_engine_layer_property &property) override;
-
- int SetInputLayerProperty(inference_engine_layer_property &property) override;
-
- int SetOutputLayerProperty(inference_engine_layer_property &property) override;
-
- int GetBackendCapacity(inference_engine_capacity *capacity) override;
-
- int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers) override;
-
-private:
- int CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers);
-
- int mPluginType;
- int mTargetDevice;
- ml_single_h mSingle;
- std::vector<std::string> mDesignated_inputs;
- std::vector<std::string> mDesignated_outputs;
- inference_engine_layer_property mInputProperty;
- inference_engine_layer_property mOutputProperty;
- std::vector<inference_engine_tensor_buffer> mInputTensorBuffer;
- std::vector<inference_engine_tensor_buffer> mOutputTensorBuffer;
- std::vector<inference_engine_tensor_info> mInputTensorInfo;
- std::vector<inference_engine_tensor_info> mOutputTensorInfo;
-};
-
-} /* InferenceEngineImpl */
-} /* MLAPIImpl */
-
-#endif /* __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__ */