/**
 * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__
#define __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__

#include <inference_engine_common.h>
#include <nnstreamer-single.h>
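
/*
 * <nnstreamer-single.h> brings in the single-shot ML API used by this backend:
 * the ml_single_h handle and, transitively, the ml_tensors_info_h /
 * ml_tensors_data_h types referenced in the declarations below.
 */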
#define LOG_TAG "INFERENCE_ENGINE_MLAPI"

using namespace InferenceEngineInterface::Common;
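
/*
 * InferenceMLAPI adapts the common inference-engine interface
 * (IInferenceEngineCommon) to the single-shot ML API. A minimal usage sketch,
 * assuming for illustration that the backend is instantiated directly and that
 * INFERENCE_TARGET_CPU / INFERENCE_MODEL_TFLITE come from the common
 * inference-engine type definitions:
 *
 *   InferenceEngineImpl::InferenceMLAPI engine;
 *   engine.SetTargetDevices(INFERENCE_TARGET_CPU);
 *   engine.Load({ "model.tflite" }, INFERENCE_MODEL_TFLITE);
 *
 *   std::map<std::string, inference_engine_tensor_buffer> inputs, outputs;
 *   engine.GetInputTensorBuffers(inputs);
 *   engine.GetOutputTensorBuffers(outputs);
 *   engine.Run(inputs, outputs);
 */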
namespace InferenceEngineImpl
{
	class InferenceMLAPI : public IInferenceEngineCommon
	{
	public:
		int SetPrivateData(void *data) override;

		int SetTargetDevices(int types) override;

		int SetCLTuner(const inference_engine_cltuner *cltuner) final;

		int Load(std::vector<std::string> model_paths,
				 inference_model_format_e model_format) override;

		int GetInputTensorBuffers(
				std::map<std::string, inference_engine_tensor_buffer> &buffers) override;

		int GetOutputTensorBuffers(
				std::map<std::string, inference_engine_tensor_buffer> &buffers) override;

		int GetInputLayerProperty(
				inference_engine_layer_property &property) override;

		int GetOutputLayerProperty(
				inference_engine_layer_property &property) override;

		int SetInputLayerProperty(
				inference_engine_layer_property &property) override;

		int SetOutputLayerProperty(
				inference_engine_layer_property &property) override;

		int GetBackendCapacity(inference_engine_capacity *capacity) override;

		int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
				std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
				override;
	private:
		int CheckTensorBuffers(
				std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
				std::map<std::string, inference_engine_tensor_buffer> &output_buffers);
		int SetTensorInfo(ml_tensors_info_h& tensor_info,
						  inference_engine_layer_property& layer_property);
		int ConvertTensorTypeToInternal(int tensor_type);
		int ConvertTensorTypeToMLAPI(int tensor_type);
		int UpdateTensorsInfo();
		int CreateMLAPITensorInfo(ml_tensors_info_h& tensor_info,
								  inference_engine_layer_property& layer_property);

		bool IsFileReadable(const std::string& path);
		std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
		std::string GetModelPath(const std::vector<std::string>& model_paths);
		std::string GetCustomProp();
		std::string GetFileCustomProp(std::string& path);
		int GetTensorInfo(std::map<std::string, int>& designated_layers,
						  std::map<std::string, inference_engine_tensor_buffer> &buffers,
						  ml_tensors_data_h& dataHandle, ml_tensors_info_h& infoHandle);
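
		/* Per-model state: ML API tensor info/data handles for the input and
		 * output side, plus the designated input/output layers and their
		 * properties (how these are populated and released is up to the .cpp
		 * implementation). */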
		ml_tensors_info_h mInputInfoHandle;
		ml_tensors_info_h mOutputInfoHandle;
		ml_tensors_data_h mInputDataHandle;
		ml_tensors_data_h mOutputDataHandle;
		std::map<std::string, int> mDesignated_inputs;
		std::map<std::string, int> mDesignated_outputs;
		inference_engine_layer_property mInputProperty;
		inference_engine_layer_property mOutputProperty;
	};
} /* InferenceEngineImpl */
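
/*
 * For reference, a minimal sketch of the nnstreamer single-shot calls that the
 * Load()/Run() paths above presumably wrap (illustrative only; the actual
 * implementation, error handling and handle ownership live in the .cpp file):
 *
 *   ml_single_h single = nullptr;
 *   ml_tensors_info_h in_info = nullptr;
 *   ml_tensors_data_h in_data = nullptr, out_data = nullptr;
 *
 *   // Open the model with the nnfw type/hw pair picked by GetNNFWInfo().
 *   ml_single_open(&single, "model.tflite", nullptr, nullptr,
 *                  ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
 *
 *   // Query the tensor layout, allocate a data handle, then invoke.
 *   ml_single_get_input_info(single, &in_info);
 *   ml_tensors_data_create(in_info, &in_data);
 *   ml_single_invoke(single, in_data, &out_data);
 *
 *   ml_tensors_data_destroy(out_data);
 *   ml_tensors_data_destroy(in_data);
 *   ml_tensors_info_destroy(in_info);
 *   ml_single_close(single);
 */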

#endif /* __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__ */