/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__
#define __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__

#include <inference_engine_common.h>
#include <nnstreamer-single.h>

#define LOG_TAG "INFERENCE_ENGINE_MLAPI"

using namespace InferenceEngineInterface::Common;
34 namespace InferenceEngineImpl
38 class InferenceMLAPI : public IInferenceEngineCommon
44 int SetPrivateData(void *data) override;
46 int SetTargetDevices(int types) override;
48 int SetCLTuner(const inference_engine_cltuner *cltuner) final;
50 int Load(std::vector<std::string> model_paths,
51 inference_model_format_e model_format) override;
53 int GetInputTensorBuffers(
54 std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
56 int GetOutputTensorBuffers(
57 std::map<std::string, inference_engine_tensor_buffer> &buffers) override;
59 int GetInputLayerProperty(
60 inference_engine_layer_property &property) override;
62 int GetOutputLayerProperty(
63 inference_engine_layer_property &property) override;
65 int SetInputLayerProperty(
66 inference_engine_layer_property &property) override;
68 int SetOutputLayerProperty(
69 inference_engine_layer_property &property) override;
71 int GetBackendCapacity(inference_engine_capacity *capacity) override;
73 int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
74 std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
78 int CheckTensorBuffers(
79 std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
80 std::map<std::string, inference_engine_tensor_buffer> &output_buffers);
81 int ConvertTensorType(int tensor_type);
82 int UpdateTensorsInfo();
87 ml_tensors_info_h mInputInfoHandle;
88 ml_tensors_info_h mOutputInfoHandle;
89 ml_tensors_data_h mInputDataHandle;
90 ml_tensors_data_h mOutputDataHandle;
91 std::map<std::string, int> mDesignated_inputs;
92 std::map<std::string, int> mDesignated_outputs;
93 inference_engine_layer_property mInputProperty;
94 inference_engine_layer_property mOutputProperty;
97 } /* InferenceEngineImpl */
#endif /* __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__ */