Add GetFileCustomProp
platform/core/multimedia/inference-engine-mlapi: src/inference_engine_mlapi_private.h
/**
 * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__
#define __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__

#include <inference_engine_common.h>
#include <nnstreamer-single.h>

#include <memory>
#include <dlog.h>
#include <tuple>

#ifdef LOG_TAG
#undef LOG_TAG
#endif

#define LOG_TAG "INFERENCE_ENGINE_MLAPI"

using namespace InferenceEngineInterface::Common;

namespace InferenceEngineImpl
{
namespace MLAPIImpl
{
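	// Inference backend that implements the IInferenceEngineCommon
	// interface on top of the NNStreamer single-shot (ML) API.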
	class InferenceMLAPI : public IInferenceEngineCommon
	{
	public:
		InferenceMLAPI();
		~InferenceMLAPI();

		int SetPrivateData(void *data) override;

		int SetTargetDevices(int types) override;

		int SetCLTuner(const inference_engine_cltuner *cltuner) final;

		int Load(std::vector<std::string> model_paths,
				 inference_model_format_e model_format) override;

		int GetInputTensorBuffers(
				std::map<std::string, inference_engine_tensor_buffer> &buffers) override;

		int GetOutputTensorBuffers(
				std::map<std::string, inference_engine_tensor_buffer> &buffers) override;

		int GetInputLayerProperty(
				inference_engine_layer_property &property) override;

		int GetOutputLayerProperty(
				inference_engine_layer_property &property) override;

		int SetInputLayerProperty(
				inference_engine_layer_property &property) override;

		int SetOutputLayerProperty(
				inference_engine_layer_property &property) override;

		int GetBackendCapacity(inference_engine_capacity *capacity) override;

		int Run(std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
				std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
				override;

	private:
		int CheckTensorBuffers(
				std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
				std::map<std::string, inference_engine_tensor_buffer> &output_buffers);
		int SetTensorInfo(ml_tensors_info_h &tensor_info,
						  inference_engine_layer_property &layer_property);
		int ConvertTensorTypeToInternal(int tensor_type);
		int ConvertTensorTypeToMLAPI(int tensor_type);
		int UpdateTensorsInfo();
		int CreateMLAPITensorInfo(ml_tensors_info_h &tensor_info,
								  inference_engine_layer_property &layer_property);

		bool IsFileReadable(const std::string &path);
		std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
		std::string GetModelPath(const std::vector<std::string> &model_paths);
		std::string GetCustomProp();
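		// Added by this change: builds the backend custom property string
		// from the file at the given path (one plausible shape is sketched
		// after this header).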
		std::string GetFileCustomProp(std::string &path);
		int GetTensorInfo(std::map<std::string, int> &designated_layers,
						  std::map<std::string, inference_engine_tensor_buffer> &buffers,
						  ml_tensors_data_h &dataHandle, ml_tensors_info_h &infoHandle);

		int mPluginType;
		int mTargetDevice;

		// NNStreamer single-shot API handles for the loaded model and its
		// input/output tensor descriptions and data.
		ml_single_h mSingle;
		ml_tensors_info_h mInputInfoHandle;
		ml_tensors_info_h mOutputInfoHandle;
		ml_tensors_data_h mInputDataHandle;
		ml_tensors_data_h mOutputDataHandle;

		// Input/output layers designated by the caller, and the cached
		// layer properties.
		std::map<std::string, int> mDesignated_inputs;
		std::map<std::string, int> mDesignated_outputs;
		inference_engine_layer_property mInputProperty;
		inference_engine_layer_property mOutputProperty;
	};

} /* MLAPIImpl */
} /* InferenceEngineImpl */

#endif /* __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__ */
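For illustration, one plausible shape for the new GetFileCustomProp helper is sketched below, assuming it simply reads the property string out of the given file. This is a hedged sketch under that assumption, not the implementation that accompanies this header in the .cpp file; LOGE comes from dlog.h via the LOG_TAG defined above.

#include <fstream>
#include <sstream>

// Sketch only: return the file contents as the custom property string,
// or an empty string when the file cannot be opened.
std::string InferenceMLAPI::GetFileCustomProp(std::string &path)
{
	std::ifstream file(path);

	if (!file.is_open()) {
		LOGE("Failed to open a custom property file. (%s)", path.c_str());
		return "";
	}

	std::stringstream buffer;

	buffer << file.rdbuf();

	return buffer.str();
}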
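For context, here is a minimal driver sketch showing how the class is exercised through the IInferenceEngineCommon interface it implements. RunOnce is a hypothetical helper, and the INFERENCE_TARGET_CPU, INFERENCE_MODEL_TFLITE, and INFERENCE_ENGINE_ERROR_NONE constants are assumed to come from the inference-engine-interface headers pulled in by inference_engine_common.h.

#include "inference_engine_mlapi_private.h"

using InferenceEngineImpl::MLAPIImpl::InferenceMLAPI;

int RunOnce(const std::string &model_path)
{
	InferenceMLAPI engine;

	// Pick a target device, then load the model file.
	int ret = engine.SetTargetDevices(INFERENCE_TARGET_CPU);
	if (ret != INFERENCE_ENGINE_ERROR_NONE)
		return ret;

	ret = engine.Load({ model_path }, INFERENCE_MODEL_TFLITE);
	if (ret != INFERENCE_ENGINE_ERROR_NONE)
		return ret;

	// Ask the backend for its input/output tensor buffers.
	std::map<std::string, inference_engine_tensor_buffer> inputs, outputs;
	ret = engine.GetInputTensorBuffers(inputs);
	if (ret != INFERENCE_ENGINE_ERROR_NONE)
		return ret;

	ret = engine.GetOutputTensorBuffers(outputs);
	if (ret != INFERENCE_ENGINE_ERROR_NONE)
		return ret;

	// ... fill the input buffers with preprocessed data here ...

	// Run a single inference; results land in the output buffers.
	return engine.Run(inputs, outputs);
}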