/**
 * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __INFERENCE_ENGINE_IMPL_OPENCV_H__
#define __INFERENCE_ENGINE_IMPL_OPENCV_H__

#include <string>
#include <vector>

#include <inference_engine_common.h>

#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
/**
 * @file inference_engine_opencv_private.h
 * @brief This file contains the InferenceOpenCV class, which
 *        provides OpenCV based inference functionality.
 */
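/*
 * A minimal usage sketch of this backend, driven directly rather than
 * through the plugin loader (illustrative only). The model paths are
 * hypothetical, and the INFERENCE_TARGET_CPU / INFERENCE_MODEL_CAFFE
 * enum values are assumed to come from the common interface headers.
 *
 *   InferenceEngineImpl::InferenceOpenCV engine;
 *   engine.SetTargetDevices(INFERENCE_TARGET_CPU);
 *
 *   // Load a model; weight and config paths are hypothetical examples.
 *   std::vector<std::string> paths = { "model.caffemodel", "model.prototxt" };
 *   engine.Load(paths, INFERENCE_MODEL_CAFFE);
 *
 *   // Allocate tensor buffers, fill the inputs, then run inference.
 *   std::vector<inference_engine_tensor_buffer> inputs, outputs;
 *   engine.GetInputTensorBuffers(inputs);
 *   engine.GetOutputTensorBuffers(outputs);
 *   // ... fill the input buffers with preprocessed image data ...
 *   engine.Run(inputs, outputs);
 */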
#ifdef LOG_TAG
#undef LOG_TAG
#endif

#define LOG_TAG "INFERENCE_ENGINE_OPENCV"
using namespace InferenceEngineInterface::Common;
namespace InferenceEngineImpl
{
	class InferenceOpenCV : public IInferenceEngineCommon
	{
	public:
		InferenceOpenCV();
		~InferenceOpenCV();
		int SetPrivateData(void *data) override;

		int SetTargetDevices(int types) override;

		int SetCLTuner(const inference_engine_cltuner *cltuner) final;

		int Load(std::vector<std::string> model_paths,
				 inference_model_format_e model_format) override;

		int GetInputTensorBuffers(
				std::vector<inference_engine_tensor_buffer> &buffers) override;

		int GetOutputTensorBuffers(
				std::vector<inference_engine_tensor_buffer> &buffers) override;

		int GetInputLayerProperty(
				inference_engine_layer_property &property) override;

		int GetOutputLayerProperty(
				inference_engine_layer_property &property) override;

		int SetInputLayerProperty(
				inference_engine_layer_property &property) override;

		int SetOutputLayerProperty(
				inference_engine_layer_property &property) override;

		int GetBackendCapacity(inference_engine_capacity *capacity) override;
		int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
				std::vector<inference_engine_tensor_buffer> &output_buffers)
				override;
	private:
		std::vector<cv::Mat> mInputData; /**< Input tensors converted to cv::Mat */

		std::vector<inference_engine_tensor_info> mInputTensorInfo; /**< Tensor information of the input layers */
		std::vector<inference_engine_tensor_info> mOutputTensorInfo; /**< Tensor information of the output layers */
		std::vector<cv::Mat> mOutputBlobs; /**< Output blobs produced by a forward pass */
		cv::dnn::Net mNet; /**< Network associated with a network model */

		std::vector<std::string> mInputLayers; /**< Names of the input layers */
		std::vector<std::string> mOutputLayers; /**< Names of the output layers */

		std::string mConfigFile; /**< Path to the model configuration file */
		std::string mWeightFile; /**< Path to the model weight file */
	};
} /* InferenceEngineImpl */

#endif /* __INFERENCE_ENGINE_IMPL_OPENCV_H__ */