1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
/**
 * @brief A header file for Main Inference Engine API
 */
11 #include <ie_icnn_network.hpp>
12 #include <ie_iextension.h>
14 #include "details/ie_no_copy.hpp"
15 #include "ie_error.hpp"
16 #include "ie_version.hpp"
17 #include "ie_iexecutable_network.hpp"
// INFERENCE_PLUGIN_API(type) marks plugin entry points (e.g. CreatePluginEngine) as
// exported "C" symbols. When the plugin itself is being built
// (IMPLEMENT_INFERENCE_ENGINE_PLUGIN is defined) the symbol is additionally made
// visible/exported by the platform-specific mechanism; consumers see a plain
// extern "C" declaration.
#if defined(_WIN32)
#ifdef IMPLEMENT_INFERENCE_ENGINE_PLUGIN
#define INFERENCE_PLUGIN_API(type) extern "C" __declspec(dllexport) type
#else
#define INFERENCE_PLUGIN_API(type) extern "C" type
#endif
#elif defined(__GNUC__) && (__GNUC__ >= 4)
// GCC/Clang: with -fvisibility=hidden builds, the default-visibility attribute
// re-exposes only the plugin entry points.
#ifdef IMPLEMENT_INFERENCE_ENGINE_PLUGIN
#define INFERENCE_PLUGIN_API(type) extern "C" __attribute__((visibility("default"))) type
#else
#define INFERENCE_PLUGIN_API(type) extern "C" type
#endif
#else
// Fallback for other toolchains: no explicit export decoration.
#define INFERENCE_PLUGIN_API(TYPE) extern "C" TYPE
#endif
41 namespace InferenceEngine {
44 * @brief Responce structure encapsulating information about supported layer
46 struct QueryNetworkResult {
47 std::set<std::string> supportedLayers;
53 * @brief This class is a main plugin interface
55 class IInferencePlugin : public details::IRelease {
58 * @brief Returns plugin version information
59 * @param versionInfo Pointer to version info. Is set by plugin
61 virtual void GetVersion(const Version *&versionInfo) noexcept = 0;
64 * @brief Sets logging callback
65 * Logging is used to track what is going on inside
66 * @param listener Logging sink
68 virtual void SetLogCallback(IErrorListener &listener) noexcept = 0;
71 * @deprecated use LoadNetwork with four parameters (executable network, cnn network, config, response)
72 * @brief Loads a pre-built network with weights to the engine. In case of success the plugin will
74 * @param network Network object acquired from CNNNetReader
75 * @param resp Pointer to the response message that holds a description of an error if any occurred
76 * @return Status code of the operation. OK if succeeded
78 virtual StatusCode LoadNetwork(ICNNNetwork &network, ResponseDesc *resp) noexcept = 0;
81 * @brief Creates an executable network from a network object. User can create as many networks as they need and use
82 * them simultaneously (up to the limitation of the hardware resources)
83 * @param ret Reference to a shared ptr of the returned network interface
84 * @param network Network object acquired from CNNNetReader
85 * @param config Map of pairs: (config parameter name, config parameter value) relevant only for this load operation
86 * @param resp Pointer to the response message that holds a description of an error if any occurred
87 * @return Status code of the operation. OK if succeeded
90 LoadNetwork(IExecutableNetwork::Ptr &ret, ICNNNetwork &network, const std::map<std::string, std::string> &config,
91 ResponseDesc *resp) noexcept = 0;
94 * @brief Creates an executable network from a previously exported network
95 * @param ret Reference to a shared ptr of the returned network interface
96 * @param modelFileName Path to the location of the exported file
97 * @param config Map of pairs: (config parameter name, config parameter value) relevant only for this load operation*
98 * @param resp Pointer to the response message that holds a description of an error if any occurred
99 * @return Status code of the operation. OK if succeeded
102 ImportNetwork(IExecutableNetwork::Ptr &ret, const std::string &modelFileName,
103 const std::map<std::string, std::string> &config, ResponseDesc *resp) noexcept = 0;
106 * @deprecated Uses Infer() working with multiple inputs and outputs
107 * @brief Infers an image(s).
108 * Input and output dimensions depend on the topology.
109 * As an example for classification topologies use a 4D Blob as input (batch, channels, width,
110 * height) and get a 1D blob as output (scoring probability vector). To Infer a batch,
111 * use a 4D Blob as input and get a 2D blob as output in both cases the method will
112 * allocate the resulted blob
113 * @param input Any TBlob<> object that contains the data to infer. The type of TBlob must match the network input precision and size.
114 * @param result Related TBlob<> object that contains the result of the inference action, typically this is a float blob.
115 The blob does not need to be allocated or initialized, the engine allocates the relevant data.
116 * @param resp Pointer to the response message that holds a description of an error if any occurred
117 * @return Status code of the operation. OK if succeeded
119 virtual StatusCode Infer(const Blob &input, Blob &result, ResponseDesc *resp) noexcept = 0;
122 * @deprecated Loads IExecutableNetwork to create IInferRequest.
123 * @brief Infers tensors. Input and output dimensions depend on the topology.
124 * As an example for classification topologies use a 4D Blob as input (batch, channels, width,
125 * height) and get a 1D blob as output (scoring probability vector). To Infer a batch,
126 * use a 4D Blob as input and get a 2D blob as output in both cases the method will
127 * allocate the resulted blob
128 * @param input Map of input blobs accessed by input names
129 * @param result Map of output blobs accessed by output names
130 * @param resp Pointer to the response message that holds a description of an error if any occurred
131 * @return Status code of the operation. OK if succeeded
133 virtual StatusCode Infer(const BlobMap &input, BlobMap &result, ResponseDesc *resp) noexcept = 0;
136 * @deprecated Uses IInferRequest to get performance measures
137 * @brief Queries performance measures per layer to get feedback of what is the most time consuming layer
138 * Note: not all plugins provide meaningful data
139 * @param perfMap Map of layer names to profiling information for that layer
140 * @param resp Pointer to the response message that holds a description of an error if any occurred
141 * @return Status code of the operation. OK if succeeded
143 virtual StatusCode GetPerformanceCounts(std::map<std::string, InferenceEngineProfileInfo> &perfMap,
144 ResponseDesc *resp) const noexcept = 0;
147 * @brief Registers extension within the plugin
148 * @param extension Pointer to already loaded extension
149 * @param resp Pointer to the response message that holds a description of an error if any occurred
150 * @return Status code of the operation. OK if succeeded
152 virtual StatusCode AddExtension(InferenceEngine::IExtensionPtr extension,
153 InferenceEngine::ResponseDesc *resp) noexcept = 0;
156 * @brief Sets configuration for plugin, acceptable keys can be found in ie_plugin_config.hpp
157 * @param config Map of pairs: (config parameter name, config parameter value)
158 * @param resp Pointer to the response message that holds a description of an error if any occurred
160 virtual StatusCode SetConfig(const std::map<std::string, std::string> &config, ResponseDesc *resp) noexcept = 0;
164 * @depricated Use the version with config parameter
165 * @brief Query plugin if it supports specified network
166 * @param network Network object to query
167 * @param resp Pointer to the response message that holds a description of an error if any occurred
169 virtual void QueryNetwork(const ICNNNetwork& /*network*/, QueryNetworkResult& res) const noexcept {
170 res.rc = InferenceEngine::NOT_IMPLEMENTED;
174 * @brief Query plugin if it supports specified network with specified configuration
175 * @param network Network object to query
176 * @param config Map of pairs: (config parameter name, config parameter value)
177 * @param resp Pointer to the response message that holds a description of an error if any occurred
179 virtual void QueryNetwork(const ICNNNetwork& /*network*/,
180 const std::map<std::string, std::string> &/*config*/, QueryNetworkResult& res) const noexcept {
181 res.rc = InferenceEngine::NOT_IMPLEMENTED;
/**
 * @brief Creates the default instance of the interface (per plugin)
 * @param plugin Pointer to the plugin
 * @param resp Pointer to the response message that holds a description of an error if any occurred
 * @return Status code of the operation. OK if succeeded
 */
INFERENCE_PLUGIN_API(StatusCode) CreatePluginEngine(IInferencePlugin *&plugin, ResponseDesc *resp) noexcept;
192 } // namespace InferenceEngine