1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
#include <map>
#include <memory>
#include <string>

#include <ie_common.h>
13 namespace InferenceEngine {
16 * @brief minimum API to be implemented by plugin, which is used in InferRequestBase forwarding mechanism
18 class IInferRequestInternal {
20 typedef std::shared_ptr<IInferRequestInternal> Ptr;
22 virtual ~IInferRequestInternal() = default;
25 * @brief Infers specified input(s) in synchronous mode
26 * @note blocks all method of IInferRequest while request is ongoing (running or waiting in queue)
28 virtual void Infer() = 0;
31 * @brief Queries performance measures per layer to get feedback of what is the most time consuming layer.
32 * Note: not all plugins may provide meaningful data
33 * @param perfMap - a map of layer names to profiling information for that layer.
35 virtual void GetPerformanceCounts(std::map<std::string, InferenceEngineProfileInfo> &perfMap) const = 0;
38 * @brief Set input/output data to infer
39 * @note: Memory allocation doesn't happen
40 * @param name - a name of input or output blob.
41 * @param data - a reference to input or output blob. The type of Blob must correspond to the network input precision and size.
43 virtual void SetBlob(const char *name, const Blob::Ptr &data) = 0;
46 * @brief Get input/output data to infer
47 * @note: Memory allocation doesn't happen
48 * @param name - a name of input or output blob.
49 * @param data - a reference to input or output blob. The type of Blob must correspond to the network input precision and size.
51 virtual void GetBlob(const char *name, Blob::Ptr &data) = 0;
54 * @brief Sets new batch size when dynamic batching is enabled in executable network that created this request.
55 * @param batch - new batch size to be used by all the following inference calls for this request.
57 virtual void SetBatch(int batch) = 0;
60 } // namespace InferenceEngine