1 // Copyright (C) 2018-2019 Intel Corporation
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
7 // http://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
17 #include <ie_extension.h>
32 #include "inference_engine.hpp"
// Convenience aliases used to time infer-request execution
// (see InferRequestWrap::start_time below).
// Modernized: `using` alias-declarations instead of `typedef`.
using Time = std::chrono::high_resolution_clock;
using ns = std::chrono::nanoseconds;
namespace InferenceEnginePython {

// NOTE(review): this chunk is a partial extraction of a larger header (the
// embedded original line numbers jump), so several struct headers and closing
// braces are not visible. The members below appear to belong to a layer
// wrapper struct (IENetLayer, judging by the getLayers() declaration further
// down) -- confirm against the complete file.

// Wrapped IE layer and the network that owns it.
InferenceEngine::CNNLayerPtr layer_ptr;
InferenceEngine::CNNNetwork network_ptr;
// Precision name as a string (presumably e.g. "FP32" -- TODO confirm values).
std::string precision;
// Names of downstream (children) and upstream (parents) layers.
std::vector<std::string> children;
std::vector<std::string> parents;
// Layer parameters as a key/value string map.
std::map<std::string, std::string> params;
// Pins the layer to a target device; the affinity string is presumably a
// device name forwarded to the plugin -- TODO confirm semantics.
void setAffinity(const std::string &target_affinity);
// Replaces/updates the layer's parameter map.
// Fix: the original line contained the mojibake "¶ms_map" -- the UTF-8
// sequence for "&para;" swallowed the "&p" of "&params_map". Restored the
// intended const-reference parameter.
void setParams(const std::map<std::string, std::string> &params_map);
// Returns the layer's weight blobs keyed by name (presumably "weights" /
// "biases" -- TODO confirm key set).
std::map<std::string, InferenceEngine::Blob::Ptr> getWeights();
// Changes the layer's precision; takes the precision name by value as a string.
void setPrecision(std::string precision);
// NOTE(review): these look like the members of an InputInfo wrapper struct
// whose opening line is missing from this chunk -- confirm against the full
// file.
InferenceEngine::InputInfo actual;
// Input tensor dimensions and precision name, mirrored for Python access.
std::vector<size_t> dims;
std::string precision;
// Setters take the string name of the precision / layout by value.
void setPrecision(std::string precision);
void setLayout(std::string layout);
// NOTE(review): these look like the members of an OutputInfo wrapper struct
// (header line not visible in this chunk). Unlike the input wrapper above,
// the underlying object here is a DataPtr.
InferenceEngine::DataPtr actual;
// Output tensor dimensions and precision name, mirrored for Python access.
std::vector<size_t> dims;
std::string precision;
// Changes the output precision; takes the precision name by value.
void setPrecision(std::string precision);
// NOTE(review): these appear to be fields of the ProfileInfo struct returned
// by getPerformanceCounts() (declared in the InferRequestWrap section below);
// the struct header is not visible in this chunk.
std::string exec_type;
std::string layer_type;
// Position of the layer in the execution order.
unsigned execution_index;
// NOTE(review): members of the IENetwork wrapper (the IENetwork constructors
// below make the owner explicit); the struct's opening line is missing from
// this chunk.
InferenceEngine::CNNNetwork actual;
std::size_t batch_size;
// Sets the network-wide batch size.
void setBatch(const size_t size);
// Marks the named layers as network outputs with the requested precision.
void addOutputs(const std::vector<std::string> &out_layers, const std::string &precision);
// Accessors returning wrapper objects for layers / inputs / outputs, keyed by
// name. (Returning const values is unusual but is part of the existing
// interface -- left unchanged.)
const std::vector<std::pair<std::string, InferenceEnginePython::IENetLayer>> getLayers();
const std::map<std::string, InferenceEnginePython::InputInfo> getInputs();
const std::map<std::string, InferenceEnginePython::OutputInfo> getOutputs();
// Reshapes network inputs to the given name -> shape mapping.
void reshape(const std::map<std::string, std::vector<size_t>> &input_shapes);
// Serializes the network topology/weights to the given file paths.
void serialize(const std::string &path_to_xml, const std::string &path_to_bin);
// Statistics are a map: layer name -> (stat name -> values). Presumably used
// for calibration/quantization data -- TODO confirm.
void setStats(const std::map<std::string, std::map<std::string, std::vector<float>>> &stats);
const std::map<std::string, std::map<std::string, std::vector<float>>> getStats();
// Constructs the network from model (.xml) and weights (.bin) file paths.
IENetwork(const std::string &model, const std::string &weights);
IENetwork() = default;
// Wrapper around a single IE inference request.
// NOTE(review): the struct's closing brace is not visible in this chunk (the
// embedded original line numbers jump past it).
struct InferRequestWrap {
InferenceEngine::IInferRequest::Ptr request_ptr;
// Timestamp taken when the request is started, used to measure latency with
// the Time/ns aliases declared at the top of the file.
Time::time_point start_time;
// Blocks until the request completes or the timeout elapses; units of
// `timeout` are presumably milliseconds (IInferRequest::Wait convention) --
// TODO confirm.
int wait(int64_t timeout);
// Fetches the blob registered under `blob_name` into `blob_ptr`.
void getBlobPtr(const std::string &blob_name, InferenceEngine::Blob::Ptr &blob_ptr);
// Overrides the batch size for this request only.
void setBatch(int size);
// Per-layer profiling data, keyed by layer name.
std::map<std::string, InferenceEnginePython::ProfileInfo> getPerformanceCounts();
// An executable (compiled/loaded) network plus its pool of infer requests.
// NOTE(review): the struct's closing brace is not visible in this chunk.
struct IEExecNetwork {
InferenceEngine::IExecutableNetwork::Ptr actual;
// Pre-created request pool; size is fixed at construction via num_requests.
std::vector<InferRequestWrap> infer_requests;
IEExecNetwork(const std::string &name, size_t num_requests);
// NOTE(review): this appears to be the interior of an IEPlugin struct (see
// the IEPlugin constructors below); the struct's opening line is missing from
// this chunk, and the embedded original line numbers show a parameter line of
// load() was dropped between the two lines of its signature -- confirm the
// full signature against the complete file.
// Loads a network onto this plugin's device and returns the executable
// network (caller owns it via unique_ptr).
std::unique_ptr<InferenceEnginePython::IEExecNetwork> load(const InferenceEnginePython::IENetwork &net,
const std::map<std::string, std::string> &config);
// Device name this plugin was created for (e.g. the string passed to the
// constructor -- TODO confirm).
std::string device_name;
// Applies a key/value configuration map to the plugin.
void setConfig(const std::map<std::string, std::string> &);
// Loads a CPU extension library from the given path.
void addCpuExtension(const std::string &extension_path);
// Assigns initial per-layer affinities for the given network.
void setInitialAffinity(const InferenceEnginePython::IENetwork &net);
IEPlugin(const std::string &device, const std::vector<std::string> &plugin_dirs);
IEPlugin() = default;
// Returns the names of the layers this plugin can execute for `net`.
std::set<std::string> queryNetwork(const InferenceEnginePython::IENetwork &net);
// Underlying IE plugin handle.
InferenceEngine::InferenceEnginePluginPtr actual;
170 T *get_buffer(InferenceEngine::Blob &blob) {
171 return blob.buffer().as<T *>();
// C++11-compatible substitute for std::make_unique (C++14): constructs a T
// from the perfectly-forwarded arguments and wraps it in a std::unique_ptr.
// Fix(review): the function's closing brace was missing in this chunk
// (dropped during extraction); restored so the definition is well-formed.
template<class T, class... Args>
std::unique_ptr<T> make_unique(Args &&... args) {
    return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
// Returns the Inference Engine version as a string.
std::string get_version();
// NOTE(review): given the dropped lines in this chunk, this `};` may actually
// be closing an unclosed struct above rather than the namespace; a namespace
// normally closes with a plain `}`. Confirm against the complete file.
}; // namespace InferenceEnginePython