Add a section of how to link IE with CMake project (#99)
[platform/upstream/dldt.git] / inference-engine / ie_bridges / python / src / openvino / inference_engine / dnn_builder / dnn_builder_impl.cpp
1 // Copyright (c) 2018 Intel Corporation
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //        http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "dnn_builder_impl.hpp"
16
17 // using namespace InferenceEnginePython;
18 // using namespace std;
19
20 std::map<std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
21                                                                    {"FP16", InferenceEngine::Precision::FP16},
22                                                                    {"Q78",  InferenceEngine::Precision::Q78},
23                                                                    {"I32",  InferenceEngine::Precision::I32},
24                                                                    {"I16",  InferenceEngine::Precision::I16},
25                                                                    {"I8",   InferenceEngine::Precision::I8},
26                                                                    {"U16",  InferenceEngine::Precision::U16},
27                                                                    {"U8",   InferenceEngine::Precision::U8}};
28
29 InferenceEnginePython::ILayer buildILayer(InferenceEngine::ILayer::CPtr it) {
30     std::vector<InferenceEnginePython::Port> in_ports;
31     std::vector<InferenceEnginePython::Port> out_ports;
32     for (const auto &port : it->getInputPorts()) {
33         in_ports.push_back(InferenceEnginePython::Port(port.shape()));
34     }
35     for (const auto &port : it->getOutputPorts()) {
36         out_ports.push_back(InferenceEnginePython::Port(port.shape()));
37     }
38
39     std::map<std::string, std::string> params_map;
40     for (const auto &params : it->getParameters()->getParameters()) {
41         params_map.emplace(params.first, params.second);
42     }
43     std::map<std::string, InferenceEngine::Blob::Ptr> data_map;
44     for (const auto &data : it->getParameters()->getConstantData()) {
45         data_map.emplace(data.first, std::const_pointer_cast<InferenceEngine::Blob>(data.second));
46     }
47     return {it,
48             it->getName(),
49             it->getId(),
50             it->getType(),
51             params_map,
52             data_map,
53             in_ports,
54             out_ports,
55     };
56 }
57
58 // NetworkBuilder
59 InferenceEnginePython::NetworkBuilder::NetworkBuilder(const std::string &name) {
60     // TODO(  ): std::move or instance in heap? Please check in other places.
61     InferenceEngine::Builder::Network network(name);
62     network_ptr = std::make_shared<InferenceEngine::Builder::Network>(network);
63 }
64
65 InferenceEnginePython::NetworkBuilder InferenceEnginePython::NetworkBuilder::from_ie_network(
66         const InferenceEnginePython::IENetwork &icnn_net) {
67     InferenceEngine::Builder::Network network((InferenceEngine::ICNNNetwork &) icnn_net.actual);
68     NetworkBuilder net_builder = NetworkBuilder();
69     net_builder.network_ptr = std::make_shared<InferenceEngine::Builder::Network>(network);
70     return net_builder;
71 }
72
73 InferenceEnginePython::INetwork InferenceEnginePython::NetworkBuilder::build() {
74     InferenceEngine::INetwork::Ptr i_net = network_ptr->build();
75     std::vector<ILayer> layers;
76     for (const auto &it : *i_net) {
77         layers.push_back(buildILayer(it));
78     }
79     std::vector<ILayer> inputs;
80     for (const auto &it : i_net->getInputs()) {
81         inputs.push_back(buildILayer(it));
82     }
83     std::vector<ILayer> outputs;
84     for (const auto &it : i_net->getInputs()) {
85         outputs.push_back(buildILayer(it));
86     }
87     return {i_net,             // INetwork ptr
88             i_net->getName(),  // name
89             i_net->size(),     // Number of layers
90             layers,
91             inputs,
92             outputs
93     };
94 }
95
96 std::vector<InferenceEnginePython::LayerBuilder> InferenceEnginePython::NetworkBuilder::getLayers() {
97     std::vector<LayerBuilder> layers;
98     for (const auto &it : network_ptr->getLayers()) {
99         LayerBuilder layer;
100         layer.actual = it;
101         layer.id = it.getId();
102         layers.push_back(layer);
103     }
104     return layers;
105 }
106
107 InferenceEnginePython::LayerBuilder InferenceEnginePython::NetworkBuilder::getLayer(size_t layer_id) {
108     LayerBuilder layer;
109     InferenceEngine::Builder::Layer ie_layer = network_ptr->getLayer(layer_id);
110     layer.actual = ie_layer;
111     layer.id = ie_layer.getId();
112     return layer;
113 }
114
// Remove the given layer (identified by its cached id) from the network
// under construction.
void InferenceEnginePython::NetworkBuilder::removeLayer(const LayerBuilder &layer) {
    network_ptr->removeLayer(layer.id);
}
118
119 const std::vector<InferenceEnginePython::Connection> InferenceEnginePython::NetworkBuilder::getLayerConnections(
120         const LayerBuilder &layer) {
121     std::vector<InferenceEngine::Connection> ie_connections = network_ptr->getLayerConnections(layer.id);
122     std::vector<Connection> connections;
123     for (auto const &it : ie_connections) {
124         PortInfo input(it.from().layerId(), it.from().portId());
125         PortInfo output(it.to().layerId(), it.to().portId());
126         connections.push_back(Connection(input, output));
127     }
128     return connections;
129 }
130
// Remove the given connection (edge) from the network under construction.
void InferenceEnginePython::NetworkBuilder::disconnect(const Connection &connection) {
    network_ptr->disconnect(connection.actual);
}
134
// Connect the output port described by `input` to the input port described
// by `output` in the network under construction.
void InferenceEnginePython::NetworkBuilder::connect(const PortInfo &input, const PortInfo &output) {
    network_ptr->connect(input.actual, output.actual);
}
138
// Add the layer to the network; returns the id the builder assigned to it.
size_t InferenceEnginePython::NetworkBuilder::addLayer(const LayerBuilder &layer) {
    return network_ptr->addLayer(layer.actual);
}
142
143 size_t InferenceEnginePython::NetworkBuilder::addAndConnectLayer(const std::vector<PortInfo> &input,
144                                                                  const LayerBuilder &layer) {
145     std::vector<InferenceEngine::PortInfo> ie_ports;
146     for (const auto &it : input) {
147         ie_ports.push_back(it.actual);
148     }
149     return network_ptr->addLayer(ie_ports, layer.actual);
150 }
// NetworkBuilder end
153
154 // Port
155 InferenceEnginePython::Port::Port(const std::vector<size_t> &shapes) {
156     actual = InferenceEngine::Port(shapes);
157     shape = actual.shape();
158 }
159
160 InferenceEnginePython::PortInfo::PortInfo(size_t layer_id, size_t port_id) : PortInfo() {
161     this->actual = InferenceEngine::PortInfo(layer_id, port_id);
162     this->layer_id = layer_id;
163     this->port_id = port_id;
164 }
165 // Port end
166
167 // INetwork
168 std::vector<InferenceEnginePython::Connection> InferenceEnginePython::INetwork::getLayerConnections(size_t layer_id) {
169     std::vector<Connection> connections;
170     for (const auto &it : actual->getLayerConnections(layer_id)) {
171         PortInfo input = PortInfo(it.from().layerId(), it.from().portId());
172         PortInfo output = PortInfo(it.to().layerId(), it.to().portId());
173         connections.push_back(Connection(input, output));
174     }
175     return connections;
176 }
177
178 InferenceEnginePython::IENetwork InferenceEnginePython::INetwork::to_ie_network() {
179     std::shared_ptr<InferenceEngine::ICNNNetwork> icnn_net = InferenceEngine::Builder::convertToICNNNetwork(actual);
180     InferenceEngine::CNNNetwork cnn_net(icnn_net);
181     IENetwork ie_net = IENetwork();
182     ie_net.actual = cnn_net;
183     ie_net.name = name;
184     ie_net.batch_size = cnn_net.getBatchSize();
185     return ie_net;
186 }
187 // INetwork end
188
189 // Connection
190 InferenceEnginePython::Connection::Connection(PortInfo input, PortInfo output) : Connection() {
191     this->actual = InferenceEngine::Connection(InferenceEngine::PortInfo(input.layer_id, input.port_id),
192                                                InferenceEngine::PortInfo(output.layer_id, output.port_id));
193     this->_from = PortInfo(actual.from().layerId(), actual.from().portId());
194     this->to = PortInfo(actual.to().layerId(), actual.to().portId());
195 }
196 // Connection end
197
198 // LayerBuilder
199 InferenceEnginePython::LayerBuilder::LayerBuilder(const std::string &type, const std::string &name) : LayerBuilder() {
200     InferenceEngine::Builder::Layer layer(type, name);
201     this->actual = layer;
202     this->id = layer.getId();
203 }
204
// Layer name as stored in the underlying IE builder layer.
// NOTE(review): returns a reference into `actual` — assumes
// Builder::Layer::getName() returns const std::string&; valid only while
// this LayerBuilder is alive.
const std::string &InferenceEnginePython::LayerBuilder::getName() {
    return actual.getName();
}
208
// Layer type (e.g. "Convolution") as stored in the underlying IE builder
// layer. Same lifetime caveat as getName(): the reference is tied to this
// LayerBuilder.
const std::string &InferenceEnginePython::LayerBuilder::getType() {
    return actual.getType();
}
212
213 std::vector<InferenceEnginePython::Port> InferenceEnginePython::LayerBuilder::getInputPorts() {
214     std::vector<Port> ports;
215     for (const auto &it : actual.getInputPorts()) {
216         ports.push_back(Port(it.shape()));
217     }
218     return ports;
219 }
220
221 std::vector<InferenceEnginePython::Port> InferenceEnginePython::LayerBuilder::getOutputPorts() {
222     std::vector<Port> ports;
223     for (const auto &it : actual.getOutputPorts()) {
224         ports.push_back(Port(it.shape()));
225     }
226     return ports;
227 }
228
229 std::map<std::string, std::string> InferenceEnginePython::LayerBuilder::getParameters() {
230     std::map<std::string, std::string> params_map;
231     for (const auto &it : actual.getParameters()) {
232         params_map.emplace(it.first, it.second);
233     }
234     return params_map;
235 }
236
237 void InferenceEnginePython::LayerBuilder::setParameters(std::map<std::string, std::string> params_map) {
238     std::map<std::string, InferenceEngine::Parameter> ie_params_map;
239     for (const auto &it : params_map) {
240         InferenceEngine::Parameter ie_param((it.second));
241         ie_params_map.emplace(it.first, ie_param);
242     }
243     actual = actual.setParameters(ie_params_map);
244 }
245
// Rename the layer; setName returns the updated layer, which replaces
// `actual`.
void InferenceEnginePython::LayerBuilder::setName(const std::string &name) {
    actual = actual.setName(name);
}
249
// Change the layer type; setType returns the updated layer, which replaces
// `actual`.
void InferenceEnginePython::LayerBuilder::setType(const std::string &type) {
    actual = actual.setType(type);
}
253
254 void InferenceEnginePython::LayerBuilder::setInputPorts(const std::vector<Port> ports) {
255     std::vector<InferenceEngine::Port> ie_ports;
256     for (const auto &it : ports) {
257         ie_ports.push_back(it.actual);
258     }
259     actual = actual.setInputPorts(ie_ports);
260 }
261
262 void InferenceEnginePython::LayerBuilder::setOutputPorts(const std::vector<Port> ports) {
263     std::vector<InferenceEngine::Port> ie_ports;
264     for (const auto &it : ports) {
265         ie_ports.push_back(it.actual);
266     }
267     actual = actual.setOutputPorts(ie_ports);
268 }
269
// Finalize the layer and convert it to the Python-facing ILayer aggregate.
InferenceEnginePython::ILayer InferenceEnginePython::LayerBuilder::build() {
    return buildILayer(actual.build());
}
273
274 std::map<std::string, InferenceEngine::Blob::Ptr> InferenceEnginePython::LayerBuilder::getConstantData() {
275     std::map<std::string, InferenceEngine::Blob::Ptr> data_map;
276     for (const auto &it : actual.getConstantData()) {
277         data_map.emplace(it.first, std::const_pointer_cast<InferenceEngine::Blob>(it.second));
278     }
279     return data_map;
280 }
281
282 InferenceEngine::Blob::Ptr InferenceEnginePython::LayerBuilder::allocateBlob(std::vector<size_t> dims,
283                                                                              const std::string &precision) {
284     InferenceEngine::Layout ie_layout;
285     ie_layout = InferenceEngine::TensorDesc::getLayoutByDims(dims);
286     InferenceEngine::Precision ie_precision = precision_map.at(precision);
287     const InferenceEngine::TensorDesc &tdesc = InferenceEngine::TensorDesc(ie_precision, dims, ie_layout);
288     InferenceEngine::Blob::Ptr blob;
289     switch (ie_precision) {
290         case InferenceEngine::Precision::FP32:
291             blob = InferenceEngine::make_shared_blob<float>(tdesc);
292             break;
293         case InferenceEngine::Precision::FP16:
294             blob = InferenceEngine::make_shared_blob<int>(tdesc);
295             break;
296         case InferenceEngine::Precision::I16:
297             blob = InferenceEngine::make_shared_blob<int>(tdesc);
298             break;
299         case InferenceEngine::Precision::U16:
300             blob = InferenceEngine::make_shared_blob<int>(tdesc);
301             break;
302         case InferenceEngine::Precision::U8:
303             blob = InferenceEngine::make_shared_blob<unsigned char>(tdesc);
304             break;
305         case InferenceEngine::Precision::I8:
306             blob = InferenceEngine::make_shared_blob<signed char>(tdesc);
307             break;
308         case InferenceEngine::Precision::I32:
309             blob = InferenceEngine::make_shared_blob<signed int>(tdesc);
310             break;
311         default:
312             blob = InferenceEngine::make_shared_blob<float>(tdesc);
313             break;
314     }
315
316     blob->allocate();
317     return blob;
318 }
319
// Attach constant data blobs (e.g. weights/biases) to the layer.
// NOTE(review): unlike the other setters this does NOT reassign `actual`
// from the call's return value — confirm setConstantData mutates the layer
// in place rather than returning an updated copy.
void InferenceEnginePython::LayerBuilder::setConstantData(const std::map<std::string,
                                                          InferenceEngine::Blob::Ptr> &const_data) {
    actual.setConstantData(const_data);
}
// TODO(  ): Fix LayerBuilder object copying - pass by reference
325 // void LayerBuilder::addConstantData(const std::string & name, InferenceEngine::Blob::Ptr data){
326 //     InferenceEngine::Blob::CPtr c_data = const_pointer_cast<const InferenceEngine::Blob>(data);
327 //     actual.addConstantData(name, c_data);
328 // }
329
330 // LayerBuilder end