// Copyright (c) 2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dnn_builder_impl.hpp"

#include <map>
#include <memory>
#include <string>
#include <vector>
// using namespace InferenceEnginePython;
// using namespace std;
20 std::map<std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
21 {"FP16", InferenceEngine::Precision::FP16},
22 {"Q78", InferenceEngine::Precision::Q78},
23 {"I32", InferenceEngine::Precision::I32},
24 {"I16", InferenceEngine::Precision::I16},
25 {"I8", InferenceEngine::Precision::I8},
26 {"U16", InferenceEngine::Precision::U16},
27 {"U8", InferenceEngine::Precision::U8}};
29 InferenceEnginePython::ILayer buildILayer(InferenceEngine::ILayer::CPtr it) {
30 std::vector<InferenceEnginePython::Port> in_ports;
31 std::vector<InferenceEnginePython::Port> out_ports;
32 for (const auto &port : it->getInputPorts()) {
33 in_ports.push_back(InferenceEnginePython::Port(port.shape()));
35 for (const auto &port : it->getOutputPorts()) {
36 out_ports.push_back(InferenceEnginePython::Port(port.shape()));
39 std::map<std::string, std::string> params_map;
40 for (const auto ¶ms : it->getParameters()->getParameters()) {
41 params_map.emplace(params.first, params.second);
43 std::map<std::string, InferenceEngine::Blob::Ptr> data_map;
44 for (const auto &data : it->getParameters()->getConstantData()) {
45 data_map.emplace(data.first, std::const_pointer_cast<InferenceEngine::Blob>(data.second));
59 InferenceEnginePython::NetworkBuilder::NetworkBuilder(const std::string &name) {
60 // TODO( ): std::move or instance in heap? Please check in other places.
61 InferenceEngine::Builder::Network network(name);
62 network_ptr = std::make_shared<InferenceEngine::Builder::Network>(network);
65 InferenceEnginePython::NetworkBuilder InferenceEnginePython::NetworkBuilder::from_ie_network(
66 const InferenceEnginePython::IENetwork &icnn_net) {
67 InferenceEngine::Builder::Network network((InferenceEngine::ICNNNetwork &) icnn_net.actual);
68 NetworkBuilder net_builder = NetworkBuilder();
69 net_builder.network_ptr = std::make_shared<InferenceEngine::Builder::Network>(network);
73 InferenceEnginePython::INetwork InferenceEnginePython::NetworkBuilder::build() {
74 InferenceEngine::INetwork::Ptr i_net = network_ptr->build();
75 std::vector<ILayer> layers;
76 for (const auto &it : *i_net) {
77 layers.push_back(buildILayer(it));
79 std::vector<ILayer> inputs;
80 for (const auto &it : i_net->getInputs()) {
81 inputs.push_back(buildILayer(it));
83 std::vector<ILayer> outputs;
84 for (const auto &it : i_net->getInputs()) {
85 outputs.push_back(buildILayer(it));
87 return {i_net, // INetwork ptr
88 i_net->getName(), // name
89 i_net->size(), // Number of layers
96 std::vector<InferenceEnginePython::LayerBuilder> InferenceEnginePython::NetworkBuilder::getLayers() {
97 std::vector<LayerBuilder> layers;
98 for (const auto &it : network_ptr->getLayers()) {
101 layer.id = it.getId();
102 layers.push_back(layer);
107 InferenceEnginePython::LayerBuilder InferenceEnginePython::NetworkBuilder::getLayer(size_t layer_id) {
109 InferenceEngine::Builder::Layer ie_layer = network_ptr->getLayer(layer_id);
110 layer.actual = ie_layer;
111 layer.id = ie_layer.getId();
115 void InferenceEnginePython::NetworkBuilder::removeLayer(const LayerBuilder &layer) {
116 network_ptr->removeLayer(layer.id);
119 const std::vector<InferenceEnginePython::Connection> InferenceEnginePython::NetworkBuilder::getLayerConnections(
120 const LayerBuilder &layer) {
121 std::vector<InferenceEngine::Connection> ie_connections = network_ptr->getLayerConnections(layer.id);
122 std::vector<Connection> connections;
123 for (auto const &it : ie_connections) {
124 PortInfo input(it.from().layerId(), it.from().portId());
125 PortInfo output(it.to().layerId(), it.to().portId());
126 connections.push_back(Connection(input, output));
131 void InferenceEnginePython::NetworkBuilder::disconnect(const Connection &connection) {
132 network_ptr->disconnect(connection.actual);
135 void InferenceEnginePython::NetworkBuilder::connect(const PortInfo &input, const PortInfo &output) {
136 network_ptr->connect(input.actual, output.actual);
139 size_t InferenceEnginePython::NetworkBuilder::addLayer(const LayerBuilder &layer) {
140 return network_ptr->addLayer(layer.actual);
143 size_t InferenceEnginePython::NetworkBuilder::addAndConnectLayer(const std::vector<PortInfo> &input,
144 const LayerBuilder &layer) {
145 std::vector<InferenceEngine::PortInfo> ie_ports;
146 for (const auto &it : input) {
147 ie_ports.push_back(it.actual);
149 return network_ptr->addLayer(ie_ports, layer.actual);
// NetworkBuilder end
155 InferenceEnginePython::Port::Port(const std::vector<size_t> &shapes) {
156 actual = InferenceEngine::Port(shapes);
157 shape = actual.shape();
160 InferenceEnginePython::PortInfo::PortInfo(size_t layer_id, size_t port_id) : PortInfo() {
161 this->actual = InferenceEngine::PortInfo(layer_id, port_id);
162 this->layer_id = layer_id;
163 this->port_id = port_id;
168 std::vector<InferenceEnginePython::Connection> InferenceEnginePython::INetwork::getLayerConnections(size_t layer_id) {
169 std::vector<Connection> connections;
170 for (const auto &it : actual->getLayerConnections(layer_id)) {
171 PortInfo input = PortInfo(it.from().layerId(), it.from().portId());
172 PortInfo output = PortInfo(it.to().layerId(), it.to().portId());
173 connections.push_back(Connection(input, output));
178 InferenceEnginePython::IENetwork InferenceEnginePython::INetwork::to_ie_network() {
179 std::shared_ptr<InferenceEngine::ICNNNetwork> icnn_net = InferenceEngine::Builder::convertToICNNNetwork(actual);
180 InferenceEngine::CNNNetwork cnn_net(icnn_net);
181 IENetwork ie_net = IENetwork();
182 ie_net.actual = cnn_net;
184 ie_net.batch_size = cnn_net.getBatchSize();
190 InferenceEnginePython::Connection::Connection(PortInfo input, PortInfo output) : Connection() {
191 this->actual = InferenceEngine::Connection(InferenceEngine::PortInfo(input.layer_id, input.port_id),
192 InferenceEngine::PortInfo(output.layer_id, output.port_id));
193 this->_from = PortInfo(actual.from().layerId(), actual.from().portId());
194 this->to = PortInfo(actual.to().layerId(), actual.to().portId());
199 InferenceEnginePython::LayerBuilder::LayerBuilder(const std::string &type, const std::string &name) : LayerBuilder() {
200 InferenceEngine::Builder::Layer layer(type, name);
201 this->actual = layer;
202 this->id = layer.getId();
205 const std::string &InferenceEnginePython::LayerBuilder::getName() {
206 return actual.getName();
209 const std::string &InferenceEnginePython::LayerBuilder::getType() {
210 return actual.getType();
213 std::vector<InferenceEnginePython::Port> InferenceEnginePython::LayerBuilder::getInputPorts() {
214 std::vector<Port> ports;
215 for (const auto &it : actual.getInputPorts()) {
216 ports.push_back(Port(it.shape()));
221 std::vector<InferenceEnginePython::Port> InferenceEnginePython::LayerBuilder::getOutputPorts() {
222 std::vector<Port> ports;
223 for (const auto &it : actual.getOutputPorts()) {
224 ports.push_back(Port(it.shape()));
229 std::map<std::string, std::string> InferenceEnginePython::LayerBuilder::getParameters() {
230 std::map<std::string, std::string> params_map;
231 for (const auto &it : actual.getParameters()) {
232 params_map.emplace(it.first, it.second);
237 void InferenceEnginePython::LayerBuilder::setParameters(std::map<std::string, std::string> params_map) {
238 std::map<std::string, InferenceEngine::Parameter> ie_params_map;
239 for (const auto &it : params_map) {
240 InferenceEngine::Parameter ie_param((it.second));
241 ie_params_map.emplace(it.first, ie_param);
243 actual = actual.setParameters(ie_params_map);
246 void InferenceEnginePython::LayerBuilder::setName(const std::string &name) {
247 actual = actual.setName(name);
250 void InferenceEnginePython::LayerBuilder::setType(const std::string &type) {
251 actual = actual.setType(type);
254 void InferenceEnginePython::LayerBuilder::setInputPorts(const std::vector<Port> ports) {
255 std::vector<InferenceEngine::Port> ie_ports;
256 for (const auto &it : ports) {
257 ie_ports.push_back(it.actual);
259 actual = actual.setInputPorts(ie_ports);
262 void InferenceEnginePython::LayerBuilder::setOutputPorts(const std::vector<Port> ports) {
263 std::vector<InferenceEngine::Port> ie_ports;
264 for (const auto &it : ports) {
265 ie_ports.push_back(it.actual);
267 actual = actual.setOutputPorts(ie_ports);
270 InferenceEnginePython::ILayer InferenceEnginePython::LayerBuilder::build() {
271 return buildILayer(actual.build());
274 std::map<std::string, InferenceEngine::Blob::Ptr> InferenceEnginePython::LayerBuilder::getConstantData() {
275 std::map<std::string, InferenceEngine::Blob::Ptr> data_map;
276 for (const auto &it : actual.getConstantData()) {
277 data_map.emplace(it.first, std::const_pointer_cast<InferenceEngine::Blob>(it.second));
282 InferenceEngine::Blob::Ptr InferenceEnginePython::LayerBuilder::allocateBlob(std::vector<size_t> dims,
283 const std::string &precision) {
284 InferenceEngine::Layout ie_layout;
285 ie_layout = InferenceEngine::TensorDesc::getLayoutByDims(dims);
286 InferenceEngine::Precision ie_precision = precision_map.at(precision);
287 const InferenceEngine::TensorDesc &tdesc = InferenceEngine::TensorDesc(ie_precision, dims, ie_layout);
288 InferenceEngine::Blob::Ptr blob;
289 switch (ie_precision) {
290 case InferenceEngine::Precision::FP32:
291 blob = InferenceEngine::make_shared_blob<float>(tdesc);
293 case InferenceEngine::Precision::FP16:
294 blob = InferenceEngine::make_shared_blob<int>(tdesc);
296 case InferenceEngine::Precision::I16:
297 blob = InferenceEngine::make_shared_blob<int>(tdesc);
299 case InferenceEngine::Precision::U16:
300 blob = InferenceEngine::make_shared_blob<int>(tdesc);
302 case InferenceEngine::Precision::U8:
303 blob = InferenceEngine::make_shared_blob<unsigned char>(tdesc);
305 case InferenceEngine::Precision::I8:
306 blob = InferenceEngine::make_shared_blob<signed char>(tdesc);
308 case InferenceEngine::Precision::I32:
309 blob = InferenceEngine::make_shared_blob<signed int>(tdesc);
312 blob = InferenceEngine::make_shared_blob<float>(tdesc);
320 void InferenceEnginePython::LayerBuilder::setConstantData(const std::map<std::string,
321 InferenceEngine::Blob::Ptr> &const_data) {
322 actual.setConstantData(const_data);
// TODO( ): Fix LayerBuilder object copying - pass by reference
// void LayerBuilder::addConstantData(const std::string &name, InferenceEngine::Blob::Ptr data) {
//     InferenceEngine::Blob::CPtr c_data = const_pointer_cast<const InferenceEngine::Blob>(data);
//     actual.addConstantData(name, c_data);