Removed Int8 normalizer and statistics (#919)
[platform/upstream/dldt.git] / inference-engine / src / legacy_api / src / ie_util_internal.cpp
1 // Copyright (C) 2018-2020 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4
5 #include "ie_util_internal.hpp"
6
7 #include <ie_layers.h>
8
9 #include <cassert>
10 #include <deque>
11 #include <iomanip>
12 #include <memory>
13 #include <string>
14 #include <unordered_map>
15 #include <unordered_set>
16 #include <utility>
17 #include <vector>
18
19 #include "details/caseless.hpp"
20 #include "details/ie_cnn_network_tools.h"
21 #include "details/os/os_filesystem.hpp"
22 #include "file_utils.h"
23 #include "graph_tools.hpp"
24 #include "net_pass.h"
25 #include "precision_utils.h"
26
27 using std::string;
28
29 namespace InferenceEngine {
30
31 using namespace details;
32
33 DataPtr cloneData(const InferenceEngine::Data& source) {
34     auto cloned = std::make_shared<InferenceEngine::Data>(source);
35     if (cloned != nullptr) {
36         cloned->getCreatorLayer().reset();
37         cloned->getInputTo().clear();
38     }
39     return cloned;
40 }
41
42 namespace {
43 template <typename T>
44 CNNLayerPtr layerCloneImpl(const CNNLayer* source) {
45     auto layer = dynamic_cast<const T*>(source);
46     if (nullptr != layer) {
47         auto newLayer = std::make_shared<T>(*layer);
48         newLayer->_fusedWith = nullptr;
49         newLayer->outData.clear();
50         newLayer->insData.clear();
51         return std::static_pointer_cast<CNNLayer>(newLayer);
52     }
53     return nullptr;
54 }
55
56 /* Make this function explicit for TensorIterator layer
57  * because of specific handling of the body field */
58 template <>
59 CNNLayerPtr layerCloneImpl<TensorIterator>(const CNNLayer* source) {
60     auto layer = dynamic_cast<const TensorIterator*>(source);
61     if (nullptr != layer) {
62         auto newLayer = std::make_shared<TensorIterator>(*layer);
63         newLayer->_fusedWith = nullptr;
64         newLayer->outData.clear();
65         newLayer->insData.clear();
66
67         newLayer->body = NetPass::CopyTIBody(newLayer->body);
68
69         return std::static_pointer_cast<CNNLayer>(newLayer);
70     }
71     return nullptr;
72 }
73
74 }  // namespace
75
76 CNNLayerPtr clonelayer(const CNNLayer& source) {
77     using fptr = CNNLayerPtr (*)(const CNNLayer*);
78     // Most derived layers must go first in this list
79     static const fptr cloners[] = {&layerCloneImpl<ExperimentalDetectronTopKROIs>,
80                                    &layerCloneImpl<ExperimentalDetectronGenerateProposalsSingleImageLayer>,
81                                    &layerCloneImpl<ExperimentalDetectronPriorGridGeneratorLayer>,
82                                    &layerCloneImpl<ScatterUpdateLayer>,
83                                    &layerCloneImpl<ScatterElementsUpdateLayer>,
84                                    &layerCloneImpl<NonMaxSuppressionLayer>,
85                                    &layerCloneImpl<SelectLayer>,
86                                    &layerCloneImpl<BatchNormalizationLayer>,
87                                    &layerCloneImpl<TopKLayer>,
88                                    &layerCloneImpl<PowerLayer>,
89                                    &layerCloneImpl<ScaleShiftLayer>,
90                                    &layerCloneImpl<PReLULayer>,
91                                    &layerCloneImpl<TileLayer>,
92                                    &layerCloneImpl<ReshapeLayer>,
93                                    &layerCloneImpl<CropLayer>,
94                                    &layerCloneImpl<EltwiseLayer>,
95                                    &layerCloneImpl<GemmLayer>,
96                                    &layerCloneImpl<PadLayer>,
97                                    &layerCloneImpl<GatherLayer>,
98                                    &layerCloneImpl<StridedSliceLayer>,
99                                    &layerCloneImpl<ShuffleChannelsLayer>,
100                                    &layerCloneImpl<DepthToSpaceLayer>,
101                                    &layerCloneImpl<SpaceToDepthLayer>,
102                                    &layerCloneImpl<SpaceToBatchLayer>,
103                                    &layerCloneImpl<BatchToSpaceLayer>,
104                                    &layerCloneImpl<SparseFillEmptyRowsLayer>,
105                                    &layerCloneImpl<SparseSegmentReduceLayer>,
106                                    &layerCloneImpl<ExperimentalSparseWeightedReduceLayer>,
107                                    &layerCloneImpl<SparseToDenseLayer>,
108                                    &layerCloneImpl<BucketizeLayer>,
109                                    &layerCloneImpl<ReverseSequenceLayer>,
110                                    &layerCloneImpl<RangeLayer>,
111                                    &layerCloneImpl<FillLayer>,
112                                    &layerCloneImpl<BroadcastLayer>,
113                                    &layerCloneImpl<MathLayer>,
114                                    &layerCloneImpl<ReduceLayer>,
115                                    &layerCloneImpl<ClampLayer>,
116                                    &layerCloneImpl<ReLULayer>,
117                                    &layerCloneImpl<SoftMaxLayer>,
118                                    &layerCloneImpl<GRNLayer>,
119                                    &layerCloneImpl<MVNLayer>,
120                                    &layerCloneImpl<NormLayer>,
121                                    &layerCloneImpl<SplitLayer>,
122                                    &layerCloneImpl<ConcatLayer>,
123                                    &layerCloneImpl<FullyConnectedLayer>,
124                                    &layerCloneImpl<PoolingLayer>,
125                                    &layerCloneImpl<DeconvolutionLayer>,
126                                    &layerCloneImpl<DeformableConvolutionLayer>,
127                                    &layerCloneImpl<ConvolutionLayer>,
128                                    &layerCloneImpl<TensorIterator>,
129                                    &layerCloneImpl<RNNSequenceLayer>,
130                                    &layerCloneImpl<LSTMCell>,
131                                    &layerCloneImpl<GRUCell>,
132                                    &layerCloneImpl<RNNCell>,
133                                    &layerCloneImpl<QuantizeLayer>,
134                                    &layerCloneImpl<BinaryConvolutionLayer>,
135                                    &layerCloneImpl<WeightableLayer>,
136                                    &layerCloneImpl<OneHotLayer>,
137                                    &layerCloneImpl<CNNLayer>,
138                                    &layerCloneImpl<UniqueLayer>};
139     for (auto cloner : cloners) {
140         auto cloned = cloner(&source);
141         if (nullptr != cloned) {
142             return cloned;
143         }
144     }
145     assert(!"All layers derived from CNNLayer so we must never get here");
146     return nullptr;  // Silence "control may reach end of non-void function" warning
147 }
148
// Clones a network. For nGraph-function-based networks, a fresh CNNNetwork
// is built from the function and the I/O metadata (output precision/layout,
// input precision/layout/pre-processing) is copied over explicitly, since
// constructing from the function does not carry it over. Legacy networks are
// delegated to cloneNet().
std::shared_ptr<ICNNNetwork> cloneNetwork(const ICNNNetwork& network) {
    if (auto func = network.getFunction()) {
        CNNNetwork net(func);

        InputsDataMap originInputs;
        OutputsDataMap originOutputs;
        network.getInputsInfo(originInputs);
        network.getOutputsInfo(originOutputs);
        InputsDataMap clonedInputs = net.getInputsInfo();
        OutputsDataMap clonedOutputs = net.getOutputsInfo();

        // Every origin output must exist in the clone; copy its settings.
        for (const auto& outputInfo : originOutputs) {
            if (clonedOutputs.find(outputInfo.first) == clonedOutputs.end())
                THROW_IE_EXCEPTION << "Cannot clone network! Cloned network doesn't contain all outputs";
            clonedOutputs[outputInfo.first]->setPrecision(outputInfo.second->getPrecision());
            clonedOutputs[outputInfo.first]->setLayout(outputInfo.second->getLayout());
        }
        // Same for inputs, plus the pre-processing configuration.
        for (const auto& inputInfo : originInputs) {
            if (clonedInputs.find(inputInfo.first) == clonedInputs.end())
                THROW_IE_EXCEPTION << "Cannot clone network! Cloned network doesn't contain all inputs";
            clonedInputs[inputInfo.first]->setPrecision(inputInfo.second->getPrecision());
            clonedInputs[inputInfo.first]->setLayout(inputInfo.second->getLayout());
            clonedInputs[inputInfo.first]->getPreProcess() = inputInfo.second->getPreProcess();
        }
        // CNNNetwork converts to std::shared_ptr<ICNNNetwork> here.
        return net;
    }

    // Legacy (non-nGraph) path: structural deep copy.
    return cloneNet(network);
}
178
179 details::CNNNetworkImplPtr cloneNet(const ICNNNetwork& origin_network) {
180     std::shared_ptr<ICNNNetwork> clonedNetwork;
181     // Call conversion only on the copy of nGraph function
182     if (auto func = origin_network.getFunction()) {
183         clonedNetwork = cloneNetwork(origin_network);
184     }
185     const ICNNNetwork& network = (clonedNetwork) ? *clonedNetwork : origin_network;
186
187     std::vector<CNNLayerPtr> layers;
188     details::CNNNetworkIterator i(&network);
189     while (i != details::CNNNetworkIterator()) {
190         layers.push_back(*i);
191         i++;
192     }
193
194     // copy of the network
195     details::CNNNetworkImplPtr net = cloneNet(layers);
196     // going over output layers and aligning output ports and outputs
197     OutputsDataMap outputs;
198     network.getOutputsInfo(outputs);
199     OutputsDataMap outputInfo;
200     net->getOutputsInfo(outputInfo);
201     for (auto o : outputs) {
202         auto it = outputInfo.find(o.first);
203         if (it != outputInfo.end()) {
204             outputInfo.erase(it);
205         } else {
206             net->addOutput(o.first);
207         }
208     }
209     // remove output ports which unconnected with outputs
210     for (auto o : outputInfo) {
211         net->removeOutput(o.first);
212     }
213     IE_SUPPRESS_DEPRECATED_START
214     net->setPrecision(network.getPrecision());
215     IE_SUPPRESS_DEPRECATED_END
216     net->setName(network.getName());
217
218     InputsDataMap externalInputsData;
219     network.getInputsInfo(externalInputsData);
220
221     InputsDataMap clonedInputs;
222     net->getInputsInfo(clonedInputs);
223     for (auto&& it : externalInputsData) {
224         auto inp = clonedInputs.find(it.first);
225         if (inp != clonedInputs.end() && nullptr != inp->second) {
226             inp->second->setPrecision(it.second->getPrecision());
227             inp->second->getPreProcess() = it.second->getPreProcess();
228         }
229     }
230
231     return net;
232 }
233
// Deep-copies the given set of layers into a fresh CNNNetworkImpl.
// Data objects are cloned lazily on first encounter, all connections are
// rebuilt from scratch, synthetic Input layers are fabricated for parentless
// data, and any data consumed by a layer outside `layers` becomes a network
// output.
details::CNNNetworkImplPtr cloneNet(const std::vector<CNNLayerPtr>& layers) {
    auto net = std::make_shared<InferenceEngine::details::CNNNetworkImpl>();

    // Src to cloned data map
    std::unordered_map<InferenceEngine::DataPtr, InferenceEngine::DataPtr> dataMap;
    // Cloned to src data map
    std::unordered_map<InferenceEngine::DataPtr, InferenceEngine::DataPtr> clonedDataMap;
    std::vector<InferenceEngine::DataPtr> clonedDatas;

    // Returns the clone for `data`, creating and registering it (in both
    // maps, the ordered list, and the network) on first use.
    auto createDataImpl = [&](const InferenceEngine::DataPtr& data) {
        assert(nullptr != data);
        if (!contains(dataMap, data)) {
            auto clonedData = cloneData(*data);
            dataMap[data] = clonedData;
            clonedDataMap[clonedData] = data;
            clonedDatas.push_back(clonedData);
            net->getData(clonedData->getName()) = clonedData;
            return clonedData;
        }
        return dataMap[data];
    };

    // Clones a layer, detaches it from the graph, and adds it to `net`.
    auto cloneLayerImpl = [&](const CNNLayer& srcLayer) {
        CNNLayerPtr clonedLayer = clonelayer(srcLayer);
        clonedLayer->_fusedWith = nullptr;
        // We will need to reconstruct all connections in new graph
        clonedLayer->outData.clear();
        clonedLayer->insData.clear();
        net->addLayer(clonedLayer);
        return clonedLayer;
    };

    for (auto&& srcLayer : layers) {
        CNNLayerPtr clonedLayer = cloneLayerImpl(*srcLayer);
        // Rebuild input connections: cloned data -> named input port of the
        // cloned layer.
        for (auto&& src : srcLayer->insData) {
            auto data = src.lock();
            auto clonedData = createDataImpl(data);

            string inputName;
            // Find input name: the port name under which this source data
            // feeds srcLayer in the original graph.
            for (auto&& inp : data->getInputTo()) {
                if (srcLayer == inp.second) {
                    inputName = inp.first;
                    break;
                }
            }
            assert(!inputName.empty());
            clonedData->getInputTo().insert({inputName, clonedLayer});
            clonedLayer->insData.push_back(clonedData);
        }

        // Rebuild output connections: cloned layer -> cloned data.
        for (auto&& data : srcLayer->outData) {
            auto clonedData = createDataImpl(data);
            clonedData->getCreatorLayer() = clonedLayer;
            clonedLayer->outData.push_back(clonedData);
            // If any original consumer of this data is NOT in `layers`
            // (and is not a PriorBox-type layer), the data must be exposed
            // as a network output.
            for (auto&& inp : data->getInputTo()) {
                auto layer = inp.second;
                // TODO(amalyshe) is it the best place to check priorbox and remove
                // such edge from outputs?
                if (std::find(layers.begin(), layers.end(), layer) == layers.end() &&
                    !(CaselessEq<string>()(layer->type, "priorbox") ||
                      CaselessEq<string>()(layer->type, "PriorBoxClustered"))) {
                    net->addOutput(data->getName());
                    break;
                }
            }
        }
    }

    for (auto&& data : clonedDatas) {
        auto layer = data->getCreatorLayer().lock();
        // create an artificial input layer because logic in some algorithms rely
        // on existence of these layers in the network
        if (nullptr == layer) {
            assert(contains(clonedDataMap, data));
            auto originalData = clonedDataMap[data];
            assert(nullptr != originalData);

            // Prefer cloning the original creator when it is a source-type
            // layer (Input/Const/Memory) that simply was not part of `layers`.
            if (auto originalLayer = originalData->getCreatorLayer().lock()) {
                if (CaselessEq<string>()(originalLayer->type, "input") ||
                    CaselessEq<string>()(originalLayer->type, "const") ||
                    CaselessEq<string>()(originalLayer->type, "memory")) {
                    layer = cloneLayerImpl(*originalLayer);
                    layer->outData.push_back(data);
                    data->getCreatorLayer() = layer;
                }
            }

            // Otherwise fabricate a plain Input layer for the orphaned data.
            if (nullptr == layer) {
                LayerParams params = { data->getName(), std::string("Input"), data->getPrecision() };
                layer = std::make_shared<CNNLayer>(params);
                // this place should be transactional
                layer->outData.push_back(data);
                data->getCreatorLayer() = layer;
                net->addLayer(layer);
            }
        }
        // Every Input-typed creator is registered as a network input.
        if (CaselessEq<string>()(layer->type, "input")) {
            auto input = std::make_shared<InferenceEngine::InputInfo>();
            input->setInputData(data);
            net->setInputInfo(input);
        }
    }

    // Let the implementation finalize/validate its output list.
    net->resolveOutput();

    return net;
}
342
343 struct NodePrinter {
344     enum FILL_COLOR { DATA, SUPPORTED_LAYER, UNSOPPORTED_LAYER };
345
346     std::unordered_set<InferenceEngine::Data*> printed_data;
347     std::unordered_set<InferenceEngine::CNNLayer*> printed_layers;
348     std::ostream& out;
349
350     printer_callback layer_cb;
351
352     explicit NodePrinter(std::ostream& os, printer_callback cb): out(os), layer_cb(std::move(cb)) {}
353
354     bool isPrinted(const CNNLayerPtr& layer) {
355         return static_cast<bool>(printed_layers.count(layer.get()));
356     }
357
358     bool isPrinted(const DataPtr& datum) {
359         return static_cast<bool>(printed_data.count(datum.get()));
360     }
361
362     string colorToStr(FILL_COLOR color) {
363         switch (color) {
364         case DATA:
365             return "#FCF6E3";
366         case SUPPORTED_LAYER:
367             return "#D9EAD3";
368         case UNSOPPORTED_LAYER:
369             return "#F4CCCC";
370         default:
371             return "#FFFFFF";
372         }
373     }
374
375     string formatSize_(const std::vector<unsigned int>& spatialDims) {
376         string result;
377         if (spatialDims.empty()) return result;
378         result = std::to_string(spatialDims[0]);
379         for (auto dim : spatialDims) {
380             result += "x" + std::to_string(dim);
381         }
382         return result;
383     }
384
385     string cleanNodeName_(string node_name) const {
386         // remove dot and dash symbols from node name. It is incorrectly displayed in xdot
387         node_name.erase(remove(node_name.begin(), node_name.end(), '.'), node_name.end());
388         std::replace(node_name.begin(), node_name.end(), '-', '_');
389         std::replace(node_name.begin(), node_name.end(), ':', '_');
390         return node_name;
391     }
392
393     void printLayerNode(const CNNLayerPtr& layer) {
394         auto node_name = "layer_" + cleanNodeName_(layer->name);
395         printed_layers.insert(layer.get());
396
397         ordered_properties printed_properties;
398
399         ordered_properties node_properties = {{"shape", "box"},
400                                               {"style", "filled"},
401                                               {"fillcolor", colorToStr(SUPPORTED_LAYER)}};
402
403         auto type = layer->type;
404         printed_properties.emplace_back("type", type);
405
406         if (type == "Convolution") {
407             auto* conv = dynamic_cast<ConvolutionLayer*>(layer.get());
408
409             if (conv != nullptr) {
410                 unsigned int depth = conv->_out_depth, group = conv->_group;
411
412                 printed_properties.emplace_back(
413                     "kernel size", formatSize_({&(conv->_kernel[0]), &(conv->_kernel[conv->_kernel.size() - 1])}));
414                 printed_properties.emplace_back("output depth", std::to_string(depth));
415                 printed_properties.emplace_back("group", std::to_string(group));
416                 printed_properties.emplace_back(
417                     "padding begin", formatSize_({&(conv->_padding[0]), &(conv->_padding[conv->_padding.size() - 1])}));
418                 printed_properties.emplace_back(
419                     "padding end",
420                     formatSize_({&(conv->_pads_end[0]), &(conv->_pads_end[conv->_pads_end.size() - 1])}));
421                 printed_properties.emplace_back(
422                     "strides", formatSize_({&(conv->_stride[0]), &(conv->_stride[conv->_stride.size() - 1])}));
423                 printed_properties.emplace_back(
424                     "dilations", formatSize_({&(conv->_dilation[0]), &(conv->_dilation[conv->_dilation.size() - 1])}));
425             }
426         } else if (type == "Pooling") {
427             auto* pool = dynamic_cast<PoolingLayer*>(layer.get());
428
429             if (pool != nullptr) {
430                 printed_properties.emplace_back(
431                     "window size", formatSize_({&(pool->_kernel[0]), &(pool->_kernel[pool->_kernel.size() - 1])}));
432                 printed_properties.emplace_back(
433                     "padding begin", formatSize_({&(pool->_padding[0]), &(pool->_padding[pool->_padding.size() - 1])}));
434                 printed_properties.emplace_back(
435                     "padding end",
436                     formatSize_({&(pool->_pads_end[0]), &(pool->_pads_end[pool->_pads_end.size() - 1])}));
437                 printed_properties.emplace_back(
438                     "strides", formatSize_({&(pool->_stride[0]), &(pool->_stride[pool->_stride.size() - 1])}));
439             }
440         } else if (type == "ReLU") {
441             auto* relu = dynamic_cast<ReLULayer*>(layer.get());
442
443             if (relu != nullptr) {
444                 float negative_slope = relu->negative_slope;
445
446                 if (negative_slope != 0.0f)
447                     printed_properties.emplace_back("negative_slope", CNNLayer::ie_serialize_float(negative_slope));
448             }
449         } else if (type == "Eltwise") {
450             auto* eltwise = dynamic_cast<EltwiseLayer*>(layer.get());
451
452             if (eltwise != nullptr) {
453                 std::string operation;
454
455                 if (eltwise->_operation == EltwiseLayer::Sum)
456                     operation = "Sum";
457                 else if (eltwise->_operation == EltwiseLayer::Prod)
458                     operation = "Prod";
459                 else if (eltwise->_operation == EltwiseLayer::Max)
460                     operation = "Max";
461                 else if (eltwise->_operation == EltwiseLayer::Sub)
462                     operation = "Sub";
463                 else if (eltwise->_operation == EltwiseLayer::Min)
464                     operation = "Min";
465                 else if (eltwise->_operation == EltwiseLayer::Div)
466                     operation = "Div";
467                 else if (eltwise->_operation == EltwiseLayer::Squared_diff)
468                     operation = "Squared_diff";
469                 else if (eltwise->_operation == EltwiseLayer::Equal)
470                     operation = "Equal";
471                 else if (eltwise->_operation == EltwiseLayer::Not_equal)
472                     operation = "Not_equal";
473                 else if (eltwise->_operation == EltwiseLayer::Less)
474                     operation = "Less";
475                 else if (eltwise->_operation == EltwiseLayer::Less_equal)
476                     operation = "Less_equal";
477                 else if (eltwise->_operation == EltwiseLayer::Greater)
478                     operation = "Greater";
479                 else if (eltwise->_operation == EltwiseLayer::Greater_equal)
480                     operation = "Greater_equal";
481                 else if (eltwise->_operation == EltwiseLayer::Logical_NOT)
482                     operation = "Logical_NOT";
483                 else if (eltwise->_operation == EltwiseLayer::Logical_AND)
484                     operation = "Logical_AND";
485                 else if (eltwise->_operation == EltwiseLayer::Logical_OR)
486                     operation = "Logical_OR";
487                 else if (eltwise->_operation == EltwiseLayer::Logical_XOR)
488                     operation = "Logical_XOR";
489                 else if (eltwise->_operation == EltwiseLayer::Floor_mod)
490                     operation = "Floor_mod";
491                 else if (eltwise->_operation == EltwiseLayer::Pow)
492                     operation = "Pow";
493                 else if (eltwise->_operation == EltwiseLayer::Mean)
494                     operation = "Mean";
495
496                 printed_properties.emplace_back("operation", operation);
497             }
498         }
499
500         if (layer_cb != nullptr) {
501             layer_cb(layer, printed_properties, node_properties);
502         }
503
504         printNode(node_name, layer->name, node_properties, printed_properties);
505     }
506
507     void printDataNode(const std::shared_ptr<Data>& data) {
508         auto node_name = "data_" + cleanNodeName_(data->getName());
509         printed_data.insert(data.get());
510
511         ordered_properties printed_properties;
512         ordered_properties node_properties = {{"shape", "ellipse"},
513                                               {"style", "filled"},
514                                               {"fillcolor", colorToStr(DATA)}};
515
516         std::stringstream dims_ss;
517         size_t idx = data->getTensorDesc().getDims().size();
518         dims_ss << '[';
519         for (auto& dim : data->getTensorDesc().getDims()) {
520             dims_ss << dim << ((--idx) != 0u ? ", " : "");
521         }
522         dims_ss << ']';
523
524         printed_properties.emplace_back("dims", dims_ss.str());
525         printed_properties.emplace_back("precision", data->getPrecision().name());
526
527         std::stringstream ss;
528         ss << data->getTensorDesc().getLayout();
529         printed_properties.emplace_back("layout", ss.str());
530         printed_properties.emplace_back("name", data->getName());
531         if (data->getCreatorLayer().lock() != nullptr)
532             printed_properties.emplace_back("creator layer", data->getCreatorLayer().lock()->name);
533         printNode(node_name, data->getName(), node_properties, printed_properties);
534     }
535
536     void printNode(string const& node_name, const string& node_title, ordered_properties const& node_properties,
537                    ordered_properties const& printed_properties) {
538         // normalization of names, removing all prohibited symbols like "/"
539         string nodeNameN = node_name;
540         std::replace(nodeNameN.begin(), nodeNameN.end(), '/', '_');
541         string dataNameN = node_title;
542         std::replace(dataNameN.begin(), dataNameN.end(), '/', '_');
543
544         out << '\t' << nodeNameN << " [";
545         for (auto& node_property : node_properties) {
546             out << node_property.first << "=\"" << node_property.second << "\", ";
547         }
548
549         out << "label=\"" << node_title;
550         for (auto& printed_property : printed_properties) {
551             out << "\\n" << printed_property.first << ": " << printed_property.second;
552         }
553         out << "\"];\n";
554     }
555
556     void printEdge(const CNNLayerPtr& from_, const DataPtr& to_, bool reverse) {
557         auto from_name = "layer_" + cleanNodeName_(from_->name);
558         auto to_name = "data_" + cleanNodeName_(to_->getName());
559         std::replace(from_name.begin(), from_name.end(), '/', '_');
560         std::replace(to_name.begin(), to_name.end(), '/', '_');
561         if (reverse) std::swap(from_name, to_name);
562         out << '\t' << from_name << " -> " << to_name << ";\n";
563     }
564 };
565
566 void saveGraphToDot(InferenceEngine::ICNNNetwork& network, std::ostream& out, printer_callback layer_cb) {
567     NodePrinter printer(out, std::move(layer_cb));
568
569     out << "digraph Network {\n";
570     // Traverse graph and print nodes
571     for (const auto& layer : details::CNNNetSortTopologically(network)) {
572         printer.printLayerNode(layer);
573
574         // Print output Data Object
575         for (auto& dataptr : layer->outData) {
576             if (!printer.isPrinted(dataptr)) {
577                 printer.printDataNode(dataptr);
578             }
579             printer.printEdge(layer, dataptr, false);
580         }
581
582         // Print input Data objects
583         for (auto& datum : layer->insData) {
584             auto dataptr = datum.lock();
585             if (!printer.isPrinted(dataptr)) {
586                 printer.printDataNode(dataptr);
587             }
588             printer.printEdge(layer, dataptr, true);
589         }
590     }
591     out << "}" << std::endl;
592 }
593
594 std::unordered_set<DataPtr> getRootDataObjects(ICNNNetwork& network) {
595     std::unordered_set<DataPtr> ret;
596     details::CNNNetworkIterator i(&network);
597     while (i != details::CNNNetworkIterator()) {
598         CNNLayer::Ptr layer = *i;
599
600         // TODO: Data without creatorLayer
601         if (CaselessEq<string>()(layer->type, "input") || CaselessEq<string>()(layer->type, "const") ||
602             CaselessEq<string>()(layer->type, "memory")) {
603             ret.insert(layer->outData.begin(), layer->outData.end());
604         }
605         i++;
606     }
607     return ret;
608 }
609
610 }  // namespace InferenceEngine