Cnnnetwork deprecated methods (#1077)
[platform/upstream/dldt.git] / inference-engine / src / vpu / graph_transformer / src / utils / runtime_graph.cpp
1 // Copyright (C) 2020 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4
#include "vpu/utils/runtime_graph.hpp"

#include <cnn_network_impl.hpp>
#include <exec_graph_info.hpp>

#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
15
16 using namespace InferenceEngine;
17
18 namespace vpu {
19
20     InferenceEngine::ICNNNetwork::Ptr buildRuntimeGraph(GraphMetaInfo &graphMetaInfo, const std::vector<float>& perfInfo) {
21         auto net = std::make_shared<InferenceEngine::details::CNNNetworkImpl>();
22         net->setName(graphMetaInfo.graphName);
23
24         std::map<size_t, CNNLayerPtr> stageMetaIndexToLayer;
25
26         auto createLayerFromMeta = [&](const StageMetaInfo &stageMetaInfo) -> CNNLayer::Ptr {
27             auto layer = std::make_shared<CNNLayer>(LayerParams{stageMetaInfo.stageName,
28                                                                 stageMetaInfo.layerType,
29                                                                 Precision::FP16});
30
31             layer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES] = stageMetaInfo.layerName;
32             layer->params[ExecGraphInfoSerialization::IMPL_TYPE] = stageMetaInfo.stageType;
33             layer->params[ExecGraphInfoSerialization::EXECUTION_ORDER] = std::to_string(stageMetaInfo.execOrder);
34
35             std::stringstream layoutStream;
36             int ind = 0;
37             for (auto &outLayout : stageMetaInfo.outLayouts) {
38                 if (ind == 0) {
39                     layoutStream << outLayout;
40                     ind++;
41                     continue;
42                 }
43                 layoutStream << ',' << outLayout;
44             }
45             layer->params[ExecGraphInfoSerialization::OUTPUT_LAYOUTS] = layoutStream.str();
46
47             std::string outPrecisionsStr;
48             ind = 0;
49             for (auto &outPrecision : stageMetaInfo.outPrecisions) {
50                 if (ind == 0) {
51                     outPrecisionsStr += outPrecision.name();
52                     ind++;
53                     continue;
54                 }
55                 outPrecisionsStr += ',' + std::string(outPrecision.name());
56             }
57             layer->params[ExecGraphInfoSerialization::OUTPUT_PRECISIONS] = outPrecisionsStr;
58
59             if (stageMetaInfo.execOrder < 0) {
60                 layer->params[ExecGraphInfoSerialization::PERF_COUNTER] = "not_executed";
61             } else {
62                 layer->params[ExecGraphInfoSerialization::PERF_COUNTER] = std::to_string(stageMetaInfo.execTime);
63             }
64
65             return layer;
66         };
67
68         //
69         // Write performance counts
70         //
71
72         const auto deviceTimings = perfInfo.data();
73         auto deviceTimingsCount = perfInfo.size();
74
75         if (deviceTimingsCount > 0) {
76             std::size_t timeIndex = 0;
77
78             for (auto &stageMeta : graphMetaInfo.stagesMeta) {
79                 if (stageMeta.status == ie::InferenceEngineProfileInfo::EXECUTED &&
80                     timeIndex < deviceTimingsCount) {
81                     stageMeta.execTime += deviceTimings[timeIndex];
82                     timeIndex++;
83                 }
84             }
85         }
86
87         //
88         // Add all stages to network
89         //
90
91         for (std::size_t i = 0; i < graphMetaInfo.stagesMeta.size(); i++) {
92             const auto stageMetaData = graphMetaInfo.stagesMeta[i];
93
94             if (stageMetaData.status == ie::InferenceEngineProfileInfo::LayerStatus::OPTIMIZED_OUT ||
95                 stageMetaData.stageName == "<Receive-Tensor>" ||
96                 stageMetaData.stageName == "<none>") {
97                 continue;
98             }
99
100             auto layer = createLayerFromMeta(stageMetaData);
101             stageMetaIndexToLayer.insert(std::make_pair(i, layer));
102             net->addLayer(layer);
103         }
104
105         //
106         // Add all edges to network
107         //
108
109         for (const auto &dataMetaData : graphMetaInfo.datasMeta) {
110             ::InferenceEngine::DataPtr data;
111
112             auto parent = stageMetaIndexToLayer[dataMetaData.parentIndex];
113             data = std::make_shared<::InferenceEngine::Data>(dataMetaData.name, dataMetaData.desc);
114             parent->outData.push_back(data);
115             data->getCreatorLayer() = parent;
116
117             for (auto &childMetaIndex : dataMetaData.childrenIndices) {
118                 auto child = stageMetaIndexToLayer[childMetaIndex];
119                 data->getInputTo()[child->name] = child;
120                 child->insData.push_back(data);
121             }
122         }
123
124         //
125         // Specify inputs data
126         //
127
128         for (std::size_t i = 0; i < graphMetaInfo.stagesMeta.size(); i++) {
129             const auto stageMetaData = graphMetaInfo.stagesMeta[i];
130
131             if (stageMetaData.inputsNum != 0 ||
132                 stageMetaData.stageName == "<Receive-Tensor>" ||
133                 stageMetaData.stageName == "<none>") {
134                 continue;
135             }
136
137             auto input = stageMetaIndexToLayer[i];
138             auto inputInfo = std::make_shared<InputInfo>();
139             inputInfo->setInputData(input->outData[0]);
140             net->setInputInfo(inputInfo);
141         }
142
143         return net;
144     }
145 }  // namespace vpu