Move downgrade passes to pass folder (#1675)
platform/upstream/dldt.git — ngraph/test/runtime/ie/ie_executable.cpp
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <cstring>

#include "ie_executable.hpp"
#include "ie_tensor.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/opsets/opset.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
#include "pass/opset1_upgrade.hpp"

using namespace std;
using namespace ngraph;

NGRAPH_SUPPRESS_DEPRECATED_START

namespace
{
    // Wrap a raw host buffer in an Inference Engine blob, deriving the layout from the
    // tensor rank and the precision from the nGraph element type.
    InferenceEngine::Blob::Ptr fill_blob(InferenceEngine::SizeVector shape,
                                         const void* data,
                                         size_t data_size,
                                         const element::Type& elem_type)
    {
        InferenceEngine::Layout layout;
        switch (shape.size())
        {
        case 0: layout = InferenceEngine::Layout::SCALAR; break;
        case 1: layout = InferenceEngine::Layout::C; break;
        case 2: layout = InferenceEngine::Layout::NC; break;
        case 3: layout = InferenceEngine::Layout::CHW; break;
        case 4: layout = InferenceEngine::Layout::NCHW; break;
        case 5: layout = InferenceEngine::Layout::NCDHW; break;
        case 6: layout = InferenceEngine::Layout::GOIDHW; break;
        default: THROW_IE_EXCEPTION << "Can't convert dims " << shape.size() << " to Layout!";
        }

        InferenceEngine::MemoryBlob::Ptr blob;

// Build a typed TBlob with the requested precision, shape and layout.
#define MAKE_IE_TBLOB(type_, precision_, shape_, layout_)                                          \
    make_shared<InferenceEngine::TBlob<type_>>(                                                    \
        InferenceEngine::TensorDesc{InferenceEngine::Precision::precision_, shape_, layout_})

        switch (elem_type)
        {
        case element::Type_t::f32: blob = MAKE_IE_TBLOB(float, FP32, shape, layout); break;
        case element::Type_t::i16: blob = MAKE_IE_TBLOB(int16_t, I16, shape, layout); break;
        case element::Type_t::u8: blob = MAKE_IE_TBLOB(uint8_t, U8, shape, layout); break;
        case element::Type_t::i8: blob = MAKE_IE_TBLOB(int8_t, I8, shape, layout); break;
        case element::Type_t::u16: blob = MAKE_IE_TBLOB(uint16_t, U16, shape, layout); break;
        case element::Type_t::i32: blob = MAKE_IE_TBLOB(int32_t, I32, shape, layout); break;
        case element::Type_t::u32: blob = MAKE_IE_TBLOB(uint32_t, U32, shape, layout); break;
        case element::Type_t::i64: blob = MAKE_IE_TBLOB(int64_t, I64, shape, layout); break;
        case element::Type_t::u64: blob = MAKE_IE_TBLOB(uint64_t, U64, shape, layout); break;
        case element::Type_t::boolean: blob = MAKE_IE_TBLOB(uint8_t, BOOL, shape, layout); break;
        default: THROW_IE_EXCEPTION << "Can't convert type " << elem_type << " to IE Precision!";
        }
#undef MAKE_IE_TBLOB

        blob->allocate();
        uint8_t* blob_ptr = blob->rwmap().as<uint8_t*>();
        memcpy(blob_ptr, data, data_size * elem_type.size());
        return blob;
    }
}

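// Illustrative usage sketch (comments only, not part of the original code): wrapping a
// host buffer for a hypothetical 1x3x224x224 FP32 input. The shape and variable names
// below are assumptions made for demonstration.
//
//     std::vector<float> host_data(1 * 3 * 224 * 224, 0.0f);
//     InferenceEngine::Blob::Ptr blob =
//         fill_blob({1, 3, 224, 224}, host_data.data(), host_data.size(), element::f32);
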
namespace
{
    // Collect the type info of every op the IE backend accepts: the union of opset1,
    // opset2 and opset3.
    std::set<NodeTypeInfo> get_ie_ops()
    {
        std::set<NodeTypeInfo> ie_ops = get_opset1().get_type_info_set();
        auto& opset2 = get_opset2().get_type_info_set();
        ie_ops.insert(opset2.begin(), opset2.end());
        auto& opset3 = get_opset3().get_type_info_set();
        ie_ops.insert(opset3.begin(), opset3.end());
        return ie_ops;
    }
}

runtime::ie::IE_Executable::IE_Executable(shared_ptr<Function> func, string device)
    : m_device{device}
{
    static std::set<NodeTypeInfo> ie_ops = get_ie_ops();
    pass::Manager passes;
    passes.register_pass<pass::Opset1Upgrade>();
    passes.run_passes(func);

    for (const auto& node : func->get_ops())
    {
        if (ie_ops.find(node->get_type_info()) == ie_ops.end())
        {
            if (node->get_type_info() == op::GetOutputElement::type_info)
            {
                // IE can currently handle the GetOutputElement op, so let it through.
                continue;
            }
            else
            {
                cout << "UNSUPPORTED OP DETECTED: " << node->get_type_info().name << endl;
                THROW_IE_EXCEPTION << "Detected op not belonging to opset1, opset2 or opset3!";
            }
        }
    }

#ifdef NGRAPH_DEBUG_ENABLE
    cout << "Nodes in test: ";
    for (const auto& node : func->get_ops())
    {
        cout << node << endl;
    }
    cout << endl;
#endif

    m_network = InferenceEngine::CNNNetwork(func);
    set_parameters_and_results(*func);
}

bool runtime::ie::IE_Executable::call(const vector<shared_ptr<runtime::Tensor>>& outputs,
                                      const vector<shared_ptr<runtime::Tensor>>& inputs)
{
    InferenceEngine::Core ie;

    // Load the model to the selected device plugin (m_device)
    InferenceEngine::ExecutableNetwork exe_network = ie.LoadNetwork(m_network, m_device);
    // Create infer request
    InferenceEngine::InferRequest infer_request = exe_network.CreateInferRequest();
    // Prepare input and output blobs
    InferenceEngine::InputsDataMap input_info = m_network.getInputsInfo();

    if (input_info.size() != inputs.size())
    {
        THROW_IE_EXCEPTION << "Function input count does not match the number of given inputs";
    }

    size_t i = 0;
    for (const auto& it : input_info)
    {
        shared_ptr<runtime::ie::IETensor> tv =
            static_pointer_cast<runtime::ie::IETensor>(inputs[i]);
        infer_request.SetBlob(it.first,
                              fill_blob(it.second->getTensorDesc().getDims(),
                                        tv->get_data_ptr(),
                                        tv->get_element_count(),
                                        tv->get_element_type()));
        i++;
    }

    // Prepare output blobs; only the first network output is read back
    string output_name = m_network.getOutputsInfo().begin()->first;

    infer_request.Infer();
    InferenceEngine::Blob::Ptr output = infer_request.GetBlob(output_name);

    InferenceEngine::MemoryBlob::Ptr moutput =
        InferenceEngine::as<InferenceEngine::MemoryBlob>(output);
    if (!moutput)
    {
        THROW_IE_EXCEPTION << "Cannot get output MemoryBlob in IE_Executable::call()";
    }

    auto lm = moutput->rmap();
    uint8_t* output_ptr = lm.as<uint8_t*>();
    outputs[0]->write(output_ptr, moutput->byteSize());
    return true;
}
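
// Illustrative sketch (comments only, not part of the original code): how this
// executable is typically driven through the nGraph test backend API. The function,
// shapes and values below are assumptions made for demonstration.
//
//     Shape shape{2, 2};
//     auto A = make_shared<op::Parameter>(element::f32, shape);
//     auto B = make_shared<op::Parameter>(element::f32, shape);
//     auto f = make_shared<Function>(make_shared<op::v1::Add>(A, B), ParameterVector{A, B});
//
//     auto backend = runtime::Backend::create("IE");
//     auto a = backend->create_tensor(element::f32, shape);
//     auto b = backend->create_tensor(element::f32, shape);
//     auto result = backend->create_tensor(element::f32, shape);
//
//     auto exec = backend->compile(f);  // constructs an IE_Executable
//     exec->call({result}, {a, b});     // runs IE_Executable::call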