//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <cstring>

#include "ie_executable.hpp"
#include "ie_tensor.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/opsets/opset.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
#include "pass/opset1_upgrade.hpp"

using namespace std;
using namespace ngraph;

NGRAPH_SUPPRESS_DEPRECATED_START
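
// fill_blob copies raw tensor data into a freshly allocated InferenceEngine blob,
// picking the IE layout from the tensor rank and the IE precision from the nGraph
// element type.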
namespace
{
    InferenceEngine::Blob::Ptr fill_blob(InferenceEngine::SizeVector shape,
                                         const void* data,
                                         size_t data_size,
                                         const element::Type& elem_type)
    {
        InferenceEngine::Layout layout;
        switch (shape.size())
        {
        case 0: layout = InferenceEngine::Layout::SCALAR; break;
        case 1: layout = InferenceEngine::Layout::C; break;
        case 2: layout = InferenceEngine::Layout::NC; break;
        case 3: layout = InferenceEngine::Layout::CHW; break;
        case 4: layout = InferenceEngine::Layout::NCHW; break;
        case 5: layout = InferenceEngine::Layout::NCDHW; break;
        case 6: layout = InferenceEngine::Layout::GOIDHW; break;
        default: THROW_IE_EXCEPTION << "Can't convert dims " << shape.size() << " to Layout!";
        }

        InferenceEngine::MemoryBlob::Ptr blob;
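
// MAKE_IE_TBLOB builds a typed TBlob for the requested precision, shape and layout;
// it is used only in the switch below and #undef'd immediately afterwards.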
#define MAKE_IE_TBLOB(type_, precision_, shape_, layout_)                                          \
    make_shared<InferenceEngine::TBlob<type_>>(                                                    \
        InferenceEngine::TensorDesc{InferenceEngine::Precision::precision_, shape_, layout_})

        switch (elem_type)
        {
        case element::Type_t::f32: blob = MAKE_IE_TBLOB(float, FP32, shape, layout); break;
        case element::Type_t::i16: blob = MAKE_IE_TBLOB(int16_t, I16, shape, layout); break;
        case element::Type_t::u8: blob = MAKE_IE_TBLOB(uint8_t, U8, shape, layout); break;
        case element::Type_t::i8: blob = MAKE_IE_TBLOB(int8_t, I8, shape, layout); break;
        case element::Type_t::u16: blob = MAKE_IE_TBLOB(uint16_t, U16, shape, layout); break;
        case element::Type_t::i32: blob = MAKE_IE_TBLOB(int32_t, I32, shape, layout); break;
        case element::Type_t::u32: blob = MAKE_IE_TBLOB(uint32_t, U32, shape, layout); break;
        case element::Type_t::i64: blob = MAKE_IE_TBLOB(int64_t, I64, shape, layout); break;
        case element::Type_t::u64: blob = MAKE_IE_TBLOB(uint64_t, U64, shape, layout); break;
        case element::Type_t::boolean: blob = MAKE_IE_TBLOB(uint8_t, BOOL, shape, layout); break;
        default: THROW_IE_EXCEPTION << "Can't convert type " << elem_type << " to IE Precision!";
        }
#undef MAKE_IE_TBLOB

        blob->allocate();
        uint8_t* blob_ptr = blob->rwmap().as<uint8_t*>();
        memcpy(blob_ptr, data, data_size * elem_type.size());
        return blob;
    }
}
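
// get_ie_ops collects the type info of every op in nGraph opset1, opset2 and opset3;
// these are the ops the IE backend accepts.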
namespace
{
    std::set<NodeTypeInfo> get_ie_ops()
    {
        std::set<NodeTypeInfo> ie_ops = get_opset1().get_type_info_set();
        auto& opset2 = get_opset2().get_type_info_set();
        ie_ops.insert(opset2.begin(), opset2.end());
        auto& opset3 = get_opset3().get_type_info_set();
        ie_ops.insert(opset3.begin(), opset3.end());
        return ie_ops;
    }
}
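
// The constructor runs the Opset1Upgrade pass over the function, rejects any node that
// is not part of opset1/2/3 (GetOutputElement is tolerated), and then wraps the function
// in an InferenceEngine::CNNNetwork.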
runtime::ie::IE_Executable::IE_Executable(shared_ptr<Function> func, string device)
    : m_device{device}
{
    static std::set<NodeTypeInfo> ie_ops = get_ie_ops();

    pass::Manager passes;
    passes.register_pass<pass::Opset1Upgrade>();
    passes.run_passes(func);

    for (const auto& node : func->get_ops())
    {
        if (ie_ops.find(node->get_type_info()) == ie_ops.end())
        {
            if (node->get_type_info() == op::GetOutputElement::type_info)
            {
                // IE currently can handle the GetOutputElement op; skip the check for it.
                continue;
            }
            else
            {
                cout << "UNSUPPORTED OP DETECTED: " << node->get_type_info().name << endl;
                THROW_IE_EXCEPTION << "Detected op not belonging to opset1!";
            }
        }
    }

#ifdef NGRAPH_DEBUG_ENABLE
    cout << "Nodes in test: ";
    for (const auto& node : func->get_ops())
    {
        cout << node << endl;
    }
#endif

    m_network = InferenceEngine::CNNNetwork(func);
    set_parameters_and_results(*func);
}
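
// call() loads the network onto the requested device, wraps every input tensor in an
// IE blob, runs a synchronous Infer(), and copies the first output blob back into the
// caller's output tensor.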
bool runtime::ie::IE_Executable::call(const vector<shared_ptr<runtime::Tensor>>& outputs,
                                      const vector<shared_ptr<runtime::Tensor>>& inputs)
{
    InferenceEngine::Core ie;

    // Loading model to the plugin (BACKEND_NAME)
    InferenceEngine::ExecutableNetwork exe_network = ie.LoadNetwork(m_network, m_device);
    // Create infer request
    InferenceEngine::InferRequest infer_request = exe_network.CreateInferRequest();
    // Prepare input and output blobs
    InferenceEngine::InputsDataMap input_info = m_network.getInputsInfo();

    if (input_info.size() != inputs.size())
    {
        THROW_IE_EXCEPTION << "Function inputs number differs from number of given inputs";
    }

    size_t i = 0;
    for (const auto& it : input_info)
    {
        shared_ptr<runtime::ie::IETensor> tv =
            static_pointer_cast<runtime::ie::IETensor>(inputs[i]);
        infer_request.SetBlob(it.first,
                              fill_blob(it.second->getTensorDesc().getDims(),
                                        tv->get_data_ptr(), // raw buffer accessor assumed from ie_tensor.hpp
                                        tv->get_element_count(),
                                        tv->get_element_type()));
        i++;
    }

    // Prepare output blobs
    string output_name = m_network.getOutputsInfo().begin()->first;

    infer_request.Infer();
    InferenceEngine::Blob::Ptr output = infer_request.GetBlob(output_name);

    InferenceEngine::MemoryBlob::Ptr moutput =
        InferenceEngine::as<InferenceEngine::MemoryBlob>(output);
    if (!moutput)
    {
        THROW_IE_EXCEPTION << "Cannot get output MemoryBlob in call_with_validate()";
    }

    auto lm = moutput->rmap();
    uint8_t* output_ptr = lm.as<uint8_t*>();
    outputs[0]->write(output_ptr, moutput->byteSize());
    return true;
}

NGRAPH_SUPPRESS_DEPRECATED_END