// Copyright (C) 2018 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <algorithm>
#include <chrono>
#include <cstring>
#include <functional>
#include <map>
#include <numeric>
#include <string>
#include <vector>

#include <CPP/detection_output.hpp>  // todo: find a way to remove this
#include <description_buffer.hpp>
#include "cldnn_infer_request.h"

using namespace InferenceEngine;

namespace CLDNNPlugin {

const std::string CLDNNInferRequest::fp32_suffix = "_fp32";
Blob::Ptr CLDNNInferRequest::createInputBlob(const TensorDesc& desc, uint8_t* mem_ptr) {
    const Layout l = desc.getLayout();
    const Precision p = desc.getPrecision();
    // note: the legacy blob constructors take dims in reverse order relative to TensorDesc
    const SizeVector sz = SizeVector(desc.getDims().rbegin(), desc.getDims().rend());

    // When mem_ptr is provided, wrap the existing engine-allocated memory;
    // otherwise create a blob that owns its own buffer.
    switch (p) {
    case Precision::FP32:
        if (mem_ptr != nullptr)
            return make_shared_blob<float>(p, l, sz, reinterpret_cast<float*>(mem_ptr));
        else
            return make_shared_blob<float, const SizeVector>(p, l, sz);
    case Precision::FP16:
        if (mem_ptr != nullptr)
            return make_shared_blob<uint16_t>(p, l, sz, reinterpret_cast<uint16_t*>(mem_ptr));
        else
            return make_shared_blob<uint16_t, const SizeVector>(p, l, sz);
    case Precision::I16:
        if (mem_ptr != nullptr)
            return make_shared_blob<int16_t>(p, l, sz, reinterpret_cast<int16_t*>(mem_ptr));
        else
            return make_shared_blob<int16_t, const SizeVector>(p, l, sz);
    case Precision::U8:
        if (mem_ptr != nullptr)
            return make_shared_blob<uint8_t>(p, l, sz, reinterpret_cast<uint8_t*>(mem_ptr));
        else
            return make_shared_blob<uint8_t, const SizeVector>(Precision::U8, l, sz);
    default:
        THROW_IE_EXCEPTION << "The plugin does not support input " << p.name() << " precision";
    }
}
Blob::Ptr CLDNNInferRequest::createOutputBlob(const TensorDesc& desc, uint8_t* mem_ptr) {
    const Layout l = desc.getLayout();
    const Precision p = desc.getPrecision();
    const SizeVector sz = SizeVector(desc.getDims().rbegin(), desc.getDims().rend());

    switch (p) {
    case Precision::FP32:
        if (mem_ptr != nullptr)
            return make_shared_blob<float>(p, l, sz, reinterpret_cast<float*>(mem_ptr));
        else
            return make_shared_blob<float, const SizeVector>(p, l, sz);
    case Precision::FP16:
        if (mem_ptr != nullptr)
            return make_shared_blob<uint16_t>(p, l, sz, reinterpret_cast<uint16_t*>(mem_ptr));
        else
            return make_shared_blob<uint16_t, const SizeVector>(p, l, sz);
    default:
        THROW_IE_EXCEPTION << "The plugin does not support output " << p.name() << " precision";
    }
}
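
// Copies an output primitive's memory into the corresponding output blob,
// stripping any data padding cldnn added around the spatial dimensions.
// In the padded (bfyx-style) case the row stride is
// l_padd.spatial[0] + size.spatial[0] + u_padd.spatial[0]; e.g. a 4x4
// feature map with 1-pixel padding on every side occupies 6 values per row
// plus one fully padded row above and below the data rows.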
void CLDNNInferRequest::copyOutputData(const cldnn::memory& outputMemory,
                                       Blob::Ptr bptr,
                                       buf_info* bi) {
    size_t n = (bi == nullptr) ? bptr->size() : bi->buf_size;
    size_t offset = (bi == nullptr) ? 0 : bi->buf_offset;

    auto layout = outputMemory.get_layout();
    auto size = layout.size;
    auto l_padd = layout.data_padding.lower_size();
    auto u_padd = layout.data_padding.upper_size();

    auto h_padding = u_padd.spatial[0] + l_padd.spatial[0];
    // Element counts of the padding rows before (lower) and after (upper) the
    // data rows of one feature map; the padded row stride is
    // (h_padding + size.spatial[0]).
    auto v_padding_l = (h_padding + size.spatial[0]) * l_padd.spatial[1];
    auto v_padding_u = (h_padding + size.spatial[0]) * u_padd.spatial[1];

    switch (bptr->precision()) {
    case Precision::FP32: {
        TBlob<float>::Ptr out_f = std::dynamic_pointer_cast<TBlob<float>>(bptr);
        if (out_f == nullptr) {
            THROW_IE_EXCEPTION << "Invalid output blob";
        }
        auto resPtr = outputMemory.pointer<float>();
        float *resVec = out_f->data() + offset;

        if (h_padding || v_padding_l || v_padding_u) {
            size_t i = 0;
            for (size_t b = 0; b < size.batch[0]; b++) {
                for (size_t f = 0; f < size.feature[0]; f++) {
                    i += v_padding_l;
                    for (size_t y = 0; y < size.spatial[1]; y++) {
                        i += l_padd.spatial[0];
                        for (size_t x = 0; x < size.spatial[0]; x++, i++) {
                            *resVec++ = resPtr[i];
                        }
                        i += u_padd.spatial[0];
                    }
                    i += v_padding_u;
                }
            }
        } else {
            for (size_t i = 0; i < n; i++) {
                resVec[i] = resPtr[i];
            }
        }
        break;
    }
    case Precision::FP16: {
        TBlob<uint16_t>::Ptr out_f = std::dynamic_pointer_cast<TBlob<uint16_t>>(bptr);
        if (out_f == nullptr) {
            THROW_IE_EXCEPTION << "Invalid output blob";
        }
        auto resPtr = outputMemory.pointer<uint16_t>();
        uint16_t *resVec = out_f->data() + offset;

        if (h_padding || v_padding_l || v_padding_u) {
            size_t i = 0;
            for (size_t b = 0; b < size.batch[0]; b++) {
                for (size_t f = 0; f < size.feature[0]; f++) {
                    i += v_padding_l;
                    for (size_t y = 0; y < size.spatial[1]; y++) {
                        i += l_padd.spatial[0];
                        for (size_t x = 0; x < size.spatial[0]; x++, i++) {
                            *resVec++ = resPtr[i];
                        }
                        i += u_padd.spatial[0];
                    }
                    i += v_padding_u;
                }
            }
        } else {
            for (size_t i = 0; i < n; i++) {
                resVec[i] = resPtr[i];
            }
        }
        break;
    }
    default:
        THROW_IE_EXCEPTION << "The plugin does not support output " << bptr->precision() << " precision";
    }
}
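
// Binds a user buffer to a network input. cldnn::memory::attach() only wraps
// the caller's pointer (no allocation); judging by the comments in
// PrepareInput(), set_input_data() then copies from that wrapper because the
// memory was not allocated by the engine.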
void CLDNNInferRequest::copyInputData(std::shared_ptr<cldnn::network> network,
                                      const cldnn::primitive_id &inputName,
                                      const cldnn::layout& inputLayout,
                                      const Blob &inputBlob, buf_info* bi) {
    size_t n = (bi == nullptr) ? inputBlob.size() : bi->buf_size;
    size_t offset = (bi == nullptr) ? 0 : bi->buf_offset;

    switch (inputBlob.precision()) {
    case Precision::FP32: {
        float* blob_ptr = const_cast<float*>(inputBlob.cbuffer().as<const float*>()) + offset;
        network->set_input_data(inputName, cldnn::memory::attach(inputLayout, blob_ptr, n));
        break;
    }
    case Precision::FP16: {
        uint16_t* blob_ptr = const_cast<uint16_t*>(inputBlob.cbuffer().as<const uint16_t*>()) + offset;
        network->set_input_data(inputName, cldnn::memory::attach(inputLayout, blob_ptr, n));
        break;
    }
    case Precision::U8: {
        uint8_t* blob_ptr = const_cast<uint8_t*>(inputBlob.cbuffer().as<const uint8_t*>()) + offset;
        network->set_input_data(inputName, cldnn::memory::attach(inputLayout, blob_ptr, n));
        break;
    }
    default:
        THROW_IE_EXCEPTION << "The plugin does not support input " << inputBlob.precision() << " precision";
    }
}
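
// Allocates engine memory for every network input and wraps it in the blob
// returned to the application, so input data lands directly in memory the
// engine can bind without an extra copy (see PrepareInput).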
void CLDNNInferRequest::AllocateInputs() {
    // allocate inputs
    for (auto &input : m_env.inputLayouts) {
        std::string name = input.first;
        cldnn::layout layout = input.second;

        InputInfo::Ptr ni = _networkInputs.at(input.first);
        const TensorDesc& desc = ni->getTensorDesc();

        cldnn::memory inputMem = cldnn::memory::allocate(*(m_env.engine), layout);
        cldnn::pointer<uint8_t> mem_ptr = inputMem.pointer<uint8_t>();

        inputsMemory.insert({ name, inputMem });
        _inputs[name] = createInputBlob(desc, mem_ptr.data());

        if (desc.getPrecision() == Precision::I16) {
            // clDNN does not support I16 inputs, so allocate an extra fp32
            // buffer that PrepareInput() converts into.
            cldnn::layout layout_fp32 = layout;
            layout_fp32.data_type = cldnn::data_types::f32;
            cldnn::memory inputMem_fp32 = cldnn::memory::allocate(*(m_env.engine), layout_fp32);
            inputsMemory.insert({ input.first + fp32_suffix, inputMem_fp32 });
        }
    }
}
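
// Dynamic-batch variant: input blobs are sized for the maximum batch and
// allocated on the host, since PrepareInputDyn() later copies slices of them
// into the individual batch networks.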
void CLDNNInferRequest::AllocateInputsDyn() {
    // allocate inputs
    for (auto &input : m_env.inputLayouts) {
        InputInfo::Ptr ni = _networkInputs.at(input.first);
        TensorDesc desc = ni->getTensorDesc();
        SizeVector& dims = desc.getDims();

        if (!dims.empty()) {
            *dims.begin() = static_cast<size_t>(m_env.m_max_batch);
        } else {
            THROW_IE_EXCEPTION << "Empty dimensions for input blob " << input.first;
        }

        Blob::Ptr inputBlob = createInputBlob(desc);
        if (desc.getPrecision() == Precision::I16) {
            auto fp32inputBlob = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(Precision::FP32,
                                                                                                             desc.getLayout(),
                                                                                                             desc.getDims());
            fp32inputBlob->allocate();
            _inputs[input.first + fp32_suffix] = fp32inputBlob;
        }
        inputBlob->allocate();
        _inputs[input.first] = inputBlob;
    }
}
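
// Resolves each IR output name to the cldnn primitive that actually produces
// it (walking back through primitives that were optimized out) and wraps the
// primitive's output memory in a user-visible blob.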
void CLDNNInferRequest::AllocateOutputs() {
    auto networkOutputsIDs = m_env.network->get_output_ids();
    auto allPrimitiveIds = m_env.network->get_all_primitives();

    // allocate outputs
    for (auto& no : _networkOutputs) {
        // Find correct output ID. Start with name stored in IR.
        std::string outputID = m_env.primitiveIDs.at(no.first);
        while (std::find(networkOutputsIDs.begin(), networkOutputsIDs.end(), outputID) == networkOutputsIDs.end()) {
            // If current ID isn't found in cldnn network outputs, get previous primitive id and try again.
            auto prim = allPrimitiveIds.find(outputID);
            if (prim == allPrimitiveIds.end()) {
                THROW_IE_EXCEPTION << "Unknown primitive id " << outputID;
            }

            if (m_env.prevPrimitiveIDs.at(outputID).size() != 1 || prim->second != "_optimized_") {
                THROW_IE_EXCEPTION << "Unable to find parent for output primitive " << outputID;
            }
            outputID = m_env.prevPrimitiveIDs.at(outputID)[0];
        }

        cldnn::memory output_mem = m_env.network->get_output_memory(outputID);
        cldnn::pointer<uint8_t> output_mem_ptr = output_mem.pointer<uint8_t>();
        if (output_mem_ptr.data() == nullptr) {
            THROW_IE_EXCEPTION << "Empty output memory for primitive " << outputID;
        }

        DataPtr oi = no.second;
        const TensorDesc& desc = oi->getTensorDesc();

        _outputs[no.first] = createOutputBlob(desc, output_mem_ptr.data());
        outputsMap[no.first] = outputID;
    }
}
void CLDNNInferRequest::AllocateOutputsDyn() {
    // allocate outputs
    for (auto& no : _networkOutputs) {
        DataPtr oi = no.second;
        TensorDesc desc = oi->getTensorDesc();
        SizeVector& dims = desc.getDims();

        if (!dims.empty()) {
            *dims.begin() = static_cast<size_t>(m_env.m_max_batch);
        } else {
            THROW_IE_EXCEPTION << "Empty dimensions for output blob " << no.first;
        }

        Blob::Ptr outputBlob = createOutputBlob(desc);
        outputBlob->allocate();
        _outputs[no.first] = outputBlob;
    }
}
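
// Prepares per-sub-network buffer metadata for a new dynamic batch size.
// The requested batch is decomposed by its binary representation into
// power-of-two sub-networks: e.g. new_batch = 5 (0b101) selects the batch-1
// and batch-4 networks, whose chunks are laid out back to back inside the
// max-batch-sized blobs via the offsets computed below.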
void CLDNNInferRequest::SetBatch(int new_batch) {
    if (m_env.m_max_batch < 0)
        THROW_IE_EXCEPTION << "Dynamic batch is not enabled.";

    if (new_batch < 1 || new_batch > m_env.m_max_batch) {
        THROW_IE_EXCEPTION << "Invalid dynamic batch size " << new_batch <<
            " for this request.";
    }

    if (new_batch == m_curBatch)
        return;

    batchInputs.clear();
    batchOutputs.clear();

    // tune expected inputs
    for (auto &input : m_env.inputLayouts) {
        cldnn::tensor dims = input.second.size;
        const SizeVector sz = { size_t(dims.spatial[0]), size_t(dims.spatial[1]), size_t(dims.feature[0]), 1 };
        size_t single_batch = std::accumulate(std::begin(sz), std::end(sz), (size_t)1, std::multiplies<size_t>());
        std::vector<buf_info> in_buf;

        size_t offset = 0;
        size_t bsz = single_batch;

        // calculate metadata for input buffers
        for (unsigned nb = 0; nb < m_env.m_bv_sz; nb++) {
            unsigned int mask = 1 << nb;

            buf_info ib = { offset, bsz };
            in_buf.push_back(ib);

            // sub-network nb holds 2^nb batch elements; advance the offset
            // only for the chunks selected by new_batch
            if (new_batch & mask)
                offset += bsz;
            bsz <<= 1;
        }

        batchInputs[input.first] = in_buf;
    }

    // tune expected outputs
    for (auto& no : _networkOutputs) {
        auto res_output = m_env.outputDims.find(no.first);

        InferenceEngine::SizeVector sz;
        if (res_output != m_env.outputDims.end())
            sz = res_output->second;
        else
            sz = m_env.outputDims.at(m_env.primitiveIDs.at(no.first));

        // force batch to 1 to get the size of a single batch element
        sz.back() = 1;
        size_t single_batch = std::accumulate(std::begin(sz), std::end(sz), (size_t)1, std::multiplies<size_t>());
        std::vector<buf_info> out_buf;

        size_t offset = 0;
        size_t bsz = single_batch;

        // calculate metadata for output buffers
        for (unsigned nb = 0; nb < m_env.m_bv_sz; nb++) {
            unsigned int mask = 1 << nb;

            buf_info ob = { offset, bsz };
            out_buf.push_back(ob);

            if (new_batch & mask)
                offset += bsz;
            bsz <<= 1;
        }

        batchOutputs[no.first] = out_buf;
    }

    m_curBatch = new_batch;
}
CLDNNInferRequest::CLDNNInferRequest(InferenceEnv env, bool useProfiling,
                                     InputsDataMap networkInputs, OutputsDataMap networkOutputs)
        : InferRequestInternal(networkInputs, networkOutputs),
          m_env(env),
          m_useProfiling(useProfiling) {
    if (m_env.m_max_batch > 1) {
        AllocateInputsDyn();
        AllocateOutputsDyn();
    } else {
        AllocateInputs();
        AllocateOutputs();
    }

    // Fill implementations map
    if (m_useProfiling) {
        auto extractImplementationFromInfo = [](const std::string& info) -> std::string {
            std::string def_implementation = "undef";
            std::string impl_section = "implementation :";
            std::string::size_type pos = info.find(impl_section);
            if (pos == std::string::npos) {
                return def_implementation;
            }

            std::string::size_type end_pos = info.find(',', pos);
            if (end_pos == std::string::npos) {
                return def_implementation;
            }

            std::string::size_type length = end_pos - pos - impl_section.size();

            auto trim = [](const std::string& str) {
                size_t first = str.find_first_not_of(' ');
                if (std::string::npos == first) {
                    return str;
                }
                size_t last = str.find_last_not_of(' ');
                return str.substr(first, (last - first + 1));
            };
            std::string tmp = trim(info.substr(pos + impl_section.size(), length));

            return tmp.length() > 1 ? tmp : def_implementation;
        };

        // Parse primitive info and extract implementation name.
        for (auto& id : m_env.profilingIDs) {
            std::string prim_info = "";
            try {
                prim_info = m_env.network->get_primitive_info(id);
            } catch (std::exception& e) { }

            implementationsMap.insert({id, extractImplementationFromInfo(prim_info)});
        }
    }
}
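
// Executes the network, copies each output into the user's blob unless the
// blob already aliases the output memory, and folds cldnn profiling events
// into the perfMap performance counters.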
void CLDNNInferRequest::execAndParse() {
    auto networkOutputs = m_env.network->execute();

    // Collect outputs as requested by the model
    for (auto& no : _networkOutputs) {
        std::string outputID = outputsMap[no.first];
        auto outputMemory = networkOutputs.at(outputID).get_memory();
        Blob::Ptr bptr = _outputs[no.first];

        auto out_ptr = outputMemory.pointer<uint8_t>();
        auto blob_ptr = bptr->buffer().as<uint8_t*>();

        // With the Async API no copy of output blobs is needed unless SetBlob was called,
        // but with the old API we have to copy the data into the memory provided by the user.
        if (blob_ptr != &out_ptr[0]) {
            copyOutputData(outputMemory, bptr);
        }
    }

    // finally collect profiling info
    if (m_useProfiling) {
        std::map<cldnn::primitive_id, cldnn::event> executedPrimitives = m_env.network->get_executed_primitives();
        auto allPrimitives = m_env.network->get_all_primitives();

        // Get profiling info for all layers
        for (auto &profiledID : m_env.profilingIDs) {
            std::string impl = implementationsMap.at(profiledID);
            impl.copy(m_env.perfMap[profiledID].exec_type, impl.length());

            // Change status if layer wasn't executed by cldnn engine
            if (executedPrimitives.find(profiledID) == executedPrimitives.end()) {
                if (allPrimitives.find(profiledID) != allPrimitives.end() &&
                    allPrimitives.at(profiledID) == "_optimized_") {
                    // Layer was marked as optimized by cldnn
                    m_env.perfMap[profiledID].status = InferenceEngineProfileInfo::OPTIMIZED_OUT;
                } else {
                    // Layer wasn't run for some reason
                    m_env.perfMap[profiledID].status = InferenceEngineProfileInfo::NOT_RUN;
                }
                m_env.perfMap[profiledID].cpu_uSec = m_env.perfMap[profiledID].realTime_uSec = 0;
                continue;
            }

            auto event = executedPrimitives.at(profiledID);
            executedPrimitives.erase(profiledID);

            cldnn::instrumentation::profiling_info cldnnInfo{profiledID, event.get_profiling_info()};

            // Collect timings
            for (auto &interval : cldnnInfo.intervals) {
                using duration_t = std::chrono::duration<long long, std::chrono::microseconds::period>;
                auto count = std::chrono::duration_cast<duration_t>(interval.value->value()).count();

                if (interval.name == "submission") {
                    m_env.perfMap[profiledID].cpu_uSec = count;
                } else if (interval.name == "executing") {
                    m_env.perfMap[profiledID].realTime_uSec = count;
                } else if (interval.name == "duration") {  // "duration" is used for CPU layers
                    m_env.perfMap[profiledID].cpu_uSec = count;
                    static const std::string cpuExecType("CPU");
                    memset(m_env.perfMap[profiledID].exec_type, 0, sizeof(m_env.perfMap[profiledID].exec_type));
                    cpuExecType.copy(m_env.perfMap[profiledID].exec_type,
                                     cpuExecType.length());  // Override execType as CPU
                }
            }
        }
    }
}
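
// Dynamic-batch execution: first enqueue every selected batch network so the
// driver can process them back to back, then gather the per-chunk results.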
void CLDNNInferRequest::execAndParseDyn() {
    std::vector<std::map<cldnn::primitive_id, cldnn::network_output>> networkOutputs(m_env.m_bv_sz);

    // set up execution and put all graphs into the driver queue
    for (unsigned nb = 0; nb < m_env.m_bv_sz; nb++) {
        unsigned int mask = 1 << nb;

        if (m_curBatch & mask) {
            networkOutputs[nb] = m_env.batchNetworks[nb]->execute();
        }
    }

    // now try to get execution results
    for (unsigned nb = 0; nb < m_env.m_bv_sz; nb++) {
        unsigned int mask = 1 << nb;

        if (m_curBatch & mask) {
            for (auto& no : _networkOutputs) {
                std::string outputID = no.first;
                while ((m_env.primitiveIDs.find(outputID) != m_env.primitiveIDs.end()) &&
                       (m_env.primitiveIDs.at(outputID) != outputID)) {
                    outputID = m_env.primitiveIDs.at(outputID);
                }

                auto outputMemory = networkOutputs[nb].at(outputID).get_memory();
                Blob::Ptr bptr = _outputs[no.first];

                copyOutputData(outputMemory, bptr, &batchOutputs[no.first][nb]);
            }
        }
    }
}
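
// Synchronous inference entry point: pre-process inputs, bind them, then run
// either the single network or the dynamic-batch networks.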
void CLDNNInferRequest::InferImpl() {
    IE_PROFILING_AUTO_SCOPE(CLDNN_INFER)

    // execute input pre-processing.
    execDataPreprocessing(_inputs);

    for (auto &item : _inputs) {
        if (m_env.m_max_batch > 1) {
            PrepareInputDyn(item.first, *item.second);
        } else {
            PrepareInput(item.first, *item.second);
        }
    }

    // The actual inference
    if (m_env.m_max_batch > 1) {
        execAndParseDyn();
    } else {
        execAndParse();
    }
}
void CLDNNInferRequest::GetPerformanceCounts(
        std::map<std::string, InferenceEngineProfileInfo> &perfMap) const {
    if (!m_useProfiling) {
        THROW_IE_EXCEPTION << "Performance counters were not enabled";
    }
    perfMap = m_env.perfMap;
}
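
// Binds one input blob for execution. Three paths:
//  - I16: converted into the pre-allocated fp32 staging buffer (cldnn has no I16 support);
//  - the blob still aliases the engine-allocated input memory: bind it directly, no copy;
//  - the user supplied their own buffer (e.g. via SetBlob): attach to it and copy.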
void CLDNNInferRequest::PrepareInput(const cldnn::primitive_id &inputName, const Blob &inputBlob) {
    if (m_env.inputLayouts.find(inputName) == m_env.inputLayouts.end()) {
        THROW_IE_EXCEPTION << "Input name mismatch.";
    }
    auto inputLayout = m_env.inputLayouts.at(inputName);
    auto is_same_buffer = [](const Blob& blob, const cldnn::memory& memory) -> bool {
        const std::string str_not_allocated("Input data was not allocated.");
        cldnn::pointer<const uint8_t> ptr = memory.pointer<const uint8_t>();
        const uint8_t* blob_ptr = blob.cbuffer().as<const uint8_t*>();
        const uint8_t* mem_ptr = ptr.data();
        if (blob_ptr == nullptr || mem_ptr == nullptr) {
            THROW_IE_EXCEPTION << str_not_allocated;
        }
        return (blob_ptr == mem_ptr) && (blob.byteSize() == memory.size());
    };

    const cldnn::memory& memory = inputsMemory.at(inputName);
    if (inputBlob.precision() == Precision::I16) {
        // clDNN doesn't support I16 input precision, so we always have to convert input data to fp32
        const cldnn::memory& fp32_mem = inputsMemory.at(inputName + fp32_suffix);
        cldnn::pointer<float> ptr = fp32_mem.pointer<float>();
        InferenceEngine::copyToFloat<int16_t>(ptr.data(), &inputBlob);
        m_env.network->set_input_data(inputName, fp32_mem);
    } else if (is_same_buffer(inputBlob, memory)) {
        // If the input memory was allocated by the cldnn engine and not overwritten
        // by the user, set_input_data won't copy the input data.
        switch (inputBlob.precision()) {
        case Precision::FP32:
        case Precision::FP16:
        case Precision::U8: {
            m_env.network->set_input_data(inputName, memory);
            break;
        }
        default:
            THROW_IE_EXCEPTION << "Unsupported input precision " << inputBlob.precision();
        }
    } else {
        // Otherwise, attach to the user's memory and copy the data.
        copyInputData(m_env.network, inputName, inputLayout, inputBlob);
    }
}
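
// Dynamic-batch variant of PrepareInput: sub-network nb processes 2^nb
// images, so its layout gets batch[0] = 2^nb (== mask) and its slice of the
// user blob is described by batchInputs[inputName][nb].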
void CLDNNInferRequest::PrepareInputDyn(const cldnn::primitive_id &inputName, const Blob &inputBlob) {
    // copy the input data into every batch network selected by the current batch
    for (unsigned nb = 0; nb < m_env.m_bv_sz; nb++) {
        unsigned int mask = 1 << nb;

        if (m_curBatch & mask) {
            auto inputLayout = m_env.inputLayouts.at(inputName);
            inputLayout.size.batch[0] = mask;
            copyInputData(m_env.batchNetworks[nb], inputName, inputLayout, inputBlob, &batchInputs[inputName][nb]);
        }
    }
}
}  // namespace CLDNNPlugin