1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
9 #include <unordered_set>
12 #include <mkldnn_types.h>
13 #include "mkldnn_memory.h"
14 #include "mkldnn_node.h"
15 #include "mkldnn_extension_utils.h"
17 using namespace InferenceEngine;
18 using namespace mkldnn;
20 namespace MKLDNNPlugin {
// Construct an empty memory object bound to the given mkldnn engine; the
// engine is stored and later used by Create() to allocate primitives.
22 MKLDNNMemory::MKLDNNMemory(const engine& eng) : eng(eng) {}
// Total buffer size in bytes: product of the *padded* dimensions from the
// blocking layout descriptor times the element size. Using padding_dims
// (rather than the logical dims) accounts for channel rounding in blocked
// layouts such as nChw8c/nChw16c.
24 size_t MKLDNNMemory::GetSize() const {
25 uint8_t itemSize = MKLDNNExtensionUtils::sizeOfDataType(mkldnn::memory::data_type(GetDataType()));
27 auto desc = GetDescriptor();
// Only the first ndims entries of padding_dims are meaningful.
28 std::vector<int> dims(desc.data.layout_desc.blocking.padding_dims,
29 desc.data.layout_desc.blocking.padding_dims + desc.data.ndims);
30 return std::accumulate(std::begin(dims), std::end(dims), (size_t) 1, std::multiplies<size_t>()) * itemSize;
// Create memory for the given logical dims / data type / mkldnn format.
// Rank-vs-format consistency is validated first; 'blocked' gets special
// handling and 'any' gets an explicitly built dense blocking descriptor.
// 'data' may point to an external buffer to wrap, or be nullptr to let
// mkldnn allocate — presumably forwarded to the desc-based Create()
// overload below; confirm against the full source.
33 void MKLDNNMemory::Create(memory::dims dims, memory::data_type data_type, memory::format format, const void* data) {
34 if (!isConsistant(dims, format)) {
35 THROW_IE_EXCEPTION << "dims and format are inconsistent.";
38 if (format == memory::blocked) {
42 memory::desc desc = MKLDNNMemoryDesc({dims}, data_type, format);
44 if (format == memory::any) {
// 'any' carries no concrete layout, so synthesize a dense blocked one.
45 CreateBlockingDesc(desc);
// Create memory from a fully specified mkldnn descriptor.
// data == nullptr : let mkldnn allocate, then zero the padded region so
//                   padding elements never contain garbage (skipped for the
//                   Winograd weight format, whose size cannot be derived
//                   from padding_dims).
// data != nullptr : wrap the caller's buffer without copying; const_cast is
//                   needed only because the mkldnn C++ API takes void*.
51 void MKLDNNMemory::Create(const mkldnn::memory::desc& desc, const void *data) {
52 auto primitive_desc = memory::primitive_desc(desc, eng);
53 uint8_t itemSize = MKLDNNExtensionUtils::sizeOfDataType(mkldnn::memory::data_type(desc.data.data_type));
55 if (data == nullptr) {
56 prim.reset(new memory(primitive_desc));
59 if (desc.data.format == mkldnn_wino_fmt)
// real_size = product of padded dims of the freshly allocated primitive.
61 if (prim->get_primitive_desc().desc().data.ndims > 0) {
62 real_size = static_cast<size_t>(prim->get_primitive_desc().desc().data.layout_desc.blocking.padding_dims[0]);
63 for (int i = 1; i < prim->get_primitive_desc().desc().data.ndims; i++) {
64 real_size *= prim->get_primitive_desc().desc().data.layout_desc.blocking.padding_dims[i];
// Zero starting at the padded-area offset of the underlying buffer.
67 uint8_t* dataPtr = static_cast<uint8_t*>(GetData());
68 dataPtr += itemSize * prim->get_primitive_desc().desc().data.layout_desc.blocking.offset_padding;
70 memset(dataPtr, 0, real_size * itemSize);
72 // MKLDNN accepts not a const data, probably need to remove some level of consteness in a call stack
73 prim.reset(new memory(primitive_desc, const_cast<void*>(data)));
// Copy external data into this memory.
// If the incoming format or data type differs from this memory's layout, the
// data is routed through an mkldnn reorder primitive; otherwise it is a raw
// memcpy past the padded-area offset. With ftz set, f32 content is scanned
// for subnormal values afterwards.
77 void MKLDNNMemory::SetData(memory::data_type dataType, memory::format format, const void* data, size_t size, bool ftz) const {
78 uint8_t itemSize = MKLDNNExtensionUtils::sizeOfDataType(mkldnn::memory::data_type(dataType));
80 if (static_cast<mkldnn_memory_format_t>(format) != GetDescriptor().data.format ||
81 GetDataType() != dataType) {
82 auto memData = GetDescriptor().data;
84 std::vector<ptrdiff_t> dims(memData.dims, memData.dims + memData.ndims);
// NOTE(review): this local deliberately shadows the 'dataType' parameter —
// the reorder source is created with *this* memory's data type, not the
// caller's. Confirm that is intended (it silently ignores the argument here).
86 auto dataType = GetDataType();
88 MKLDNNMemory src(eng);
89 src.Create(dims, dataType, format, data);
91 std::shared_ptr<mkldnn::reorder> pReorder =
92 std::shared_ptr<mkldnn::reorder>(new mkldnn::reorder(src.GetPrimitive(), GetPrimitive()));
// Eager stream: the reorder executes synchronously before returning.
94 mkldnn::stream(stream::kind::eager).submit({*pReorder});
96 uint8_t* dataPtr = static_cast<uint8_t*>(GetData());
97 // We cannot support strides for i/o blobs because it affects performance.
98 dataPtr += itemSize * prim->get_primitive_desc().desc().data.layout_desc.blocking.offset_padding;
// NOTE(review): 'size' is trusted as-is; no check against GetSize().
99 memcpy(dataPtr, data, size);
// Flush-to-zero pass: detect subnormal floats (|x| < FLT_MIN). The action on
// a hit is not visible here — presumably memData[i] is zeroed; confirm
// against the full source.
102 if (ftz && dataType == mkldnn_f32) {
103 // Internal blobs haven't strides yet.
104 auto *memData = static_cast<float *>(GetData());
105 memData += prim->get_primitive_desc().desc().data.layout_desc.blocking.offset_padding;
106 size_t realSize = GetSize() / sizeof(float);
107 for (size_t i = 0; i < realSize; i++) {
108 if (memData[i] != 0 && (fabsf(memData[i]) < std::numeric_limits<float>::min())) {
// Copy the content of another MKLDNNMemory into this one via an mkldnn
// reorder (handles any layout/type conversion), then optionally run the
// same flush-to-zero scan as the raw-pointer overload. Winograd-format
// destinations are excluded from the ftz pass.
115 void MKLDNNMemory::SetData(const MKLDNNMemory& memory, bool ftz) const {
116 mkldnn::reorder reorderPrim(memory.GetPrimitive(), GetPrimitive());
// Eager stream: executes synchronously.
117 mkldnn::stream(stream::kind::eager).submit({reorderPrim});
119 if (ftz && memory.GetDataType() == mkldnn::memory::f32 && GetFormat() != mkldnn::memory::wino_fmt) {
120 // Internal blobs haven't strides yet.
121 auto *memData = static_cast<float *>(GetData());
122 memData += prim->get_primitive_desc().desc().data.layout_desc.blocking.offset_padding;
123 size_t realSize = GetSize() / sizeof(float);
124 for (size_t i = 0; i < realSize; i++) {
// Subnormal detection; the zeroing statement is not visible here — confirm.
125 if (memData[i] != 0 && (fabsf(memData[i]) < std::numeric_limits<float>::min())) {
// Zero the whole underlying buffer, including padding (GetSize() is based on
// the padded dims).
132 void MKLDNNMemory::FillZero() {
133 void* dataPtr = GetData();
134 memset(dataPtr, 0, GetSize());
// Sanity-check that the number of logical dims matches the rank implied by
// the requested mkldnn format (the switch — largely not visible here — maps
// each format to its expected ndims before the final comparison).
137 bool MKLDNNMemory::isConsistant(memory::dims dims, memory::format format) {
138 using f = mkldnn::memory::format;
181 case f::OIdhw8i16o2i:
189 case f::gOIhw8i16o2i:
190 case f::gOIhw8o16i2o:
201 case f::gOIdhw16i16o:
202 case f::gOIdhw8i16o2i:
206 case f::gOIdhw16o16i:
208 case f::format_undef:
218 return (dims.size() == ndims);
// True when 'format' is one of the plain (dense, non-blocked) layouts listed
// below — i.e. membership test over a fixed whitelist.
221 bool MKLDNNMemory::IsPlainFormat(memory::format format) {
222 std::vector<memory::format> plains = {memory::nc, memory::nchw, memory::ncdhw, memory::nhwc, memory::ndhwc, memory::chwn,
223 memory::oi, memory::io, memory::oihw, memory::oidhw, memory::ihwo, memory::tnc,
227 for (auto it : plains) {
// Map a tensor rank to its default plain mkldnn format (e.g. 5D -> ncdhw);
// ranks without a dedicated plain format fall back to memory::blocked.
236 memory::format MKLDNNMemory::GetPlainFormat(memory::dims dims) {
237 switch (dims.size()) {
247 return memory::ncdhw;
249 return memory::blocked;
// Map a tensor rank to the corresponding InferenceEngine plain layout;
// any rank without a dedicated layout falls back to Layout::BLOCKED.
253 InferenceEngine::Layout MKLDNNMemory::GetPlainLayout(memory::dims dims) {
254 switch (dims.size()) {
255 case 0: return Layout::SCALAR;
256 case 1: return Layout::C;
257 case 2: return Layout::NC;
258 case 3: return Layout::CHW;
259 case 4: return Layout::NCHW;
261 return Layout::BLOCKED;
// Rewrite 'desc' in-place as a dense mkldnn_blocked layout: unit block dims,
// padding equal to the logical dims, zero per-dim offsets, and row-major
// strides computed from least- to most-significant dimension.
265 void MKLDNNMemory::CreateBlockingDesc(memory::desc &desc) {
266 auto dims = desc.data.dims;
267 int ndims = desc.data.ndims;
269 desc.data.format = mkldnn_blocked;
271 auto& blk = desc.data.layout_desc.blocking;
273 blk.offset_padding = 0;
// Dense layout: no sub-blocking (block_dims == 1, inner strides == 1),
// no padding beyond the logical extent.
275 for (int i = 0; i < ndims; i++) {
276 blk.block_dims[i] = 1;
277 blk.strides[1][i] = 1;
278 blk.padding_dims[i] = dims[i];
279 blk.offset_padding_to_data[i] = 0;
// 'perm' is presumably filled as the identity permutation in the (not
// visible) loop body — confirm against the full source.
282 int perm[TENSOR_MAX_DIMS] = {0};
284 for (int i = 0; i < ndims; ++i) {
// Innermost dimension gets stride 1; each outer stride is the inner stride
// times the inner extent (clamped to >= 1 so zero-sized dims stay valid).
288 blk.strides[0][perm[ndims - 1]] = 1;
290 for (int d = 1; d < ndims; ++d) {
291 const int prev_idx = perm[ndims - d];
292 const int curr_idx = perm[ndims - 1 - d];
294 blk.strides[0][curr_idx] = dims[curr_idx] == 0 ? 1 : blk.strides[0][prev_idx] * (std::max)((ptrdiff_t)1, dims[prev_idx]);
// Translate an InferenceEngine layout to an mkldnn plain format (switch body
// largely not visible here); unrecognized layouts fall back to
// memory::blocked.
297 memory::format MKLDNNMemory::Convert(const InferenceEngine::Layout layout) {
304 return memory::ncdhw;
306 return memory::ndhwc;
314 return memory::blocked;
// Human-readable name for an mkldnn memory format, used for logging and
// error messages. Throws for values not covered by the switch.
318 std::string MKLDNNMemory::formatToString(memory::format fmt) {
// Special / rank-generic formats.
320 case memory::format_undef: return "undef";
321 case memory::any: return "any";
322 case memory::blocked: return "blocked";
324 case memory::x: return "x";
// 2D / 3D formats.
326 case memory::nc: return "nc";
327 case memory::oi: return "oi";
328 case memory::io: return "io";
330 case memory::ntc: return "ntc";
331 case memory::tnc: return "tnc";
// 4D activation formats (plain and channel-blocked).
333 case memory::nchw: return "nchw";
334 case memory::nhwc: return "nhwc";
335 case memory::chwn: return "chwn";
336 case memory::nChw8c: return "nChw8c";
337 case memory::nChw16c: return "nChw16c";
// 5D activation formats.
339 case memory::ncdhw: return "ncdhw";
340 case memory::ndhwc: return "ndhwc";
341 case memory::nCdhw8c: return "nCdhw8c";
342 case memory::nCdhw16c: return "nCdhw16c";
// 4D O/I-style formats.
344 case memory::oihw: return "oihw";
345 case memory::ihwo: return "ihwo";
346 case memory::OIhw8i8o: return "OIhw8i8o";
347 case memory::OIhw16i16o: return "OIhw16i16o";
348 case memory::OIhw8o8i: return "OIhw8o8i";
349 case memory::OIhw16o16i: return "OIhw16o16i";
350 case memory::OIhw8i16o2i: return "OIhw8i16o2i";
351 case memory::OIhw8o16i2o: return "OIhw8o16i2o";
352 case memory::Ohwi8o: return "Ohwi8o";
353 case memory::Ohwi16o: return "Ohwi16o";
354 case memory::OhIw16o4i: return "OhIw16o4i";
// 5D O/I-style formats.
356 case memory::oidhw: return "oidhw";
357 case memory::OIdhw8i8o: return "OIdhw8i8o";
358 case memory::OIdhw16i16o: return "OIdhw16i16o";
359 case memory::OIdhw8o8i: return "OIdhw8o8i";
360 case memory::OIdhw16o16i: return "OIdhw16o16i";
361 case memory::OIdhw8i16o2i: return "OIdhw8i16o2i";
362 case memory::Odhwi8o: return "Odhwi8o";
363 case memory::Odhwi16o: return "Odhwi16o";
// Grouped (g-prefixed) 4D formats.
365 case memory::goihw: return "goihw";
366 case memory::hwigo: return "hwigo";
367 case memory::hwio: return "hwio";
368 case memory::gOIhw8i8o: return "gOIhw8i8o";
369 case memory::gOIhw16i16o: return "gOIhw16i16o";
370 case memory::gOIhw8i16o2i: return "gOIhw8i16o2i";
371 case memory::gOIhw8o16i2o: return "gOIhw8o16i2o";
372 case memory::gOhwi8o: return "gOhwi8o";
373 case memory::gOhwi16o: return "gOhwi16o";
374 case memory::gOIhw8o8i: return "gOIhw8o8i";
375 case memory::gOIhw16o16i: return "gOIhw16o16i";
376 case memory::gOhIw16o4i: return "gOhIw16o4i";
// Grouped 5D formats.
378 case memory::goidhw: return "goidhw";
379 case memory::gOIdhw8i8o: return "gOIdhw8i8o";
380 case memory::gOIdhw16i16o: return "gOIdhw16i16o";
381 case memory::gOIdhw8i16o2i: return "gOIdhw8i16o2i";
382 case memory::gOdhwi8o: return "gOdhwi8o";
383 case memory::gOdhwi16o: return "gOdhwi16o";
384 case memory::gOIdhw8o8i: return "gOIdhw8o8i";
385 case memory::gOIdhw16o16i: return "gOIdhw16o16i";
388 THROW_IE_EXCEPTION << "Unknown data format.";
// Deep equality of two memory descriptors: dims, data type, format,
// primitive kind, and the full blocking descriptor (block dims, padding,
// per-dim offsets and both stride levels) must all match.
393 bool MKLDNNMemoryDesc::operator==(const MKLDNNMemoryDesc &rhs) const {
// NOTE(review): both lambdas take mkldnn_memory_desc_t by value, copying the
// descriptor struct on every call; const& would avoid the copies.
394 auto dims_equal = [] (mkldnn_memory_desc_t ldata, mkldnn_memory_desc_t rdata) {
395 if (ldata.ndims != rdata.ndims)
397 for (int i = 0; i < ldata.ndims; i++) {
398 if (ldata.dims[i] != rdata.dims[i])
403 auto blocking_equal = [] (mkldnn_memory_desc_t ldata, mkldnn_memory_desc_t rdata) {
404 if (ldata.ndims != rdata.ndims)
406 mkldnn_blocking_desc_t lblock = ldata.layout_desc.blocking;
407 mkldnn_blocking_desc_t rblock = rdata.layout_desc.blocking;
408 if (lblock.offset_padding != rblock.offset_padding)
410 for (int i = 0; i < ldata.ndims; i++) {
411 if (lblock.block_dims[i] != rblock.block_dims[i] ||
412 lblock.offset_padding_to_data[i] != rblock.offset_padding_to_data[i] ||
413 lblock.padding_dims[i] != rblock.padding_dims[i] || lblock.strides[0][i] != rblock.strides[0][i] ||
414 lblock.strides[1][i] != rblock.strides[1][i])
// Short-circuits left to right: cheap scalar fields compared between the
// two lambda calls.
419 return dims_equal(this->desc.data, rhs.desc.data) &&
420 this->desc.data.data_type == rhs.desc.data.data_type &&
421 this->desc.data.format == rhs.desc.data.format &&
422 this->desc.data.primitive_kind == rhs.desc.data.primitive_kind &&
423 blocking_equal(this->desc.data, rhs.desc.data);
// Canonical negation of operator== (defined above).
426 bool MKLDNNMemoryDesc::operator!=(const MKLDNNMemoryDesc &rhs) const {
427 return !(*this == rhs);
// Implicit conversion to the wrapped mkldnn::memory::desc — body not visible
// here; presumably returns the stored 'desc' member. Confirm against the
// full source.
430 MKLDNNMemoryDesc::operator mkldnn::memory::desc() const {
// Build a descriptor for the given dims/type/format. The member is first
// initialized with format 'any' (always valid); for any concrete format it
// is replaced with a real descriptor, while memory::blocked gets a dense
// blocking descriptor synthesized by CreateBlockingDesc.
434 MKLDNNMemoryDesc::MKLDNNMemoryDesc(mkldnn::memory::dims dims, mkldnn::memory::data_type dataType,
435 mkldnn::memory::format format): desc(dims, dataType, mkldnn::memory::any) {
436 if (format != memory::blocked) {
437 desc = mkldnn::memory::desc(dims, dataType, format);
440 MKLDNNMemory::CreateBlockingDesc(desc);
// Convert this mkldnn descriptor into an InferenceEngine TensorDesc:
//  1) map mkldnn data type -> IE Precision (throws on unsupported types);
//  2) map mkldnn format -> IE layout, block dims and dimension order;
//  3) derive strides and per-dim padding offsets from the blocking info.
443 MKLDNNMemoryDesc::operator InferenceEngine::TensorDesc() const {
445 switch (desc.data.data_type) {
447 precision = Precision::FP32;
450 precision = Precision::U8;
453 precision = Precision::I8;
456 precision = Precision::I16;
459 precision = Precision::I32;
462 precision = Precision::BIN;
465 THROW_IE_EXCEPTION << "Cannot cast to TensorDesc. Unsupported precision!";
470 auto blkInfo = desc.data.layout_desc.blocking;
471 auto offset = static_cast<size_t>(blkInfo.offset_padding);
472 SizeVector offsetsForDims;
473 SizeVector dims = getDims().ToSizeVector();
474 switch (getFormat()) {
475 case memory::format_undef:
476 THROW_IE_EXCEPTION << "Cannot cast to tensor desc. Format is undefined!";
// 'any' has no layout info beyond dims — return early without blocking data.
478 layout = Layout::ANY;
479 return TensorDesc(precision, dims, layout);
// 3D cases: two CHW variants; the second permutes dims (order not fully
// visible here).
492 layout = Layout::CHW;
497 layout = Layout::CHW;
499 blkDims = {static_cast<size_t>(dims[1]),
500 static_cast<size_t>(dims[0]),
501 static_cast<size_t>(dims[2])};
505 layout = Layout::NCHW;
506 order = {0, 1, 2, 3};
510 layout = Layout::NCDHW;
511 order = {0, 1, 2, 3, 4};
// NHWC: channels become the innermost dim; for binary precision the channel
// extent is rounded up to a multiple of 8 (bit packing).
515 layout = Layout::NHWC;
516 order = {0, 2, 3, 1};
517 if (precision == Precision::BIN) {
518 blkDims = {static_cast<size_t>(dims[0]),
519 static_cast<size_t>(dims[2]),
520 static_cast<size_t>(dims[3]),
521 static_cast<size_t>(rnd_up(dims[1], 8))};
523 blkDims = {static_cast<size_t>(dims[0]),
524 static_cast<size_t>(dims[2]),
525 static_cast<size_t>(dims[3]),
526 static_cast<size_t>(dims[1])};
530 layout = Layout::NDHWC;
531 order = {0, 2, 3, 4, 1};
532 blkDims = {static_cast<size_t>(dims[0]),
533 static_cast<size_t>(dims[2]),
534 static_cast<size_t>(dims[3]),
535 static_cast<size_t>(dims[4]),
536 static_cast<size_t>(dims[1])};
// Channel-blocked formats: channel dim appears twice in 'order' (outer index
// and trailing inner block); blkDims[1] becomes ceil(C / block) and the
// block size (8 or 16) is appended as an extra dim.
540 order = {0, 1, 2, 3, 1};
542 blkDims[1] = blkDims[1] / 8 + (blkDims[1] % 8 ? 1 : 0);
543 blkDims.push_back(8);
544 layout = Layout::BLOCKED;
546 case memory::nCdhw8c:
547 order = {0, 1, 2, 3, 4, 1};
549 blkDims[1] = blkDims[1] / 8 + (blkDims[1] % 8 ? 1 : 0);
550 blkDims.push_back(8);
551 layout = Layout::BLOCKED;
553 case memory::nChw16c:
554 order = {0, 1, 2, 3, 1};
556 blkDims[1] = blkDims[1] / 16 + (blkDims[1] % 16 ? 1 : 0);
557 blkDims.push_back(16);
558 layout = Layout::BLOCKED;
560 case memory::nCdhw16c:
561 order = {0, 1, 2, 3, 4, 1};
563 blkDims[1] = blkDims[1] / 16 + (blkDims[1] % 16 ? 1 : 0);
564 blkDims.push_back(16);
565 layout = Layout::BLOCKED;
// Generic 'blocked': only plain (non-sub-blocked, descending-stride)
// layouts are representable; anything else is rejected.
567 case memory::blocked:
570 for (size_t i = 0; i < blkDims.size(); i++) {
572 if ((i && blkInfo.strides[0][i - 1] < blkInfo.strides[0][i]) || blkInfo.block_dims[i] != 1) {
573 THROW_IE_EXCEPTION << "Cannot cast to tensor desc."
574 << " Unsupported blocked format.";
577 if (order.size() == 3 && order[0] == 0 && order[1] == 1 && order[2] == 2)
578 layout = Layout::CHW;
580 layout = Layout::BLOCKED;
583 THROW_IE_EXCEPTION << "Cannot cast to tensor desc. Format is unsupported!";
// Strides: permuted formats (NHWC/NDHWC/CHW) read them straight from the
// mkldnn blocking info via 'order'; blocked formats derive the strides for
// the appended inner-block dims from the outer ones.
586 SizeVector strides(blkDims.size());
588 if (layout == Layout::NHWC || layout == Layout::NDHWC || layout == Layout::CHW) {
589 for (size_t i = 0; i < order.size(); i++) {
590 strides[i] = static_cast<size_t>(blkInfo.strides[0][order[i]]);
593 strides[blkDims.size() - 1] = 1;
594 for (size_t i = 2; i <= order.size(); i++) {
595 if (blkDims.size() - i < dims.size()) {
596 strides[blkDims.size() - i] = static_cast<size_t>(blkInfo.strides[0][order[blkDims.size() - i]]);
598 strides[blkDims.size() - i] = strides[blkDims.size() - i + 1] * blkDims[blkDims.size() - i + 1];
// Per-dim padding offsets: copied from mkldnn for real dims, zero for the
// appended inner-block dims.
603 for (size_t i = 0; i < blkDims.size() && i < TENSOR_MAX_DIMS; i++) {
605 offsetsForDims.push_back(blkInfo.offset_padding_to_data[i]);
607 offsetsForDims.push_back(0);
610 TensorDesc tensorDesc(precision, dims, {blkDims, order, offset, offsetsForDims, strides});
612 tensorDesc.setLayout(layout);
// Inverse conversion: build an mkldnn memory descriptor from an
// InferenceEngine TensorDesc.
//  1) map IE Precision -> mkldnn data type (throws on unsupported);
//  2) map IE layout (plus dim order / block dims for BLOCKED) -> mkldnn
//     format;
//  3) either synthesize a custom dense blocked descriptor, or create a
//     standard-format descriptor and overlay the TensorDesc's offsets and
//     strides.
616 MKLDNNMemoryDesc::MKLDNNMemoryDesc(const TensorDesc& tDesc):
617 desc({}, mkldnn::memory::data_type::f32, mkldnn::memory::format::format_undef) {
618 mkldnn::memory::data_type data_type;
619 switch (tDesc.getPrecision()) {
620 case Precision::FP32:
621 data_type = mkldnn::memory::data_type::f32;
624 data_type = mkldnn::memory::data_type::u8;
627 data_type = mkldnn::memory::data_type::s8;
630 data_type = mkldnn::memory::data_type::s16;
633 data_type = mkldnn::memory::data_type::s32;
636 data_type = mkldnn::memory::data_type::bin;
639 THROW_IE_EXCEPTION << "Cannot create MKLDNNMemoryDesc from TensorDesc. Unsupported precision!";
642 mkldnn::memory::format mkldnnFormat = memory::format::format_undef;
643 SizeVector blkdDims = tDesc.getBlockingDesc().getBlockDims();
644 SizeVector order = tDesc.getBlockingDesc().getOrder();
645 SizeVector offsetsToData = tDesc.getBlockingDesc().getOffsetPaddingToData();
646 SizeVector strides = tDesc.getBlockingDesc().getStrides();
647 auto realDims = MKLDNNDims(tDesc.getDims());
648 switch (tDesc.getLayout()) {
650 mkldnnFormat = memory::format::any;
653 mkldnnFormat = memory::format::nchw;
656 mkldnnFormat = memory::format::ncdhw;
659 mkldnnFormat = memory::format::nhwc;
662 mkldnnFormat = memory::format::ndhwc;
665 mkldnnFormat = memory::format::oihw;
669 mkldnnFormat = memory::format::x;
// 3D: the dim order distinguishes tnc / ntc; anything else is generic.
672 if (order == SizeVector{0, 1, 2})
673 mkldnnFormat = memory::format::tnc;
674 else if (order == SizeVector{1, 0, 2})
675 mkldnnFormat = memory::format::ntc;
677 mkldnnFormat = memory::format::blocked;
681 mkldnnFormat = memory::format::nc;
// BLOCKED: infer the concrete mkldnn format from rank, order and the inner
// block size (8c/16c); otherwise stay generic 'blocked'.
684 mkldnnFormat = memory::format::blocked;
685 if (realDims.ndims() == 1) {
686 mkldnnFormat = memory::format::x;
687 } else if (realDims.ndims() == 2) {
688 mkldnnFormat = memory::format::nc;
689 } else if (realDims.ndims() == 4) {
690 if (order.size() == 5 && order[0] == 0 && order[1] == 1 && order[2] == 2 && order[3] == 3 && order[4] == 1) {
691 if (blkdDims[4] == 8) {
692 mkldnnFormat = memory::format::nChw8c;
693 } else if (blkdDims[4] == 16) {
694 mkldnnFormat = memory::format::nChw16c;
696 } else if (order.size() == 4) {
697 if (order[0] == 0 && order[1] == 1 && order[2] == 2 && order[3] == 3) {
698 mkldnnFormat = memory::format::nchw;
699 } else if (order[0] == 0 && order[1] == 2 && order[2] == 3 && order[3] == 1) {
700 mkldnnFormat = memory::format::nhwc;
703 } else if (realDims.ndims() == 5) {
704 if (order.size() == 6 &&
705 order[0] == 0 && order[1] == 1 && order[2] == 2 && order[3] == 3 && order[4] == 4 && order[5] == 1) {
706 if (blkdDims[5] == 8) {
707 mkldnnFormat = memory::format::nCdhw8c;
708 } else if (blkdDims[5] == 16) {
709 mkldnnFormat = memory::format::nCdhw16c;
711 } else if (order.size() == 5) {
712 if (order[0] == 0 && order[1] == 1 && order[2] == 2 && order[3] == 3 && order[4] == 4) {
713 mkldnnFormat = memory::format::ncdhw;
714 } else if (order[0] == 0 && order[1] == 2 && order[2] == 3 && order[3] == 4 && order[4] == 1) {
715 mkldnnFormat = memory::format::ndhwc;
721 mkldnnFormat = memory::format::blocked;
724 if (mkldnnFormat == memory::format_undef)
725 THROW_IE_EXCEPTION << "Cannot detect the right memory format!";
// 'notDefault' becomes true if any per-dim offset is non-zero or the strides
// deviate from a dense packing (flag-setting statements not visible here).
727 bool notDefault = false;
728 size_t currentStride = 1;
729 for (size_t i = 0; i < order.size(); i++) {
730 if (offsetsToData[i] != 0) {
734 if (strides[strides.size() - (1 +i)] != currentStride) {
738 currentStride *= blkdDims[blkdDims.size() - (1 + i)];
// 'blocked' detection: a repeated index in 'order' means some dim is split
// into an inner block (flag-setting statement not visible here).
741 bool blocked = false;
742 std::unordered_set<size_t> exist_order;
743 for (auto& ord : order) {
744 if (exist_order.find(ord) != exist_order.end()) {
748 exist_order.insert(ord);
751 if (notDefault && mkldnnFormat == memory::blocked && blocked)
752 THROW_IE_EXCEPTION << "Currently MKLDNNPlugin supports only packaged memory for unknown blocked format";
// Generic blocked: build a dense blocking descriptor by hand (mirrors
// MKLDNNMemory::CreateBlockingDesc, but takes offsets from the TensorDesc).
754 if (mkldnnFormat == memory::blocked) {
755 desc = MKLDNNMemoryDesc(realDims, data_type, memory::any);
756 desc.data.format = mkldnn_blocked;
758 auto& blk = desc.data.layout_desc.blocking;
760 blk.offset_padding = tDesc.getBlockingDesc().getOffsetPadding();
// NOTE(review): 'size_t i < realDims.ndims()' mixes signed/unsigned if
// ndims() returns int — harmless here but worth a cast.
762 for (size_t i = 0; i < realDims.ndims(); i++) {
763 blk.block_dims[i] = 1;
764 blk.strides[1][i] = 1;
765 blk.padding_dims[i] = realDims[i];
766 blk.offset_padding_to_data[i] = offsetsToData[i];
// 'perm' presumably filled as identity in the (not visible) loop body.
769 int perm[TENSOR_MAX_DIMS] = {0};
771 for (size_t i = 0; i < realDims.ndims(); ++i) {
775 blk.strides[0][perm[realDims.ndims() - 1]] = 1;
777 for (int d = 1; d < realDims.ndims(); ++d) {
778 const int prev_idx = perm[realDims.ndims() - d];
779 const int curr_idx = perm[realDims.ndims() - 1 - d];
781 blk.strides[0][curr_idx] = realDims[curr_idx] == 0 ? 1 : blk.strides[0][prev_idx] * (std::max)((ptrdiff_t)1, realDims[prev_idx]);
// Known format: create a standard descriptor, then overlay the TensorDesc's
// padding offset, per-dim offsets and strides.
784 desc = MKLDNNMemoryDesc(realDims, data_type, mkldnnFormat);
787 desc.data.layout_desc.blocking.offset_padding = tDesc.getBlockingDesc().getOffsetPadding();
788 for (size_t i = 0; i < tDesc.getBlockingDesc().getOffsetPaddingToData().size() && i < TENSOR_MAX_DIMS; i++) {
789 desc.data.layout_desc.blocking.offset_padding_to_data[i] = static_cast<ptrdiff_t>(offsetsToData[i]);
// NOTE(review): 'i < desc.data.ndims' compares size_t with int.
793 for (size_t i = 0; i < strides.size() && i < desc.data.ndims; i++) {
794 desc.data.layout_desc.blocking.strides[0][i] = static_cast<ptrdiff_t>(strides[order[i]]);
// True when any padded dimension differs from the logical dimension, i.e.
// the blocked layout allocates extra padding elements (e.g. channels rounded
// up to the block size). Return statements not visible here — presumably
// returns true on the first mismatch, false otherwise.
799 bool MKLDNNMemoryDesc::blocksExtended() const {
800 for (int i = 0; i < desc.data.ndims; i++) {
801 if (desc.data.dims[i] != desc.data.layout_desc.blocking.padding_dims[i])
807 } // namespace MKLDNNPlugin