dnn(openvino): fix custom layers BlockingDesc
author: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Wed, 30 Nov 2022 01:26:09 +0000 (01:26 +0000)
committer: Alexander Alekhin <alexander.a.alekhin@gmail.com>
Sat, 3 Dec 2022 01:34:10 +0000 (01:34 +0000)
modules/dnn/src/ie_ngraph.cpp

index 235fa7dcbb24849d545f5cefac47053159b5d5dd..aee3e294e5cf4689fb55167574fe2790d4a8677d 100644 (file)
@@ -204,14 +204,13 @@ public:
         std::vector<InferenceEngine::DataConfig> outDataConfig;
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
         InferenceEngine::SizeVector order;
-        size_t offset = std::numeric_limits<size_t>::max();
         for (int i = 0; i < node->get_input_size(); ++i)
         {
             InferenceEngine::DataConfig conf;
             auto shape = node->input_value(i).get_shape();
             order.resize(shape.size());
             std::iota(order.begin(), order.end(), 0);
-            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order, offset});
+            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order});
             inDataConfig.push_back(conf);
         }
 
@@ -221,7 +220,7 @@ public:
             auto shape = node->output(i).get_shape();
             order.resize(shape.size());
             std::iota(order.begin(), order.end(), 0);
-            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order, offset});
+            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order});
             outDataConfig.push_back(conf);
         }
 #else