1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
5 #include <gtest/gtest.h>
6 #include <gmock/gmock-spec-builders.h>
7 #include "mkldnn_plugin/mkldnn_graph.h"
9 #include "test_graph.hpp"
11 #include "single_layer_common.hpp"
12 #include <mkldnn_plugin/mkldnn_extension_utils.h>
13 #include <extension/ext_list.hpp>
14 #include "tests_common.hpp"
17 using namespace ::testing;
19 using namespace mkldnn;
// Parameter bundle for one MKLDNNCPUExtGatherTests case.
// NOTE(review): several fields are missing from this view (the test body below
// reads p.axis, p.num_prim_desc and p.selectedType) — confirm against the full file.
22 struct gather_test_params {
// Precision string for the index input ("FP32", "I32", "U16", "I16", "U8" or "I8").
23 std::string inIdxPrecision;
// Shape of the indices tensor.
24 InferenceEngine::SizeVector inIdx;
// Shape of the dictionary tensor that values are gathered from.
25 InferenceEngine::SizeVector inDict;
// Expected output shape.
27 InferenceEngine::SizeVector out;
// Optional per-primitive-descriptor checks run against the gather node.
32 std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
// Scalar reference implementation of Gather, used to validate the MKLDNN
// extension layer: for each index i and each leading-dictionary slot j it
// copies a dataLength-sized slice of the dictionary into the destination.
// NOTE(review): several lines are missing from this view (declarations of the
// loop counters i/j, the leading-1 dimension squeeze, the idx-range branch and
// closing braces) — verify against the full file.
35 template <typename data_t>
36 void ref_gather(InferenceEngine::TBlob<data_t> &srcIdx, InferenceEngine::TBlob<float> &srcDct, InferenceEngine::TBlob<float> &dst, size_t axis) {
38 const data_t *src_dataIdx = srcIdx.data();
39 float* src_dataDict = srcDct.data();
40 float *dst_data = dst.data();
41 size_t src_size = srcIdx.size();
43 std::vector<size_t> dims = srcDct.getTensorDesc().getDims();
44 std::vector<size_t> dims_actual;
46 // Remove redundant dimensions
47 for (size_t i = 0; i < dims.size(); i++) {
49 for (size_t j = i; j < dims.size(); j++)
50 dims_actual.push_back(dims[j]);
// numDictionaries = product of dims before `axis`; dataLength = product after it.
55 // Find number of dictionaries, index range and data length
56 size_t numDictionaries = 1;
57 for (i = 0; i < axis; i++)
58 numDictionaries *= dims_actual[i];
59 size_t indexRange = dims_actual[axis];
60 size_t dataLength = 1;
61 for (i = axis + 1; i < dims_actual.size(); i++)
62 dataLength *= dims_actual[i];
64 // The gathering process
65 for (i = 0; i < src_size; i++) {
66 unsigned int idx = static_cast<unsigned int>(src_dataIdx[i]);
// In-range index: copy one dataLength slice per dictionary slot.
71 // Copying data to destination from Dictionary
72 for (j = 0; j < numDictionaries; j++) {
73 memcpy(&dst_data[dataLength * (i + j * src_size)],
74 &src_dataDict[dataLength * (idx + j * indexRange)], sizeof(float) * dataLength);
// Presumably the else-branch for an out-of-range index (condition sampled out):
// the destination slice is zero-filled instead of copied — TODO confirm.
77 for (j = 0; j < numDictionaries; j++) {
78 std::fill_n(&dst_data[dataLength * (i + j * src_size)], dataLength, 0.0f);
// Parameterized fixture: builds a 3-layer IR (two Inputs -> Gather), runs it
// through the MKLDNN graph with the cpu_extension library, and compares the
// result against ref_gather. The model template below is a raw string; the
// _IIDXP_/_IIDX_/_IDICT_/_AX_/_OUT_ placeholders are substituted in getModel().
84 class MKLDNNCPUExtGatherTests: public TestsCommon, public WithParamInterface<gather_test_params> {
85 std::string model_t = R"V0G0N(
86 <net Name="Gather_net" version="2" precision="FP32" batch="1">
88 <layer name="InputText" type="Input" precision="_IIDXP_" id="1">
95 <layer name="InputDictionary" type="Input" precision="FP32" id="2">
102 <layer name="gather" id="3" type="Gather" precision="FP32">
120 <edge from-layer="1" from-port="1" to-layer="3" to-port="2"/>
121 <edge from-layer="2" from-port="2" to-layer="3" to-port="1"/>
126 std::string getModel(gather_test_params p) {
// Expand each shape vector into "<dim>N</dim>" runs, then substitute all
// placeholders in the IR template.
127 std::string model = model_t;
132 for (auto& idx : p.inIdx) {
134 inIdx += std::to_string(idx) + "</dim>\n";
137 for (auto& dct : p.inDict) {
139 inDict += std::to_string(dct) + "</dim>\n";
142 for (auto& dst : p.out) {
144 out += std::to_string(dst) + "</dim>\n";
147 REPLACE_WITH_STR(model, "_IIDXP_", p.inIdxPrecision);
148 REPLACE_WITH_STR(model, "_IIDX_", inIdx);
149 REPLACE_WITH_STR(model, "_IDICT_", inDict);
150 REPLACE_WITH_NUM(model, "_AX_", p.axis);
151 REPLACE_WITH_STR(model, "_OUT_", out);
156 template <typename data_t>
157 static void fill_data_dbgval(data_t *data, size_t size) {
158 for (size_t i = 0; i < size; i++) {
159 data[i] = static_cast<data_t>(i & (sizeof(data_t) * 8 - 1));
// TearDown body is sampled out in this view.
163 virtual void TearDown() {
// Builds the IR from the test params, loads the cpu_extension library and
// creates the MKLDNN graph; the rest of SetUp validates the gather node and
// compares inference output with the scalar reference.
166 virtual void SetUp() {
168 TestsCommon::SetUp();
169 gather_test_params p = ::testing::WithParamInterface<gather_test_params>::GetParam();
170 std::string model = getModel(p);
172 InferenceEngine::CNNNetReader net_reader;
173 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
// The no-op deleter is deliberate: cpuExt is a stack object, so the shared_ptr
// must not delete it.
175 InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
176 MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
177 extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
179 MKLDNNGraphTestClass graph;
180 graph.CreateGraph(net_reader.getNetwork(), extMgr);
182 auto& nodes = graph.getNodes();
183 nodes = graph.getNodes();
// Validate the gather node's supported/selected primitive descriptors
// against the expectations in the test params.
185 for (auto &node : nodes) {
186 if (node->getName() == "gather") {
187 ASSERT_EQ(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size());
188 for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
189 p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j));
191 ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor());
// Bitmask check: the selected implementation must contain every bit of p.selectedType.
192 ASSERT_EQ(p.selectedType,
193 node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType);
// Expected topology: two inputs + gather + output node.
196 ASSERT_EQ(4, nodes.size());
// Dictionary input: FP32 blob filled with pseudo-random data.
199 InferenceEngine::Blob::Ptr srcDict = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.inDict, InferenceEngine::TensorDesc::getLayoutByDims(p.inDict) });
201 fill_data(srcDict->buffer(), srcDict->size());
202 auto * srcDictPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(srcDict.get());
203 if (srcDictPtr == nullptr)
204 FAIL() << "Cannot cast blob to TBlob<float>.";
// Allocate the network output blob and a same-shaped reference blob.
207 InferenceEngine::OutputsDataMap out;
208 out = net_reader.getNetwork().getOutputsInfo();
209 InferenceEngine::BlobMap outputBlobs;
211 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
213 InferenceEngine::TBlob<float>::Ptr output;
214 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
216 outputBlobs[item.first] = output;
219 InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
// Index input: one branch per supported index precision. Each branch allocates
// the blob, fills it with bounded debug values (plain fill_data for FP32) and
// computes the reference result via ref_gather.
// NOTE(review): the six branches are copy-paste clones differing only in the
// element type — a templated helper would collapse them.
223 InferenceEngine::Blob::Ptr srcIdx;
224 if (p.inIdxPrecision == "I32") {
225 srcIdx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
227 fill_data_dbgval(static_cast<int32_t*>(srcIdx->buffer()), srcIdx->size());
228 auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(srcIdx.get());
229 if (srcIdxPtr == nullptr)
230 FAIL() << "Cannot cast blob to TBlob<int32_t>.";
233 ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
235 else if (p.inIdxPrecision == "FP32") {
236 srcIdx = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
238 fill_data(srcIdx->buffer(), srcIdx->size());
239 auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(srcIdx.get());
240 if (srcIdxPtr == nullptr)
241 FAIL() << "Cannot cast blob to TBlob<float>.";
244 ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
246 else if (p.inIdxPrecision == "U16") {
247 srcIdx = InferenceEngine::make_shared_blob<uint16_t>({ InferenceEngine::Precision::U16, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
249 fill_data_dbgval(static_cast<uint16_t*>(srcIdx->buffer()), srcIdx->size());
250 auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<uint16_t>*>(srcIdx.get());
251 if (srcIdxPtr == nullptr)
252 FAIL() << "Cannot cast blob to TBlob<uint16_t>.";
255 ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
257 else if (p.inIdxPrecision == "I16") {
258 srcIdx = InferenceEngine::make_shared_blob<int16_t>({ InferenceEngine::Precision::I16, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
260 fill_data_dbgval(static_cast<int16_t*>(srcIdx->buffer()), srcIdx->size());
261 auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<int16_t>*>(srcIdx.get());
262 if (srcIdxPtr == nullptr)
263 FAIL() << "Cannot cast blob to TBlob<int16_t>.";
266 ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
268 else if (p.inIdxPrecision == "U8") {
269 srcIdx = InferenceEngine::make_shared_blob<uint8_t>({ InferenceEngine::Precision::U8, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
271 fill_data_dbgval(static_cast<uint8_t*>(srcIdx->buffer()), srcIdx->size());
272 auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<uint8_t>*>(srcIdx.get());
273 if (srcIdxPtr == nullptr)
274 FAIL() << "Cannot cast blob to TBlob<uint8_t>.";
277 ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
279 else if (p.inIdxPrecision == "I8") {
280 srcIdx = InferenceEngine::make_shared_blob<int8_t>({ InferenceEngine::Precision::I8, p.inIdx, InferenceEngine::TensorDesc::getLayoutByDims(p.inIdx) });
282 fill_data_dbgval(static_cast<int8_t*>(srcIdx->buffer()), srcIdx->size());
283 auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<int8_t>*>(srcIdx.get());
284 if (srcIdxPtr == nullptr)
285 FAIL() << "Cannot cast blob to TBlob<int8_t>.";
288 ref_gather(*srcIdxPtr, *srcDictPtr, dst_ref, p.axis);
// Run inference and compare the plugin output against the scalar reference.
294 InferenceEngine::BlobMap srcs;
295 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputDictionary", srcDict));
296 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputText", srcIdx));
299 graph.Infer(srcs, outputBlobs);
300 compare(*output, dst_ref);
301 } catch (const InferenceEngine::details::InferenceEngineException &e) {
// All checks live in SetUp(); the test body is intentionally empty.
307 TEST_P(MKLDNNCPUExtGatherTests, TestsGather) {}
// Cases cover all supported index precisions, redundant leading-1 dims,
// several axes and a 4D-index / 4D-dictionary case. The ::testing::Values(
// wrapper and closing parentheses are sampled out of this view.
309 INSTANTIATE_TEST_CASE_P(
310 TestsGather, MKLDNNCPUExtGatherTests,
312 gather_test_params{ "FP32", {1, 1, 12, 256}, {1, 1, 71, 16}, 0, {1, 12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown },
313 gather_test_params{ "I32", {1, 1, 12, 256}, {1, 1, 71, 16}, 0, {1, 12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown },
314 gather_test_params{ "I32", {12, 256}, {71, 16}, 0, {12, 256, 16}, 1, MKLDNNPlugin::impl_desc_type::unknown },
315 gather_test_params{ "I32", {3, 4}, {2, 5, 6}, 0, {3, 4, 5, 6}, 1, MKLDNNPlugin::impl_desc_type::unknown },
316 gather_test_params{ "I32", {3, 4}, {5, 1}, 0, {3, 4, 1}, 1, MKLDNNPlugin::impl_desc_type::unknown },
317 gather_test_params{ "FP32", {1, 1, 12, 256}, {1, 1, 71, 16}, 1, {1, 71, 12, 256}, 1, MKLDNNPlugin::impl_desc_type::unknown },
318 gather_test_params{ "I32", {1, 1, 3, 4}, {1, 2, 5, 6}, 1, {2, 3, 4, 6}, 1, MKLDNNPlugin::impl_desc_type::unknown },
319 gather_test_params{ "I32", {1, 1, 3, 4}, {1, 2, 5, 6}, 2, {2, 5, 3, 4}, 1, MKLDNNPlugin::impl_desc_type::unknown },
320 gather_test_params{ "I32", {12, 4, 9, 8}, {6, 13, 10, 3}, 1, {6, 12, 4, 9, 8, 10, 3}, 1, MKLDNNPlugin::impl_desc_type::unknown }
// Parameter bundle for the TF-reference Gather tests: explicit index values,
// dictionary values and expected output (instead of a scalar reference impl).
// NOTE(review): the axis field is sampled out of this view but is read as
// p.axis by getModel() below — confirm against the full file.
326 struct gatherTF_test_params {
// Shape of the index tensor.
327 InferenceEngine::SizeVector in_dim;
// Flattened index values.
328 std::vector<int32_t> in;
// Shape of the dictionary tensor.
330 InferenceEngine::SizeVector dct_dim;
// Flattened dictionary values.
331 std::vector<float> dct;
// Expected output shape.
335 InferenceEngine::SizeVector ref_dim;
// Flattened expected output values.
336 std::vector<float> ref;
// Optional per-primitive-descriptor checks.
338 std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
// Fixture validating Gather (including negative axes) against hard-coded
// TensorFlow reference outputs. Index input is always I32 here.
341 class MKLDNNCPUExtGatherTFTests : public TestsCommon, public WithParamInterface<gatherTF_test_params> {
342 std::string model_t = R"V0G0N(
343 <net Name="Gather_net" version="2" precision="FP32" batch="1">
345 <layer name="InputText" type="Input" precision="I32" id="1">
352 <layer name="InputDictionary" type="Input" precision="FP32" id="2">
359 <layer name="gather" id="3" type="Gather" precision="FP32">
377 <edge from-layer="1" from-port="1" to-layer="3" to-port="2"/>
378 <edge from-layer="2" from-port="2" to-layer="3" to-port="1"/>
383 std::string getModel(gatherTF_test_params p) {
// Expand shape vectors into "<dim>N</dim>" runs and substitute the
// _IIDX_/_IDICT_/_AX_/_OUT_ placeholders in the IR template.
384 std::string model = model_t;
389 for (auto& idx : p.in_dim) {
391 inIdx += std::to_string(idx) + "</dim>\n";
394 for (auto& dct : p.dct_dim) {
396 inDict += std::to_string(dct) + "</dim>\n";
399 for (auto& dst : p.ref_dim) {
401 out += std::to_string(dst) + "</dim>\n";
404 REPLACE_WITH_STR(model, "_IIDX_", inIdx);
405 REPLACE_WITH_STR(model, "_IDICT_", inDict);
406 REPLACE_WITH_NUM(model, "_AX_", p.axis);
407 REPLACE_WITH_STR(model, "_OUT_", out);
// TearDown body is sampled out in this view.
413 virtual void TearDown() {
// Builds the IR, loads cpu_extension, creates the graph, feeds the explicit
// index/dictionary values and runs inference; the result is compared against
// the TF reference just below this span.
416 virtual void SetUp() {
418 TestsCommon::SetUp();
419 gatherTF_test_params p = ::testing::WithParamInterface<gatherTF_test_params>::GetParam();
420 std::string model = getModel(p);
422 InferenceEngine::CNNNetReader net_reader;
423 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
// No-op deleter: cpuExt lives on the stack and must not be deleted by the shared_ptr.
425 InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
426 MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
427 extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
429 MKLDNNGraphTestClass graph;
430 graph.CreateGraph(net_reader.getNetwork(), extMgr);
// Index input: copy the explicit I32 values from the params.
433 InferenceEngine::Blob::Ptr srcIdx;
434 srcIdx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, p.in_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.in_dim) });
436 memcpy(static_cast<int32_t*>(srcIdx->buffer()), &p.in[0], sizeof(int32_t)*p.in.size());
// The casts below only validate the blob type; the pointers are not used further.
437 auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(srcIdx.get());
438 if (srcIdxPtr == nullptr)
439 FAIL() << "Cannot cast blob to TBlob<int32_t>.";
// Dictionary input: copy the explicit FP32 values from the params.
442 InferenceEngine::Blob::Ptr srcDict = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.dct_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.dct_dim) });
444 memcpy(srcDict->buffer(), &p.dct[0], sizeof(float)*p.dct.size());
445 auto * srcDictPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(srcDict.get());
446 if (srcDictPtr == nullptr)
447 FAIL() << "Cannot cast blob to TBlob<float>.";
// Allocate the output blob from the network's (only) output info.
450 InferenceEngine::OutputsDataMap out;
451 out = net_reader.getNetwork().getOutputsInfo();
452 InferenceEngine::BlobMap outputBlobs;
453 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
454 InferenceEngine::TBlob<float>::Ptr output;
455 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
457 outputBlobs[item.first] = output;
// Feed both inputs and run inference.
460 InferenceEngine::BlobMap srcs;
461 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputDictionary", srcDict));
462 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputText", srcIdx));
463 graph.Infer(srcs, outputBlobs);
466 if (memcmp((*output).data(), &p.ref[0], p.ref.size()) != 0)
467 FAIL() << "Wrong result with compare TF reference!";
// Engine exceptions fail the test (handler body sampled out of this view).
468 } catch (const InferenceEngine::details::InferenceEngineException &e) {
// All checks live in SetUp(); the test body is intentionally empty.
474 TEST_P(MKLDNNCPUExtGatherTFTests, TestsGather) {}
// Shared fixtures for the TF-reference Gather tests: two flattened 2x2 index
// tensors, one 12-element dictionary, and expected outputs named
// ref_in<index-set>_a<axis>_d<dictionary-shape>.
477 std::vector<int32_t> in0 = { 0, 1, 1, 0 };
478 std::vector<int32_t> in1 = { 0, 1, 2, 1 };
479 std::vector<float> dict = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f };
480 std::vector<float> ref_in0_a0_d223 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }; // 2x2x2x3
481 std::vector<float> ref_in0_a2_d232 = { 1.f, 2.f, 2.f, 1.f, 3.f, 4.f, 4.f, 3.f, 5.f, 6.f, 6.f, 5.f, 7.f, 8.f, 8.f, 7.f, 9.f, 10.f, 10.f, 9.f, 11.f, 12.f, 12.f, 11.f }; // 2x3x2x2
482 std::vector<float> ref_in1_a0_d322 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 5.f, 6.f, 7.f, 8.f }; // 2x2x2x2
483 std::vector<float> ref_in1_a1_d232 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 3.f, 4.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f, 9.f, 10.f }; // 2x2x2x2
484 std::vector<float> ref_in1_a2_d223 = { 1.f, 2.f, 3.f, 2.f, 4.f, 5.f, 6.f, 5.f, 7.f, 8.f, 9.f, 8.f, 10.f, 11.f, 12.f, 11.f }; // 2x2x2x2
// Each positive-axis case is paired with its negative-axis equivalent
// (axis - rank), which must produce the identical output. The
// ::testing::Values( wrapper line is sampled out of this view.
486 INSTANTIATE_TEST_CASE_P(
487 TestsGather, MKLDNNCPUExtGatherTFTests,
489 gatherTF_test_params{ { 2, 2 }, in0,{ 2, 2, 3 }, dict, 0, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
490 gatherTF_test_params{ { 2, 2 }, in0,{ 2, 2, 3 }, dict,-3, { 2, 2, 2, 3 }, ref_in0_a0_d223 },
491 gatherTF_test_params{ { 2, 2 }, in0,{ 2, 3, 2 }, dict, 2, { 2, 3, 2, 2 }, ref_in0_a2_d232 },
492 gatherTF_test_params{ { 2, 2 }, in0,{ 2, 3, 2 }, dict,-1, { 2, 3, 2, 2 }, ref_in0_a2_d232 },
493 gatherTF_test_params{ { 2, 2 }, in1,{ 3, 2, 2 }, dict, 0, { 2, 2, 2, 2 }, ref_in1_a0_d322 },
494 gatherTF_test_params{ { 2, 2 }, in1,{ 3, 2, 2 }, dict,-3, { 2, 2, 2, 2 }, ref_in1_a0_d322 },
495 gatherTF_test_params{ { 2, 2 }, in1,{ 2, 3, 2 }, dict, 1, { 2, 2, 2, 2 }, ref_in1_a1_d232 },
496 gatherTF_test_params{ { 2, 2 }, in1,{ 2, 3, 2 }, dict,-2, { 2, 2, 2, 2 }, ref_in1_a1_d232 },
497 gatherTF_test_params{ { 2, 2 }, in1,{ 2, 2, 3 }, dict, 2, { 2, 2, 2, 2 }, ref_in1_a2_d223 },
498 gatherTF_test_params{ { 2, 2 }, in1,{ 2, 2, 3 }, dict,-1, { 2, 2, 2, 2 }, ref_in1_a2_d223 }));
// Fixture for Gather followed by a Concat ("holes" topology: the gather output
// feeds a concat along axis 1 with a third input), reusing gatherTF_test_params.
501 class MKLDNNCPUExtGatherHolesTests : public TestsCommon, public WithParamInterface<gatherTF_test_params> {
502 std::string model_t = R"V0G0N(
503 <net Name="Gather_net" version="2" precision="FP32" batch="1">
505 <layer name="InputText" type="Input" precision="I32" id="1">
513 <layer name="InputDictionary" type="Input" precision="FP32" id="2">
522 <layer name="Input3" type="Input" precision="FP32" id="3">
532 <layer name="gather" id="4" type="Gather" precision="FP32">
554 <layer name="con" id="5" type="Concat" precision="FP32">
555 <concat_data axis="1"/>
581 <edge from-layer="1" from-port="1" to-layer="4" to-port="2"/>
582 <edge from-layer="2" from-port="2" to-layer="4" to-port="1"/>
583 <edge from-layer="4" from-port="3" to-layer="5" to-port="1"/>
584 <edge from-layer="3" from-port="3" to-layer="5" to-port="2"/>
589 std::string getModel(gatherTF_test_params p) {
// Note the different placeholder set here: in_dim expands into _OUTC_
// (the concat-path shape), not _IIDX_ as in the other fixtures.
590 std::string model = model_t;
595 for (auto& idx : p.in_dim) {
597 inIdx += std::to_string(idx) + "</dim>\n";
600 for (auto& dct : p.dct_dim) {
602 inDict += std::to_string(dct) + "</dim>\n";
605 for (auto& dst : p.ref_dim) {
607 out += std::to_string(dst) + "</dim>\n";
610 REPLACE_WITH_STR(model, "_OUTC_", inIdx);
611 REPLACE_WITH_STR(model, "_IDICT_", inDict);
612 REPLACE_WITH_NUM(model, "_AX_", p.axis);
613 REPLACE_WITH_STR(model, "_OUT_", out);
// TearDown body is sampled out in this view.
619 virtual void TearDown() {
// Builds the Gather+Concat IR, creates the graph and runs inference; the
// comparison against the reference follows this span.
622 virtual void SetUp() {
624 TestsCommon::SetUp();
625 gatherTF_test_params p = ::testing::WithParamInterface<gatherTF_test_params>::GetParam();
626 std::string model = getModel(p);
628 InferenceEngine::CNNNetReader net_reader;
629 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
// No-op deleter: cpuExt lives on the stack and must not be deleted by the shared_ptr.
631 InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
632 MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
633 extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
635 MKLDNNGraphTestClass graph;
636 graph.CreateGraph(net_reader.getNetwork(), extMgr);
// Index input: fixed 2x2 shape, independent of p.in_dim.
639 InferenceEngine::Blob::Ptr srcIdx;
641 InferenceEngine::SizeVector in_dim = {2, 2};
642 srcIdx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, in_dim, InferenceEngine::TensorDesc::getLayoutByDims(in_dim) });
// NOTE(review): `in_size` is not declared anywhere in this view — presumably a
// sampled-out local (likely 4, matching the 2x2 blob) — confirm in the full file.
644 memcpy(static_cast<int32_t*>(srcIdx->buffer()), &p.in[0], sizeof(int32_t)*in_size);
645 auto * srcIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(srcIdx.get());
646 if (srcIdxPtr == nullptr)
647 FAIL() << "Cannot cast blob to TBlob<int32_t>.";
// Dictionary input: copy the explicit FP32 values from the params.
650 InferenceEngine::Blob::Ptr srcDict = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.dct_dim, InferenceEngine::TensorDesc::getLayoutByDims(p.dct_dim) });
652 memcpy(srcDict->buffer(), &p.dct[0], sizeof(float)*p.dct.size());
653 auto * srcDictPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(srcDict.get());
654 if (srcDictPtr == nullptr)
655 FAIL() << "Cannot cast blob to TBlob<float>.";
// Allocate the output blob from the network's (only) output info.
658 InferenceEngine::OutputsDataMap out;
659 out = net_reader.getNetwork().getOutputsInfo();
660 InferenceEngine::BlobMap outputBlobs;
661 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
662 InferenceEngine::TBlob<float>::Ptr output;
663 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
665 outputBlobs[item.first] = output;
// Feed all three inputs (srcIdx doubles as Input3 for the concat branch).
668 InferenceEngine::BlobMap srcs;
669 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputDictionary", srcDict));
670 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("InputText", srcIdx));
671 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("Input3", srcIdx));
672 graph.Infer(srcs, outputBlobs);
675 if (memcmp((*output).data(), &p.ref[0], p.ref.size()) != 0)
676 FAIL() << "Wrong result with compare TF reference!";
// Engine exceptions fail the test (handler body sampled out of this view).
678 catch (const InferenceEngine::details::InferenceEngineException &e) {
// All checks live in SetUp(); the test body is intentionally empty.
684 TEST_P(MKLDNNCPUExtGatherHolesTests, TestsGather) {}
// Single case; the ::testing::Values( wrapper line is sampled out of this view.
686 INSTANTIATE_TEST_CASE_P(
687 TestsGather, MKLDNNCPUExtGatherHolesTests,
689 gatherTF_test_params{ { 1, 5, 2, 2 }, in1,{ 1, 3, 2, 2 }, dict, 1,{ 2, 2, 2, 2 }, ref_in1_a0_d322 }));