// inference-engine/tests/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include <gmock/gmock-spec-builders.h>
#include "mkldnn_plugin/mkldnn_graph.h"

#include "test_graph.hpp"

#include "single_layer_common.hpp"
#include <mkldnn_plugin/mkldnn_extension_utils.h>
#include <inference_engine/cnn_network_impl.hpp>
#include "tests_common.hpp"

#define XBYAK_NO_OP_NAMES
#define XBYAK_UNDEF_JNL
#include "../../../../../../../thirdparty/mkl-dnn/src/cpu/xbyak/xbyak_util.h"

using namespace InferenceEngine;
using namespace ::testing;
using namespace std;
using namespace mkldnn;

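// Parameters of a single convolution test case: input dims, kernel geometry,
// strides and paddings, number of output channels and groups, an optional
// auto_pad mode, the expected number of supported primitive descriptors, the
// expected selected implementation type, optional preferred (priority)
// implementation types, and optional per-descriptor check callbacks.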
struct conv_test_params {
    // Formats: NCHW, NCDHW
    vector<size_t> dims;
    // Formats: WH, WHD
    vector<size_t> kernel;
    vector<size_t> strides;
    vector<size_t> pads_begin;
    vector<size_t> pads_end;

    size_t out_c;
    size_t grp_c;
    string auto_pad;

    size_t num_prim_desc;

    int selectedType;
    vector<MKLDNNPlugin::impl_desc_type> preferTypes;

    vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
};

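// Naive reference convolution used to validate the MKLDNN primitive output.
// Handles grouped 2D (NCHW) and 3D (NCDHW) cases; output width/height are
// computed as O = (I + pad_begin + pad_end - K) / stride + 1.
// Weights are laid out as [G][OC/G][IC/G][D][H][W], followed by OC bias values.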
template <typename data_t>
void ref_conv(const TBlob<data_t> &src, const data_t *weights, const size_t weightsSize,
                TBlob<data_t> &dst, conv_test_params prm) {
    SizeVector src_dims = src.getTensorDesc().getDims();
    auto dims_size = src_dims.size();

    size_t KW = prm.kernel[X_AXIS];
    size_t KH = prm.kernel[Y_AXIS];
    size_t KD = dims_size == 5 ? prm.kernel[Z_AXIS] : 1u;
    size_t GC = prm.grp_c;

    size_t IC = src_dims[1];
    size_t ID = dims_size == 5 ? src_dims[dims_size - 3] : 1u;
    size_t IH = src_dims[dims_size - 2];
    size_t IW = src_dims[dims_size - 1];

    size_t OW = (IW + prm.pads_end[X_AXIS] + prm.pads_begin[X_AXIS] - prm.kernel[X_AXIS]) / prm.strides[X_AXIS] + 1u;
    size_t OH = (IH + prm.pads_end[Y_AXIS] + prm.pads_begin[Y_AXIS] - prm.kernel[Y_AXIS]) / prm.strides[Y_AXIS] + 1u;
    size_t OD = dims_size == 5 ? (ID + 2u * prm.pads_begin[Z_AXIS] - prm.kernel[Z_AXIS]) / prm.strides[Z_AXIS] + 1u : 1u;
    size_t OC = prm.out_c;


    const data_t *src_data = src.readOnly();
    const data_t *weights_data = weights;
    const data_t *bias_data = weights_data + KW * KH * KD * OC * IC / GC;
    data_t *dst_data = dst.data();

    IE_ASSERT(KW * KH * KD * OC * IC / GC + OC == weightsSize);
    SizeVector dst_dims = dst.getTensorDesc().getDims();
    auto dst_dims_size = dst_dims.size();
    IE_ASSERT(OW == dst_dims[dst_dims_size - 1]);
    IE_ASSERT(OH == dst_dims[dst_dims_size - 2]);

    size_t SC1 = OH * OW;
    size_t SC2 = SC1 * OD;
    size_t SC3 = OC / GC;
    size_t SC4 = SC2 * SC3;

    size_t IC1 = IH * IW;
    size_t IC2 = IC1 * ID;
    size_t IC3 = IC / GC;
    size_t IC4 = IC2 * IC3;

    size_t KC1 = KH * KW;
    size_t KC2 = KC1 * KD;
    size_t KC3 = IC3 * KC2;
    size_t KC4 = SC3 * KC3;

    for (uint32_t g = 0; g < GC; g++) {
        size_t gc = g * SC4;
        size_t goc = g * SC3;
        size_t gic = g * IC4;
        size_t gkc = g * KC4;
        for (uint32_t oc = 0; oc < OC / GC; oc++) {
            size_t cc = gc + oc * SC2;
            size_t gooc = goc + oc;
            size_t gkoc = gkc + oc * KC3;
            for (uint32_t od = 0; od < OD; od++) {
                size_t dc = cc + od * SC1;
                for (uint32_t oh = 0; oh < OH; oh++) {
                    size_t hc = dc + oh * OW;
                    for (uint32_t ow = 0; ow < OW; ow++) {
                        size_t oidx = hc + ow;

                        dst_data[oidx] = bias_data[gooc];

                        for (size_t ic = 0; ic < IC / GC; ic++) {
                            size_t icc = gkoc + ic * KC2;
                            size_t kicc = gic + ic * IC2;
                            for (size_t kd = 0; kd < KD; kd++) {
                                int32_t id = dims_size == 5 ? od * prm.strides[Z_AXIS] - prm.pads_begin[Z_AXIS] + kd : 0;
                                if (id < 0 || id >= (int32_t)ID) continue;
                                size_t kidc = kicc + id * IC1;
                                size_t kdc = icc + kd * KC1;
                                for (size_t kh = 0; kh < KH; kh++) {
                                    int32_t ih = oh * prm.strides[Y_AXIS] - prm.pads_begin[Y_AXIS] + kh;
                                    if (ih < 0 || ih >= (int32_t)IH) continue;
                                    size_t kihc = kidc + ih * IW;
                                    size_t khc = kdc + kh * KW;
                                    for (size_t kw = 0; kw < KW; kw++) {
                                        int32_t iw = ow * prm.strides[X_AXIS] - prm.pads_begin[X_AXIS] + kw;
                                        if (iw < 0 || iw >= (int32_t)IW) continue;

                                        size_t iidx = kihc + iw;
                                        size_t widx = khc + kw;

                                        dst_data[oidx] += src_data[iidx] * weights_data[widx];
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}

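// Test fixture: builds a single-layer Convolution IR from the template below,
// creates an MKLDNN test graph from it, verifies the supported and selected
// primitive descriptors against the expectations in conv_test_params, then
// runs inference on random data and compares the result with ref_conv().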
class MKLDNNGraphConvolutionTests: public TestsCommon,
                                   public WithParamInterface<conv_test_params> {
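    // IR template for a single Convolution layer. Placeholders are substituted
    // by getModel(): __SRC_DIMS__/__DST_DIMS__ (shape dims), _IN_ (batch),
    // _K_/_KS_/_PB_/_PE_ (kernel, strides, paddings), _OC_/_GC_ (output
    // channels, groups), _AP_ (optional auto_pad attribute), _IMPLS_
    // (PrimitivesPriority list) and _S1_/_S2_ (weights/biases byte sizes).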
    std::string model_t_5D = R"V0G0N(
<net name="Convolution_Only" version="4" precision="FP32" batch="1">
    <layers>
        <layer name="in1" type="Input" precision="FP32" id="0">
            <output>
                <port id="0">__SRC_DIMS__
                </port>
            </output>
        </layer>
        <layer name="conv1" id="1" type="Convolution" precision="FP32">
            <convolution _AP_ kernel="_K_"
                         pads_begin="_PB_"  pads_end="_PE_"
                         strides="_KS_"
                         output="_OC_"  group="_GC_" PrimitivesPriority="_IMPLS_"/>

            <weights offset="0" size="_S1_" />
            <biases offset="_S1_" size="_S2_" />

            <input>
                <port id="1">__SRC_DIMS__
                </port>
            </input>
            <output>
                <port id="2">
                    <dim>_IN_</dim>
                    <dim>_OC_</dim>__DST_DIMS__
                </port>
            </output>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
    </edges>
</net>
)V0G0N";

protected:
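    // Fills the IR template with the parameters of the current test case and
    // returns the resulting model XML. Destination spatial dims follow
    // (I + pad_begin + pad_end - K) / stride + 1.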
    std::string getModel(conv_test_params p) {
        std::string model = model_t_5D;
        std::string s_dims;
        for (auto& dim : p.dims) {
            s_dims += "\n                    <dim>";
            s_dims += std::to_string(dim) + "</dim>";
        }
        REPLACE_WITH_STR(model, "__SRC_DIMS__", s_dims);

        s_dims = "";
        int k_len = p.kernel.size();
        for (size_t i = 2; i < p.dims.size(); i++) {
            size_t inx = k_len - i + 1;
            size_t dim = (p.dims[i] + p.pads_end[inx] + p.pads_begin[inx] - p.kernel[inx]) / p.strides[inx] + 1lu;
            s_dims += "\n                    <dim>";
            s_dims += std::to_string(dim) + "</dim>";
        }
        REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims);

        REPLACE_WITH_NUM(model, "_IN_", p.dims[0]);

        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_K_", p.kernel);
        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_KS_", p.strides);
        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PB_", p.pads_begin);
        REPLACE_WITH_NUM_VECTOR_REVERSE(model, "_PE_", p.pads_end);
        string auto_pad;
        if (!p.auto_pad.empty()) auto_pad = string("auto_pad=") + string("\"") + p.auto_pad + string("\"");
        REPLACE_WITH_STR(model, "_AP_", auto_pad);

        REPLACE_WITH_NUM(model, "_GC_", p.grp_c);
        REPLACE_WITH_NUM(model, "_OC_", p.out_c);

        size_t w_data_size = 1;
        for (auto ker : p.kernel) {
            w_data_size *= ker;
        }

        w_data_size = (w_data_size * p.out_c * p.dims[1] / p.grp_c) * sizeof(float);
        size_t b_data_size = p.out_c * sizeof(float);

        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
        REPLACE_WITH_NUM(model, "_S2_", b_data_size);

        std::string impls;
        for (const auto& preferType : p.preferTypes) {
            if (!impls.empty())
                impls += ",";
            impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType);
        }
        REPLACE_WITH_STR(model, "_IMPLS_", impls);

        return model;
    }

    virtual void TearDown() {
    }

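    // Builds the graph for the current parameter set, validates the convolution
    // node's primitive descriptors, then infers random data and compares the
    // output against the reference convolution.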
    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
            std::string model = getModel(p);

            CNNNetReader net_reader;
            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

            size_t blob_size = p.out_c * p.dims[1] / p.grp_c;
            for (auto k : p.kernel) {
                blob_size *= k;
            }
            blob_size = (blob_size + p.out_c) * sizeof(float);
            TBlob<uint8_t> *weights = new TBlob<uint8_t>
                    ({ Precision::U8, {blob_size}, C });
            weights->allocate();

            fill_data((float *) weights->buffer(), weights->size() / sizeof(float));

            TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);

            net_reader.SetWeights(weights_ptr);
            CNNNetwork network = net_reader.getNetwork();

            MKLDNNGraphTestClass graph;
            graph.CreateGraph(network);

            auto& nodes = graph.getNodes();
            bool isWino = false;
            for (auto &node : nodes) {
                if (node->getType() == MKLDNNPlugin::Convolution) {
                    ASSERT_LE(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size());
                    for (const auto& prim : node->getSupportedPrimitiveDescriptors()) {
                        std::cout << MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(prim.getImplementationType()) << " ";
                    }
                    std::cout << std::endl;
                    for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
                        p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j));
                    }
                    ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor());
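                    // A Winograd kernel is only expected to be selected when it is the
                    // preferred implementation and the CPU supports the required AVX-512
                    // features (F/BW/VL/DQ); otherwise the default expectation applies.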
                    Xbyak::util::Cpu cpu;
                    if (cpu.has(Xbyak::util::Cpu::tAVX512F)
                            && cpu.has(Xbyak::util::Cpu::tAVX512BW)
                            && cpu.has(Xbyak::util::Cpu::tAVX512VL)
                            && cpu.has(Xbyak::util::Cpu::tAVX512DQ)
                            && !p.preferTypes.empty()
                            && p.preferTypes[0] == MKLDNNPlugin::impl_desc_type::jit_avx512_winograd) {
                        isWino = true;
                        ASSERT_EQ(p.preferTypes[0], node->getSelectedPrimitiveDescriptor()->getImplementationType());
                    } else {
                        ASSERT_EQ(p.selectedType,
                                  node->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType);
                    }
                }
            }

            Layout layout = ANY;
            switch (p.dims.size()) {
                case 4:
                    layout = NCHW;
                    break;
                case 5:
                    layout = NCDHW;
                    break;
            }

            Blob::Ptr src = make_shared_blob<float>
                    ({ Precision::FP32, p.dims, layout });
            src->allocate();
            fill_data(src->buffer(), src->size());

            auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());

            if (srcPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<float>.";

            BlobMap srcs;
            srcs.insert(std::pair<std::string, Blob::Ptr>("in1", src));

            OutputsDataMap out;
            out = network.getOutputsInfo();
            BlobMap outputBlobs;

            std::pair<std::string, DataPtr> item = *out.begin();

            TBlob<float>::Ptr output;
            output = make_shared_blob<float>(item.second->getTensorDesc());
            output->allocate();
            outputBlobs[item.first] = output;

            graph.Infer(srcs, outputBlobs);

            TBlob<float> dst_ref(item.second->getTensorDesc());
            dst_ref.allocate();
            ref_conv(*srcPtr, (const float *)weights->buffer(), weights->size() / sizeof(float), dst_ref, p);
            compare(*output, dst_ref, 0.0002f);
        } catch (const details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};

TEST_P(MKLDNNGraphConvolutionTests, TestsConvolution) {}

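// Each entry below lists, in conv_test_params order: dims, kernel, strides,
// pads_begin, pads_end, out_c, grp_c, auto_pad, num_prim_desc, selectedType
// and (optionally) preferTypes. A hypothetical extra case would look like:
//   conv_test_params{{1, 16, 32, 32}, {3, 3}, {1, 1}, {1, 1}, {1, 1},
//                    32, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit}
// (illustrative only; not part of the suite).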
INSTANTIATE_TEST_CASE_P(
        TestConvolution, MKLDNNGraphConvolutionTests,
        ::testing::Values(
        /*0*/   conv_test_params{{1, 9, 16, 32},
                                 {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "same_upper", 6, MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_1x1 },
                conv_test_params{{1, 9, 32, 16},
                                 {2, 4}, {1, 1}, {1, 1}, {0, 2}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit },
                conv_test_params{{1, 9, 32, 16},
                                 {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit },
                conv_test_params{{1, 3, 40, 40},
                                 {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit },
                conv_test_params{{1, 1, 40, 40},
                                 {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit },
                conv_test_params{{1, 1, 32, 16},
                                 {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit },
                conv_test_params{{1, 9, 32, 16},
                                 {2, 4}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::ref_any,
                                 {MKLDNNPlugin::impl_desc_type::ref_any} },
                conv_test_params{{1, 4, 54, 96},
                                 {3, 3}, {1, 1}, {1, 1}, {0, 0}, 64, 1, "", 3, MKLDNNPlugin::impl_desc_type::ref_any,
                                 {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd, MKLDNNPlugin::impl_desc_type::ref_any}},
                // 5D
        /*8*/   conv_test_params{{1, 3, 15, 20, 20},
                                 {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::ref_any,
                                 {MKLDNNPlugin::impl_desc_type::ref_any} },
                conv_test_params{{1, 24, 15, 20, 20},
                                 {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::ref_any,
                                 {MKLDNNPlugin::impl_desc_type::ref_any} },
                conv_test_params{{1, 32, 15, 20, 20},
                                 {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::ref_any,
                                 {MKLDNNPlugin::impl_desc_type::ref_any} },
                conv_test_params{{1, 3, 15, 25, 20},
                                 {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit },
                conv_test_params{{1, 24, 15, 25, 20},
                                 {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit },
        /*13*/  conv_test_params{{1, 32, 15, 25, 20},
                                 {3, 3, 3}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit },
#ifdef USE_MKL
                conv_test_params{{1, 9, 16, 32},
                                 {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 6, MKLDNNPlugin::impl_desc_type::gemm,
                                 {MKLDNNPlugin::impl_desc_type::gemm_any,
                                  MKLDNNPlugin::impl_desc_type::gemm_blas,
                                  MKLDNNPlugin::impl_desc_type::gemm_avx512,
                                  MKLDNNPlugin::impl_desc_type::gemm_avx2,
                                  MKLDNNPlugin::impl_desc_type::gemm_sse42} },
                conv_test_params{{1, 5, 15, 20, 20},
                                 {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas },
                conv_test_params{{1, 5, 15, 20, 20},
                                 {3, 3, 3}, {3, 2, 1}, {0, 0, 0}, {0, 0, 0}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas },
                // conv_test_params{{1, 5, 15, 20, 20},
                //                  {3, 3, 3}, {1, 1, 1}, {2, 2, 2}, {1, 1, 1}, 64, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas },
                conv_test_params{{1, 16, 30, 30, 10},
                                 {5, 5, 5}, {1, 1, 1}, {2, 2, 2}, {2, 2, 2}, 16, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas,
                                 {MKLDNNPlugin::impl_desc_type::gemm_blas} },
                conv_test_params{{1, 4, 16, 16, 16},
                                 {3, 3, 3}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, 8, 1, "", 2, MKLDNNPlugin::impl_desc_type::gemm_blas },
#endif
        /*20*/  conv_test_params{{1, 16, 30, 30, 10},
                                 {5, 5, 5}, {1, 1, 1}, {2, 2, 2}, {2, 2, 2}, 16, 1, "", 2, MKLDNNPlugin::impl_desc_type::jit },
                conv_test_params{{1, 16, 30, 30, 10},
                                 {5, 5, 5}, {1, 1, 1}, {2, 2, 2}, {2, 2, 2}, 16, 1, "", 2, MKLDNNPlugin::impl_desc_type::ref_any,
                                 {MKLDNNPlugin::impl_desc_type::ref_any} }));

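// Same fixture, but the network batch is reshaped to at least 2 and the graph
// is verified with dynamic batch sizes instead of being compared to ref_conv.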
class MKLDNNGraphDynBatchConvolutionTests: public MKLDNNGraphConvolutionTests {
protected:
    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            conv_test_params p = ::testing::WithParamInterface<conv_test_params>::GetParam();
            std::string model = getModel(p);
            std::vector<size_t> dims = p.dims;
            if (dims[0] < 2)
                dims[0] = 2;

            CNNNetReader net_reader;
            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

            size_t blob_size = p.out_c * dims[1] / p.grp_c;
            for (auto k : p.kernel) {
                blob_size *= k;
            }
            blob_size = (blob_size + p.out_c) * sizeof(float);
            TBlob<uint8_t> *weights = new TBlob<uint8_t>({ Precision::U8, {blob_size}, Layout::C });
            weights->allocate();
            fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
            TBlob<uint8_t>::Ptr weights_ptr = TBlob<uint8_t>::Ptr(weights);

            net_reader.SetWeights(weights_ptr);
            CNNNetwork network = net_reader.getNetwork();
            auto implNet = dynamic_cast<details::CNNNetworkImpl *>(&((ICNNNetwork&)network));
            ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
            ResponseDesc resp;
            StatusCode sts = implNet->setBatchSizeReshape(dims[0], &resp);
            ASSERT_EQ((int)StatusCode::OK, sts) << resp.msg;

            MKLDNNGraphTestClass graph;
            graph.CreateGraph(network);

            Layout layout = ANY;
            switch (dims.size()) {
                case 4:
                    layout = NCHW;
                    break;
                case 5:
                    layout = NCDHW;
                    break;
            }

            Blob::Ptr src = make_shared_blob<float>({ Precision::FP32, dims, layout });
            src->allocate();
            fill_data(src->buffer(), src->size());

            auto * srcPtr = dynamic_cast<TBlob<float>*>(src.get());

            if (srcPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<float>.";

            BlobMap srcs;
            srcs.insert(std::pair<std::string, Blob::Ptr>("in1", src));

            OutputsDataMap out;
            out = network.getOutputsInfo();
            BlobMap outputBlobs;

            std::pair<std::string, DataPtr> item = *out.begin();

            TBlob<float>::Ptr output;
            output = make_shared_blob<float>(item.second->getTensorDesc());
            output->allocate();
            outputBlobs[item.first] = output;

            auto checkConvolution = [](const MKLDNNPlugin::MKLDNNNodePtr& node) {
                return node->getType() == MKLDNNPlugin::Convolution;
            };

            graph.checkDynBatch(srcs, outputBlobs, dims[0], dims[0], checkConvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child);
            graph.checkDynBatch(srcs, outputBlobs, 1, dims[0], checkConvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child);
        } catch (const details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};

TEST_P(MKLDNNGraphDynBatchConvolutionTests, TestsDynBatchConvolution) {}

INSTANTIATE_TEST_CASE_P(
        TestDynBatchConvolution, MKLDNNGraphDynBatchConvolutionTests,
        ::testing::Values(
                conv_test_params{{1, 8, 16, 32},
                                 {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "same_upper", 7, MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_1x1,
                                 {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd}},
                conv_test_params{{1, 9, 32, 16},
                                 {2, 4}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit,
                                 {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} },
                conv_test_params{{1, 9, 32, 16},
                                 {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit,
                                 {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} },
                conv_test_params{{1, 3, 40, 40},
                                 {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit,
                                 {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} },
                conv_test_params{{1, 1, 40, 40},
                                 {3, 3}, {1, 2}, {0, 0}, {0, 0}, 20, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit,
                                 {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} },
                conv_test_params{{1, 1, 32, 16},
                                 {2, 4}, {2, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::jit,
                                 {MKLDNNPlugin::impl_desc_type::jit_avx512_winograd} },
#ifdef USE_MKL
                conv_test_params{{1, 9, 16, 32},
                                 {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 7, MKLDNNPlugin::impl_desc_type::gemm,
                                 {MKLDNNPlugin::impl_desc_type::gemm_any,
                                  MKLDNNPlugin::impl_desc_type::gemm_blas,
                                  MKLDNNPlugin::impl_desc_type::gemm_avx512,
                                  MKLDNNPlugin::impl_desc_type::gemm_avx2,
                                  MKLDNNPlugin::impl_desc_type::gemm_sse42}
                },
#endif
                conv_test_params{{1, 9, 32, 16},
                                 {2, 4}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::ref_any, {MKLDNNPlugin::impl_desc_type::ref_any} }));