Publishing R3
[platform/upstream/dldt.git] / inference-engine / tests / unit / engines / mkldnn / graph / layers / internal / graph_deconv_test.cpp
1 // Copyright (C) 2018 Intel Corporation
2 //
3 // SPDX-License-Identifier: Apache-2.0
4 //
5
6 #include <gtest/gtest.h>
7 #include <gmock/gmock-spec-builders.h>
8 #include "mkldnn_plugin/mkldnn_graph.h"
9 #include "mock_mkldnn_primitive.hpp"
10
11 #include "test_graph.hpp"
12
13 #include "single_layer_common.hpp"
14 #include <mkldnn_plugin/mkldnn_extension_utils.h>
15 #include <inference_engine/cnn_network_impl.hpp>
16 #include "tests_common.hpp"
17
18
19 using namespace ::testing;
20 using namespace std;
21 using namespace mkldnn;
22
23
// Parameter pack for one deconvolution test case.
// NOTE: instances are aggregate-initialized positionally in the
// INSTANTIATE_TEST_CASE_P lists below, so member order is effectively
// part of the interface - do not reorder fields.
struct deconv_test_params {
    // Input tensor geometry (NCHW).
    struct {
        size_t n;   // batch
        size_t c;   // channels
        size_t h;   // height
        size_t w;   // width
    } in;

    size_t krn_w;   // kernel width
    size_t krn_h;   // kernel height
    size_t str_w;   // stride along width
    size_t str_h;   // stride along height
    size_t pad_w;   // padding along width (symmetric)
    size_t pad_h;   // padding along height (symmetric)

    size_t out_c;   // number of output channels
    size_t grp_c;   // number of groups (grp_c == in.c means depthwise)

    // Minimum number of supported primitive descriptors the node must expose.
    size_t num_prim_desc;

    // Implementation types accepted for the selected primitive (bitmask match).
    std::vector<int> selectedTypes;
    // Preferred implementation types; currently unused by these tests.
    std::vector<MKLDNNPlugin::impl_desc_type> preferTypes;

    // Optional per-descriptor checks, applied in order to the first
    // num_prim_desc supported primitive descriptors.
    std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
};
49
// Scalar reference implementation of 2D grouped deconvolution (transposed
// convolution), used as the golden result for the MKLDNN plugin output.
// No bias term - the tested layer has none.
//
// src          input blob, NCHW layout (geometry read from the blob dims).
// weights      raw weight array; indexing below assumes layout
//              [G][IC/G][OC/G][KH][KW] (IC-major within a group) - assumed
//              to match the plugin's deconvolution weight order; TODO confirm
//              against MKLDNNDeconvolutionNode if the layout ever changes.
// weightsSize  element count of `weights`; currently unused (offsets are
//              derived purely from the shape parameters).
// dst          output blob, NCHW; every element is overwritten.
// prm          kernel/stride/pad/group/out-channel parameters.
template <typename data_t>
void ref_deconv(const InferenceEngine::TBlob<data_t> &src, const data_t *weights, const size_t weightsSize,
                InferenceEngine::TBlob<data_t> &dst, deconv_test_params prm) {

    size_t G  = prm.grp_c;
    size_t KW = prm.krn_w;
    size_t KH = prm.krn_h;

    size_t PW = prm.pad_w;
    size_t PH = prm.pad_h;

    size_t SW = prm.str_w;
    size_t SH = prm.str_h;

    // Input geometry comes from the blob itself, not from prm.in.
    size_t IW = src.dims()[3];
    size_t IH = src.dims()[2];
    size_t IC = src.dims()[1];
    size_t MB = src.dims()[0];

    size_t OC = prm.out_c;

    // Standard transposed-convolution output size (no output padding):
    // O = S * (I - 1) + K - 2 * P. Must match _OH_/_OW_ in getModel().
    size_t OW = SW * (IW - 1) + KW - 2 * PW;
    size_t OH = SH * (IH - 1) + KH - 2 * PH;

    const data_t *src_data = src.readOnly();
    const data_t *weights_data = weights;

    data_t *dst_data = dst.data();

    // The four collapsed loops each write a disjoint set of dst elements
    // (every didx is produced by exactly one iteration and initialized to
    // zero below), so the parallel region is race-free.
#   pragma omp parallel for collapse(4) schedule(static)
    for (int g = 0; g < G; ++g) {
        for (int mb = 0; mb < MB; ++mb) {
            for (int oc = 0; oc < OC / G; ++oc) {
                for (int oh = 0; oh < OH; ++oh) {
                    for (int ow = 0; ow < OW; ++ow) {
                        // NCHW offset of the output element. NOTE(review):
                        // g * OC / G parses as (g * OC) / G, which equals
                        // g * (OC / G) only when OC is divisible by G - true
                        // for all parameter sets in this file.
                        size_t didx = mb * OC * OH * OW
                                      + (g * OC / G + oc) * OH * OW + oh * OW + ow;

                        dst_data[didx] = data_t(0);

                        // Gather formulation: accumulate every (ic, kh, kw)
                        // tap whose forward-convolution footprint covers
                        // output pixel (oh, ow).
                        for (int ic = 0; ic < IC / G; ic++) {
                            for (int kh = 0; kh < KH; kh++) {
                                for (int kw = 0; kw < KW; kw++) {
                                    // Guard BEFORE the unsigned subtraction
                                    // below: skip taps that would map to a
                                    // negative input coordinate.
                                    if (ow + PW < kw || oh + PH < kh)
                                        continue;

                                    size_t iw = ow - kw + PW;
                                    size_t ih = oh - kh + PH;

                                    // Only stride-aligned positions map back
                                    // to a real input pixel.
                                    if (iw % SW != 0 || ih % SH != 0)
                                        continue;

                                    iw /= SW;
                                    ih /= SH;

                                    if (ih < IH && iw < IW) {
                                        size_t sidx = mb * IC * IH * IW
                                                      + (g * IC / G + ic) * IH * IW + ih * IW
                                                      + iw;

                                        // Weight offset in [G][IC/G][OC/G][KH][KW].
                                        // NOTE(review): the '+ +' before oc is a
                                        // harmless unary plus (typo), value is
                                        // unchanged.
                                        size_t widx = g * (IC / G) * (OC / G) * KH * KW +
                                                      ic * (OC / G) * KH * KW +
                                                      + oc * KH * KW + kh * KW
                                                      + kw;

                                        dst_data[didx] += src_data[sidx] * weights_data[widx];
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
126
// Parameterized single-layer test: builds a one-layer Deconvolution IR from
// model_t, runs it through the MKLDNN plugin graph, verifies the selected
// primitive implementation type, and compares the plugin output against
// ref_deconv(). All work happens in SetUp(); the TEST_P body is empty.
class MKLDNNGraphDeconvolutionalTests: public TestsCommon,
                                     public WithParamInterface<deconv_test_params> {
    // IR v2 template with a single Input -> Deconvolution chain.
    // The _X_ placeholders are substituted in getModel().
    std::string model_t = R"V0G0N(
<Net Name="Deconvolution_Only" version="2" precision="FP32" batch="1">
    <layers>
        <layer name="in1" type="Input" precision="FP32" id="0">
            <output>
                <port id="0">
                    <dim>_IN_</dim>
                    <dim>_IC_</dim>
                    <dim>_IH_</dim>
                    <dim>_IW_</dim>
                </port>
            </output>
        </layer>
        <layer name="deconv1" id="1" type="Deconvolution" precision="FP32">
            <deconvolution stride-x="_SW_" stride-y="_SH_"
                         pad-x="_PW_"    pad-y="_PH_"
                         kernel-x="_KW_" kernel-y="_KH_"
                         output="_OC_"   group="_GC_"/>

            <weights offset="0" size="_S1_" />

            <input>
                <port id="1">
                    <dim>_IN_</dim>
                    <dim>_IC_</dim>
                    <dim>_IH_</dim>
                    <dim>_IW_</dim>
                </port>
            </input>
            <output>
                <port id="2">
                    <dim>_IN_</dim>
                    <dim>_OC_</dim>
                    <dim>_OH_</dim>
                    <dim>_OW_</dim>
                </port>
            </output>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
    </edges>
</Net>
)V0G0N";

protected:
    // Renders model_t with the concrete test parameters.
    // Output spatial dims follow the deconvolution formula
    // O = S * (I - 1) + K - 2 * P (must agree with ref_deconv).
    std::string getModel(deconv_test_params p) {
        std::string model = model_t;

        REPLACE_WITH_NUM(model, "_IW_", p.in.w);
        REPLACE_WITH_NUM(model, "_IH_", p.in.h);
        REPLACE_WITH_NUM(model, "_IC_", p.in.c);
        REPLACE_WITH_NUM(model, "_IN_", p.in.n);

        REPLACE_WITH_NUM(model, "_KW_", p.krn_w);
        REPLACE_WITH_NUM(model, "_KH_", p.krn_h);
        REPLACE_WITH_NUM(model, "_SW_", p.str_w);
        REPLACE_WITH_NUM(model, "_SH_", p.str_h);
        REPLACE_WITH_NUM(model, "_PW_", p.pad_w);
        REPLACE_WITH_NUM(model, "_PH_", p.pad_h);

        REPLACE_WITH_NUM(model, "_GC_", p.grp_c);
        REPLACE_WITH_NUM(model, "_OC_", p.out_c);
        REPLACE_WITH_NUM(model, "_OH_", p.str_h * (p.in.h - 1) + p.krn_h - 2 * p.pad_h);
        REPLACE_WITH_NUM(model, "_OW_", p.str_w * (p.in.w - 1) + p.krn_w - 2 * p.pad_w);

        // Weight blob size in BYTES (the <weights> "size" attribute).
        size_t w_data_size = (p.krn_w * p.krn_h * p.out_c * (p.in.c / p.grp_c)) * sizeof(float);
        REPLACE_WITH_NUM(model, "_S1_", w_data_size);
        return model;
    }

    virtual void TearDown() {
    }

    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            deconv_test_params p = ::testing::WithParamInterface<deconv_test_params>::GetParam();
            std::string model = getModel(p);

            InferenceEngine::CNNNetReader net_reader;
            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

            // U8 blob sized in bytes; the element count therefore already
            // includes the sizeof(float) factor.
            InferenceEngine::SizeVector dims_weights = {(p.krn_w * p.krn_h * p.out_c * (p.in.c / p.grp_c)) * sizeof(float)};

            InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, dims_weights);

            weights->allocate();
            // Fill as floats: byte size / sizeof(float) float elements.
            fill_data(weights->data().as<float*>(), weights->size() / sizeof(float));

            // Ownership of `weights` transfers to the shared_ptr (no leak).
            InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);

            net_reader.SetWeights(weights_ptr);

            MKLDNNGraphTestClass graph;
            graph.CreateGraph(net_reader.getNetwork());
            auto& nodes = graph.getNodes();
            for (auto &node : nodes) {
                if (node->getType() == MKLDNNPlugin::Deconvolution) {
                    // At least num_prim_desc implementations must be offered;
                    // run any per-descriptor checks supplied by the test case.
                    ASSERT_LE(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size());
                    for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
                        p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j));
                    }
                    ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor());
                    // The selected implementation must match one of the
                    // expected type masks (bitwise subset match).
                    bool good_prim = false;
                    for (auto & selected : p.selectedTypes)
                        if (selected == (node->getSelectedPrimitiveDescriptor()->getImplementationType() & selected))
                            good_prim = true;
                    ASSERT_TRUE(good_prim);
                }
            }

            InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};

            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
            src->allocate();
            fill_data(src->buffer(), src->size());

            InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());

            if (srcPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<float>.";

            InferenceEngine::BlobMap srcs;
            srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));

            // The network has a single output; grab its descriptor to shape
            // both the plugin output blob and the reference blob.
            InferenceEngine::OutputsDataMap out;
            out = net_reader.getNetwork().getOutputsInfo();
            InferenceEngine::BlobMap outputBlobs;

            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();

            InferenceEngine::TBlob<float>::Ptr output;
            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
            output->allocate();
            outputBlobs[item.first] = output;

            graph.Infer(srcs, outputBlobs);

            InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
            dst_ref.allocate();

            // Golden result from the scalar reference implementation.
            ref_deconv(*srcPtr, weights->readOnly().as<const float*>(), weights->size() / sizeof(float), dst_ref, p);

            compare(*output, dst_ref);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};
279
280 TEST_P(MKLDNNGraphDeconvolutionalTests, TestsDeconvolution) {}
281
282
// Positional field legend for each deconv_test_params row:
// {in{n,c,h,w}, krn_w, krn_h, str_w, str_h, pad_w, pad_h,
//  out_c, grp_c, num_prim_desc, {expected impl types}}
// Rows with grp_c == out_c == in.c exercise the depthwise (_dw) path.
INSTANTIATE_TEST_CASE_P(
        TestDeconvolution, MKLDNNGraphDeconvolutionalTests,
        ::testing::Values(
                deconv_test_params{{1, 3, 3, 3}, 3, 3, 1, 1, 0, 0, 2, 1, 5, {MKLDNNPlugin::impl_desc_type::jit} },
                deconv_test_params{{3, 3, 3, 3}, 4, 3, 1, 1, 0, 0, 2, 1, 5, {MKLDNNPlugin::impl_desc_type::jit} },
                deconv_test_params{{1, 3, 3, 3}, 4, 3, 1, 2, 0, 0, 2, 1, 4, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
                deconv_test_params{{1, 3, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
                deconv_test_params{{4, 17, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
                deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 2, 3, {MKLDNNPlugin::impl_desc_type::gemm}},
                deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
                deconv_test_params{{2, 8, 5, 5}, 8, 8, 4, 4, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
                deconv_test_params{{2, 8, 5, 5}, 4, 8, 2, 4, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}
        ));
296
// Dynamic-batch variant: builds the same single-deconvolution network,
// enables KEY_DYN_BATCH_ENABLED, reshapes to batch MB (>= 2), then checks
// inference at both the full batch and batch 1 via graph.checkDynBatch().
// Unlike the base class, output values are validated by checkDynBatch,
// not against ref_deconv.
class MKLDNNGraphDynBatchDeconvolutionalTests: public MKLDNNGraphDeconvolutionalTests {
protected:
    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            deconv_test_params p = ::testing::WithParamInterface<deconv_test_params>::GetParam();
            std::string model = getModel(p);
            // Dynamic batch only makes sense with at least two batches.
            size_t MB = p.in.n;
            if (MB < 2)
                MB = 2;

            InferenceEngine::CNNNetReader net_reader;
            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

            // Weight blob creation mirrors the base class: U8 blob sized in
            // bytes, filled as floats; ownership passes to the shared_ptr.
            InferenceEngine::SizeVector dims_weights = {(p.krn_w * p.krn_h * p.out_c * (p.in.c / p.grp_c)) * sizeof(float)};
            InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, dims_weights);
            weights->allocate();
            fill_data(weights->data().as<float*>(), weights->size() / sizeof(float));
            InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
            net_reader.SetWeights(weights_ptr);

            // Reshape the parsed network to the enlarged batch before the
            // graph is created (requires the CNNNetworkImpl backdoor).
            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
            auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
            ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
            InferenceEngine::ResponseDesc resp;
            InferenceEngine::StatusCode sts  = implNet->setBatchSizeReshape(MB, &resp);
            ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg;


            MKLDNNGraphTestClass graph;
            graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
            graph.CreateGraph(net_reader.getNetwork());

            // Source blob uses the enlarged batch MB, not p.in.n.
            InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
            InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
            if (srcPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<float>.";

            src->allocate();
            fill_data(src->buffer(), src->size());

            InferenceEngine::BlobMap srcs;
            srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));

            InferenceEngine::OutputsDataMap out;
            out = net_reader.getNetwork().getOutputsInfo();
            InferenceEngine::BlobMap outputBlobs;

            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();

            InferenceEngine::TBlob<float>::Ptr output;
            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
            output->allocate();
            outputBlobs[item.first] = output;

            // Predicate selecting the node whose dyn-batch behavior is checked.
            auto checkDeconvolution = [](const MKLDNNPlugin::MKLDNNNodePtr& node) {
                return node->getType() == MKLDNNPlugin::Deconvolution;
            };

            // Verify inference at the full batch and at batch 1.
            graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkDeconvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child);
            graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkDeconvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};
364
// Empty body: all setup and dyn-batch verification run in SetUp() above.
TEST_P(MKLDNNGraphDynBatchDeconvolutionalTests, TestsDynBatchDeconvolutional) {}

// Same parameter rows as TestDeconvolution; field legend:
// {in{n,c,h,w}, krn_w, krn_h, str_w, str_h, pad_w, pad_h,
//  out_c, grp_c, num_prim_desc, {expected impl types}}
INSTANTIATE_TEST_CASE_P(
        TestsDynBatchDeconvolutional, MKLDNNGraphDynBatchDeconvolutionalTests,
        ::testing::Values(
                deconv_test_params{{1, 3, 3, 3}, 3, 3, 1, 1, 0, 0, 2, 1, 5, {MKLDNNPlugin::impl_desc_type::jit} },
                deconv_test_params{{3, 3, 3, 3}, 4, 3, 1, 1, 0, 0, 2, 1, 5, {MKLDNNPlugin::impl_desc_type::jit} },
                deconv_test_params{{1, 3, 3, 3}, 4, 3, 1, 2, 0, 0, 2, 1, 4, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
                deconv_test_params{{1, 3, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
                deconv_test_params{{4, 17, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
                deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 2, 3, {MKLDNNPlugin::impl_desc_type::gemm}},
                deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
                deconv_test_params{{2, 8, 5, 5}, 8, 8, 4, 4, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
                deconv_test_params{{2, 8, 5, 5}, 4, 8, 2, 4, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}
        ));