Publishing 2019 R1 content
[platform/upstream/dldt.git] / inference-engine / tests / unit / engines / mkldnn / graph / layers / internal / graph_batchnorm_scaleshift_test.cpp
1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4
5 #include <gtest/gtest.h>
6 #include <gmock/gmock-spec-builders.h>
7 #include <inference_engine/cnn_network_impl.hpp>
8 #include "mkldnn_plugin/mkldnn_graph.h"
9
10 #include "test_graph.hpp"
11
12 #include "single_layer_common.hpp"
13 #include "tests_common.hpp"
14
15 using namespace ::testing;
16 using namespace std;
17 using namespace mkldnn;
18
// Parameters describing one BatchNorm + ScaleShift fused-graph test case.
struct batchnorm_scaleshift_test_params {
    // Input tensor geometry in NCHW order.
    struct {
        size_t n;
        size_t c;
        size_t h;
        size_t w;
    } in;

    // BatchNorm specific param
    double epsilon;
    // ScaleShift specific param
    int broadcast;

    // Minimum number of supported primitive descriptors the node must report.
    size_t num_prim_desc;

    // Implementation type expected for the selected primitive descriptor (e.g. jit, ref).
    MKLDNNPlugin::impl_desc_type selectedType;
    // Optional PrimitivesPriority list substituted into the IR's _IMPLS_ placeholder.
    std::vector<MKLDNNPlugin::impl_desc_type> preferTypes;

    // Optional per-descriptor validation callbacks, applied in order to the
    // node's supported primitive descriptors.
    std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
};
39
40 template <typename data_t>
41 void ref_batchnorm4DWithScale(const InferenceEngine::TBlob<data_t> &src, const data_t *variance, const data_t *mean, const data_t *scaleShift,
42                               InferenceEngine::TBlob<data_t> &dst, double eps) {
43     size_t MB = src.dims()[0];
44     size_t IC = src.dims()[1];
45     size_t IH = src.dims()[2];
46     size_t IW = src.dims()[3];
47
48     const data_t *src_data = src.readOnly();
49     data_t *dst_data = dst.data();
50
51     const data_t *scale_data = scaleShift;
52     const data_t *shift_data = scaleShift + IC;
53
54     for (int c = 0; c < IC; ++c) {
55         data_t v_mean = mean[c];
56         data_t v_variance = variance[c];
57         data_t sqrt_variance = 0;
58         data_t scale = scale_data[c];
59         data_t shift = shift_data[c];
60
61         sqrt_variance = 1. / sqrt(v_variance + eps);
62
63         for (int n = 0; n < MB; ++n)
64             for (int h = 0; h < IH; ++h)
65                 for (int w = 0; w < IW; ++w) {
66                     size_t idx = n * IC * IH * IW
67                                  + c * IH * IW
68                                  + h * IW + w;
69                     // BatchNorm
70                     dst_data[idx] = (src_data[idx] - v_mean) * sqrt_variance;
71                     // ScaleShift
72                     dst_data[idx] = dst_data[idx] * scale + shift;
73                 }
74     }
75 }
76
77 class MKLDNNGraphBatchNormScaleShiftTests: public TestsCommon,
78                                      public WithParamInterface<batchnorm_scaleshift_test_params> {
79     std::string model_t = R"V0G0N(
80 <Net Name="BatchNorm_With_Scale_Fusion" version="2" precision="FP32" batch="1">
81     <layers>
82         <layer name="in1" type="Input" precision="FP32" id="0">
83             <output>
84                 <port id="0">
85                     <dim>_IN_</dim>
86                     <dim>_IC_</dim>
87                     <dim>_IH_</dim>
88                     <dim>_IW_</dim>
89                 </port>
90             </output>
91         </layer>
92         <layer name="batchNorm" id="1" type="BatchNormalization" precision="FP32">
93             <batch_norm_data epsilon="_EPSILON_" PrimitivesPriority="_IMPLS_"/>
94
95             <weights offset="0" size="_S1_" />
96             <biases offset="_S1_" size="_S1_" />
97
98             <input>
99                 <port id="1">
100                     <dim>_IN_</dim>
101                     <dim>_IC_</dim>
102                     <dim>_IH_</dim>
103                     <dim>_IW_</dim>
104                 </port>
105             </input>
106             <output>
107                 <port id="2">
108                     <dim>_IN_</dim>
109                     <dim>_IC_</dim>
110                     <dim>_IH_</dim>
111                     <dim>_IW_</dim>
112                 </port>
113             </output>
114         </layer>
115          <layer name="scaleshift" id="2" type="ScaleShift" precision="FP32">
116             <scale_shift_data broadcast="_BROADCAST_" PrimitivesPriority="_IMPLS_"/>
117
118             <weights offset="_S2_" size="_S1_" />
119             <biases offset="_S3_" size="_S1_" />
120
121             <input>
122                 <port id="3">
123                     <dim>_IN_</dim>
124                     <dim>_IC_</dim>
125                     <dim>_IH_</dim>
126                     <dim>_IW_</dim>
127                 </port>
128             </input>
129             <output>
130                 <port id="4">
131                     <dim>_IN_</dim>
132                     <dim>_IC_</dim>
133                     <dim>_IH_</dim>
134                     <dim>_IW_</dim>
135                 </port>
136             </output>
137         </layer>
138     </layers>
139     <edges>
140        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
141        <edge from-layer="1" from-port="2" to-layer="2" to-port="3"/>
142     </edges>
143 </Net>
144 )V0G0N";
145
146 protected:
147     virtual void TearDown() {
148     }
149
150     std::string getModel(batchnorm_scaleshift_test_params p) {
151         std::string model = model_t;
152
153         REPLACE_WITH_NUM(model, "_IW_", p.in.w);
154         REPLACE_WITH_NUM(model, "_IH_", p.in.h);
155         REPLACE_WITH_NUM(model, "_IC_", p.in.c);
156         REPLACE_WITH_NUM(model, "_IN_", p.in.n);
157         REPLACE_WITH_NUM(model, "_EPSILON_", p.epsilon);
158         REPLACE_WITH_NUM(model, "_BROADCAST_", p.broadcast);
159
160         size_t w_data_size = p.in.c * sizeof(float);
161         REPLACE_WITH_NUM(model, "_S1_", w_data_size);
162         REPLACE_WITH_NUM(model, "_S2_", 2*w_data_size);
163         REPLACE_WITH_NUM(model, "_S3_", 3*w_data_size);
164
165         std::string impls;
166         for (const auto& preferType : p.preferTypes) {
167             if (!impls.empty())
168                 impls += ",";
169             impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType);
170         }
171         REPLACE_WITH_STR(model, "_IMPLS_", impls);
172
173         return model;
174     }
175
176     virtual void SetUp() {
177         try {
178             TestsCommon::SetUp();
179             batchnorm_scaleshift_test_params p = ::testing::WithParamInterface<batchnorm_scaleshift_test_params>::GetParam();
180             std::string model = getModel(p);
181
182             InferenceEngine::CNNNetReader net_reader;
183             ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
184
185             InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {p.in.c * 4 * sizeof(float)});
186             weights->allocate();
187             fill_data( weights->data().as<float*>(), weights->size() / sizeof(float));
188             float * data = weights->buffer();
189             for (size_t i = 0; i < weights->size() / sizeof(float); i++) {
190                 if (data[i] < 0) {
191                     data[i] *= -1;
192                 }
193             }
194             InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
195             net_reader.SetWeights(weights_ptr);
196
197             MKLDNNGraphTestClass graph;
198             graph.CreateGraph(net_reader.getNetwork());
199             auto& nodes = graph.getNodes();
200             for (int i = 0; i < nodes.size(); i++) {
201                 if ((nodes[i]->getType() == MKLDNNPlugin::Depthwise && nodes[i]->getCnnLayer()->type == "ScaleShift")
202                     || nodes[i]->getType() == MKLDNNPlugin::BatchNormalization) {
203                     ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size());
204                     for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
205                         p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j));
206                     }
207                     ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor());
208                     ASSERT_TRUE(nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() | p.selectedType);
209                 }
210             }
211
212             InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
213             InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
214             InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
215             if (srcPtr == nullptr)
216                 FAIL() << "Cannot cast blob to TBlob<float>.";
217
218             src->allocate();
219             fill_data(src->buffer(), src->size());
220
221             InferenceEngine::BlobMap srcs;
222             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
223
224             InferenceEngine::OutputsDataMap out;
225             out = net_reader.getNetwork().getOutputsInfo();
226             InferenceEngine::BlobMap outputBlobs;
227
228             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
229
230             InferenceEngine::TBlob<float>::Ptr output;
231             output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
232             output->allocate();
233             outputBlobs[item.first] = output;
234
235             graph.Infer(srcs, outputBlobs);
236
237             InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
238             dst_ref.allocate();
239
240             ref_batchnorm4DWithScale(*srcPtr, (const float*) weights->buffer(), ((const float*) weights->buffer() + p.in.c), (const float*) weights->buffer() + p.in.c*2, dst_ref, p.epsilon);
241             compare(*output, dst_ref);
242         } catch (const InferenceEngine::details::InferenceEngineException &e) {
243             FAIL() << e.what();
244         }
245     }
246 };
247
// All test logic lives in SetUp(); the test body is intentionally empty.
TEST_P(MKLDNNGraphBatchNormScaleShiftTests, TestsBatchNormWithScaleShift) {}

// Params: {n, c, h, w}, epsilon, broadcast, num_prim_desc, expected impl
// type, and (optionally) a PrimitivesPriority list forcing that impl.
INSTANTIATE_TEST_CASE_P(
        TestsBatchNormWithScaleShift, MKLDNNGraphBatchNormScaleShiftTests,
        ::testing::Values(
                batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
                batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
                batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
257
258
// Dynamic-batch variant: builds the same BatchNorm->ScaleShift network,
// reshapes it to a larger batch, enables KEY_DYN_BATCH_ENABLED, and checks
// inference at both the full and a reduced (1) batch via checkDynBatch.
class MKLDNNGraphDynBatchBatchNormScaleShiftTests: public MKLDNNGraphBatchNormScaleShiftTests {
protected:
    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            batchnorm_scaleshift_test_params p = ::testing::WithParamInterface<batchnorm_scaleshift_test_params>::GetParam();
            std::string model = getModel(p);
            // Ensure at least two batches so reducing to batch 1 is meaningful.
            size_t MB = p.in.n;
            if (MB < 2)
                MB = 2;

            InferenceEngine::CNNNetReader net_reader;
            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

            // Weights layout: [variance | mean | scale | shift], p.in.c floats each.
            InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {p.in.c * 4 * sizeof(float)});
            weights->allocate();
            fill_data( weights->data().as<float*>(), weights->size() / sizeof(float));
            // Make all values non-negative so variance stays valid for sqrt.
            float * data = weights->buffer();
            for (size_t i = 0; i < weights->size() / sizeof(float); i++) {
                if (data[i] < 0) {
                    data[i] *= -1;
                }
            }
            InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
            net_reader.SetWeights(weights_ptr);
            // Reshape the network to batch MB before the graph is created.
            InferenceEngine::CNNNetwork network = net_reader.getNetwork();
            auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
            ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
            InferenceEngine::ResponseDesc resp;
            InferenceEngine::StatusCode sts  = implNet->setBatchSizeReshape(MB, &resp);
            ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg;


            // Dynamic batch must be enabled before CreateGraph.
            MKLDNNGraphTestClass graph;
            graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
            graph.CreateGraph(net_reader.getNetwork());

            InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
            InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
            InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
            if (srcPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<float>.";

            src->allocate();
            fill_data(src->buffer(), src->size());

            InferenceEngine::BlobMap srcs;
            srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));

            InferenceEngine::OutputsDataMap out;
            out = net_reader.getNetwork().getOutputsInfo();
            InferenceEngine::BlobMap outputBlobs;

            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();

            InferenceEngine::TBlob<float>::Ptr output;
            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
            output->allocate();
            outputBlobs[item.first] = output;

            // Selects the nodes whose dynamic-batch behavior is verified:
            // the fused ScaleShift (Depthwise) and BatchNormalization nodes.
            auto checkScaleShift = [](const MKLDNNPlugin::MKLDNNNodePtr& node) {
                return (node->getType() == MKLDNNPlugin::Depthwise && node->getCnnLayer()->type == "ScaleShift")
                       || node->getType() == MKLDNNPlugin::BatchNormalization;
            };

            // Infer at the full batch, then at batch 1, with dyn-batch limit MB.
            graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkScaleShift);
            graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkScaleShift);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};
331
// All test logic lives in SetUp(); the test body is intentionally empty.
TEST_P(MKLDNNGraphDynBatchBatchNormScaleShiftTests, TestsDynBatchBatchNormWithScaleShift) {}

// Same parameter sets as the static-batch suite above.
INSTANTIATE_TEST_CASE_P(
        TestsDynBatchBatchNormWithScaleShift, MKLDNNGraphDynBatchBatchNormScaleShiftTests,
        ::testing::Values(
                batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
                batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
                batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));