1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
5 #include <gtest/gtest.h>
6 #include <gmock/gmock-spec-builders.h>
7 #include <inference_engine/cnn_network_impl.hpp>
8 #include "mkldnn_plugin/mkldnn_graph.h"
10 #include "test_graph.hpp"
12 #include "single_layer_common.hpp"
13 #include "tests_common.hpp"
15 using namespace ::testing;
17 using namespace mkldnn;
// Parameter pack for the fused BatchNorm + ScaleShift graph tests below.
// NOTE(review): this listing is elided — the omitted lines presumably declare
// the input dims struct (`in`), `epsilon`, `broadcast` and `num_prim_desc`
// members referenced later; verify against the full file.
19 struct batchnorm_scaleshift_test_params {
27 // BatchNorm specific param
29 // ScaleShift specific param
// Implementation type the plugin is expected to select (jit/ref/...).
34 MKLDNNPlugin::impl_desc_type selectedType;
// Preferred implementations, serialized into the IR's PrimitivesPriority attr.
35 std::vector<MKLDNNPlugin::impl_desc_type> preferTypes;
// Optional per-descriptor checks, applied to each supported primitive desc.
37 std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
// Scalar reference for 4-D (NCHW) batch normalization fused with a
// per-channel scale/shift:
//   dst[n,c,h,w] = ((src[n,c,h,w] - mean[c]) / sqrt(variance[c] + eps)) * scale[c] + shift[c]
// `scaleShift` packs IC scale values followed by IC shift values.
// NOTE(review): listing is elided — the `idx` computation continues on omitted
// lines and the closing braces are not shown; verify against the full file.
40 template <typename data_t>
41 void ref_batchnorm4DWithScale(const InferenceEngine::TBlob<data_t> &src, const data_t *variance, const data_t *mean, const data_t *scaleShift,
42 InferenceEngine::TBlob<data_t> &dst, double eps) {
43 size_t MB = src.dims()[0];
44 size_t IC = src.dims()[1];
45 size_t IH = src.dims()[2];
46 size_t IW = src.dims()[3];
48 const data_t *src_data = src.readOnly();
49 data_t *dst_data = dst.data();
// Scale values occupy the first IC entries, shifts the next IC.
51 const data_t *scale_data = scaleShift;
52 const data_t *shift_data = scaleShift + IC;
// NOTE(review): loop counters are `int` while the dims are `size_t` —
// harmless at these test sizes, but a signed/unsigned mismatch nonetheless.
54 for (int c = 0; c < IC; ++c) {
55 data_t v_mean = mean[c];
56 data_t v_variance = variance[c];
57 data_t sqrt_variance = 0;
58 data_t scale = scale_data[c];
59 data_t shift = shift_data[c];
// Precompute the inverse standard deviation once per channel.
61 sqrt_variance = 1. / sqrt(v_variance + eps);
63 for (int n = 0; n < MB; ++n)
64 for (int h = 0; h < IH; ++h)
65 for (int w = 0; w < IW; ++w) {
// Flat NCHW index (expression continues on an omitted line).
66 size_t idx = n * IC * IH * IW
// Normalize, then apply the fused per-channel affine transform in place.
70 dst_data[idx] = (src_data[idx] - v_mean) * sqrt_variance;
72 dst_data[idx] = dst_data[idx] * scale + shift;
// Parameterized fixture: builds an IR with Input -> BatchNormalization ->
// ScaleShift, runs it through the MKLDNN graph, checks the selected primitive
// descriptors, infers, and compares against ref_batchnorm4DWithScale().
// NOTE(review): this listing is elided — parts of the XML template, the
// try/catch framing and closing braces live on omitted lines.
77 class MKLDNNGraphBatchNormScaleShiftTests: public TestsCommon,
78 public WithParamInterface<batchnorm_scaleshift_test_params> {
// IR template; the _TOKEN_ placeholders are substituted in getModel().
// (Raw string — its )V0G0N" terminator is on an omitted line, so no
// annotations are placed inside this region.)
79 std::string model_t = R"V0G0N(
80 <Net Name="BatchNorm_With_Scale_Fusion" version="2" precision="FP32" batch="1">
82 <layer name="in1" type="Input" precision="FP32" id="0">
92 <layer name="batchNorm" id="1" type="BatchNormalization" precision="FP32">
93 <batch_norm_data epsilon="_EPSILON_" PrimitivesPriority="_IMPLS_"/>
95 <weights offset="0" size="_S1_" />
96 <biases offset="_S1_" size="_S1_" />
115 <layer name="scaleshift" id="2" type="ScaleShift" precision="FP32">
116 <scale_shift_data broadcast="_BROADCAST_" PrimitivesPriority="_IMPLS_"/>
118 <weights offset="_S2_" size="_S1_" />
119 <biases offset="_S3_" size="_S1_" />
140 <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
141 <edge from-layer="1" from-port="2" to-layer="2" to-port="3"/>
147 virtual void TearDown() {
// Instantiate the IR template for one parameter set.
150 std::string getModel(batchnorm_scaleshift_test_params p) {
151 std::string model = model_t;
153 REPLACE_WITH_NUM(model, "_IW_", p.in.w);
154 REPLACE_WITH_NUM(model, "_IH_", p.in.h);
155 REPLACE_WITH_NUM(model, "_IC_", p.in.c);
156 REPLACE_WITH_NUM(model, "_IN_", p.in.n);
157 REPLACE_WITH_NUM(model, "_EPSILON_", p.epsilon);
158 REPLACE_WITH_NUM(model, "_BROADCAST_", p.broadcast);
// _S1_ = size of one per-channel float blob; the weights buffer holds four
// such blobs laid out back to back (see SetUp / the reference call below).
160 size_t w_data_size = p.in.c * sizeof(float);
161 REPLACE_WITH_NUM(model, "_S1_", w_data_size);
162 REPLACE_WITH_NUM(model, "_S2_", 2*w_data_size);
163 REPLACE_WITH_NUM(model, "_S3_", 3*w_data_size);
// Serialize the preferred implementations as "cpu:<type>" entries.
166 for (const auto& preferType : p.preferTypes) {
169 impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType);
171 REPLACE_WITH_STR(model, "_IMPLS_", impls);
176 virtual void SetUp() {
178 TestsCommon::SetUp();
179 batchnorm_scaleshift_test_params p = ::testing::WithParamInterface<batchnorm_scaleshift_test_params>::GetParam();
180 std::string model = getModel(p);
182 InferenceEngine::CNNNetReader net_reader;
183 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
// 4*IC floats: per the reference call below the layout appears to be
// [variance][mean][scale][shift] — TODO confirm against the full file.
185 InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {p.in.c * 4 * sizeof(float)});
187 fill_data( weights->data().as<float*>(), weights->size() / sizeof(float));
// Post-process the random data (body elided in this listing).
188 float * data = weights->buffer();
189 for (size_t i = 0; i < weights->size() / sizeof(float); i++) {
// TBlob::Ptr takes ownership of the raw `weights` allocation above.
194 InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
195 net_reader.SetWeights(weights_ptr);
197 MKLDNNGraphTestClass graph;
198 graph.CreateGraph(net_reader.getNetwork());
// Verify primitive-descriptor expectations on the fused ScaleShift
// (implemented as Depthwise) and the BatchNormalization nodes.
199 auto& nodes = graph.getNodes();
200 for (int i = 0; i < nodes.size(); i++) {
201 if ((nodes[i]->getType() == MKLDNNPlugin::Depthwise && nodes[i]->getCnnLayer()->type == "ScaleShift")
202 || nodes[i]->getType() == MKLDNNPlugin::BatchNormalization) {
203 ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size());
204 for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
205 p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j));
207 ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor());
// NOTE(review): bitwise OR here is always truthy for a nonzero
// selectedType — this looks like it was meant to be `&` (mask test).
// Left as-is; fixing it may surface latent failures.
208 ASSERT_TRUE(nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() | p.selectedType);
// Build the input blob and run inference.
212 InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
213 InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
214 InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
215 if (srcPtr == nullptr)
216 FAIL() << "Cannot cast blob to TBlob<float>.";
219 fill_data(src->buffer(), src->size());
221 InferenceEngine::BlobMap srcs;
222 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
224 InferenceEngine::OutputsDataMap out;
225 out = net_reader.getNetwork().getOutputsInfo();
226 InferenceEngine::BlobMap outputBlobs;
// Single-output network: take the first (only) output entry.
228 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
230 InferenceEngine::TBlob<float>::Ptr output;
231 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
233 outputBlobs[item.first] = output;
235 graph.Infer(srcs, outputBlobs);
// Compare plugin output against the scalar reference; the pointer offsets
// imply the weights layout [variance | mean | scale+shift] noted above.
237 InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
240 ref_batchnorm4DWithScale(*srcPtr, (const float*) weights->buffer(), ((const float*) weights->buffer() + p.in.c), (const float*) weights->buffer() + p.in.c*2, dst_ref, p.epsilon);
241 compare(*output, dst_ref);
242 } catch (const InferenceEngine::details::InferenceEngineException &e) {
// All verification happens in the fixture's SetUp(); the test body is empty.
248 TEST_P(MKLDNNGraphBatchNormScaleShiftTests, TestsBatchNormWithScaleShift) {}
// Parameter sets: {dims {n,c,h,w}, epsilon, broadcast, num_prim_desc,
// expected impl type [, preferred impls]} — jit path and ref fallback.
// NOTE(review): the ::testing::Values( opener is on an omitted line.
250 INSTANTIATE_TEST_CASE_P(
251 TestsBatchNormWithScaleShift, MKLDNNGraphBatchNormScaleShiftTests,
253 batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
254 batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
255 batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
256 batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
// Dynamic-batch variant: same topology, but the graph is created with
// KEY_DYN_BATCH_ENABLED and inference is checked at full and reduced batch.
// NOTE(review): listing is elided — `MB` is defined on omitted lines
// (presumably derived from p.in.n); the try/catch framing and closing
// braces are also not shown here.
259 class MKLDNNGraphDynBatchBatchNormScaleShiftTests: public MKLDNNGraphBatchNormScaleShiftTests {
261 virtual void SetUp() {
263 TestsCommon::SetUp();
264 batchnorm_scaleshift_test_params p = ::testing::WithParamInterface<batchnorm_scaleshift_test_params>::GetParam();
265 std::string model = getModel(p);
270 InferenceEngine::CNNNetReader net_reader;
271 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
// Same 4*IC-float weights blob as the base fixture.
273 InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {p.in.c * 4 * sizeof(float)});
275 fill_data( weights->data().as<float*>(), weights->size() / sizeof(float));
276 float * data = weights->buffer();
277 for (size_t i = 0; i < weights->size() / sizeof(float); i++) {
282 InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
283 net_reader.SetWeights(weights_ptr);
// Reshape the network to batch MB via the CNNNetworkImpl back door.
284 InferenceEngine::CNNNetwork network = net_reader.getNetwork();
285 auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
286 ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
287 InferenceEngine::ResponseDesc resp;
288 InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp);
289 ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg;
// Build the graph with dynamic batching enabled.
292 MKLDNNGraphTestClass graph;
293 graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
294 graph.CreateGraph(net_reader.getNetwork());
296 InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
297 InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
298 InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
299 if (srcPtr == nullptr)
300 FAIL() << "Cannot cast blob to TBlob<float>.";
303 fill_data(src->buffer(), src->size());
305 InferenceEngine::BlobMap srcs;
306 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
308 InferenceEngine::OutputsDataMap out;
309 out = net_reader.getNetwork().getOutputsInfo();
310 InferenceEngine::BlobMap outputBlobs;
312 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
314 InferenceEngine::TBlob<float>::Ptr output;
315 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
317 outputBlobs[item.first] = output;
// Predicate selecting the nodes whose dyn-batch behavior is validated:
// the fused ScaleShift (Depthwise) and BatchNormalization nodes.
319 auto checkScaleShift = [](const MKLDNNPlugin::MKLDNNNodePtr& node) {
320 return (node->getType() == MKLDNNPlugin::Depthwise && node->getCnnLayer()->type == "ScaleShift")
321 || node->getType() == MKLDNNPlugin::BatchNormalization;
// Check inference at the full batch MB and at batch 1.
324 graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkScaleShift);
325 graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkScaleShift);
326 } catch (const InferenceEngine::details::InferenceEngineException &e) {
// All verification happens in the fixture's SetUp(); the test body is empty.
332 TEST_P(MKLDNNGraphDynBatchBatchNormScaleShiftTests, TestsDynBatchBatchNormWithScaleShift) {}
// Same parameter sets as the static-batch suite, exercised under
// dynamic batching.
// NOTE(review): the ::testing::Values( opener is on an omitted line.
334 INSTANTIATE_TEST_CASE_P(
335 TestsDynBatchBatchNormWithScaleShift, MKLDNNGraphDynBatchBatchNormScaleShiftTests,
337 batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
338 batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
339 batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
340 batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));