1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
5 #include <gtest/gtest.h>
6 #include <gmock/gmock-spec-builders.h>
7 #include "mkldnn_plugin/mkldnn_graph.h"
9 #include "test_graph.hpp"
11 #include "single_layer_common.hpp"
12 #include <mkldnn_plugin/mkldnn_extension_utils.h>
13 #include <inference_engine/cnn_network_impl.hpp>
14 #include "tests_common.hpp"
16 using namespace ::testing;
18 using namespace mkldnn;
// Parameters for one 4D BatchNormalization test case.
// NOTE(review): this view of the file is elided -- the fields referenced by
// the code below ("in" with n/c/h/w, "epsilon", "num_prim_desc") are declared
// on lines not visible here, and the struct's closing brace is also elided.
20 struct batchnorm4D_test_params {
32 MKLDNNPlugin::impl_desc_type selectedType;  // expected implementation type of the selected primitive descriptor
33 std::vector<MKLDNNPlugin::impl_desc_type> preferTypes;  // optional impl priorities injected into the IR as PrimitivesPriority
35 std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;  // per-descriptor validation callbacks, applied in order
// Scalar reference implementation of 4D batch normalization used to validate
// the MKLDNN graph output:
//   dst[n,c,h,w] = (src[n,c,h,w] - mean[c]) / sqrt(variance[c] + eps)
// NOTE(review): parts of this function are elided from this view (the
// remainder of the `idx` computation and the closing braces).
38 template <typename data_t>
39 void ref_batchnorm4D(const InferenceEngine::TBlob<data_t> &src, const data_t *variance, const data_t *mean,
40 InferenceEngine::TBlob<data_t> &dst, batchnorm4D_test_params prm) {
// Tensor layout is NCHW: dims()[0..3] = batch, channels, height, width.
41 size_t MB = src.dims()[0];
42 size_t IC = src.dims()[1];
43 size_t IH = src.dims()[2];
44 size_t IW = src.dims()[3];
46 const double eps = prm.epsilon;
48 const data_t *src_data = src.readOnly();
49 data_t *dst_data = dst.data();
// One mean/variance value per channel; normalize channel-by-channel.
51 for (int c = 0; c < IC; ++c) {
52 data_t v_mean = mean[c];
53 data_t v_variance = variance[c];
54 data_t sqrt_variance = 0;
// Precompute 1/sqrt(var + eps) once per channel (computed in double, then
// narrowed to data_t on assignment).
56 sqrt_variance = 1. / sqrt(v_variance + eps);
58 for (int n = 0; n < MB; ++n)
59 for (int h = 0; h < IH; ++h)
60 for (int w = 0; w < IW; ++w) {
// Linear NCHW offset; the remaining `+ c*IH*IW + h*IW + w` terms appear to be
// on lines elided from this view -- TODO confirm against the full source.
61 size_t idx = n * IC * IH * IW
64 dst_data[idx] = (src_data[idx] - v_mean) * sqrt_variance;
// Value-parameterized fixture: builds a two-layer IR (Input ->
// BatchNormalization) from the XML template below, substitutes the test
// parameters, and runs it through the MKLDNN test graph.
// NOTE(review): large parts of the raw-string template and of this class body
// are elided from this view; no comments are inserted inside the template
// because it is a single raw string literal.
69 class MKLDNNGraphBatchNormTests: public TestsCommon,
70 public WithParamInterface<batchnorm4D_test_params> {
71 std::string model_t = R"V0G0N(
72 <Net Name="BatchNorm4D_Only" version="2" precision="FP32" batch="1">
74 <layer name="in1" type="Input" precision="FP32" id="0">
84 <layer name="batchNorm" id="1" type="BatchNormalization" precision="FP32">
85 <batch_norm_data epsilon="_EPSILON_" PrimitivesPriority="_IMPLS_"/>
87 <weights offset="0" size="_S1_" />
88 <biases offset="_S1_" size="_S1_" />
109 <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
// Replaces the _PLACEHOLDER_ tokens in model_t with the concrete parameters.
115 std::string getModel(batchnorm4D_test_params p) {
116 std::string model = model_t;
117 REPLACE_WITH_NUM(model, "_IW_", p.in.w);
118 REPLACE_WITH_NUM(model, "_IH_", p.in.h);
119 REPLACE_WITH_NUM(model, "_IC_", p.in.c);
120 REPLACE_WITH_NUM(model, "_IN_", p.in.n);
121 REPLACE_WITH_NUM(model, "_EPSILON_", p.epsilon);
// Batch norm preserves the spatial/channel dims, so outputs mirror inputs.
123 REPLACE_WITH_NUM(model, "_OW_", p.in.w);
124 REPLACE_WITH_NUM(model, "_OH_", p.in.h);
125 REPLACE_WITH_NUM(model, "_OC_", p.in.c);
// One float per channel for the weights section and again for the biases.
127 size_t w_data_size = p.in.c * sizeof(float);
128 REPLACE_WITH_NUM(model, "_S1_", w_data_size);
// Build the PrimitivesPriority string, e.g. "cpu:jit,cpu:ref".
131 for (const auto& preferType : p.preferTypes) {
134 impls += "cpu:" + MKLDNNGraphTestClass::getStrPrimitiveDescriptorType(preferType);
136 REPLACE_WITH_STR(model, "_IMPLS_", impls);
// NOTE(review): this method body is elided in places (try/catch framing and
// several closing braces are not visible); only the visible lines are kept.
140 virtual void TearDown() {
// Builds the network from getModel(p), attaches per-channel weights/biases,
// creates the MKLDNN graph, validates the BatchNormalization node's primitive
// descriptors, runs inference, and compares against ref_batchnorm4D.
143 virtual void SetUp() {
145 TestsCommon::SetUp();
146 batchnorm4D_test_params p = ::testing::WithParamInterface<batchnorm4D_test_params>::GetParam();
147 std::string model = getModel(p);
149 InferenceEngine::CNNNetReader net_reader;
150 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
// Weight blob layout: IC floats of weights followed by IC floats of biases
// (matches the _S1_ offsets written into the IR by getModel).
152 InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::FP32, InferenceEngine::C, {p.in.c * 2 * sizeof(float)});
154 fill_data(weights->buffer(), weights->size() / sizeof(float));
155 float * data = weights->buffer();
156 for (size_t i = 0; i < weights->size() / sizeof(float); i++) {
162 InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
164 net_reader.SetWeights(weights_ptr);
166 MKLDNNGraphTestClass graph;
167 graph.CreateGraph(net_reader.getNetwork());
// Validate the BatchNormalization node's supported/selected descriptors.
169 auto& nodes = graph.getNodes();
170 for (int i = 0; i < nodes.size(); i++) {
171 if (nodes[i]->getType() == MKLDNNPlugin::BatchNormalization) {
172 ASSERT_LE(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size());
173 for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
174 p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j));
176 ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor());
// FIX(review): was `getImplementationType() | p.selectedType`, which is
// non-zero (i.e. the assertion always passes) whenever either operand is
// non-zero.  Bitwise AND actually verifies that the selected implementation
// carries the expected impl_desc_type bits.
177 ASSERT_TRUE(nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType() & p.selectedType);
180 ASSERT_GE(5, nodes.size());
// Prepare an NCHW FP32 input blob filled with deterministic test data.
182 InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
184 InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
186 fill_data(src->buffer(), src->size());
188 InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
190 if (srcPtr == nullptr)
191 FAIL() << "Cannot cast blob to TBlob<float>.";
193 InferenceEngine::BlobMap srcs;
194 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
196 InferenceEngine::OutputsDataMap out;
197 out = net_reader.getNetwork().getOutputsInfo();
198 InferenceEngine::BlobMap outputBlobs;
200 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
202 InferenceEngine::TBlob<float>::Ptr output;
203 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
205 outputBlobs[item.first] = output;
207 graph.Infer(srcs, outputBlobs);
// Compare graph output against the scalar reference: the first IC floats of
// the weight blob are passed as "variance" and the next IC as "mean".
209 InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
212 ref_batchnorm4D(*srcPtr, (const float*) weights->buffer(), ((const float*) weights->buffer() + p.in.c), dst_ref, p);
214 compare(*output, dst_ref);
215 } catch (const InferenceEngine::details::InferenceEngineException &e) {
// All work happens in SetUp(); the test body is intentionally empty.
221 TEST_P(MKLDNNGraphBatchNormTests, TestsBatchNorm) {}
// Cases: {dims {n,c,h,w}, epsilon, expected num_prim_desc, expected impl
// type[, preferred impl types]}.  jit cases use the default priority; ref
// cases force the reference implementation via PrimitivesPriority.
224 INSTANTIATE_TEST_CASE_P(
225 TestsBatchNorm, MKLDNNGraphBatchNormTests,
227 batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::jit},
228 batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::jit},
229 batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
230 batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
// Dynamic-batch variant: same network, but the batch size is reshaped to MB,
// dynamic batching is enabled on the graph, and checkDynBatch verifies
// inference at both the full and a reduced batch.
// NOTE(review): this view is elided -- `MB` is used below but its declaration
// is on lines not visible here; try/catch framing and closing braces are also
// elided.
232 class MKLDNNGraphDynBatchBatchNormTests: public MKLDNNGraphBatchNormTests {
235 virtual void SetUp() {
237 TestsCommon::SetUp();
238 batchnorm4D_test_params p = ::testing::WithParamInterface<batchnorm4D_test_params>::GetParam();
239 std::string model = getModel(p);
244 InferenceEngine::CNNNetReader net_reader;
245 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
// NOTE(review): the static-batch test allocates `p.in.c * 2 * sizeof(float)`
// with Precision::FP32; here it is `* 4` with Precision::U8 -- presumably
// over-allocation is harmless, but confirm the inconsistency is intentional.
247 InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {p.in.c * 4 * sizeof(float)});
249 fill_data( weights->data().as<float*>(), weights->size() / sizeof(float));
250 float * data = weights->buffer();
251 for (size_t i = 0; i < weights->size() / sizeof(float); i++) {
256 InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
257 net_reader.SetWeights(weights_ptr);
// Reshape the network to batch size MB via the CNNNetworkImpl internals.
258 InferenceEngine::CNNNetwork network = net_reader.getNetwork();
259 auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
260 ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
261 InferenceEngine::ResponseDesc resp;
262 InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp);
263 ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg;
// Enable dynamic batching before building the MKLDNN graph.
265 MKLDNNGraphTestClass graph;
266 graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
267 graph.CreateGraph(net_reader.getNetwork());
// Input blob is sized with the reshaped batch MB, not p.in.n.
269 InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
270 InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
271 InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
272 if (srcPtr == nullptr)
273 FAIL() << "Cannot cast blob to TBlob<float>.";
276 fill_data(src->buffer(), src->size());
278 InferenceEngine::BlobMap srcs;
279 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
281 InferenceEngine::OutputsDataMap out;
282 out = net_reader.getNetwork().getOutputsInfo();
283 InferenceEngine::BlobMap outputBlobs;
285 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
287 InferenceEngine::TBlob<float>::Ptr output;
288 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
290 outputBlobs[item.first] = output;
// Predicate selecting the node whose dyn-batch behavior is being verified
// (despite the name, it matches BatchNormalization nodes).
292 auto checkScaleShift = [](const MKLDNNPlugin::MKLDNNNodePtr& node) {
293 return node->getType() == MKLDNNPlugin::BatchNormalization;
// Run once at the full batch MB and once at batch 1.
296 graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkScaleShift);
297 graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkScaleShift);
298 } catch (const InferenceEngine::details::InferenceEngineException &e) {
306 INSTANTIATE_TEST_CASE_P(
307 TestsDynBatchBatchNorm, MKLDNNGraphDynBatchBatchNormTests,
309 batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::jit},
310 batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::jit},
311 batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
312 batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));