1 // Copyright (C) 2018 Intel Corporation
3 // SPDX-License-Identifier: Apache-2.0
6 #include <gtest/gtest.h>
7 #include <gmock/gmock-spec-builders.h>
8 #include "mkldnn_plugin/mkldnn_graph.h"
9 #include "mock_mkldnn_primitive.hpp"
11 #include "test_graph.hpp"
13 #include "single_layer_common.hpp"
14 #include <mkldnn_plugin/mkldnn_extension_utils.h>
15 #include <inference_engine/cnn_network_impl.hpp>
16 #include "tests_common.hpp"
19 using namespace ::testing;
21 using namespace mkldnn;
// Parameter pack for one deconvolution test case.
// NOTE(review): this excerpt is elided — the fields consumed elsewhere in the
// file (p.in.{n,c,h,w}, p.krn_w/h, p.str_w/h, p.pad_w/h, p.out_c, p.grp_c,
// p.num_prim_desc) are declared on lines not visible here; confirm against
// the full file before relying on field order.
24 struct deconv_test_params {
// Implementation types the selected primitive descriptor must match
// (checked via a bit-mask AND in SetUp()).
44 std::vector<int> selectedTypes;
// Implementation types to prefer when several descriptors are available.
45 std::vector<MKLDNNPlugin::impl_desc_type> preferTypes;
// Optional per-descriptor validation callbacks, invoked in SetUp() for the
// first num_prim_desc supported primitive descriptors.
47 std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
// Naive reference implementation of 2-D grouped deconvolution (transposed
// convolution), used to validate the MKLDNN primitive output via compare().
//   src         - input blob, NCHW (geometry read from src.dims() below).
//   weights     - raw weight data; indexed as [g][ic/G][oc/G][kh][kw] per the
//                 widx computation below.
//   weightsSize - element count of weights (not used in the visible code).
//   dst         - output blob written in NCHW order.
//   prm         - kernel/stride/pad/group/output-channel test parameters.
// NOTE(review): several lines are elided in this excerpt — e.g. the
// initialization of the group count G, the `continue;` bodies of the two
// guard `if`s, and the stride divisions / final `+ iw` / `+ kw` terms of the
// index expressions. Confirm against the full file.
50 template <typename data_t>
51 void ref_deconv(const InferenceEngine::TBlob<data_t> &src, const data_t *weights, const size_t weightsSize,
52 InferenceEngine::TBlob<data_t> &dst, deconv_test_params prm) {
// Kernel extents.
55 size_t KW = prm.krn_w;
56 size_t KH = prm.krn_h;
// Padding.
58 size_t PW = prm.pad_w;
59 size_t PH = prm.pad_h;
// Strides.
61 size_t SW = prm.str_w;
62 size_t SH = prm.str_h;
// Input geometry taken from the blob itself (NCHW layout).
64 size_t IW = src.dims()[3];
65 size_t IH = src.dims()[2];
66 size_t IC = src.dims()[1];
67 size_t MB = src.dims()[0];
69 size_t OC = prm.out_c;
// Deconvolution output size: OW = SW*(IW-1) + KW - 2*PW (no output padding).
71 size_t OW = SW * (IW - 1) + KW - 2 * PW;
72 size_t OH = SH * (IH - 1) + KH - 2 * PH;
74 const data_t *src_data = src.readOnly();
75 const data_t *weights_data = weights;
77 data_t *dst_data = dst.data();
// Parallelize the four outermost (independent) loops; each iteration owns a
// disjoint set of output elements, so no synchronization is needed.
79 # pragma omp parallel for collapse(4) schedule(static)
80 for (int g = 0; g < G; ++g) {
81 for (int mb = 0; mb < MB; ++mb) {
82 for (int oc = 0; oc < OC / G; ++oc) {
83 for (int oh = 0; oh < OH; ++oh) {
84 for (int ow = 0; ow < OW; ++ow) {
// Linear NCHW index of the current output element.
85 size_t didx = mb * OC * OH * OW
86 + (g * OC / G + oc) * OH * OW + oh * OW + ow;
88 dst_data[didx] = data_t(0);
90 for (int ic = 0; ic < IC / G; ic++) {
91 for (int kh = 0; kh < KH; kh++) {
92 for (int kw = 0; kw < KW; kw++) {
// Kernel tap would fall before the start of the input — skip it
// (the `continue;` body is on an elided line).
93 if (ow + PW < kw || oh + PH < kh)
96 size_t iw = ow - kw + PW;
97 size_t ih = oh - kh + PH;
// Only input pixels sitting exactly on the stride grid contribute;
// presumably `continue;` and the `iw /= SW; ih /= SH;` divisions
// follow on elided lines — TODO confirm.
99 if (iw % SW != 0 || ih % SH != 0)
// iw/ih are size_t, so any would-be negative value wraps to a huge
// number and also fails this bounds check.
105 if (ih < IH && iw < IW) {
// NCHW source index (tail of the expression, likely `+ iw;`, is elided).
106 size_t sidx = mb * IC * IH * IW
107 + (g * IC / G + ic) * IH * IW + ih * IW
// Weight index in [g][ic][oc][kh][kw] order (tail, likely `+ kw;`, elided).
110 size_t widx = g * (IC / G) * (OC / G) * KH * KW +
111 ic * (OC / G) * KH * KW +
112 + oc * KH * KW + kh * KW
115 dst_data[didx] += src_data[sidx] * weights_data[widx];
// Parameterized fixture that builds a single-layer "Deconvolution" IR model,
// runs it through the MKLDNN graph, and compares the result against the
// ref_deconv() reference implementation. All work happens in SetUp(); the
// TEST_P body below is intentionally empty.
// NOTE(review): the R"V0G0N( model template below is partially elided in this
// excerpt (input layer dims, output ports, and the raw-string terminator are
// on lines not visible here). No comments are inserted inside the raw string,
// as they would become part of the model text.
127 class MKLDNNGraphDeconvolutionalTests: public TestsCommon,
128 public WithParamInterface<deconv_test_params> {
129 std::string model_t = R"V0G0N(
130 <Net Name="Deconvolution_Only" version="2" precision="FP32" batch="1">
132 <layer name="in1" type="Input" precision="FP32" id="0">
142 <layer name="deconv1" id="1" type="Deconvolution" precision="FP32">
143 <deconvolution stride-x="_SW_" stride-y="_SH_"
144 pad-x="_PW_" pad-y="_PH_"
145 kernel-x="_KW_" kernel-y="_KH_"
146 output="_OC_" group="_GC_"/>
148 <weights offset="0" size="_S1_" />
169 <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
175 std::string getModel(deconv_test_params p) {
// Substitute every _PLACEHOLDER_ in the IR template with the concrete
// test-case values; output H/W use the standard deconvolution formula
// str*(in-1) + krn - 2*pad.
176 std::string model = model_t;
178 REPLACE_WITH_NUM(model, "_IW_", p.in.w);
179 REPLACE_WITH_NUM(model, "_IH_", p.in.h);
180 REPLACE_WITH_NUM(model, "_IC_", p.in.c);
181 REPLACE_WITH_NUM(model, "_IN_", p.in.n);
183 REPLACE_WITH_NUM(model, "_KW_", p.krn_w);
184 REPLACE_WITH_NUM(model, "_KH_", p.krn_h);
185 REPLACE_WITH_NUM(model, "_SW_", p.str_w);
186 REPLACE_WITH_NUM(model, "_SH_", p.str_h);
187 REPLACE_WITH_NUM(model, "_PW_", p.pad_w);
188 REPLACE_WITH_NUM(model, "_PH_", p.pad_h);
190 REPLACE_WITH_NUM(model, "_GC_", p.grp_c);
191 REPLACE_WITH_NUM(model, "_OC_", p.out_c);
192 REPLACE_WITH_NUM(model, "_OH_", p.str_h * (p.in.h - 1) + p.krn_h - 2 * p.pad_h);
193 REPLACE_WITH_NUM(model, "_OW_", p.str_w * (p.in.w - 1) + p.krn_w - 2 * p.pad_w);
// Weight blob size in bytes; grouping divides the input-channel dimension.
195 size_t w_data_size = (p.krn_w * p.krn_h * p.out_c * (p.in.c / p.grp_c)) * sizeof(float);
196 REPLACE_WITH_NUM(model, "_S1_", w_data_size);
200 virtual void TearDown() {
// Whole test body: parse the IR, attach random weights, build the MKLDNN
// graph, validate primitive-descriptor selection, then infer and compare
// against the reference. Presumably wrapped in a try { … } whose opening is
// on an elided line (the matching catch is visible at the end).
203 virtual void SetUp() {
205 TestsCommon::SetUp();
206 deconv_test_params p = ::testing::WithParamInterface<deconv_test_params>::GetParam();
207 std::string model = getModel(p);
209 InferenceEngine::CNNNetReader net_reader;
210 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
// Weight blob is allocated as raw U8 bytes but filled/consumed as floats,
// hence the sizeof(float) scaling in both the dims and the fill count.
212 InferenceEngine::SizeVector dims_weights = {(p.krn_w * p.krn_h * p.out_c * (p.in.c / p.grp_c)) * sizeof(float)};
214 InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, dims_weights);
217 fill_data(weights->data().as<float*>(), weights->size() / sizeof(float));
// Ownership of the raw pointer transfers to the shared Ptr here.
219 InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
221 net_reader.SetWeights(weights_ptr);
223 MKLDNNGraphTestClass graph;
224 graph.CreateGraph(net_reader.getNetwork());
// Inspect the Deconvolution node: enough primitive descriptors must exist,
// optional per-descriptor checks run, and the selected implementation must
// match one of the expected types (bit-mask containment test).
225 auto& nodes = graph.getNodes();
226 for (auto &node : nodes) {
227 if (node->getType() == MKLDNNPlugin::Deconvolution) {
228 ASSERT_LE(p.num_prim_desc, node->getSupportedPrimitiveDescriptors().size());
229 for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
230 p.comp.at(j)(node->getSupportedPrimitiveDescriptors().at(j));
232 ASSERT_NE(nullptr, node->getSelectedPrimitiveDescriptor());
233 bool good_prim = false;
// `selected` must be fully contained in the implementation-type bit mask;
// presumably `good_prim = true;` follows on an elided line — TODO confirm.
234 for (auto & selected : p.selectedTypes)
235 if (selected == (node->getSelectedPrimitiveDescriptor()->getImplementationType() & selected))
237 ASSERT_TRUE(good_prim);
// Build a random FP32 NCHW input blob matching the test-case geometry.
241 InferenceEngine::SizeVector dims_src = {p.in.n, p.in.c, p.in.h, p.in.w};
243 InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
245 fill_data(src->buffer(), src->size());
247 InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
249 if (srcPtr == nullptr)
250 FAIL() << "Cannot cast blob to TBlob<float>.";
252 InferenceEngine::BlobMap srcs;
253 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
// Allocate an output blob shaped after the network's (single) output.
255 InferenceEngine::OutputsDataMap out;
256 out = net_reader.getNetwork().getOutputsInfo();
257 InferenceEngine::BlobMap outputBlobs;
259 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
261 InferenceEngine::TBlob<float>::Ptr output;
262 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
264 outputBlobs[item.first] = output;
266 graph.Infer(srcs, outputBlobs);
// Compute the reference result with the same weights and compare.
268 InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
271 ref_deconv(*srcPtr, weights->readOnly().as<const float*>(), weights->size() / sizeof(float), dst_ref, p);
273 compare(*output, dst_ref);
274 } catch (const InferenceEngine::details::InferenceEngineException &e) {
// Empty body by design: all assertions live in the fixture's SetUp().
280 TEST_P(MKLDNNGraphDeconvolutionalTests, TestsDeconvolution) {}
// Test-case matrix. Aggregate order appears to be: {n,c,h,w}, krn_w, krn_h,
// str_w, str_h, pad_w, pad_h, out_c, grp_c, num_prim_desc, selectedTypes —
// inferred from getModel()/ref_deconv() usage; the deconv_test_params field
// list is elided in this excerpt, so confirm against the full file.
// (The `::testing::Values(` wrapper and closing parenthesis are on elided
// lines.)
283 INSTANTIATE_TEST_CASE_P(
284 TestDeconvolution, MKLDNNGraphDeconvolutionalTests,
286 deconv_test_params{{1, 3, 3, 3}, 3, 3, 1, 1, 0, 0, 2, 1, 5, {MKLDNNPlugin::impl_desc_type::jit} },
287 deconv_test_params{{3, 3, 3, 3}, 4, 3, 1, 1, 0, 0, 2, 1, 5, {MKLDNNPlugin::impl_desc_type::jit} },
288 deconv_test_params{{1, 3, 3, 3}, 4, 3, 1, 2, 0, 0, 2, 1, 4, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
289 deconv_test_params{{1, 3, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
290 deconv_test_params{{4, 17, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
291 deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 2, 3, {MKLDNNPlugin::impl_desc_type::gemm}},
292 deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
293 deconv_test_params{{2, 8, 5, 5}, 8, 8, 4, 4, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
294 deconv_test_params{{2, 8, 5, 5}, 4, 8, 2, 4, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}
// Variant of the deconvolution test that exercises dynamic-batch support:
// the network is reshaped to batch MB, the graph is created with
// KEY_DYN_BATCH_ENABLED, and inference is checked both at full batch MB and
// at batch 1 via graph.checkDynBatch().
// NOTE(review): MB is used below but its initialization (presumably derived
// from p.in.n on an elided line) is not visible in this excerpt, nor is the
// `try {` matching the catch at the end — confirm against the full file.
297 class MKLDNNGraphDynBatchDeconvolutionalTests: public MKLDNNGraphDeconvolutionalTests {
299 virtual void SetUp() {
301 TestsCommon::SetUp();
302 deconv_test_params p = ::testing::WithParamInterface<deconv_test_params>::GetParam();
303 std::string model = getModel(p);
308 InferenceEngine::CNNNetReader net_reader;
309 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
// Same U8-bytes-holding-floats weight blob as the base fixture.
311 InferenceEngine::SizeVector dims_weights = {(p.krn_w * p.krn_h * p.out_c * (p.in.c / p.grp_c)) * sizeof(float)};
312 InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, dims_weights);
314 fill_data(weights->data().as<float*>(), weights->size() / sizeof(float));
315 InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
316 net_reader.SetWeights(weights_ptr);
// Reshape the network to batch size MB through the CNNNetworkImpl API
// (requires the downcast below).
318 InferenceEngine::CNNNetwork network = net_reader.getNetwork();
319 auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
320 ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
321 InferenceEngine::ResponseDesc resp;
322 InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp);
323 ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg;
// Build the graph with dynamic batching enabled.
326 MKLDNNGraphTestClass graph;
327 graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
328 graph.CreateGraph(net_reader.getNetwork());
// Random FP32 NCHW input at the reshaped batch size.
330 InferenceEngine::SizeVector dims_src = {MB, p.in.c, p.in.h, p.in.w};
331 InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
332 InferenceEngine::TBlob<float>* srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
333 if (srcPtr == nullptr)
334 FAIL() << "Cannot cast blob to TBlob<float>.";
337 fill_data(src->buffer(), src->size());
339 InferenceEngine::BlobMap srcs;
340 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
342 InferenceEngine::OutputsDataMap out;
343 out = net_reader.getNetwork().getOutputsInfo();
344 InferenceEngine::BlobMap outputBlobs;
346 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
348 InferenceEngine::TBlob<float>::Ptr output;
349 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
351 outputBlobs[item.first] = output;
// Predicate identifying the node whose dyn-batch behavior is verified.
353 auto checkDeconvolution = [](const MKLDNNPlugin::MKLDNNNodePtr& node) {
354 return node->getType() == MKLDNNPlugin::Deconvolution;
// Verify inference both at the full batch (MB) and at batch 1.
357 graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkDeconvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child);
358 graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkDeconvolution, MKLDNNGraphTestClass::CheckDynBatchType::Child);
359 } catch (const InferenceEngine::details::InferenceEngineException &e) {
// Empty body by design: all assertions live in the fixture's SetUp().
365 TEST_P(MKLDNNGraphDynBatchDeconvolutionalTests, TestsDynBatchDeconvolutional) {}
367 INSTANTIATE_TEST_CASE_P(
368 TestsDynBatchDeconvolutional, MKLDNNGraphDynBatchDeconvolutionalTests,
370 deconv_test_params{{1, 3, 3, 3}, 3, 3, 1, 1, 0, 0, 2, 1, 5, {MKLDNNPlugin::impl_desc_type::jit} },
371 deconv_test_params{{3, 3, 3, 3}, 4, 3, 1, 1, 0, 0, 2, 1, 5, {MKLDNNPlugin::impl_desc_type::jit} },
372 deconv_test_params{{1, 3, 3, 3}, 4, 3, 1, 2, 0, 0, 2, 1, 4, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
373 deconv_test_params{{1, 3, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
374 deconv_test_params{{4, 17, 3, 3}, 4, 3, 2, 2, 0, 0, 2, 1, 3, {MKLDNNPlugin::impl_desc_type::gemm, MKLDNNPlugin::impl_desc_type::jit} },
375 deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 2, 3, {MKLDNNPlugin::impl_desc_type::gemm}},
376 deconv_test_params{{2, 8, 5, 5}, 4, 4, 2, 2, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
377 deconv_test_params{{2, 8, 5, 5}, 8, 8, 4, 4, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}},
378 deconv_test_params{{2, 8, 5, 5}, 4, 8, 2, 4, 1, 1, 8, 8, 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw}}