Publishing 2019 R1 content
[platform/upstream/dldt.git] / inference-engine / tests / unit / engines / mkldnn / graph / layers / extensions / expand_tests.cpp
1 // Copyright (C) 2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4
5 #include <gtest/gtest.h>
6 #include <gmock/gmock-spec-builders.h>
7 #include "mkldnn_plugin/mkldnn_graph.h"
8
9 #include "test_graph.hpp"
10
11 #include "single_layer_common.hpp"
12 #include <mkldnn_plugin/mkldnn_extension_utils.h>
13 #include <extension/ext_list.hpp>
14 #include "tests_common.hpp"
15
16
17 using namespace ::testing;
18 using namespace std;
19 using namespace mkldnn;
20
// Parameters describing one Expand-layer test case.
struct expand_test_params {
    std::string                 precision;  // network precision: "I32" or "FP32"
    InferenceEngine::SizeVector in_shape;   // input tensor dimensions
    InferenceEngine::SizeVector out_shape;  // target (broadcast) output dimensions

    // Optional per-test checks against the chosen primitive descriptor
    // (not populated by the cases below).
    std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
};
28
29
30 template <typename data_t>
31 void ref_expand(InferenceEngine::TBlob<data_t> &src, InferenceEngine::TBlob<data_t> &dst) {
32     size_t i;
33     const data_t *src_data = src.data();
34     InferenceEngine::SizeVector src_dims = src.getTensorDesc().getDims();
35     InferenceEngine::SizeVector srcStrides = src.getTensorDesc().getBlockingDesc().getStrides();
36     data_t* dst_data = dst.data();
37     InferenceEngine::SizeVector dst_dims = dst.getTensorDesc().getDims();
38     InferenceEngine::SizeVector dstStrides = dst.getTensorDesc().getBlockingDesc().getStrides();
39
40     if (src_dims.size() > dst_dims.size())
41         FAIL() << "Output tensor dimension is smaller then input tensor dimension";
42
43     size_t prefix_size = dst_dims.size() - src_dims.size();
44     for (i = 0; i < src_dims.size(); i++) {
45         if (src_dims[i] != 1 && src_dims[i] != dst_dims[i + prefix_size])
46             FAIL() << "In/Output corresponding dimension must have the same value, or Input dimension is equal to 1";
47     }
48
49     InferenceEngine::SizeVector src_aligned(dst_dims.size());
50     InferenceEngine::SizeVector srcStrides_aligned(dst_dims.size());
51     for (i = 0; i < dst_dims.size(); i++) {
52         if (i < prefix_size) {
53             src_aligned[i] = 1;
54             srcStrides_aligned[i] = srcStrides[0];
55         } else {
56             src_aligned[i] = src_dims[i - prefix_size];
57             srcStrides_aligned[i] = srcStrides[i - prefix_size];
58         }
59     }
60
61     size_t src_idx, work_amount_dst = dstStrides[0] * dst_dims[0];
62     InferenceEngine::SizeVector counters(dst_dims.size(), 0);
63
64     for (size_t iwork = 0; iwork < work_amount_dst; ++iwork) {
65         for (i = 0, src_idx = 0; i < dst_dims.size(); ++i)
66             src_idx += counters[i] ? ((counters[i] % src_aligned[i]) * srcStrides_aligned[i]) : 0;
67
68         dst_data[iwork] = src_data[src_idx];
69
70         for (int j = dst_dims.size() - 1; j >= 0; j--) {
71             counters[j] = (counters[j] + 1) % dst_dims[j];
72             if (counters[j] != 0) break;
73         }
74     }
75 }
76
77
// Parameterized fixture for the CPU-extension "Expand" layer: builds a small
// IR from the template below, loads it into an MKLDNN test graph together
// with the cpu_extension library, infers it, and compares the result against
// the ref_expand() reference implementation. All work happens in SetUp().
class MKLDNNCPUExtExpandTests : public TestsCommon, public WithParamInterface<expand_test_params> {
    // IR v2 template. Placeholders _IIDXP_ (precision), _IN_/_OUT_ (dim
    // lists) and _DIM_SIZE_ (output rank) are substituted in getModel().
    // NOTE(review): the "shape" and "output" layers both carry id="2" here;
    // the reader appears to tolerate this, but confirm the edges resolve to
    // the intended layers.
    std::string model_t = R"V0G0N(
<net Name="Expand_net" version="2" precision="_IIDXP_" batch="1">
    <layers>
        <layer name="input" type="Input" precision="_IIDXP_" id="1">
            <output>
                <port id="1">
                    _IN_
                </port>
            </output>
        </layer>
        <layer name="shape" type="Input" precision="I32" id="2">
            <output>
                <port id="2">
                    <dim>_DIM_SIZE_</dim>
                </port>
            </output>
        </layer>
        <layer name="output" id="2" type="Expand" precision="_IIDXP_">
            <data/>
            <input>
                <port id="1">
                    _IN_
                </port>
                <port id="2">
                    <dim>_DIM_SIZE_</dim>
                </port>
            </input>
            <output>
                <port id="3">
                    _OUT_
                </port>
            </output>
        </layer>
    </layers>
    <edges>
        <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
        <edge from-layer="2" from-port="2" to-layer="2" to-port="2"/>
    </edges>
</net>
)V0G0N";

    // Instantiates the IR template for one test case by substituting the
    // precision, the input/output <dim> lists and the output rank.
    std::string getModel(expand_test_params p) {
        std::string model = model_t;
        std::string in_shape;
        std::string out_shape;

        REPLACE_WITH_STR(model, "_IIDXP_", p.precision);
        // Build the input-shape <dim> list.
        for (size_t i = 0; i < p.in_shape.size(); i++) {
            in_shape += "<dim>";
            in_shape += std::to_string(p.in_shape[i]) + "</dim>\n";
        }
        REPLACE_WITH_STR(model, "_IN_", in_shape);
        // Build the output-shape <dim> list.
        for (size_t i = 0; i < p.out_shape.size(); i++) {
            out_shape += "<dim>";
            out_shape += std::to_string(p.out_shape[i]) + "</dim>\n";
        }
        REPLACE_WITH_STR(model, "_OUT_", out_shape);
        REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.out_shape.size());

        return model;
    }

protected:
    virtual void TearDown() {
    }

    // Runs the whole test: graph construction, reference computation,
    // inference, and output comparison. The TEST_P body itself is empty.
    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            expand_test_params p = ::testing::WithParamInterface<expand_test_params>::GetParam();
            std::string model = getModel(p);

            InferenceEngine::CNNNetReader net_reader;
            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

            // Load the cpu_extension shared library that provides the Expand
            // implementation. The no-op deleter prevents the extension
            // manager from deleting the stack-allocated extension object.
            InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
            MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
            extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*) {}));

            MKLDNNGraphTestClass graph;
            graph.CreateGraph(net_reader.getNetwork(), extMgr);

            // Output Data
            InferenceEngine::OutputsDataMap out;
            out = net_reader.getNetwork().getOutputsInfo();
            InferenceEngine::BlobMap outputBlobs;

            // Input Data: the second ("shape") input carries the target
            // output dimensions as an I32 vector of length out_shape.size().
            InferenceEngine::Blob::Ptr dims;
            InferenceEngine::SizeVector vector_dim(1, p.out_shape.size());
            dims = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, vector_dim, InferenceEngine::TensorDesc::getLayoutByDims(vector_dim) });
            dims->allocate();
            for (size_t i = 0; i < p.out_shape.size(); i++) {
                static_cast<int32_t*>(dims->buffer())[i] = static_cast<int32_t>(p.out_shape[i]);
            }
            auto * dimsPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(dims.get());
            if (dimsPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<int32_t>.";

            InferenceEngine::BlobMap srcs;
            InferenceEngine::Blob::Ptr src;
            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
            // The two branches below are identical except for the element
            // type; an unrecognized precision silently skips the test.
            if (p.precision == "I32") {
                src = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, p.in_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.in_shape) });
                src->allocate();
                // Fill the input with its own flat index values.
                for (size_t i = 0; i < src->size(); i++)
                    static_cast<int32_t*>(src->buffer())[i] = static_cast<int32_t>(i);
                auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(src.get());
                if (srcPtr == nullptr)
                    FAIL() << "Cannot cast blob to TBlob<int32_t>.";

                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", src));
                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("shape", dims));

                // Output Blob
                InferenceEngine::TBlob<int32_t>::Ptr output;
                output = InferenceEngine::make_shared_blob<int32_t>(item.second->getTensorDesc());
                output->allocate();
                outputBlobs[item.first] = output;

                // Output Reference
                InferenceEngine::TBlob<int32_t> dst_ref(item.second->getTensorDesc());
                dst_ref.allocate();
                ref_expand(*srcPtr, dst_ref);

                // Infer and compare element-wise (exact match for integers).
                graph.Infer(srcs, outputBlobs);
                for (int i = 0; i < dst_ref.size(); i++) {
                    if (dst_ref.data()[i] != (*output).data()[i])
                        FAIL() << "The difference between res_ptr[i] and ref_ptr[i]";
                }
            }
            else if (p.precision == "FP32") {
                src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.in_shape) });
                src->allocate();
                fill_data_dbgval(src->buffer(), src->size());
                auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
                if (srcPtr == nullptr)
                    FAIL() << "Cannot cast blob to TBlob<float>.";

                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", src));
                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("shape", dims));

                // Output Blob
                InferenceEngine::TBlob<float>::Ptr output;
                output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
                output->allocate();
                outputBlobs[item.first] = output;

                // Output Reference
                InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
                dst_ref.allocate();
                ref_expand(*srcPtr, dst_ref);

                // Infer and compare with the shared tolerance-based helper.
                graph.Infer(srcs, outputBlobs);
                compare(*output, dst_ref);
            }
            else {
                return;
            }
        }
        catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};
246
247 TEST_P(MKLDNNCPUExtExpandTests, TestsExpand) {}
248
249 INSTANTIATE_TEST_CASE_P(
250     TestsExpand, MKLDNNCPUExtExpandTests,
251     ::testing::Values(
252         // Params: precision, in_shape, out_shape
253         expand_test_params{ "I32", { 1 }, { 2, 3, 4 } },
254         expand_test_params{ "I32", { 4, 1, 2 }, { 4, 2, 2 } },
255         expand_test_params{ "I32", { 4, 2, 1 }, { 4, 2, 2 } },
256         expand_test_params{ "I32", { 4, 2 }, { 2, 4, 2 } },
257         expand_test_params{ "I32", { 4, 1, 1 }, { 4, 2, 1 } },
258         expand_test_params{ "I32", { 2, 1, 3, 1 },{ 2, 2, 2, 3, 1 } },
259         expand_test_params{"FP32", { 1 }, { 2, 3, 4 } },
260         expand_test_params{"FP32", { 4, 1, 2 }, { 4, 2, 2 } },
261         expand_test_params{"FP32", { 4, 2, 1 }, { 4, 2, 2 } },
262         expand_test_params{"FP32", { 4, 2 }, { 2, 4, 2 } },
263         expand_test_params{"FP32", { 4, 1, 1 }, { 4, 2, 1 } },
264         expand_test_params{"FP32", { 2, 1, 3, 1 },{ 2, 2, 2, 3, 1 } }
265 ));