// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include <gmock/gmock-spec-builders.h>
#include "mkldnn_plugin/mkldnn_graph.h"

#include "test_graph.hpp"

#include "single_layer_common.hpp"
#include <mkldnn_plugin/mkldnn_extension_utils.h>
#include <extension/ext_list.hpp>
#include "tests_common.hpp"


using namespace ::testing;
using namespace std;
using namespace mkldnn;

struct squeeze_test_params {
    std::string                 inIdxPrecision;
    InferenceEngine::SizeVector in_shape;
    std::vector<int32_t>        indices_to_squeeze;
    InferenceEngine::SizeVector out_shape;

    std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
};

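// Reference implementation: validates each squeeze axis against the input shape
// and builds the expected output dims by dropping every axis listed in
// indices_to_squeeze (negative axes count from the end).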
void ref_squeeze(
    InferenceEngine::TBlob<float> &src,
    InferenceEngine::SizeVector &out_dims,
    std::vector<int32_t> indices_to_squeeze
) {
    InferenceEngine::SizeVector src_dims = src.getTensorDesc().getDims();

    if (indices_to_squeeze.size() == 0)
        FAIL() << " Index vector must not be empty";

    for (size_t i = 0; i < indices_to_squeeze.size(); i++) {
        int32_t axis = indices_to_squeeze[i];
        if (axis < 0)
            axis += src_dims.size();

        if (static_cast<size_t>(axis) >= src_dims.size())
            FAIL() << " Index to squeeze exceeds data tensor dimension";
        else if (src_dims[axis] != 1)
            FAIL() << " Dimension of data tensor at squeeze index is not 1";
    }

    for (size_t j = 0; j < src_dims.size(); j++) {
        bool found = false;
        for (size_t i = 0; i < indices_to_squeeze.size(); i++) {
            int32_t axis = indices_to_squeeze[i];
            if (axis < 0)
                axis += src_dims.size();
            if (j == static_cast<size_t>(axis)) found = true;
        }
        if (!found) out_dims.push_back(src_dims[j]);
    }
}

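// Test fixture: builds a two-input IR (data tensor + squeeze indices), runs it
// through an MKLDNN graph with the CPU extensions library loaded, and checks the
// inferred output against the reference shape and the original data.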
class MKLDNNCPUExtSqueezeTests : public TestsCommon, public WithParamInterface<squeeze_test_params> {
    std::string model_t = R"V0G0N(
<net Name="Squeeze_net" version="2" precision="FP32" batch="1">
    <layers>
        <layer name="input" type="Input" precision="FP32" id="1">
            <output>
                <port id="1">
                    _IN_
                </port>
            </output>
        </layer>
        <layer name="indices_to_squeeze" type="Input" precision="_IIDXP_" id="2">
            <output>
                <port id="2">
                    <dim>_DIM_SIZE_</dim>
                </port>
            </output>
        </layer>
        <layer name="output" id="3" type="Squeeze" precision="FP32">
            <data/>
            <input>
                <port id="1">
                    _IN_
                </port>
                <port id="2">
                    <dim>_DIM_SIZE_</dim>
                </port>
            </input>
            <output>
                <port id="3">
                    _OUT_
                </port>
            </output>
        </layer>
    </layers>
    <edges>
        <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
        <edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
    </edges>
</net>
)V0G0N";

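    // Fills the IR template above: expands _IN_ and _OUT_ into <dim> lists and
    // substitutes the indices precision (_IIDXP_) and indices count (_DIM_SIZE_)
    // for the current test case.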
    std::string getModel(squeeze_test_params p) {
        std::string model = model_t;
        std::string in_shape;
        std::string out_shape;

        for (size_t i = 0; i < p.in_shape.size(); i++) {
            in_shape += "<dim>";
            in_shape += std::to_string(p.in_shape[i]) + "</dim>\n";
        }
        REPLACE_WITH_STR(model, "_IN_", in_shape);
        REPLACE_WITH_STR(model, "_IIDXP_", p.inIdxPrecision);
        REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.indices_to_squeeze.size());
        if (p.out_shape.size()) {
            for (size_t i = 0; i < p.out_shape.size(); i++) {
                out_shape += "<dim>";
                out_shape += std::to_string(p.out_shape[i]) + "</dim>\n";
            }
        } else {
            out_shape = "<dim>1</dim>\n";
        }
        REPLACE_WITH_STR(model, "_OUT_", out_shape);

        return model;
    }

protected:
    virtual void TearDown() {
    }

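    // Builds the model for the current parameters, creates the MKLDNN graph,
    // prepares input/output blobs, and compares the inference result against
    // the reference implementation.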
    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            squeeze_test_params p = ::testing::WithParamInterface<squeeze_test_params>::GetParam();
            std::string model = getModel(p);

            InferenceEngine::CNNNetReader net_reader;
            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

            InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
            MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
            extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));

            MKLDNNGraphTestClass graph;
            graph.CreateGraph(net_reader.getNetwork(), extMgr);

            // Output Data
            InferenceEngine::OutputsDataMap out;
            out = net_reader.getNetwork().getOutputsInfo();
            InferenceEngine::BlobMap outputBlobs;

            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();

            InferenceEngine::TBlob<float>::Ptr output;
            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
            output->allocate();
            outputBlobs[item.first] = output;

            // Output Reference
            InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
            dst_ref.allocate();

            // Input Data
            InferenceEngine::Blob::Ptr src;
            src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.in_shape) });
            src->allocate();
            fill_data_dbgval(src->buffer(), src->size());
            auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
            if (srcPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<float>.";

            InferenceEngine::BlobMap srcs;
            srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", src));

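            // Second input: the squeeze axes, packed into an I32 or FP32 blob
            // depending on the precision requested by the test case.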
            InferenceEngine::Blob::Ptr seq_lengthsIdx;
            InferenceEngine::SizeVector seq_lengths_dim(1, p.indices_to_squeeze.size());
            if (p.inIdxPrecision == "I32") {
                seq_lengthsIdx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, seq_lengths_dim, InferenceEngine::TensorDesc::getLayoutByDims(seq_lengths_dim) });
                seq_lengthsIdx->allocate();
                if (p.indices_to_squeeze.size())
                    memcpy(static_cast<int32_t*>(seq_lengthsIdx->buffer()), &p.indices_to_squeeze[0], sizeof(int32_t)*p.indices_to_squeeze.size());
                auto * seq_lengthsIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(seq_lengthsIdx.get());
                if (seq_lengthsIdxPtr == nullptr)
                    FAIL() << "Cannot cast blob to TBlob<int32_t>.";

                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("indices_to_squeeze", seq_lengthsIdx));
            } else if (p.inIdxPrecision == "FP32") {
                seq_lengthsIdx = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, seq_lengths_dim, InferenceEngine::TensorDesc::getLayoutByDims(seq_lengths_dim) });
                seq_lengthsIdx->allocate();
                if (p.indices_to_squeeze.size())
                    for (size_t i = 0; i < p.indices_to_squeeze.size(); i++) {
                        static_cast<float *>(seq_lengthsIdx->buffer())[i] = static_cast<float>(p.indices_to_squeeze[i]);
                    }
                auto * seq_lengthsIdxPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(seq_lengthsIdx.get());
                if (seq_lengthsIdxPtr == nullptr)
                    FAIL() << "Cannot cast blob to TBlob<float>.";

                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("indices_to_squeeze", seq_lengthsIdx));
            } else {
                return;
            }

            // Check results
            InferenceEngine::SizeVector out_dims;
            ref_squeeze(*srcPtr, out_dims, p.indices_to_squeeze);
            if (out_dims.size() != p.out_shape.size())
                FAIL() << "Wrong out_shape size!";
            for (size_t i = 0; i < p.out_shape.size(); i++) {
                if (out_dims[i] != p.out_shape[i])
                    FAIL() << "Wrong out_shape dimensions!";
            }

            // Infer
            graph.Infer(srcs, outputBlobs);
            compare(*output, *src);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};

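// The parameterized test body is intentionally empty: all of the checks run in SetUp().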
TEST_P(MKLDNNCPUExtSqueezeTests, TestsSqueeze) {}

INSTANTIATE_TEST_CASE_P(
    TestsSqueeze, MKLDNNCPUExtSqueezeTests,
    ::testing::Values(
        // Params: inIdxPrecision, in_shape, indices_to_squeeze, out_shape
        squeeze_test_params{ "I32", { 1 }, { 0 }, { } },
        squeeze_test_params{ "I32", { 1, 3, 1 }, { 0 }, { 3, 1 } },
        squeeze_test_params{ "I32", { 1, 3, 1 }, { 2 }, { 1, 3 } },
        squeeze_test_params{ "I32", { 1, 3, 1 }, { 0, 2 }, { 3 } },
        squeeze_test_params{ "I32", { 1, 3, 1 }, { -1 }, { 1, 3 } },
        squeeze_test_params{ "I32", { 1, 3, 1, 2 }, { 0, 2 }, { 3, 2 } },
        squeeze_test_params{ "FP32", { 1 }, { 0 }, { } },
        squeeze_test_params{ "FP32", { 1, 3, 1 }, { 0 }, { 3, 1 } },
        squeeze_test_params{ "FP32", { 1, 3, 1 }, { 2 }, { 1, 3 } },
        squeeze_test_params{ "FP32", { 1, 3, 1 }, { 0, 2 }, { 3 } },
        squeeze_test_params{ "FP32", { 1, 3, 1 }, { -1 }, { 1, 3 } },
        squeeze_test_params{ "FP32", { 1, 3, 1, 2 }, { 0, 2 }, { 3, 2 } }
    ));