// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include <gmock/gmock-spec-builders.h>
#include "mkldnn_plugin/mkldnn_graph.h"

#include "test_graph.hpp"

#include "single_layer_common.hpp"
#include <mkldnn_plugin/mkldnn_extension_utils.h>
#include <extension/ext_list.hpp>
#include "tests_common.hpp"


using namespace ::testing;
using namespace std;
using namespace mkldnn;

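// Parameters for one Unsqueeze test case: the precision of the axes input
// ("I32" or "FP32"), the input tensor shape, the axes to insert, and the
// expected output shape.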
struct unsqueeze_test_params {
    std::string                 inIdxPrecision;
    InferenceEngine::SizeVector in_shape;
    std::vector<int32_t>        indices_to_set;
    InferenceEngine::SizeVector out_shape;

    std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
};

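// Reference implementation: validates the axes vector and computes the expected
// output shape by inserting a dimension of size 1 at every axis listed in
// indices_to_set, keeping the original dimensions in order everywhere else.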
void ref_unsqueeze(
    InferenceEngine::TBlob<float> &src,
    InferenceEngine::SizeVector &out_dims,
    std::vector<int32_t> indices_to_set
) {
    InferenceEngine::SizeVector src_dims = src.getTensorDesc().getDims();

    if (indices_to_set.size() == 0)
        FAIL() << " Index vector should not be empty";

    size_t max = src_dims.size();
    for (size_t i = 0; i < indices_to_set.size(); i++) {
        if (static_cast<size_t>(indices_to_set[i]) > max) max = static_cast<size_t>(indices_to_set[i]);
    }
    max++;

    if ((indices_to_set.size() + src_dims.size()) < max)
        FAIL() << " Indices_to_set for unsqueeze layer is out of tensor dimension";

    max = indices_to_set.size() + src_dims.size();
    for (size_t i = 0, j = 0, k = 0; i < max; i++) {
        if (k < indices_to_set.size() && i == static_cast<size_t>(indices_to_set[k])) {
            out_dims.push_back(1);
            k++;
        } else {
            out_dims.push_back(src_dims[j++]);
        }
    }
}

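// Test fixture: builds a two-input IR (data plus unsqueeze axes) around a single
// Unsqueeze layer, runs it through the MKLDNN graph with the CPU extensions
// library, and compares the result against the reference.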
class MKLDNNCPUExtUnsqueezeTests : public TestsCommon, public WithParamInterface<unsqueeze_test_params> {
    std::string model_t = R"V0G0N(
<net Name="Unsqueeze_net" version="2" precision="FP32" batch="1">
    <layers>
        <layer name="input" type="Input" precision="FP32" id="1">
            <output>
                <port id="1">
                    _IN_
                </port>
            </output>
        </layer>
        <layer name="indices_to_set" type="Input" precision="_IIDXP_" id="2">
            <output>
                <port id="2">
                    <dim>_DIM_SIZE_</dim>
                </port>
            </output>
        </layer>
        <layer name="output" id="3" type="Unsqueeze" precision="FP32">
            <data/>
            <input>
                <port id="1">
                    _IN_
                </port>
                <port id="2">
                    <dim>_DIM_SIZE_</dim>
                </port>
            </input>
            <output>
                <port id="3">
                    _OUT_
                </port>
            </output>
        </layer>
    </layers>
    <edges>
        <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
        <edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
    </edges>
</net>
)V0G0N";

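    // Substitute the _IN_/_OUT_ shape, axes-precision (_IIDXP_) and axes-count
    // (_DIM_SIZE_) placeholders in the IR template for the given parameter set.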
    std::string getModel(unsqueeze_test_params p) {
        std::string model = model_t;
        std::string in_shape;
        std::string out_shape;

        for (size_t i = 0; i < p.in_shape.size(); i++) {
            in_shape += "<dim>";
            in_shape += std::to_string(p.in_shape[i]) + "</dim>\n";
        }
        REPLACE_WITH_STR(model, "_IN_", in_shape);
        REPLACE_WITH_STR(model, "_IIDXP_", p.inIdxPrecision);
        REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.indices_to_set.size());
        for (size_t i = 0; i < p.out_shape.size(); i++) {
            out_shape += "<dim>";
            out_shape += std::to_string(p.out_shape[i]) + "</dim>\n";
        }
        REPLACE_WITH_STR(model, "_OUT_", out_shape);

        return model;
    }

protected:
    virtual void TearDown() {
    }

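    // SetUp does all the work: build the IR, load it into an MKLDNN graph with
    // the CPU extensions, validate the reference output shape, run inference,
    // and compare the produced data against the (unchanged) input data.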
    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            unsqueeze_test_params p = ::testing::WithParamInterface<unsqueeze_test_params>::GetParam();
            std::string model = getModel(p);
            ////std::cout << model;
            InferenceEngine::CNNNetReader net_reader;
            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

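            // Unsqueeze is provided by the CPU extensions library, so register
            // it with the extension manager before building the graph.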
            InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
            MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
            extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));

            MKLDNNGraphTestClass graph;
            graph.CreateGraph(net_reader.getNetwork(), extMgr);

            // Output Data
            InferenceEngine::OutputsDataMap out;
            out = net_reader.getNetwork().getOutputsInfo();
            InferenceEngine::BlobMap outputBlobs;

            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();

            InferenceEngine::TBlob<float>::Ptr output;
            output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
            output->allocate();
            outputBlobs[item.first] = output;

            // Output Reference
            InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
            dst_ref.allocate();

            // Input Data
            InferenceEngine::Blob::Ptr src;
            src = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, p.in_shape, InferenceEngine::TensorDesc::getLayoutByDims(p.in_shape) });
            src->allocate();
            fill_data_dbgval(src->buffer(), src->size());
            auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
            if (srcPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<float>.";

            InferenceEngine::BlobMap srcs;
            srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", src));

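            // Axes ("indices_to_set") input: filled either as I32 or as FP32,
            // depending on the precision under test.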
            InferenceEngine::Blob::Ptr seq_lengthsIdx;
            InferenceEngine::SizeVector seq_lengths_dim(1, p.indices_to_set.size());
            if (p.inIdxPrecision == "I32") {
                seq_lengthsIdx = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, seq_lengths_dim, InferenceEngine::TensorDesc::getLayoutByDims(seq_lengths_dim) });
                seq_lengthsIdx->allocate();
                if (p.indices_to_set.size())
                    memcpy(static_cast<int32_t*>(seq_lengthsIdx->buffer()), &p.indices_to_set[0], sizeof(int32_t)*p.indices_to_set.size());
                auto * seq_lengthsIdxPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(seq_lengthsIdx.get());
                if (seq_lengthsIdxPtr == nullptr)
                    FAIL() << "Cannot cast blob to TBlob<int32_t>.";

                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("indices_to_set", seq_lengthsIdx));
            } else if (p.inIdxPrecision == "FP32") {
                seq_lengthsIdx = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, seq_lengths_dim, InferenceEngine::TensorDesc::getLayoutByDims(seq_lengths_dim) });
                seq_lengthsIdx->allocate();
                if (p.indices_to_set.size())
                    for (size_t i = 0; i < p.indices_to_set.size(); i++) {
                        static_cast<float *>(seq_lengthsIdx->buffer())[i] = static_cast<float>(p.indices_to_set[i]);
                    }
                auto * seq_lengthsIdxPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(seq_lengthsIdx.get());
                if (seq_lengthsIdxPtr == nullptr)
                    FAIL() << "Cannot cast blob to TBlob<float>.";

                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("indices_to_set", seq_lengthsIdx));
            } else {
                return;
            }

            // Check results
            InferenceEngine::SizeVector out_dims;
            ref_unsqueeze(*srcPtr, out_dims, p.indices_to_set);
            if (out_dims.size() != p.out_shape.size())
                FAIL() << "Wrong out_shape size!";
            for (size_t i = 0; i < p.out_shape.size(); i++) {
                if (out_dims[i] != p.out_shape[i])
                    FAIL() << "Wrong out_shape dimensions!";
            }

            // Infer
            graph.Infer(srcs, outputBlobs);
            compare(*output, *src);
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};

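// The parameterized test body is empty: the whole scenario runs in SetUp(),
// once per parameter set.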
TEST_P(MKLDNNCPUExtUnsqueezeTests, TestsUnsqueeze) {}

INSTANTIATE_TEST_CASE_P(
    TestsUnsqueeze, MKLDNNCPUExtUnsqueezeTests,
    ::testing::Values(
        // Params: inIdxPrecision, in_shape, indices_to_set, out_shape
        unsqueeze_test_params{ "I32",  { 3 },    { 0 },       { 1, 3 } },
        unsqueeze_test_params{ "I32",  { 3 },    { 0, 1, 2 }, { 1, 1, 1, 3 } },
        unsqueeze_test_params{ "I32",  { 3 },    { 0, 2, 3 }, { 1, 3, 1, 1 } },
        unsqueeze_test_params{ "I32",  { 2, 3 }, { 0, 3 },    { 1, 2, 3, 1 } },
        unsqueeze_test_params{ "I32",  { 2, 3 }, { 1 },       { 2, 1, 3 } },
        unsqueeze_test_params{ "FP32", { 3 },    { 0 },       { 1, 3 } },
        unsqueeze_test_params{ "FP32", { 3 },    { 0, 1, 2 }, { 1, 1, 1, 3 } },
        unsqueeze_test_params{ "FP32", { 3 },    { 0, 2, 3 }, { 1, 3, 1, 1 } },
        unsqueeze_test_params{ "FP32", { 2, 3 }, { 0, 3 },    { 1, 2, 3, 1 } },
        unsqueeze_test_params{ "FP32", { 2, 3 }, { 1 },       { 2, 1, 3 } }
    ));