// Publishing 2019 R1 content
// [platform/upstream/dldt.git] inference-engine/tests/unit/engines/mkldnn/graph/layers/extensions/range_tests.cpp
1 // Copyright (C) 2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
3 //
4
5 #include <gtest/gtest.h>
6 #include <gmock/gmock-spec-builders.h>
7 #include "mkldnn_plugin/mkldnn_graph.h"
8
9 #include "test_graph.hpp"
10
11 #include "single_layer_common.hpp"
12 #include <mkldnn_plugin/mkldnn_extension_utils.h>
13 #include <extension/ext_list.hpp>
14 #include "tests_common.hpp"
15
16
17 using namespace ::testing;
18 using namespace std;
19 using namespace mkldnn;
20
// Parameters for one Range-layer test case.
// Fields are initialized positionally (see INSTANTIATE_TEST_CASE_P below),
// so their order must not change.
struct range_test_params {
    std::string                 precision;  // "I32" or "FP32" — precision of inputs and output
    float                       start;      // first value of the generated sequence
    float                       limit;      // bound of the sequence (exclusive)
    float                       delta;      // step between consecutive values
    InferenceEngine::SizeVector out_shape;  // expected output dims: floor(|limit - start| / |delta|) elements

    // Optional checks on the selected primitive descriptor (unused by these tests).
    std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
};
30
31 template <typename data_t>
32 void ref_range(
33     float start,
34     float limit,
35     float delta,
36     InferenceEngine::TBlob<data_t> &dst
37 ) {
38     data_t* dst_data = dst.data();
39     size_t work_amount_dst = std::floor(std::abs((limit - start) / delta));
40     if (work_amount_dst != dst.size())
41         FAIL() << "Range indexes exceeds data tensor dimension";
42
43     data_t dst_value = static_cast<data_t>(start);
44     for (size_t iwork = 0; iwork < work_amount_dst; ++iwork, dst_value += static_cast<data_t>(delta)) {
45         dst_data[iwork] = dst_value;
46     }
47 }
48
49 class MKLDNNCPUExtRangeTests : public TestsCommon, public WithParamInterface<range_test_params> {
50     std::string model_t = R"V0G0N(
51 <net Name="Range_net" version="2" precision="_IIDXP_" batch="1">
52     <layers>
53         <layer name="start" type="Input" precision="_IIDXP_" id="1">
54             <output>
55                 <port id="1">
56                     <dim>1</dim>
57                 </port>
58             </output>
59         </layer>
60         <layer name="limit" type="Input" precision="_IIDXP_" id="2">
61             <output>
62                 <port id="2">
63                     <dim>1</dim>
64                 </port>
65             </output>
66         </layer>
67         <layer name="delta" type="Input" precision="_IIDXP_" id="3">
68             <output>
69                 <port id="3">
70                     <dim>1</dim>
71                 </port>
72             </output>
73         </layer>
74         <layer name="output" id="2" type="Range" precision="_IIDXP_">
75             <data/>
76             <input>
77                 <port id="1">
78                     <dim>1</dim>
79                 </port>
80                 <port id="2">
81                     <dim>1</dim>
82                 </port>
83                 <port id="3">
84                     <dim>1</dim>
85                 </port>
86             </input>
87             <output>
88                 <port id="3">
89                     _OUT_
90                 </port>
91             </output>
92         </layer>
93     </layers>
94     <edges>
95         <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
96         <edge from-layer="2" from-port="2" to-layer="2" to-port="2"/>
97         <edge from-layer="3" from-port="3" to-layer="2" to-port="3"/>
98     </edges>
99 </net>
100 )V0G0N";
101
102     std::string getModel(range_test_params p) {
103         std::string model = model_t;
104         std::string out_shape;
105
106         REPLACE_WITH_STR(model, "_IIDXP_", p.precision);
107         for (size_t i = 0; i < p.out_shape.size(); i++) {
108             out_shape += "<dim>";
109             out_shape += std::to_string(p.out_shape[i]) + "</dim>\n";
110         }
111         REPLACE_WITH_STR(model, "_OUT_", out_shape);
112
113         return model;
114     }
115
116 protected:
117     virtual void TearDown() {
118     }
119
120     virtual void SetUp() {
121         try {
122             TestsCommon::SetUp();
123             range_test_params p = ::testing::WithParamInterface<range_test_params>::GetParam();
124             std::string model = getModel(p);
125
126             InferenceEngine::CNNNetReader net_reader;
127             ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
128
129             InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
130             MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
131             extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
132
133             MKLDNNGraphTestClass graph;
134             graph.CreateGraph(net_reader.getNetwork(), extMgr);
135
136             // Output Data
137             InferenceEngine::OutputsDataMap out;
138             out = net_reader.getNetwork().getOutputsInfo();
139             InferenceEngine::BlobMap outputBlobs;
140
141             // Input Data
142             InferenceEngine::Blob::Ptr start_scalar;
143             InferenceEngine::Blob::Ptr limit_scalar;
144             InferenceEngine::Blob::Ptr delta_scalar;
145             std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
146             InferenceEngine::SizeVector scalar_dim(1, 1);
147             InferenceEngine::BlobMap srcs;
148             InferenceEngine::SizeVector out_dims;
149             if (p.precision == "I32") {
150                 start_scalar = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
151                 start_scalar->allocate();
152                 static_cast<int32_t*>(start_scalar->buffer())[0] = static_cast<int32_t>(p.start);
153                 auto * start_scalarPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(start_scalar.get());
154                 if (start_scalarPtr == nullptr)
155                     FAIL() << "Cannot cast blob to TBlob<int32_t>.";
156
157                 limit_scalar = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
158                 limit_scalar->allocate();
159                 static_cast<int32_t*>(limit_scalar->buffer())[0] = static_cast<int32_t>(p.limit);
160                 auto * limit_scalarPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(limit_scalar.get());
161                 if (limit_scalarPtr == nullptr)
162                     FAIL() << "Cannot cast blob to TBlob<int32_t>.";
163
164                 delta_scalar = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
165                 delta_scalar->allocate();
166                 static_cast<int32_t*>(delta_scalar->buffer())[0] = static_cast<int32_t>(p.delta);
167                 auto * delta_scalarPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(delta_scalar.get());
168                 if (delta_scalarPtr == nullptr)
169                     FAIL() << "Cannot cast blob to TBlob<int32_t>.";
170
171                 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("start", start_scalar));
172                 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("limit", limit_scalar));
173                 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("delta", delta_scalar));
174
175                 // Output Blob
176                 InferenceEngine::TBlob<int32_t>::Ptr output;
177                 output = InferenceEngine::make_shared_blob<int32_t>(item.second->getTensorDesc());
178                 output->allocate();
179                 outputBlobs[item.first] = output;
180
181                 // Output Reference
182                 InferenceEngine::TBlob<int32_t> dst_ref(item.second->getTensorDesc());
183                 dst_ref.allocate();
184                 ref_range(p.start, p.limit, p.delta, dst_ref);
185
186                 // Infer
187                 graph.Infer(srcs, outputBlobs);
188                 for (int i = 0; i < dst_ref.size(); i++) {
189                     if (dst_ref.data()[i] != (*output).data()[i])
190                         FAIL() << "The difference between res_ptr[i] and ref_ptr[i]";
191                 }
192             } else if (p.precision == "FP32") {
193                 start_scalar = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
194                 start_scalar->allocate();
195                 static_cast<float*>(start_scalar->buffer())[0] = p.start;
196                 auto * start_scalarPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(start_scalar.get());
197                 if (start_scalarPtr == nullptr)
198                     FAIL() << "Cannot cast blob to TBlob<float>.";
199
200                 limit_scalar = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
201                 limit_scalar->allocate();
202                 static_cast<float*>(limit_scalar->buffer())[0] = p.limit;
203                 auto * limit_scalarPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(limit_scalar.get());
204                 if (limit_scalarPtr == nullptr)
205                     FAIL() << "Cannot cast blob to TBlob<float>.";
206
207                 delta_scalar = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
208                 delta_scalar->allocate();
209                 static_cast<float*>(delta_scalar->buffer())[0] = p.delta;
210                 auto * delta_scalarPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(delta_scalar.get());
211                 if (delta_scalarPtr == nullptr)
212                     FAIL() << "Cannot cast blob to TBlob<float>.";
213
214                 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("start", start_scalar));
215                 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("limit", limit_scalar));
216                 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("delta", delta_scalar));
217
218                 // Output Blob
219                 InferenceEngine::Blob::Ptr output;
220                 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
221                 output->allocate();
222                 outputBlobs[item.first] = output;
223
224                 // Output Reference
225                 InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
226                 dst_ref.allocate();
227                 ref_range(p.start, p.limit, p.delta, dst_ref);
228
229                 // Infer
230                 graph.Infer(srcs, outputBlobs);
231                 compare(*output, dst_ref);
232             } else {
233                 return;
234             }
235         } catch (const InferenceEngine::details::InferenceEngineException &e) {
236             FAIL() << e.what();
237         }
238     }
239 };
240
241 TEST_P(MKLDNNCPUExtRangeTests, TestsRange) {}
242
243 INSTANTIATE_TEST_CASE_P(
244     TestsRange, MKLDNNCPUExtRangeTests,
245             ::testing::Values(
246 // Params: precision, start, limit, delta, out_shape
247                 range_test_params{ "I32", 3.f, 18.f, 3.f, { 5 } },
248                 range_test_params{ "I32", 3.f, 1.f, -1.f, { 2 } },
249                 range_test_params{ "I32", 3.f, -3.f, -1.f, { 6 } },
250                 range_test_params{ "I32", 0.f, 5.f, 1.f, { 5 } },
251                 range_test_params{"FP32", 3.f, 18.f, 3.f, { 5 } },
252                 range_test_params{"FP32", 3.f, 1.f, -.5f, { 4 } },
253                 range_test_params{"FP32", 3.f, -1.f, -.5f, { 8 } },
254                 range_test_params{"FP32", 0.f, 5.f, 1.f, { 5 } }
255             ));