/*
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

///////////////////////////////////////////////////////////////////////////////////////////////////

#include "api/CPP/memory.hpp"
#include <api/CPP/primitive.hpp>
#include <api/CPP/input_layout.hpp>
#include <api/CPP/data.hpp>
#include <api/CPP/reorder.hpp>
#include <api/CPP/topology.hpp>
#include <api/CPP/network.hpp>
#include <api/CPP/engine.hpp>
#include "test_utils.h"
#include "float16.h"
#include "instrumentation.h"
#include <algorithm>
#include <iostream>
#include <sstream>

using namespace cldnn;
namespace tests
{
    const std::string graph_dump_dir = DUMP_DIRECTORY;

    generic_test::generic_test()
        : generic_params(std::get<0>(GetParam()))
        , layer_params(std::get<1>(GetParam()))
        , max_ulps_diff_allowed(4)
        , random_values(true)
        , dump_graphs(false)
        , dump_memory(false)
    {
    }
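
    // Builds a topology around the primitive under test, fills every input
    // (randomly, or with a deterministic per-batch/per-feature ramp), executes
    // the network and compares its single output against generate_reference().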
    void generic_test::run_single_test()
    {
        assert((generic_params->data_type == data_types::f32) || (generic_params->data_type == data_types::f16));
        if (dump_graphs)
        {
            generic_params->network_build_options.set_option(cldnn::build_option::graph_dumps_dir(graph_dump_dir));
        }
        topology topology;
        topology.add(*layer_params);

        std::vector<memory> input_mems;
        std::vector<std::string> input_layouts_names = {};

        size_t multiplier = 0;
        for (size_t i = 0; i < generic_params->input_layouts.size(); i++)
        {
            input_mems.push_back(memory::allocate(engine, generic_params->input_layouts[i]));

            if (random_values)
            {
                if (generic_params->data_type == data_types::f32)
                {
                    tests::set_random_values<float>(input_mems[i], true, 7, 10);
                }
                else
                {
                    tests::set_random_values<FLOAT16>(input_mems[i], true, 5, 10);
                }
            }
            else
            {
                // Deterministic fill: one distinct value per (batch, feature) slice,
                // offsetting each input by the previous input's value count.
                size_t size = generic_params->input_layouts[i].size.batch[0] * generic_params->input_layouts[i].size.feature[0];

                if (generic_params->data_type == data_types::f32)
                {
                    std::vector<float> values;
                    for (size_t j = 1; j <= size; j++)
                    {
                        values.push_back(static_cast<float>(multiplier + j));
                    }
                    tests::set_values_per_batch_and_feature<float>(input_mems[i], values);
                    multiplier = values.size();
                }
                else
                {
                    std::vector<FLOAT16> values;
                    for (size_t j = 1; j <= size; j++)
                    {
                        values.push_back(FLOAT16(static_cast<float>(multiplier + j)));
                    }
                    tests::set_values_per_batch_and_feature<FLOAT16>(input_mems[i], values);
                    multiplier = values.size();
                }
            }
            std::string input_name = "input" + std::to_string(i);
            if ((i == 0) && generic_params->network_build_options.get<cldnn::build_option_type::optimize_data>()->enabled())
            {
                // With optimize_data enabled, a reorder is inserted after the first
                // input (see below) because optimization may change the input layout,
                // so the raw input gets a distinct name here.
                input_name = "input0_init";
            }

            // The first input is provided to the network as input_layout.
            // The other inputs are provided as input_layout if the optimize_data flag is off; otherwise they are provided as data.
            if ((i == 0) || !generic_params->network_build_options.get<cldnn::build_option_type::optimize_data>()->enabled())
            {
                topology.add(input_layout(input_name, input_mems[i].get_layout()));
                input_layouts_names.push_back(input_name);
            }
            else
            {
                topology.add(data(input_name, input_mems[i]));
            }

            if (!is_format_supported(generic_params->fmt))
            {
                ASSERT_THROW(network bad(engine, topology), std::exception);
                return;
            }
        }

        if (generic_params->network_build_options.get<cldnn::build_option_type::optimize_data>()->enabled())
        {
            // Reorder after the first input: optimize_data may change the input layout.
            topology.add(reorder("input0", "input0_init", input_mems[0].get_layout()));
        }

        if (layer_params->input[0] == "reorder0")
        {
            // Add a reorder layer with output padding as the input to the tested layer.
            topology.add(reorder("reorder0", "input0", input_mems[0].get_layout().with_padding({ { 0, 0, 1, 3 },{ 0, 0, 5, 2 } })));
        }

        prepare_input_for_test(input_mems);

        network network(engine, topology, generic_params->network_build_options);

        for (size_t i = 0; i < input_layouts_names.size(); i++)
        {
            network.set_input_data(input_layouts_names[i], input_mems[i]);
        }

        auto outputs = network.execute();
        EXPECT_EQ(outputs.size(), size_t(1));

        auto output = outputs.begin()->second.get_memory();

        auto output_ref = generate_reference(input_mems);

        if (dump_memory)
        {
            std::string prefix = test_info.name();
            for (size_t i = 0; i < generic_params->input_layouts.size(); i++)
            {
                ::instrumentation::logger::log_memory_to_file(input_mems[i], prefix + "input" + std::to_string(i));
            }
            // Dump each network output under its ordinal index.
            size_t out_idx = 0;
            for (auto& out : outputs)
            {
                ::instrumentation::logger::log_memory_to_file(out.second.get_memory(), prefix + "output" + std::to_string(out_idx++));
            }
        }

        if (output.get_layout().data_type == data_types::f32)
        {
            compare_buffers<float>(output, output_ref);
        }
        else
        {
            compare_buffers<FLOAT16>(output, output_ref);
        }
    }

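    // Usage sketch (illustrative; 'my_layer_test' is hypothetical, not part of
    // this file): a concrete layer test derives from generic_test, implements
    // generate_reference() (and get_expected_output_tensor() when the output
    // shape differs from the first input), then runs the flow above from a
    // parameterized GTest body:
    //
    //   class my_layer_test : public tests::generic_test {};  // hypothetical
    //   TEST_P(my_layer_test, MY_LAYER) { run_single_test(); }
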
    template<typename Type>
    void generic_test::compare_buffers(const memory& out, const memory& ref)
    {
        auto out_layout = out.get_layout();
        auto ref_layout = ref.get_layout();

        EXPECT_EQ(out_layout.size, ref_layout.size);
        EXPECT_EQ(out_layout.data_type, ref_layout.data_type);
        EXPECT_EQ(get_expected_output_tensor(), out_layout.size);
        EXPECT_EQ(out_layout.get_linear_size(), ref_layout.get_linear_size());
        EXPECT_EQ(out_layout.data_padding, ref_layout.data_padding);

        auto output_size = out_layout.size;

        int batch_size = output_size.batch[0];
        int feature_size = output_size.feature[0];
        int y_size = output_size.spatial[1];
        int x_size = output_size.spatial[0];

        auto res_data = out.pointer<Type>();
        auto ref_data = ref.pointer<Type>();

        const auto out_desc = get_linear_memory_desc(out_layout);
        const auto ref_desc = get_linear_memory_desc(ref_layout);

        for (int b = 0; b < batch_size; b++)
        {
            for (int f = 0; f < feature_size; f++)
            {
                for (int y = 0; y < y_size; y++)
                {
                    for (int x = 0; x < x_size; x++)
                    {
                        size_t res_index = get_linear_index(out_layout, b, f, y, x, out_desc);
                        size_t ref_index = get_linear_index(ref_layout, b, f, y, x, ref_desc);

                        EXPECT_TRUE(floating_point_equal(res_data[res_index], ref_data[ref_index], max_ulps_diff_allowed))
                            << "Expected " << (float)res_data[res_index] << " to be almost equal (within " << max_ulps_diff_allowed << " ULPs) to " << (float)ref_data[ref_index]
                            << " (ref index = " << ref_index << ", B " << b << ", F " << f << ", Y " << y << ", X " << x << ")!";

                        if (HasFailure())
                        {
                            return;
                        }
                    }
                }
            }
        }
    }
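
    // Note on the ULP tolerance used above: floating_point_equal compares
    // values by their distance in units in the last place. For f32, 1.0f and
    // the next representable float (~1.0000001f) are 1 ULP apart, so the
    // default max_ulps_diff_allowed of 4 absorbs rounding noise while still
    // flagging genuine mismatches.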

    static size_t calc_offset(const layout& layout, const pitches& p)
    {
        // The linear offset of logical element (0,0,0,0) is the dot product of
        // the per-dimension pitches with the lower padding sizes.
        auto lower_padding = layout.data_padding.lower_size();
        return
            p.b * lower_padding.batch[0] +
            p.f * lower_padding.feature[0] +
            p.y * lower_padding.spatial[1] +
            p.x * lower_padding.spatial[0];
    }

    memory_desc generic_test::get_linear_memory_desc(const layout& layout)
    {
        // Pitches are element strides per dimension; sizes[] is ordered by the
        // queried format, index 0 being the outermost dimension.
        pitches p;

        switch (layout.format)
        {
            case format::bfyx:
            {
                const auto sizes = layout.get_buffer_size().sizes(format::bfyx);
                p.x = 1;
                p.y = sizes[3] * p.x;
                p.f = sizes[2] * p.y;
                p.b = sizes[1] * p.f;
                break;
            }
            case format::yxfb:
            {
                const auto sizes = layout.get_buffer_size().sizes(format::yxfb);
                p.b = 1;
                p.f = sizes[3] * p.b;
                p.x = sizes[2] * p.f;
                p.y = sizes[1] * p.x;
                break;
            }
            case format::fyxb:
            {
                const auto sizes = layout.get_buffer_size().sizes(format::fyxb);
                p.b = 1;
                p.x = sizes[3] * p.b;
                p.y = sizes[2] * p.x;
                p.f = sizes[1] * p.y;
                break;
            }
            case format::byxf:
            {
                const auto sizes = layout.get_buffer_size().sizes(format::byxf);
                p.f = 1;
                p.x = sizes[3] * p.f;
                p.y = sizes[2] * p.x;
                p.b = sizes[1] * p.y;
                break;
            }
            default:
            {
                throw std::runtime_error("Format not supported yet.");
            }
        }

        return { p, calc_offset(layout, p) };
    }
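
    // Worked example (illustrative): an unpadded bfyx buffer with b=2, f=3,
    // y=4, x=5 has sizes(format::bfyx) == { 2, 3, 4, 5 }, so the pitches are
    // p.x = 1, p.y = 5, p.f = 20, p.b = 60, and the offset is 0.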

    size_t generic_test::get_linear_index(const layout&, size_t b, size_t f, size_t y, size_t x, const memory_desc& desc)
    {
        return
            desc.offset +
            b * desc.pitch.b +
            f * desc.pitch.f +
            y * desc.pitch.y +
            x * desc.pitch.x;
    }
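
    // Continuing the bfyx example above: element (b=1, f=2, y=3, x=4) maps to
    // 0 + 1*60 + 2*20 + 3*5 + 4*1 = 119.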

    size_t generic_test::get_linear_index_with_broadcast(const layout& in_layout, size_t b, size_t f, size_t y, size_t x, const memory_desc& desc)
    {
        return
            desc.offset +
            (b % in_layout.size.batch[0]) * desc.pitch.b +
            (f % in_layout.size.feature[0]) * desc.pitch.f +
            (y % in_layout.size.spatial[1]) * desc.pitch.y +
            (x % in_layout.size.spatial[0]) * desc.pitch.x;
    }
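
    // The modulo terms implement broadcasting: an input with size.batch[0] == 1
    // read at b == 5 uses b % 1 == 0, so its single batch is reused for every
    // output batch (and likewise for the other dimensions).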

    // Default implementation; derived classes should override this when the
    // output tensor differs from the first input's size.
    cldnn::tensor generic_test::get_expected_output_tensor()
    {
        return generic_params->input_layouts[0].size;
    }

    std::vector<test_params*> generic_test::generate_generic_test_params(std::vector<test_params*>& all_generic_params)
    {
        // Disabled configurations kept for reference:
        // , { format::yx,{ 531,777 } } , { format::yx,{ 4096,1980 } } ,
        //{ format::bfyx,{ 1,1,1,1 } } , { format::bfyx,{ 1,1,2,2 } } , { format::yx,{ 3,3 } } , { format::yx,{ 4,4 } } , { format::bfyx,{ 1,1,5,5 } } , { format::yx,{ 6,6 } } , { format::yx,{ 7,7 } } ,
        //{ format::yx,{ 8,8 } } , { format::yx,{ 9,9 } } , { format::yx,{ 10,10 } } , { format::yx,{ 11,11 } } , { format::yx,{ 12,12 } } , { format::yx,{ 13,13 } } ,
        //{ format::yx,{ 14,14 } } , { format::yx,{ 15,15 } } , { format::yx,{ 16,16 } } };

        auto data_types = test_data_types();

        // Cartesian product of data type x format x batch x feature x input size.
        for (cldnn::data_types data_type : data_types)
        {
            for (cldnn::format fmt : test_input_formats)
            {
                for (int batch_size : test_batch_sizes)
                {
                    for (int feature_size : test_feature_sizes)
                    {
                        for (tensor input_size : test_input_sizes)
                        {
                            all_generic_params.push_back(new test_params(data_type, fmt, batch_size, feature_size, input_size));
                        }
                    }
                }
            }
        }

        return all_generic_params;
    }
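
    // With the default static lists below (4 formats x 2 batch sizes x
    // 2 feature sizes x 3 input sizes) this produces 4 * 2 * 2 * 3 = 48
    // parameter sets per data type, i.e. 96 when fp16 is supported.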

    const cldnn::engine& get_test_engine()
    {
        static const cldnn::engine engine;
        return engine;
    }

    const std::string test_dump::name() const
    {
        // GTest parameterized test names contain '/', which is awkward in
        // file names, so replace it.
        std::string temp = name_str;
        std::replace(temp.begin(), temp.end(), '/', '_');
        return temp;
    }

    const std::string test_dump::test_case_name() const
    {
        // Keep the part of the name from the first '/' on; use the whole
        // string when there is no '/'.
        size_t pos = test_case_name_str.find("/");
        if (pos == std::string::npos)
        {
            pos = 0;
        }
        std::string temp = test_case_name_str.substr(pos);
        return temp;
    }

    std::string test_params::print_tensor(cldnn::tensor t)
    {
        std::stringstream str;
        str << "[ ";
        for (size_t i = 0; i < t.sizes(format::bfyx).size(); i++)
        {
            str << t.sizes(format::bfyx)[i] << " ";
        }
        str << "]";
        return str.str();
    }

    std::string test_params::print()
    {
        std::stringstream str;
        str << "Data type: " << data_type_traits::name(data_type) << std::endl;

        for (int j = 0; j < (int)input_layouts.size(); j++)
        {
            const cldnn::tensor& t = input_layouts[j].size;

            str << "Input " << j << ": " << print_tensor(t) << std::endl;
        }
        return str.str();
    }

    std::vector<cldnn::data_types> generic_test::test_data_types()
    {
        std::vector<cldnn::data_types> result;
        result.push_back(cldnn::data_types::f32);

        // fp16 is only tested when the device reports support for it.
        if (get_test_engine().get_info().supports_fp16)
        {
            result.push_back(cldnn::data_types::f16);
        }
        return result;
    }

    std::vector<cldnn::format> generic_test::test_input_formats = { cldnn::format::bfyx, cldnn::format::yxfb, cldnn::format::fyxb, cldnn::format::byxf };
    std::vector<int32_t> generic_test::test_batch_sizes = { 1, 2 };      // 4, 8, 16
    std::vector<int32_t> generic_test::test_feature_sizes = { 1, 2 };    // 3, 15
    std::vector<tensor> generic_test::test_input_sizes = { { 1, 1, 100, 100 }, { 1, 1, 277, 277 }, { 1, 1, 400, 600 } };

} // namespace tests