Publishing 2019 R1 content
[platform/upstream/dldt.git] / inference-engine / thirdparty / clDNN / tests / test_cases / topology_test.cpp
1 #include <gtest/gtest.h>
2 #include <api/CPP/topology.hpp>
3 #include <api/CPP/network.hpp>
4 #include <api/CPP/engine.hpp>
5 #include "test_utils/test_utils.h"
6 #include <include/topology_impl.h>
7 #include <iostream>
8 #include "api/CPP/memory.hpp"
9 #include <api/CPP/lrn.hpp>
10 #include <api/CPP/convolution.hpp>
11 #include <api/CPP/fully_connected.hpp>
12 #include <api/CPP/pooling.hpp>
13 #include <api/CPP/data.hpp>
14 #include <api/CPP/reorder.hpp>
15 #include <api/CPP/scale.hpp>
16 #include <api/CPP/eltwise.hpp>
17 #include <api/CPP/softmax.hpp>
18 #include <api/CPP/activation.hpp>
19 #include <api/CPP/concatenation.hpp>
20 #include <deque>
21 #include <set>
22
// Test parameters: (output layout of the generated topology, generator vector).
// The generator vector encodes [max_layer_index, layer_index0, layer_index1, ...]
// (see topology_test::all_generator_vectors).
typedef std::tuple<cldnn::layout*, std::vector<unsigned>> topology_params;
24
25 void PrintTupleTo(const topology_params& t, ::std::ostream* os)
26 {
27     const auto & output_layout = std::get<0>(t);
28     const auto & generator = std::get<1>(t);
29     std::stringstream ss;
30
31     ss << "Topology test failed: ("
32         << cldnn::data_type_traits::name(output_layout->data_type) << " "
33         << tests::test_params::print_tensor(output_layout->size) << ") Generator: [";
34     for (auto v : generator)
35     {
36         ss << v << ", ";
37     }
38     ss.seekp(-1, ss.cur) << "]\n";
39     *os << ss.str();
40 }
41
// Parameterized test fixture that builds a pseudo-random clDNN topology from a
// generator vector, runs it on a fresh engine, and checks that the designated
// output primitive produced a result.
class topology_test : public ::testing::TestWithParam<topology_params>
{
protected:
    // Static factory that assembles topologies from a fixed catalog of layer
    // builders (layer_types), driven by the indices in a generator vector.
    class topology_generator
    {
    public:
        // (primitive id, layout) pair describing an input a primitive still needs.
        typedef std::pair<cldnn::primitive_id, cldnn::layout> named_layout;
        // Interface for one layer kind that knows how to append itself to a
        // topology given the layout its output must have.
        // NOTE(review): no virtual destructor; the instances stored in
        // layer_types are static and never deleted, so nothing currently
        // leaks through a base pointer -- revisit if ownership ever changes.
        class topology_layer_type
        {
        public:
            // return false for invalid output_layout
            // On success the implementation must push the layouts of the new
            // primitive's inputs onto input_layouts so they can later be
            // satisfied by further layers or by data primitives.
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) = 0;
        };
        // Catalog of available layer builders; indexed (mod size) by the
        // generator vector entries. Defined at file scope below.
        static std::vector<topology_layer_type*> layer_types;
        // Builds a topology whose final output is output_layout, consuming the
        // generator vector [max_layer_index, layer_index0, layer_index1, ...].
        // Works backwards from the output: a deque of not-yet-satisfied inputs
        // is processed FIFO, each entry either becoming another generated layer
        // or, after the loop, a reorder fed by random data.
        // Returns a heap-allocated topology the CALLER must delete, or nullptr
        // when the vector is too short or a layer could not be added.
        static cldnn::topology* CreateTopology(cldnn::layout output_layout, const std::vector<unsigned> generator_vec)
        {
            if (generator_vec.size() < 2)
            {
                return nullptr;
            }
            auto topology = new cldnn::topology();
            std::deque<named_layout> inputs;
            const unsigned max_index = generator_vec[0];
            inputs.push_back({ output_layer_id, output_layout });
            for (unsigned g_index = 1; g_index < generator_vec.size(); g_index++)
            {
                auto input = inputs.front();
                inputs.pop_front();
                if (!AddSinglePrimitive(*topology, input.first, input.second, inputs, generator_vec.at(g_index), max_index))
                {
                    delete topology;
                    return nullptr;
                }
            }
            // add data inputs
            for (const auto& input : inputs)
            {
                //first add a reorder to enable optimize_data
                cldnn::primitive_id input_data_id = input.first + "_input";
                topology->add(cldnn::reorder(input.first, input_data_id, input.second));
                AddRandomMemory(*topology, input_data_id, input.second);
            }
            return topology;
        }
        // Returns a process-unique primitive id ("tg_layer_<n>").
        // NOTE(review): the counter is static and never reset, so ids keep
        // growing across tests -- presumably intentional to avoid collisions.
        static cldnn::primitive_id CreateLayerId()
        {
            static unsigned layer_id = 0;
            return "tg_layer_" + std::to_string(layer_id++);
        }
        // Well-known id of the topology's final output primitive.
        static const cldnn::primitive_id output_layer_id;
        // Tries to add a primitive with the given id/output layout, starting at
        // layer_types[type_index % max_type] and cycling through all max_type
        // candidates until one accepts the layout. Returns false if none did.
        static bool AddSinglePrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts, unsigned type_index, unsigned max_type)
        {
            if (layer_types.size() < max_type)
            {
                return false;//shouldn't happen
            }
            for (unsigned t = 0; t < max_type; t++)
            {
                if (layer_types.at((type_index + t) % max_type)->AddPrimitive(topology, id, output_layout, input_layouts))
                {
                    return true;
                }
            }
            //todo: consider using a data primitive here
            return false;
        }
        // Allocates a buffer with the given layout on the shared test engine,
        // fills it with random values (range depends on data type), and adds it
        // to the topology as a data primitive with the given id.
        // Only f32/f16 are supported; anything else asserts.
        static void AddRandomMemory(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout layout)
        {
            //todo: allocate mem, randomize values by type, add to topology
            auto mem_primitive = cldnn::memory::allocate(topology_test::engine, layout);
            switch (layout.data_type)
            {
            case cldnn::data_types::f32:
                tests::set_random_values<float>(mem_primitive, true, 10, 1);
                break;
            case cldnn::data_types::f16:
                tests::set_random_values<FLOAT16>(mem_primitive, true, 4, 1);
                break;
            default:
                assert(0);
            }
            topology.add(cldnn::data(id, mem_primitive));
        }
    protected:
        topology_generator() {}

        // Convolution with square 1x1 feature-preserving weights; bfyx only.
        class convolution_layer_type : public topology_layer_type
        {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
            {
                if (output_layout.format != cldnn::format::bfyx)
                {
                    return false;
                }
                // for now using just one set of params
                // todo: randomize params
                cldnn::primitive_id weights_id = id + "_weights";
                cldnn::layout weights_layout(output_layout.data_type,
                cldnn::format::yxfb,{ output_layout.size.feature[0], output_layout.size.feature[0], 1, 1 });
                AddRandomMemory(topology, weights_id, weights_layout);
                cldnn::primitive_id bias_id = id + "_bias";
                cldnn::layout bias_layout(output_layout.data_type,
                cldnn::format::bfyx,{ 1, 1, output_layout.size.feature[0], 1 });
                AddRandomMemory(topology, bias_id, bias_layout);

                cldnn::primitive_id input_id = topology_generator::CreateLayerId();
                input_layouts.push_back({ input_id, output_layout });
                topology.add(
                    cldnn::convolution(id, input_id, { weights_id }, { bias_id }));
                return true;
            }
        };
        // LRN (across-channel) with fixed size/k/alpha/beta; bfyx only.
        class normalization_layer_type : public topology_layer_type
        {
            bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
            {
                if (output_layout.format != cldnn::format::bfyx)
                {
                    return false;
                }
                // for now using just one set of params
                // todo: randomize params
                cldnn::primitive_id input_id = topology_generator::CreateLayerId();
                input_layouts.push_back({ input_id, output_layout });
                uint32_t size = 5;
                float k = 1.0f;
                float alpha = 0.0001f;
                float beta = 0.75f;
                cldnn_lrn_norm_region norm_type = cldnn_lrn_norm_region_across_channel;
                topology.add(cldnn::lrn(id, input_id, size, k, alpha, beta, norm_type));
                return true;
            }
        };
        // Max pooling, 3x3 window with stride 1 (output size preserved);
        // requires a 2D spatial layout.
        class pooling_layer_type : public topology_layer_type
        {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
            {
                if (output_layout.size.spatial.size() != 2)
                {
                    return false;
                }
                // for now using just one set of params
                // todo: randomize params
                cldnn::primitive_id input_id = topology_generator::CreateLayerId();
                cldnn::pooling_mode mode = cldnn::pooling_mode::max;
                cldnn::tensor stride = { 1, 1, 1, 1 };
                cldnn::tensor size = { 1, 1, 3, 3 };
                input_layouts.push_back({ input_id, output_layout });
                topology.add(cldnn::pooling(id, input_id, mode, stride, size));
                return true;
            }
        };
        // Fully connected layer with a fixed 100x100-spatial input; bfyx only.
        class fully_connected_layer_type : public topology_layer_type
        {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
            {
                if (output_layout.format != cldnn::format::bfyx)
                {
                    return false;
                }

                // for now using just one set of params
                // todo: randomize params

                cldnn::layout input_layout(output_layout.data_type, cldnn::format::bfyx,{ output_layout.size.batch[0] , output_layout.size.feature[0], 100, 100 } );
                cldnn::primitive_id weights_id = id + "_weights";
                cldnn::layout weights_layout(output_layout.data_type,
                cldnn::format::bfyx,{ output_layout.size.feature[0], input_layout.size.feature[0], input_layout.size.spatial[0], input_layout.size.spatial[1] });
                AddRandomMemory(topology, weights_id, weights_layout);
                cldnn::primitive_id bias_id = id + "_bias";
                cldnn::layout bias_layout(output_layout.data_type,
                cldnn::format::bfyx,{ 1, 1, output_layout.size.feature[0], 1 });
                AddRandomMemory(topology, bias_id, bias_layout);

                cldnn::primitive_id input_id = topology_generator::CreateLayerId();
                input_layouts.push_back({ input_id, input_layout });
                topology.add(
                    cldnn::fully_connected(id, input_id, { weights_id }, { bias_id }));
                return true;
            }
        };
        // Identity reorder (same layout in and out); accepts any layout.
        class reorder_layer_type : public topology_layer_type
        {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
            {
                // for now using just one set of params
                // todo: randomize params
                cldnn::primitive_id input_id = topology_generator::CreateLayerId();
                input_layouts.push_back({ input_id, output_layout });//empty reorder
                topology.add(cldnn::reorder(id,input_id,output_layout));
                return true;
            }
        };
        // ReLU activation; accepts any layout.
        class activation_layer_type : public topology_layer_type
        {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
            {
                // for now using just one set of params
                // todo: randomize params
                cldnn::primitive_id input_id = topology_generator::CreateLayerId();
                input_layouts.push_back({ input_id, output_layout });
                topology.add(cldnn::activation(id, input_id, activation_relu));
                return true;
            }
        };
        // Concatenation along features, splitting the output into an
        // (f-1)-feature input and a 1-feature input; needs bfyx and f >= 2.
        class depth_concatenate_layer_type : public topology_layer_type
        {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
            {
                // for now using just one set of params
                // todo: randomize params
                if (output_layout.format != cldnn::format::bfyx// should be "output_layout.size.format.dimension() < 4" but requires too many case handling since tensor is immutable
                    || output_layout.size.feature[0] < 2)
                {
                    return false;
                }
                cldnn::primitive_id input_id1 = topology_generator::CreateLayerId();
                cldnn::primitive_id input_id2 = topology_generator::CreateLayerId();
                cldnn::layout input_layout1(
                    output_layout.data_type,
                    cldnn::format::bfyx,
                        {
                            output_layout.size.batch[0],
                            output_layout.size.feature[0] - 1,
                            output_layout.size.spatial[0],
                            output_layout.size.spatial[1]
                        }
                );
                cldnn::layout input_layout2(
                    output_layout.data_type,
                    cldnn::format::bfyx,
                        {
                            output_layout.size.batch[0],
                            1,
                            output_layout.size.spatial[0],
                            output_layout.size.spatial[1]
                        }
                );
                input_layouts.push_back({ input_id1, input_layout1 });
                input_layouts.push_back({ input_id2, input_layout2 });
                
                topology.add(cldnn::concatenation(id, { input_id1,input_id2 }, cldnn::concatenation::along_f));
                return true;
            }
        };
        // Element-wise max against a random constant tensor of the same layout.
        class eltwise_layer_type : public topology_layer_type
        {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
            {
                // for now using just one set of params
                // todo: randomize params
                cldnn::primitive_id input_id = topology_generator::CreateLayerId();
                input_layouts.push_back({ input_id, output_layout });
                cldnn::primitive_id eltwise_params_id = id + "_eltwise_params";
                AddRandomMemory(topology, eltwise_params_id, output_layout);
                topology.add(cldnn::eltwise(id, {input_id, eltwise_params_id}, cldnn::eltwise_mode::max));
                return true;
            }
        };
        // Scale by a random tensor of the same layout (no bias).
        class scale_layer_type : public topology_layer_type
        {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
            {
                // for now using just one set of params
                // todo: randomize params
                cldnn::primitive_id input_id = topology_generator::CreateLayerId();
                input_layouts.push_back({ input_id, output_layout });
                cldnn::primitive_id scale_params_id = id + "_scale_params";
                AddRandomMemory(topology, scale_params_id, output_layout);
                topology.add(cldnn::scale(id, input_id, scale_params_id, ""));
                return true;
            }
        };
        // Softmax with default parameters; accepts any layout.
        class softmax_layer_type : public topology_layer_type
        {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
            {
                // for now using just one set of params
                // todo: randomize params
                cldnn::primitive_id input_id = topology_generator::CreateLayerId();
                input_layouts.push_back({ input_id, output_layout });
                topology.add(cldnn::softmax(id, input_id));
                return true;
            }
        };
/* missing layers
        class batch_norm_layer_type : public topology_layer_type {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
            }
        };
        class crop_layer_type : public topology_layer_type {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
            }
        };
        class deconvolution_layer_type : public topology_layer_type {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
            }
        };
        class prior_box_layer_type : public topology_layer_type {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
            }
        };
        class roi_pooling_layer_type : public topology_layer_type {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
            }
        };
        class psroi_pooling_layer_type : public topology_layer_type {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
            }
        };
        class proposal_layer_type : public topology_layer_type {
            virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
            }
        };
*/
    };
public:
    // Number of random generator vectors produced per layer-type count.
    static const unsigned topologies_per_type_size = 10;
    topology_test() : output_layout(std::get<0>(GetParam())), generator(std::get<1>(GetParam())) {}
    // Builds the topology for the current parameters, runs it on a throwaway
    // engine with data optimization enabled, and checks the output exists.
    // NOTE(review): if CreateTopology returns nullptr, EXPECT_NE is non-fatal
    // and the subsequent *topology dereference is undefined behavior --
    // consider ASSERT_NE here.
    void run_single_test()
    {
        cldnn::topology* topology = topology_generator::CreateTopology(*output_layout, generator);
        EXPECT_NE(topology, nullptr);
        cldnn::build_options options;
        options.set_option(cldnn::build_option::optimize_data(true));
        cldnn::engine temp_engine;// using temp_engine since reusing the same one does not free all resources (network build becomes slower and slower)
        cldnn::network network(temp_engine, *topology, options);
        auto outputs = network.execute();
        EXPECT_NE(outputs.find(topology_generator::output_layer_id), outputs.end());

        delete topology;
    }

    // Produces every (data type x tensor) output layout combination used to
    // instantiate the test suite. Layouts are heap-allocated and tracked in
    // all_output_layouts so TearDownTestCase can free them.
    static std::vector<cldnn::layout*> generate_all_output_layouts()
    {
        assert(all_output_layouts.empty());
        std::vector<cldnn::data_types> data_types = { cldnn::data_types::f32, cldnn::data_types::f16 };
        std::vector<cldnn::tensor> output_tensors = {
            { 1, 1, 100, 1 },
            { 5, 1, 100, 1 },
            { 1, 10, 100, 100 },
            { 8, 1, 100, 100 },
        };
        // todo: consider iterating on format X dimensions

        for (auto dt : data_types) {
            for (auto t : output_tensors) {
                all_output_layouts.push_back(new cldnn::layout(dt, cldnn::format::bfyx, t));
            }
        }
        return all_output_layouts;
    }
    // Produces the set of generator vectors of the given length, seeded
    // deterministically so runs are reproducible.
    template<unsigned generator_length>
    static std::set<std::vector<unsigned>> all_generator_vectors()
    {
        // create vectors used to create topologies [max_layer_index, layer_index0, layer_index1,...]
        std::set<std::vector<unsigned>> all_generators;
        static std::default_random_engine rng(tests::random_seed);
        std::uniform_int_distribution<unsigned> distribution(0, 0xFF);//assuming we won't exceed 256 total layer types
        const unsigned Initial_layer_types = 10;//don't change this - starting with this index ensures adding layers won't alter previously generated tests
        for (unsigned types = Initial_layer_types; types <= topology_test::topology_generator::layer_types.size(); types++)
        {
            for (unsigned i = 0; i < topologies_per_type_size; i++)
            {
                std::vector<unsigned> generator;
                generator.push_back(types);
                for (unsigned j = 0; j < generator_length; j++)
                {
                    generator.push_back(distribution(rng) % types);
                }
                all_generators.insert(generator);
            }
        }
        return all_generators;
    }
    // Frees the layouts allocated by generate_all_output_layouts.
    static void TearDownTestCase()
    {
        for (auto& p : all_output_layouts)
        {
            delete p;
        }
    }
    // Builds a readable gtest parameter name:
    // <index>_<generator values>_<data type>_<format order>_<raw dims>.
    static std::string custom_param_name(const ::testing::TestParamInfo<topology_params>& info)
    {
        const auto & output_layout = std::get<0>(info.param);
        const auto & generator = std::get<1>(info.param);
        std::stringstream ss;
        ss << info.index << "_";
        for (auto v : generator)
        {
            ss << v << "_";
        }
        ss << cldnn::data_type_traits::name(output_layout->data_type) << "_";
        ss << cldnn::format::traits(output_layout->format).order;
        for (const auto& d : output_layout->size.raw)
        {
            ss << "_" << d;
        }
        
        return ss.str();
    }
protected:
    cldnn::layout* output_layout;   // not owned; points into all_output_layouts
    std::vector<unsigned> generator;

    static const cldnn::engine& engine;
    static std::vector<cldnn::layout*> all_output_layouts;//just for tear-down
};
451
// Shared engine used only for allocating data buffers (see AddRandomMemory);
// network execution uses a fresh temporary engine per test (see run_single_test).
const cldnn::engine& topology_test::engine = tests::get_test_engine();
std::vector<cldnn::layout*> topology_test::all_output_layouts = {};

// Catalog of layer builders, indexed (mod count) by generator-vector entries.
// Ordering is part of the test contract: generator vectors encode indices into
// this vector, so existing entries must keep their positions.
std::vector<topology_test::topology_generator::topology_layer_type*> topology_test::topology_generator::layer_types = {
    new topology_test::topology_generator::normalization_layer_type(),
    new topology_test::topology_generator::pooling_layer_type(),
    new topology_test::topology_generator::convolution_layer_type(),
    new topology_test::topology_generator::fully_connected_layer_type(),
    new topology_test::topology_generator::reorder_layer_type(),
    new topology_test::topology_generator::activation_layer_type(),
    new topology_test::topology_generator::depth_concatenate_layer_type(),
    new topology_test::topology_generator::eltwise_layer_type(),
    new topology_test::topology_generator::scale_layer_type(),
    new topology_test::topology_generator::softmax_layer_type(),
    // Only add new types at the end
};
// Id of the final output primitive every generated topology must contain.
const cldnn::primitive_id topology_test::topology_generator::output_layer_id("tg_output_layer");
469
470 TEST_P(topology_test, TOPOLOGY)
471 {
472      try
473      {
474          run_single_test();
475          if (::testing::Test::HasFailure())
476          {
477              PrintTupleTo(GetParam(), &std::cout);
478          }
479      }
480      catch (...)
481      {
482          PrintTupleTo(GetParam(), &std::cout);
483          throw;
484      }
485 }
486
// Instantiates the suite over every (output layout x generator vector)
// combination, with 3 layer indices per generator vector. The DISABLED_ prefix
// keeps these tests out of default runs; use --gtest_also_run_disabled_tests
// to execute them.
INSTANTIATE_TEST_CASE_P(DISABLED_TOPOLOGY,
    topology_test,
    ::testing::Combine( ::testing::ValuesIn(topology_test::generate_all_output_layouts()),
                        ::testing::ValuesIn(topology_test::all_generator_vectors<3>())),
    topology_test::custom_param_name);