1 #include <gtest/gtest.h>
2 #include <api/CPP/topology.hpp>
3 #include <api/CPP/network.hpp>
4 #include <api/CPP/engine.hpp>
5 #include "test_utils/test_utils.h"
6 #include <include/topology_impl.h>
8 #include "api/CPP/memory.hpp"
9 #include <api/CPP/lrn.hpp>
10 #include <api/CPP/convolution.hpp>
11 #include <api/CPP/fully_connected.hpp>
12 #include <api/CPP/pooling.hpp>
13 #include <api/CPP/data.hpp>
14 #include <api/CPP/reorder.hpp>
15 #include <api/CPP/scale.hpp>
16 #include <api/CPP/eltwise.hpp>
17 #include <api/CPP/softmax.hpp>
18 #include <api/CPP/activation.hpp>
19 #include <api/CPP/concatenation.hpp>
23 typedef std::tuple<cldnn::layout*, std::vector<unsigned>> topology_params;
// Pretty-printer for a failing test's parameter tuple: emits the output
// layout (data type + tensor size) followed by the generator index vector.
// NOTE(review): the file's own line numbers jump here (26, 29-30, 35-37,
// 39-41 are not visible), so parts of this function body are elided.
25 void PrintTupleTo(const topology_params& t, ::std::ostream* os)
27 const auto & output_layout = std::get<0>(t);
28 const auto & generator = std::get<1>(t);
// Build the human-readable failure message into the stream 'ss'
// (its declaration sits on an elided line).
31 ss << "Topology test failed: ("
32 << cldnn::data_type_traits::name(output_layout->data_type) << " "
33 << tests::test_params::print_tensor(output_layout->size) << ") Generator: [";
34 for (auto v : generator)
// Step the put position back one character to overwrite the trailing
// separator before closing the bracket.
38 ss.seekp(-1, ss.cur) << "]\n";
// Parameterized fixture: each test builds and runs one randomly generated
// clDNN topology described by the (output layout, generator vector) tuple.
42 class topology_test : public ::testing::TestWithParam<topology_params>
// Factory that grows a topology backwards: starting from the desired output
// layer it repeatedly prepends primitives chosen by the generator vector.
45 class topology_generator
48 typedef std::pair<cldnn::primitive_id, cldnn::layout> named_layout;
// Interface implemented once per primitive kind (convolution, pooling, ...).
49 class topology_layer_type
52 // return false for invalid output_layout
53 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) = 0;
// Registry of all available layer generators (populated at file scope below).
55 static std::vector<topology_layer_type*> layer_types;
// Builds a topology for the given output layout. generator_vec[0] is the
// number of layer types to draw from; each following entry picks the
// primitive type for one construction step. Returns a heap-allocated
// topology owned by the caller (nullptr/early-exit path is on elided lines).
56 static cldnn::topology* CreateTopology(cldnn::layout output_layout, const std::vector<unsigned> generator_vec)
// Need at least the type count plus one layer index.
58 if (generator_vec.size() < 2)
62 auto topology = new cldnn::topology();
63 std::deque<named_layout> inputs;
64 const unsigned max_index = generator_vec[0];
// Seed the work queue with the final output layer; each iteration consumes
// one pending layout and prepends a primitive producing it.
65 inputs.push_back({ output_layer_id, output_layout });
66 for (unsigned g_index = 1; g_index < generator_vec.size(); g_index++)
68 auto input = inputs.front();
70 if (!AddSinglePrimitive(*topology, input.first, input.second, inputs, generator_vec.at(g_index), max_index))
// Whatever is still unconnected becomes a real network input backed by
// randomized memory.
77 for (const auto& input : inputs)
79 //first add a reorder to enable optimize_data
80 cldnn::primitive_id input_data_id = input.first + "_input";
81 topology->add(cldnn::reorder(input.first, input_data_id, input.second));
82 AddRandomMemory(*topology, input_data_id, input.second);
// Returns a unique primitive id ("tg_layer_<n>"); the counter is shared
// by all generators in the process.
86 static cldnn::primitive_id CreateLayerId()
88 static unsigned layer_id = 0;
89 return "tg_layer_" + std::to_string(layer_id++);
91 static const cldnn::primitive_id output_layer_id;
// Tries the registered layer types round-robin starting at type_index until
// one accepts the requested output layout; false when none does.
92 static bool AddSinglePrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts, unsigned type_index, unsigned max_type)
94 if (layer_types.size() < max_type)
96 return false;//shouldn't happen
98 for (unsigned t = 0; t < max_type; t++)
100 if (layer_types.at((type_index + t) % max_type)->AddPrimitive(topology, id, output_layout, input_layouts))
105 //todo: consider using a data primitive here
// Allocates a buffer on the shared test engine, fills it with random values
// chosen per data type, and adds it to the topology as a data primitive.
// (break statements / default case of the switch sit on elided lines.)
108 static void AddRandomMemory(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout layout)
110 //todo: allocate mem, randomize values by type, add to topology
111 auto mem_primitive = cldnn::memory::allocate(topology_test::engine, layout);
112 switch (layout.data_type)
114 case cldnn::data_types::f32:
115 tests::set_random_values<float>(mem_primitive, true, 10, 1);
117 case cldnn::data_types::f16:
118 tests::set_random_values<FLOAT16>(mem_primitive, true, 4, 1);
123 topology.add(cldnn::data(id, mem_primitive));
// Static-only utility; the constructor is never used to create instances.
126 topology_generator() {}
// Adds a convolution whose input layout equals the requested output layout:
// weights are feature x feature with unit remaining dims (presumably a 1x1
// kernel preserving spatial size — confirm against cldnn tensor ordering).
128 class convolution_layer_type : public topology_layer_type
130 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
// This generator only supports bfyx outputs.
132 if (output_layout.format != cldnn::format::bfyx)
136 // for now using just one set of params
137 // todo: randomize params
138 cldnn::primitive_id weights_id = id + "_weights";
139 cldnn::layout weights_layout(output_layout.data_type,
140 cldnn::format::yxfb,{ output_layout.size.feature[0], output_layout.size.feature[0], 1, 1 });
141 AddRandomMemory(topology, weights_id, weights_layout);
// One bias value per output feature.
142 cldnn::primitive_id bias_id = id + "_bias";
143 cldnn::layout bias_layout(output_layout.data_type,
144 cldnn::format::bfyx,{ 1, 1, output_layout.size.feature[0], 1 });
145 AddRandomMemory(topology, bias_id, bias_layout);
// The convolution's input gets the same layout as its output and is queued
// for further backward construction.
147 cldnn::primitive_id input_id = topology_generator::CreateLayerId();
148 input_layouts.push_back({ input_id, output_layout });
150 cldnn::convolution(id, input_id, { weights_id }, { bias_id }));
// Adds an across-channel LRN layer; input layout equals the output layout.
154 class normalization_layer_type : public topology_layer_type
156 bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
158 if (output_layout.format != cldnn::format::bfyx)
162 // for now using just one set of params
163 // todo: randomize params
164 cldnn::primitive_id input_id = topology_generator::CreateLayerId();
165 input_layouts.push_back({ input_id, output_layout });
// NOTE(review): the declarations of 'size', 'k' and 'beta' used below sit
// on elided lines of the original file.
168 float alpha = 0.0001f;
170 cldnn_lrn_norm_region norm_type = cldnn_lrn_norm_region_across_channel;
171 topology.add(cldnn::lrn(id, input_id, size, k, alpha, beta, norm_type));
// Adds a 3x3 max-pooling with stride 1; the input is queued with the same
// layout as the output (size handling for the window may be on elided lines).
175 class pooling_layer_type : public topology_layer_type
177 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
// Requires exactly two spatial dimensions (2D pooling).
179 if (output_layout.size.spatial.size() != 2)
183 // for now using just one set of params
184 // todo: randomize params
185 cldnn::primitive_id input_id = topology_generator::CreateLayerId();
186 cldnn::pooling_mode mode = cldnn::pooling_mode::max;
187 cldnn::tensor stride = { 1, 1, 1, 1 };
188 cldnn::tensor size = { 1, 1, 3, 3 };
189 input_layouts.push_back({ input_id, output_layout });
190 topology.add(cldnn::pooling(id, input_id, mode, stride, size));
// Adds a fully connected layer reducing a fixed 100x100-spatial input down
// to the requested output feature count.
194 class fully_connected_layer_type : public topology_layer_type
196 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
198 if (output_layout.format != cldnn::format::bfyx)
203 // for now using just one set of params
204 // todo: randomize params
// Input keeps the batch/feature counts of the output but uses a fixed
// 100x100 spatial size.
206 cldnn::layout input_layout(output_layout.data_type, cldnn::format::bfyx,{ output_layout.size.batch[0] , output_layout.size.feature[0], 100, 100 } );
207 cldnn::primitive_id weights_id = id + "_weights";
// Weights: one row per output feature covering the whole input volume.
208 cldnn::layout weights_layout(output_layout.data_type,
209 cldnn::format::bfyx,{ output_layout.size.feature[0], input_layout.size.feature[0], input_layout.size.spatial[0], input_layout.size.spatial[1] });
210 AddRandomMemory(topology, weights_id, weights_layout);
// One bias value per output feature.
211 cldnn::primitive_id bias_id = id + "_bias";
212 cldnn::layout bias_layout(output_layout.data_type,
213 cldnn::format::bfyx,{ 1, 1, output_layout.size.feature[0], 1 });
214 AddRandomMemory(topology, bias_id, bias_layout);
216 cldnn::primitive_id input_id = topology_generator::CreateLayerId();
217 input_layouts.push_back({ input_id, input_layout });
219 cldnn::fully_connected(id, input_id, { weights_id }, { bias_id }));
// Adds an identity reorder: input layout equals output layout, so the
// primitive only copies/forwards data.
223 class reorder_layer_type : public topology_layer_type
225 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
227 // for now using just one set of params
228 // todo: randomize params
229 cldnn::primitive_id input_id = topology_generator::CreateLayerId();
230 input_layouts.push_back({ input_id, output_layout });//empty reorder
231 topology.add(cldnn::reorder(id,input_id,output_layout));
// Adds a ReLU activation; shape-preserving, accepts any output layout.
235 class activation_layer_type : public topology_layer_type
237 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
239 // for now using just one set of params
240 // todo: randomize params
241 cldnn::primitive_id input_id = topology_generator::CreateLayerId();
242 input_layouts.push_back({ input_id, output_layout });
243 topology.add(cldnn::activation(id, input_id, activation_relu));
// Splits the requested output along the feature axis into two inputs and
// concatenates them back (along_f). Requires bfyx and at least two features
// so the first slice keeps a feature count of at least one.
247 class depth_concatenate_layer_type : public topology_layer_type
249 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
251 // for now using just one set of params
252 // todo: randomize params
253 if (output_layout.format != cldnn::format::bfyx// should be "output_layout.size.format.dimension() < 4" but requires too many case handling since tensor is immutable
254 || output_layout.size.feature[0] < 2)
258 cldnn::primitive_id input_id1 = topology_generator::CreateLayerId();
259 cldnn::primitive_id input_id2 = topology_generator::CreateLayerId();
// First input carries all but one of the output features.
260 cldnn::layout input_layout1(
261 output_layout.data_type,
264 output_layout.size.batch[0],
265 output_layout.size.feature[0] - 1,
266 output_layout.size.spatial[0],
267 output_layout.size.spatial[1]
// Second input supplies the remainder; its feature-count line is elided
// (presumably 1 so the slices sum to the output feature count — confirm).
270 cldnn::layout input_layout2(
271 output_layout.data_type,
274 output_layout.size.batch[0],
276 output_layout.size.spatial[0],
277 output_layout.size.spatial[1]
280 input_layouts.push_back({ input_id1, input_layout1 });
281 input_layouts.push_back({ input_id2, input_layout2 });
283 topology.add(cldnn::concatenation(id, { input_id1,input_id2 }, cldnn::concatenation::along_f));
// Adds an element-wise max between the input and a same-layout buffer of
// random values.
287 class eltwise_layer_type : public topology_layer_type
289 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
291 // for now using just one set of params
292 // todo: randomize params
293 cldnn::primitive_id input_id = topology_generator::CreateLayerId();
294 input_layouts.push_back({ input_id, output_layout });
// The second eltwise operand is a randomized constant with the same layout.
295 cldnn::primitive_id eltwise_params_id = id + "_eltwise_params";
296 AddRandomMemory(topology, eltwise_params_id, output_layout);
297 topology.add(cldnn::eltwise(id, {input_id, eltwise_params_id}, cldnn::eltwise_mode::max));
// Adds a scale primitive whose scale operand is a randomized buffer with the
// input's layout; the last argument (bias id) is left empty.
301 class scale_layer_type : public topology_layer_type
303 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
305 // for now using just one set of params
306 // todo: randomize params
307 cldnn::primitive_id input_id = topology_generator::CreateLayerId();
308 input_layouts.push_back({ input_id, output_layout });
309 cldnn::primitive_id scale_params_id = id + "_scale_params";
310 AddRandomMemory(topology, scale_params_id, output_layout);
311 topology.add(cldnn::scale(id, input_id, scale_params_id, ""));
// Adds a softmax with default arguments; shape-preserving.
315 class softmax_layer_type : public topology_layer_type
317 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts)
319 // for now using just one set of params
320 // todo: randomize params
321 cldnn::primitive_id input_id = topology_generator::CreateLayerId();
322 input_layouts.push_back({ input_id, output_layout });
323 topology.add(cldnn::softmax(id, input_id));
// Placeholder generators for primitives not yet covered by the random
// topology builder. Their bodies sit on elided lines (presumably just
// "return false;" — confirm against the full file), and none of them is
// registered in layer_types below.
328 class batch_norm_layer_type : public topology_layer_type {
329 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
332 class crop_layer_type : public topology_layer_type {
333 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
336 class deconvolution_layer_type : public topology_layer_type {
337 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
340 class prior_box_layer_type : public topology_layer_type {
341 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
344 class roi_pooling_layer_type : public topology_layer_type {
345 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
348 class psroi_pooling_layer_type : public topology_layer_type {
349 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
352 class proposal_layer_type : public topology_layer_type {
353 virtual bool AddPrimitive(cldnn::topology& topology, cldnn::primitive_id id, cldnn::layout output_layout, std::deque<named_layout>& input_layouts) {
// Number of random generator vectors produced per layer-type count.
359 static const unsigned topologies_per_type_size = 10;
// Fixture state comes straight from the gtest parameter tuple.
360 topology_test() : output_layout(std::get<0>(GetParam())), generator(std::get<1>(GetParam())) {}
// Builds the parameterized topology, compiles it with data optimization
// enabled, runs it, and checks the designated output layer produced output.
361 void run_single_test()
363 cldnn::topology* topology = topology_generator::CreateTopology(*output_layout, generator);
364 EXPECT_NE(topology, nullptr);
365 cldnn::build_options options;
366 options.set_option(cldnn::build_option::optimize_data(true));
367 cldnn::engine temp_engine;// using temp_engine since reusing the same one does not free all resources (network build becomes slower and slower)
368 cldnn::network network(temp_engine, *topology, options);
369 auto outputs = network.execute();
// The output layer must appear among the network's outputs.
370 EXPECT_NE(outputs.find(topology_generator::output_layer_id), outputs.end());
// Produces every output layout the suite is instantiated with: the cross
// product of the data types below and the tensor list (tensor entries are
// on elided lines). Layouts are heap-allocated into all_output_layouts and
// freed in TearDownTestCase.
375 static std::vector<cldnn::layout*> generate_all_output_layouts()
// Must only run once per process.
377 assert(all_output_layouts.empty());
378 std::vector<cldnn::data_types> data_types = { cldnn::data_types::f32, cldnn::data_types::f16 };
379 std::vector<cldnn::tensor> output_tensors = {
385 // todo: consider iterating on format X dimensions
387 for (auto dt : data_types) {
388 for (auto t : output_tensors) {
389 all_output_layouts.push_back(new cldnn::layout(dt, cldnn::format::bfyx, t));
392 return all_output_layouts;
// Generates the random generator vectors [type_count, idx0, idx1, ...] used
// to drive topology construction; generator_length is the number of layers
// per topology. Returned as a set to deduplicate.
394 template<unsigned generator_length>
395 static std::set<std::vector<unsigned>> all_generator_vectors()
397 // create vectors used to create topologies [max_layer_index, layer_index0, layer_index1,...]
398 std::set<std::vector<unsigned>> all_generators;
// Fixed seed keeps the generated test set reproducible across runs.
399 static std::default_random_engine rng(tests::random_seed);
400 std::uniform_int_distribution<unsigned> distribution(0, 0xFF);//assuming we won't exceed 256 total layer types
402 const unsigned Initial_layer_types = 10;//don't change this - starting with this index ensures adding layers won't alter previously generated tests
403 for (unsigned types = Initial_layer_types; types <= topology_test::topology_generator::layer_types.size(); types++)
405 for (unsigned i = 0; i < topologies_per_type_size; i++)
407 std::vector<unsigned> generator;
408 generator.push_back(types);
409 for (unsigned j = 0; j < generator_length; j++)
411 generator.push_back(distribution(rng) % types);
413 all_generators.insert(generator);
416 return all_generators;
// Frees the layouts allocated by generate_all_output_layouts.
418 static void TearDownTestCase()
420 for (auto& p : all_output_layouts)
// Builds a readable, unique gtest name from the parameter index, generator
// indices, data type, format order, and raw tensor dimensions (the loop
// bodies appending to ss sit on elided lines).
425 static std::string custom_param_name(const ::testing::TestParamInfo<topology_params>& info)
427 const auto & output_layout = std::get<0>(info.param);
428 const auto & generator = std::get<1>(info.param);
429 std::stringstream ss;
430 ss << info.index << "_";
431 for (auto v : generator)
435 ss << cldnn::data_type_traits::name(output_layout->data_type) << "_";
436 ss << cldnn::format::traits(output_layout->format).order;
437 for (const auto& d : output_layout->size.raw)
// Fixture members: current parameter's layout and generator vector, the
// process-wide engine, and the layouts kept only so they can be deleted.
445 cldnn::layout* output_layout;
446 std::vector<unsigned> generator;
448 static const cldnn::engine& engine;
449 static std::vector<cldnn::layout*> all_output_layouts;//just for tear-down
// Out-of-class definitions for the fixture's static members.
452 const cldnn::engine& topology_test::engine = tests::get_test_engine();
453 std::vector<cldnn::layout*> topology_test::all_output_layouts = {};
// Layer-type registry. Order matters: generator vectors index into this
// vector modulo the type count, so existing entries must keep their
// positions (see also the Initial_layer_types comment above).
455 std::vector<topology_test::topology_generator::topology_layer_type*> topology_test::topology_generator::layer_types = {
456 new topology_test::topology_generator::normalization_layer_type(),
457 new topology_test::topology_generator::pooling_layer_type(),
458 new topology_test::topology_generator::convolution_layer_type(),
459 new topology_test::topology_generator::fully_connected_layer_type(),
460 new topology_test::topology_generator::reorder_layer_type(),
461 new topology_test::topology_generator::activation_layer_type(),
462 new topology_test::topology_generator::depth_concatenate_layer_type(),
463 new topology_test::topology_generator::eltwise_layer_type(),
464 new topology_test::topology_generator::scale_layer_type(),
465 new topology_test::topology_generator::softmax_layer_type(),
466 // Only add new types at the end
// Id of the topology's final (output) layer.
468 const cldnn::primitive_id topology_test::topology_generator::output_layer_id("tg_output_layer");
// Runs one randomly-generated topology; on failure (including a caught
// exception — the try/catch lines are elided) the parameters are printed so
// the failing configuration can be reproduced.
470 TEST_P(topology_test, TOPOLOGY)
475 if (::testing::Test::HasFailure())
477 PrintTupleTo(GetParam(), &std::cout);
482 PrintTupleTo(GetParam(), &std::cout);
// Suite is disabled by default (DISABLED_ prefix); instantiates every
// layout x generator-vector combination with 3 layers per topology.
487 INSTANTIATE_TEST_CASE_P(DISABLED_TOPOLOGY,
489 ::testing::Combine( ::testing::ValuesIn(topology_test::generate_all_output_layouts()),
490 ::testing::ValuesIn(topology_test::all_generator_vectors<3>())),
491 topology_test::custom_param_name);