// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

///////////////////////////////////////////////////////////////////////////////////////////////////
#include "api/CPP/memory.hpp"
#include <api/CPP/primitive.hpp>
#include <api/CPP/input_layout.hpp>
#include <api/CPP/data.hpp>
#include <api/CPP/reorder.hpp>
#include <api/CPP/topology.hpp>
#include <api/CPP/network.hpp>
#include <api/CPP/engine.hpp>
#include "test_utils.h"

#include "instrumentation.h"

#include <algorithm>
#include <cassert>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

using namespace cldnn;

namespace tests
{

const std::string graph_dump_dir = DUMP_DIRECTORY;

generic_test::generic_test() : generic_params(std::get<0>(GetParam())), layer_params(std::get<1>(GetParam())), max_ulps_diff_allowed(4), random_values(true), dump_graphs(false), dump_memory(false)
{
}

void generic_test::run_single_test()
{
    assert((generic_params->data_type == data_types::f32) || (generic_params->data_type == data_types::f16));

    if (dump_graphs)
    {
        generic_params->network_build_options.set_option(cldnn::build_option::graph_dumps_dir(DUMP_DIRECTORY));
    }

    topology topology;
    topology.add(*layer_params);

    std::vector<memory> input_mems;
    std::vector<std::string> input_layouts_names = {};

    // Used by the deterministic fill below so consecutive inputs get consecutive values.
    size_t multiplier = 0;
    for (size_t i = 0; i < generic_params->input_layouts.size(); i++)
    {
        input_mems.push_back(memory::allocate(engine, generic_params->input_layouts[i]));

        if (random_values)
        {
            if (generic_params->data_type == data_types::f32)
            {
                tests::set_random_values<float>(input_mems[i], true, 7, 10);
            }
            else
            {
                tests::set_random_values<FLOAT16>(input_mems[i], true, 5, 10);
            }
        }
        else
        {
            size_t size = generic_params->input_layouts[i].size.batch[0] * generic_params->input_layouts[i].size.feature[0];

            if (generic_params->data_type == data_types::f32)
            {
                std::vector<float> values;
                for (size_t j = 1; j <= size; j++)
                {
                    values.push_back(static_cast<float>(multiplier + j));
                }
                tests::set_values_per_batch_and_feature<float>(input_mems[i], values);
                multiplier = values.size();
            }
            else
            {
                std::vector<FLOAT16> values;
                for (size_t j = 1; j <= size; j++)
                {
                    values.push_back(FLOAT16(static_cast<float>(multiplier + j)));
                }
                tests::set_values_per_batch_and_feature<FLOAT16>(input_mems[i], values);
                multiplier = values.size();
            }
        }
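        // Illustration of the deterministic fill above (values assumed): an input with
        // batch = 2 and feature = 2 gets the per-batch/feature values 1, 2, 3, 4, and
        // the next input then starts counting from this input's element count.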

        std::string input_name = "input" + std::to_string(i);
        if ((i == 0) && generic_params->network_build_options.get<cldnn::build_option_type::optimize_data>()->enabled())
        {
            // When the optimize-data flag is set, route the first input through a reorder
            // (added below), since optimization might change the input layout.
            input_name = "input0_init";
        }

        // The first input is always provided to the network as an input_layout.
        // The remaining inputs are provided as input_layout when the optimize-data flag is
        // off; otherwise they are provided as data primitives.
        if ((i == 0) || !generic_params->network_build_options.get<cldnn::build_option_type::optimize_data>()->enabled())
        {
            topology.add(input_layout(input_name, input_mems[i].get_layout()));
            input_layouts_names.push_back(input_name);
        }
        else
        {
            topology.add(data(input_name, input_mems[i]));
        }
    }

    if (!is_format_supported(generic_params->fmt))
    {
        // Negative test: building a network with an unsupported format must throw.
        ASSERT_THROW(network bad(engine, topology), std::exception);
        return;
    }

    if (generic_params->network_build_options.get<cldnn::build_option_type::optimize_data>()->enabled())
    {
        // Add a reorder after the first input, since the optimize-data pass might change the input layout.
        topology.add(reorder("input0", "input0_init", input_mems[0].get_layout()));
    }

    if (layer_params->input[0] == "reorder0")
    {
        // Add a reorder layer with output padding as the input to the tested layer.
        topology.add(reorder("reorder0", "input0", input_mems[0].get_layout().with_padding({ { 0, 0, 1, 3 }, { 0, 0, 5, 2 } })));
    }

    prepare_input_for_test(input_mems);

    network network(engine, topology, generic_params->network_build_options);

    for (size_t i = 0; i < input_layouts_names.size(); i++)
    {
        network.set_input_data(input_layouts_names[i], input_mems[i]);
    }

    auto outputs = network.execute();
    EXPECT_EQ(outputs.size(), size_t(1));

    auto output = outputs.begin()->second.get_memory();

    auto output_ref = generate_reference(input_mems);

    if (dump_memory)
    {
        std::string prefix = test_info.name();
        for (size_t i = 0; i < generic_params->input_layouts.size(); i++)
        {
            ::instrumentation::logger::log_memory_to_file(input_mems[i], prefix + "input" + std::to_string(i));
        }
        // outputs.size() is expected to be 1 (checked above), so logging `output` covers the single result.
        for (size_t i = 0; i < outputs.size(); i++)
        {
            ::instrumentation::logger::log_memory_to_file(output, prefix + "output" + std::to_string(i));
        }
    }

    if (output.get_layout().data_type == data_types::f32)
    {
        compare_buffers<float>(output, output_ref);
    }
    else
    {
        compare_buffers<FLOAT16>(output, output_ref);
    }
}
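
// Usage sketch (illustrative only): a concrete layer test derives from generic_test,
// supplies a host reference implementation, and runs the flow above. The class name
// "my_layer_test" and the exact override signature are hypothetical -- see test_utils.h
// for the actual virtual interface.
//
//     class my_layer_test : public tests::generic_test
//     {
//     public:
//         cldnn::memory generate_reference(const std::vector<cldnn::memory>& inputs) override
//         {
//             /* compute the expected output on the host */
//         }
//     };
//
//     TEST_P(my_layer_test, MY_LAYER)
//     {
//         run_single_test();
//     }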

template<typename Type>
void generic_test::compare_buffers(const memory& out, const memory& ref)
{
    auto out_layout = out.get_layout();
    auto ref_layout = ref.get_layout();

    EXPECT_EQ(out_layout.size, ref_layout.size);
    EXPECT_EQ(out_layout.data_type, ref_layout.data_type);
    EXPECT_EQ(get_expected_output_tensor(), out_layout.size);
    EXPECT_EQ(out_layout.get_linear_size(), ref_layout.get_linear_size());
    EXPECT_EQ(out_layout.data_padding, ref_layout.data_padding);

    auto output_size = out_layout.size;

    int batch_size = output_size.batch[0];
    int feature_size = output_size.feature[0];
    int y_size = output_size.spatial[1];
    int x_size = output_size.spatial[0];

    auto res_data = out.pointer<Type>();
    auto ref_data = ref.pointer<Type>();

    const auto out_desc = get_linear_memory_desc(out_layout);
    const auto ref_desc = get_linear_memory_desc(ref_layout);

    for (int b = 0; b < batch_size; b++)
    {
        for (int f = 0; f < feature_size; f++)
        {
            for (int y = 0; y < y_size; y++)
            {
                for (int x = 0; x < x_size; x++)
                {
                    size_t res_index = get_linear_index(out_layout, b, f, y, x, out_desc);
                    size_t ref_index = get_linear_index(ref_layout, b, f, y, x, ref_desc);

                    EXPECT_TRUE(floating_point_equal(res_data[res_index], ref_data[ref_index], max_ulps_diff_allowed))
                        << "Expected " << (float)res_data[res_index] << " to be almost equal (within " << max_ulps_diff_allowed << " ULPs) to " << (float)ref_data[ref_index]
                        << " (ref index = " << ref_index << ", B " << b << ", F " << f << ", Y " << y << ", X " << x << ")!";
                }
            }
        }
    }
}

static size_t calc_offset(const layout& layout, const pitches& p)
{
    auto lower_padding = layout.data_padding.lower_size();
    return
        p.b * lower_padding.batch[0] +
        p.f * lower_padding.feature[0] +
        p.y * lower_padding.spatial[1] +
        p.x * lower_padding.spatial[0];
}

memory_desc generic_test::get_linear_memory_desc(const layout& layout)
{
    pitches p;

    switch (layout.format)
    {
        case format::bfyx:
        {
            p.x = 1;
            p.y = layout.get_buffer_size().sizes(format::bfyx)[3] * p.x;
            p.f = layout.get_buffer_size().sizes(format::bfyx)[2] * p.y;
            p.b = layout.get_buffer_size().sizes(format::bfyx)[1] * p.f;
            break;
        }
        case format::yxfb:
        {
            p.b = 1;
            p.f = layout.get_buffer_size().sizes(format::yxfb)[3] * p.b;
            p.x = layout.get_buffer_size().sizes(format::yxfb)[2] * p.f;
            p.y = layout.get_buffer_size().sizes(format::yxfb)[1] * p.x;
            break;
        }
        case format::fyxb:
        {
            p.b = 1;
            p.x = layout.get_buffer_size().sizes(format::fyxb)[3] * p.b;
            p.y = layout.get_buffer_size().sizes(format::fyxb)[2] * p.x;
            p.f = layout.get_buffer_size().sizes(format::fyxb)[1] * p.y;
            break;
        }
        case format::byxf:
        {
            p.f = 1;
            p.x = layout.get_buffer_size().sizes(format::byxf)[3] * p.f;
            p.y = layout.get_buffer_size().sizes(format::byxf)[2] * p.x;
            p.b = layout.get_buffer_size().sizes(format::byxf)[1] * p.y;
            break;
        }
        default:
        {
            throw std::runtime_error("Format not supported yet.");
        }
    }

    return { p, calc_offset(layout, p) };
}
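
// Worked example for the pitches computed above (sizes assumed for illustration): an
// unpadded bfyx buffer with b = 2, f = 3, y = 4, x = 5 gives
//     p.x = 1, p.y = 5, p.f = 20, p.b = 60,
// so element (b, f, y, x) sits at linear index b*60 + f*20 + y*5 + x. With non-zero
// lower padding, calc_offset shifts the start of the data region by whole rows and
// columns using these same pitches.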

size_t generic_test::get_linear_index(const layout&, size_t b, size_t f, size_t y, size_t x, const memory_desc& desc)
{
    return
        desc.offset +
        b * desc.pitch.b +
        f * desc.pitch.f +
        y * desc.pitch.y +
        x * desc.pitch.x;
}

size_t generic_test::get_linear_index_with_broadcast(const layout& in_layout, size_t b, size_t f, size_t y, size_t x, const memory_desc& desc)
{
    return
        desc.offset +
        (b % in_layout.size.batch[0]) * desc.pitch.b +
        (f % in_layout.size.feature[0]) * desc.pitch.f +
        (y % in_layout.size.spatial[1]) * desc.pitch.y +
        (x % in_layout.size.spatial[0]) * desc.pitch.x;
}
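
// Broadcast example (illustrative): reading an input of size { b = 2, f = 1, y = 4, x = 5 }
// with feature coordinates in [0, 8) yields (f % 1) == 0 for every f, so all of them map
// to the single stored feature slice -- numpy-style broadcasting along that axis.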

// Default implementation: the output tensor matches the first input. Override in a
// derived class when the tested layer changes the output shape.
cldnn::tensor generic_test::get_expected_output_tensor()
{
    return generic_params->input_layouts[0].size;
}

std::vector<test_params*> generic_test::generate_generic_test_params(std::vector<test_params*>& all_generic_params)
{
    // , { format::yx,{ 531,777 } } , { format::yx,{ 4096,1980 } } ,
    //{ format::bfyx,{ 1,1,1,1 } } , { format::bfyx,{ 1,1,2,2 } } , { format::yx,{ 3,3 } } , { format::yx,{ 4,4 } } , { format::bfyx,{ 1,1,5,5 } } , { format::yx,{ 6,6 } } , { format::yx,{ 7,7 } } ,
    //{ format::yx,{ 8,8 } } , { format::yx,{ 9,9 } } , { format::yx,{ 10,10 } } , { format::yx,{ 11,11 } } , { format::yx,{ 12,12 } } , { format::yx,{ 13,13 } } ,
    //{ format::yx,{ 14,14 } } , { format::yx,{ 15,15 } } , { format::yx,{ 16,16 } } };

    auto data_types = test_data_types();

    // Generate the Cartesian product of data type x format x batch x feature x spatial size.
    for (cldnn::data_types data_type : data_types)
    {
        for (cldnn::format fmt : test_input_formats)
        {
            for (int batch_size : test_batch_sizes)
            {
                for (int feature_size : test_feature_sizes)
                {
                    for (tensor input_size : test_input_sizes)
                    {
                        all_generic_params.push_back(new test_params(data_type, fmt, batch_size, feature_size, input_size));
                    }
                }
            }
        }
    }

    return all_generic_params;
}
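
// With the default grids defined at the bottom of this file, this produces
// 4 formats x 2 batch sizes x 2 feature sizes x 3 spatial sizes = 48 combinations
// per data type, and twice that when the device also supports fp16.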

const cldnn::engine& get_test_engine()
{
    static const cldnn::engine engine;
    return engine;
}

const std::string test_dump::name() const
{
    std::string temp = name_str;
    std::replace(temp.begin(), temp.end(), '/', '_');
    return temp;
}

const std::string test_dump::test_case_name() const
{
    size_t pos = test_case_name_str.find("/");
    if (pos > test_case_name_str.length())
    {
        pos = 0;
    }
    std::string temp = test_case_name_str.substr(pos);
    return temp;
}

std::string test_params::print_tensor(cldnn::tensor t)
{
    std::stringstream str;
    for (size_t i = 0; i < t.sizes(format::bfyx).size(); i++)
    {
        str << t.sizes(format::bfyx)[i] << " ";
    }
    return str.str();
}

std::string test_params::print()
{
    std::stringstream str;
    str << "Data type: " << data_type_traits::name(data_type) << std::endl;

    for (int j = 0; j < (int)input_layouts.size(); j++)
    {
        const cldnn::tensor& t = input_layouts[j].size;

        str << "Input " << j << ": " << print_tensor(t) << std::endl;
    }
    return str.str();
}

std::vector<cldnn::data_types> generic_test::test_data_types()
{
    std::vector<cldnn::data_types> result;
    result.push_back(cldnn::data_types::f32);

    // FP16 is only tested when the device reports support for it.
    if (get_test_engine().get_info().supports_fp16)
    {
        result.push_back(cldnn::data_types::f16);
    }
    return result;
}

std::vector<cldnn::format> generic_test::test_input_formats = { cldnn::format::bfyx, cldnn::format::yxfb, cldnn::format::fyxb, cldnn::format::byxf };
std::vector<int32_t> generic_test::test_batch_sizes = { 1, 2 }; // 4, 8, 16
std::vector<int32_t> generic_test::test_feature_sizes = { 1, 2 }; // 3, 15
std::vector<tensor> generic_test::test_input_sizes = { { 1, 1, 100, 100 }, { 1, 1, 277, 277 }, { 1, 1, 400, 600 } };

} // namespace tests