2 // Copyright (c) 2016 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
// TODO: move this test-utility header to a dedicated helpers folder
21 #include "api/CPP/memory.hpp"
22 #include "api/CPP/tensor.hpp"
23 #include "api/CPP/program.hpp"
24 #include "api/CPP/network.hpp"
29 #include <gtest/gtest.h>
30 #include <api/CPP/primitive.hpp>
32 #include "random_gen.h"
33 #include "api/CPP/concatenation.hpp"
34 #include "api/CPP/lrn.hpp"
35 #include "api/CPP/roi_pooling.hpp"
36 #include "api/CPP/scale.hpp"
37 #include "api/CPP/softmax.hpp"
38 #include "api/CPP/reorder.hpp"
39 #include "api/CPP/normalize.hpp"
40 #include "api/CPP/convolution.hpp"
41 #include "api/CPP/activation.hpp"
42 #include "api/CPP/pooling.hpp"
// Number of elements in a true C array (do not use on pointers or decayed arrays).
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
// Set to 1 to seed the test RNGs from std::random_device (non-reproducible
// runs); with 0 a fixed seed keeps every test run reproducible.
#define USE_RANDOM_SEED 0
#if USE_RANDOM_SEED
std::random_device rnd_device;
unsigned int const random_seed = rnd_device();
#else
unsigned int const random_seed = 1337;
#endif
// Rounds a floating point number by zeroing the low mantissa bits.
// fraction_precision is the number of fraction bits kept; it must be in the
// range [0,23] (IEEE-754 single precision has a 23-bit mantissa). For the
// default precision of 15 the kept-bit mask is:
// 1 11111111 11111111111111100000000
inline float float_round(float x, size_t fraction_precision = 15) {
    uint32_t mask = ~((1 << (23 - fraction_precision)) - 1);
    // NOTE(review): writing through reinterpret_cast<uint32_t&> is type
    // punning that formally violates strict aliasing; kept as-is because the
    // surrounding tests depend on this exact bit-level behavior.
    reinterpret_cast<uint32_t&>(x) &= mask;
    return x;
}
// Short aliases for the nested std::vector shapes used throughout the tests.
template<typename T>
using VF = std::vector<T>;            // float vector (1d buffer)
template<typename T>
using VVF = std::vector<VF<T>>;       // feature map (2d)
template<typename T>
using VVVF = std::vector<VVF<T>>;     // 3d feature map
template<typename T>
using VVVVF = std::vector<VVVF<T>>;   // batch of 3d feature maps
template<typename T>
using VVVVVF = std::vector<VVVVF<T>>; // split of bfyx filters
80 inline VF<T> flatten_4d(cldnn::format input_format, VVVVF<T> &data) {
81 size_t a = data.size();
82 size_t b = data[0].size();
83 size_t c = data[0][0].size();
84 size_t d = data[0][0][0].size();
85 VF<T> vec(a * b * c * d, (T)(0.0f));
88 switch (input_format.value) {
89 case cldnn::format::yxfb:
90 for (size_t yi = 0; yi < c; ++yi)
91 for (size_t xi = 0; xi < d; ++xi)
92 for (size_t fi = 0; fi < b; ++fi)
93 for (size_t bi = 0; bi < a; ++bi)
94 vec[idx++] = data[bi][fi][yi][xi];
97 case cldnn::format::fyxb:
98 for (size_t fi = 0; fi < b; ++fi)
99 for (size_t yi = 0; yi < c; ++yi)
100 for (size_t xi = 0; xi < d; ++xi)
101 for (size_t bi = 0; bi < a; ++bi)
102 vec[idx++] = data[bi][fi][yi][xi];
105 case cldnn::format::bfyx:
106 for (size_t bi = 0; bi < a; ++bi)
107 for (size_t fi = 0; fi < b; ++fi)
108 for (size_t yi = 0; yi < c; ++yi)
109 for (size_t xi = 0; xi < d; ++xi)
110 vec[idx++] = data[bi][fi][yi][xi];
113 case cldnn::format::byxf:
114 for (size_t bi = 0; bi < a; ++bi)
115 for (size_t yi = 0; yi < c; ++yi)
116 for (size_t xi = 0; xi < d; ++xi)
117 for (size_t fi = 0; fi < b; ++fi)
118 vec[idx++] = data[bi][fi][yi][xi];
128 std::vector<T> generate_random_1d(size_t a, int min, int max, int k = 8) {
129 static std::default_random_engine generator(random_seed);
130 // 1/k is the resolution of the floating point numbers
131 std::uniform_int_distribution<int> distribution(k * min, k * max);
134 for (size_t i = 0; i < a; ++i) {
135 v[i] = (T)distribution(generator);
142 std::vector<std::vector<T>> generate_random_2d(size_t a, size_t b, int min, int max, int k = 8) {
143 std::vector<std::vector<T>> v(a);
144 for (size_t i = 0; i < a; ++i)
145 v[i] = generate_random_1d<T>(b, min, max, k);
150 std::vector<std::vector<std::vector<T>>> generate_random_3d(size_t a, size_t b, size_t c, int min, int max, int k = 8) {
151 std::vector<std::vector<std::vector<T>>> v(a);
152 for (size_t i = 0; i < a; ++i)
153 v[i] = generate_random_2d<T>(b, c, min, max, k);
157 // parameters order is assumed to be bfyx or bfyx
159 std::vector<std::vector<std::vector<std::vector<T>>>> generate_random_4d(size_t a, size_t b, size_t c, size_t d, int min, int max, int k = 8) {
160 std::vector<std::vector<std::vector<std::vector<T>>>> v(a);
161 for (size_t i = 0; i < a; ++i)
162 v[i] = generate_random_3d<T>(b, c, d, min, max, k);
166 // parameters order is assumed to be sbfyx for filters when split > 1
168 std::vector<std::vector<std::vector<std::vector<std::vector<T>>>>> generate_random_5d(size_t a, size_t b, size_t c, size_t d, size_t e, int min, int max, int k = 8) {
169 std::vector<std::vector<std::vector<std::vector<std::vector<T>>>>> v(a);
170 for (size_t i = 0; i < a; ++i)
171 v[i] = generate_random_4d<T>(b, c, d, e, min, max, k);
175 template <class T> void set_value(const cldnn::pointer<T>& ptr, uint32_t index, T value) { ptr[index] = value; }
176 template <class T> T get_value(const cldnn::pointer<T>& ptr, uint32_t index) { return ptr[index]; }
179 void set_values(const cldnn::memory& mem, std::initializer_list<T> args ){
180 auto ptr = mem.pointer<T>();
182 auto it = ptr.begin();
188 void set_values(const cldnn::memory& mem, std::vector<T> args) {
189 auto ptr = mem.pointer<T>();
191 auto it = ptr.begin();
197 void set_values_per_batch_and_feature(const cldnn::memory& mem, std::vector<T> args)
199 auto mem_ptr = mem.pointer<T>();
200 auto&& pitches = mem.get_layout().get_pitches();
201 auto&& size = mem.get_layout().size;
202 for (cldnn::tensor::value_type b = 0; b < size.batch[0]; ++b)
204 for (cldnn::tensor::value_type f = 0; f < size.feature[0]; ++f)
206 for (cldnn::tensor::value_type y = 0; y < size.spatial[1]; ++y)
208 for (cldnn::tensor::value_type x = 0; x < size.spatial[0]; ++x)
210 unsigned int input_it = b*pitches.batch[0] + f*pitches.feature[0] + y*pitches.spatial[1] + x*pitches.spatial[0];
211 mem_ptr[input_it] = args[b*size.feature[0] + f];
221 void set_random_values(const cldnn::memory& mem, bool sign = false, unsigned significand_bit = 8, unsigned scale = 1)
223 auto ptr = mem.pointer<T>();
226 for (auto it = ptr.begin(); it != ptr.end(); ++it)
228 *it = rnd_generators::gen_number<T>(gen, significand_bit, sign, false, scale);
233 // Tries to construct a network, checking if an expected error appears
234 inline void check_exception_massage(const cldnn::engine& engine, cldnn::topology& topology, std::string msg_to_find)
237 cldnn::network(engine, topology);
239 catch (std::exception & exc) {
240 std::string msg(exc.what());
241 if (msg.find(msg_to_find) != std::string::npos) {
245 printf("%s\n", exc.what());
// Checks equality of floats.
// For values whose magnitude is below absoulte_error_limit the absolute
// error is compared; for all other values the relative error is compared
// (a relative error is meaningless near zero). Returns false, and logs both
// values, when the error exceeds the corresponding threshold.
// Defaults:
//   relative_error_threshold = 1e-3
//   absolute_error_threshold = 1e-6
//   absoulte_error_limit     = 1e-4   (name kept as-is for compatibility)
inline bool are_equal(
    const float ref_item,
    const float item,
    const float relative_error_threshold = 1e-3,
    const float absolute_error_threshold = 1e-6,
    const float absoulte_error_limit = 1e-4) {
    if (fabs(item) < absoulte_error_limit) {
        if (fabs(item - ref_item) > absolute_error_threshold) {
            std::cout << "Ref val: " << ref_item << "\tSecond val: " << item << std::endl;
            return false;
        }
    }
    else if (fabs(item - ref_item) / fabs(ref_item) > relative_error_threshold) {
        std::cout << "Ref val: " << ref_item << "\tSecond val: " << item << std::endl;
        return false;
    }
    return true;
}
280 inline bool floating_point_equal(FLOAT16 x, FLOAT16 y, int max_ulps_diff = 4) {
281 int16_t sign_bit_mask = 1;
282 sign_bit_mask <<= 15;
283 int16_t a = x.v, b = y.v;
284 if ((a & sign_bit_mask) != (b & sign_bit_mask)) {
287 return a == 0 && b == 0;
290 return std::abs(a - b) < (1 << (max_ulps_diff));
// ULP comparison for float: values of the same sign are equal when their raw
// bit patterns differ by fewer than 2^max_ulps_diff; values of opposite sign
// compare equal only when both are (+/-)zero.
inline bool floating_point_equal(float x, float y, int max_ulps_diff = 4) {
    // Shifting 1 into the sign bit and reading floats through
    // reinterpret_cast are formally UB/implementation-defined; kept to match
    // the original bit-pattern comparison the tests rely on.
    int32_t sign_bit_mask = 1;
    sign_bit_mask <<= 31;
    int32_t a = reinterpret_cast<int32_t&>(x), b = reinterpret_cast<int32_t&>(y);
    if ((a & sign_bit_mask) != (b & sign_bit_mask)) {
        a &= ~sign_bit_mask;
        b &= ~sign_bit_mask;
        return a == 0 && b == 0;
    }
    else {
        return std::abs(a - b) < (1 << (max_ulps_diff));
    }
}
314 fmt(cldnn::format::bfyx)
318 test_params(cldnn::data_types dt, cldnn::format input_format, int32_t batch_size, int32_t feature_size, cldnn::tensor input_size, cldnn::build_options const& options = cldnn::build_options()) :
321 network_build_options(options)
323 cldnn::tensor t = cldnn::tensor(batch_size, feature_size, input_size.spatial[0], input_size.spatial[1] );
324 input_layouts.push_back( cldnn::layout(dt, fmt, t) );
// Element data type shared by the test inputs.
cldnn::data_types data_type;
// One layout per network input; populated by the constructors (see above).
std::vector<cldnn::layout> input_layouts;
// Opaque pointer for test-specific parameters; semantics are defined by the
// test that sets it — presumably not owned here (TODO confirm).
void * opaque_custom_param = nullptr;
// Build options forwarded when the network under test is constructed.
cldnn::build_options network_build_options;
// Formats a tensor for log/debug output (declaration only; defined elsewhere).
static std::string print_tensor(cldnn::tensor tensor);
// Returns the engine shared by the tests (declaration only; defined elsewhere).
const cldnn::engine & get_test_engine();

// Accessors for the identity of the currently running gtest test.
const std::string name() const;
const std::string test_case_name() const;
// Snapshots of the gtest test info, captured at member initialization time.
const std::string test_case_name_str = ::testing::UnitTest::GetInstance()->current_test_info()->test_case_name();
const std::string name_str = ::testing::UnitTest::GetInstance()->current_test_info()->name();
// Base fixture for parameterized layer tests: every test instance is driven
// by a (test_params*, primitive*) pair supplied through gtest's param API.
class generic_test : public ::testing::TestWithParam<std::tuple<test_params*, cldnn::primitive*>>
// Builds and runs the network for the current parameter pair.
void run_single_test();
// Element-wise comparison of an output buffer against a reference buffer.
template<typename Type>
void compare_buffers(const cldnn::memory& out, const cldnn::memory& ref);
// Maps (b, f, y, x) coordinates to a flat element index for the given layout.
static size_t get_linear_index(const cldnn::layout & layout, size_t b, size_t f, size_t y, size_t x, const memory_desc& desc);
// As get_linear_index, but presumably wraps coordinates so a smaller input
// can be broadcast against a larger shape — TODO confirm in the definition.
static size_t get_linear_index_with_broadcast(const cldnn::layout& in_layout, size_t b, size_t f, size_t y, size_t x, const memory_desc& desc);
// Describes the linear memory arrangement of the given layout.
static memory_desc get_linear_memory_desc(const cldnn::layout & layout);
// Produces the generic parameter combinations used by test instantiations.
static std::vector<test_params*> generate_generic_test_params(std::vector<test_params*>& all_generic_params);
// Dumps the built program graph to file (used when dump_graphs is set).
static void dump_graph(const std::string test_name, cldnn::build_options& bo);
// Subclasses declare which input formats the layer under test supports.
virtual bool is_format_supported(cldnn::format format) = 0;
// Expected output shape for the current parameters; overridable by subclasses.
virtual cldnn::tensor get_expected_output_tensor();
// Names a parameterized test case by its index within the instantiation.
struct custom_param_name_functor {
std::string operator()(const ::testing::TestParamInfo<std::tuple<test_params*, cldnn::primitive*>>& info) {
return std::to_string(info.index);
}
// Engine shared by all tests (see get_test_engine()).
const cldnn::engine& engine = get_test_engine();
// Current test parameters; lifetime managed outside the fixture (TODO confirm).
test_params* generic_params;
// Primitive (layer) under test; lifetime managed outside the fixture (TODO confirm).
cldnn::primitive* layer_params;
int max_ulps_diff_allowed; //Max number of ulps allowed between 2 values when comparing the output buffer and the reference buffer.
bool random_values; // if set memory buffers will be filled with random values
bool dump_graphs; // if set tests will dump graphs to file
bool dump_memory; // if set memory buffers will be dumped to file
// Subclasses compute the expected output for the given inputs on the host.
virtual cldnn::memory generate_reference(const std::vector<cldnn::memory>& inputs) = 0;
// Allows the test to override the random input data that the framework generates
virtual void prepare_input_for_test(std::vector<cldnn::memory>& inputs)
// Dimension pools from which the generic parameter combinations are drawn.
static std::vector<cldnn::data_types> test_data_types();
static std::vector<cldnn::format> test_input_formats;
static std::vector<cldnn::format> test_weight_formats;
static std::vector<int32_t> test_batch_sizes;
static std::vector<int32_t> test_feature_sizes;
static std::vector<cldnn::tensor> test_input_sizes;
// When a test assertion such as EXPECT_EQ fails, Google-Test prints the argument values to help with debugging.
// It does this using a user - extensible value printer.
// This function will be used to print the test params in case of an error.
inline void PrintTupleTo(const std::tuple<tests::test_params*, cldnn::primitive*>& t, ::std::ostream* os)
std::stringstream str;

auto test_param = std::get<0>(t);
auto primitive = std::get<1>(t);

// Generic part: the test's input description plus the primitive's padding.
str << std::endl << "Test params: " << test_param->print();

str << "Layer params:\n"
<< "Output padding lower size: " << test_param->print_tensor(primitive->output_padding.lower_size())
<< " upper size: " << test_param->print_tensor(primitive->output_padding.upper_size()) << '\n';

//TODO: do layers not have param dumping? we could consider adding it

// Per-layer part: dispatch on the primitive's runtime type id and print the
// fields relevant to that layer type.
if (primitive->type == cldnn::concatenation::type_id())
// NOTE(review): dc appears unused in the visible lines — possibly a
// placeholder for layer-specific printing that was never filled in.
auto dc = static_cast<cldnn::concatenation*>(primitive);
else if(primitive->type == cldnn::lrn::type_id())
auto lrn = static_cast<cldnn::lrn *>(primitive);
std::string norm_region = (lrn->norm_region == cldnn_lrn_norm_region_across_channel) ? "across channel" : "within channel";
str << "Norm region: " << norm_region
<< " Size: " << lrn->size
<< " Alpha: " << lrn->alpha
<< " Beta: " << lrn->beta
else if(primitive->type == cldnn::roi_pooling::type_id())
auto p = static_cast<cldnn::roi_pooling *>(primitive);
str << "Pooling mode: " << (p->mode == cldnn::pooling_mode::max ? "MAX" : "AVG")
<< " Pooled width: " << p->pooled_width
<< " Pooled height: " << p->pooled_height
<< " Spatial scale: " << p->spatial_scale
<< " Spatial bins x: " << p->spatial_bins_x
<< " Spatial bins y: " << p->spatial_bins_y
<< " Output dim: " << p->output_dim;
else if(primitive->type == cldnn::scale::type_id())
// NOTE(review): s appears unused in the visible lines — see note above on dc.
auto s = static_cast<cldnn::scale *>(primitive);
else if(primitive->type == cldnn::softmax::type_id())
// NOTE(review): sm appears unused in the visible lines — see note above on dc.
auto sm = static_cast<cldnn::softmax *>(primitive);
else if (primitive->type == cldnn::reorder::type_id())
auto reorder = static_cast<cldnn::reorder*>(primitive);
str << "Output data type: " << cldnn::data_type_traits::name(*reorder->output_data_type) << " Mean: " << reorder->mean << "Subtract per feature: " << "TODO" /*std::vector<float> subtract_per_feature*/;
else if (primitive->type == cldnn::normalize::type_id())
auto normalize = static_cast<cldnn::normalize*>(primitive);
std::string norm_region = normalize->across_spatial ? "across_spatial" : "within_spatial";
str << "Norm region: " << norm_region << " Epsilon: " << normalize->epsilon << " Scale input id: " << normalize->scale_input;
else if (primitive->type == cldnn::convolution::type_id())
auto convolution = static_cast<cldnn::convolution*>(primitive);
str << "Stride x: " << convolution->stride.spatial[0] << " Stride y: " << convolution->stride.spatial[1]
<< " Dilation x: " << convolution->dilation.spatial[0] << " Dilation y: " << convolution->dilation.spatial[1]
<< " Input offset x: " << convolution->input_offset.spatial[0] << " Input offset y: " << convolution->input_offset.spatial[1]
<< " Activation: " << convolution->with_activation << " Activation slope: " << convolution->activation_negative_slope;
else if (primitive->type == cldnn::activation::type_id())
auto activation = static_cast<cldnn::activation*>(primitive);
str << "Negative slope: " << activation->additional_params.a << " Negative slope input id: " << activation->additional_params_input;
else if (primitive->type == cldnn::pooling::type_id())
auto pooling = static_cast<cldnn::pooling*>(primitive);
std::string pooling_mode = (pooling->mode == cldnn::pooling_mode::max) ? "max" : "average";
str << "Pooling mode: " << pooling_mode
<< " Input offset x: " << pooling->input_offset.spatial[0] << " Input offset y: " << pooling->input_offset.spatial[1]
<< " Stride x: " << pooling->stride.spatial[0] << " Stride y: " << pooling->stride.spatial[1]
<< " Size x: " << pooling->size.spatial[0] << " Size y: " << pooling->size.spatial[1];
// Unknown primitive type: fail loudly rather than print an empty dump.
throw std::runtime_error("Not implemented yet for this primitive.");