1 // Copyright (c) 2019 Intel Corporation
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
7 // http://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
15 ///////////////////////////////////////////////////////////////////////////////////////////////////
16 #include <gtest/gtest.h>
18 #include <api/CPP/engine.hpp>
19 #include <api/CPP/input_layout.hpp>
20 #include <api/CPP/memory.hpp>
21 #include <api/CPP/contract.hpp>
22 #include <api/CPP/topology.hpp>
23 #include <api/CPP/network.hpp>
25 #include "test_utils/test_utils.h"
26 #include "test_utils/uniform_quantized_real_distribution.hpp"
30 using namespace cldnn;
31 using namespace ::tests;
34 T reduce_execute(cldnn::contract_mode mode, T x, T y) {
36 case contract_mode::sum:
38 case contract_mode::prod:
40 case contract_mode::all:
42 case contract_mode::any:
44 case contract_mode::max:
52 VVVVF<T> reduce_dim(VVVVF<T> &input,
53 cldnn::contract_mode mode, uint16_t axis,
54 int input_padding_y = 0, int input_padding_x = 0,
55 int output_padding_y = 0, int output_padding_x = 0) {
57 size_t padding_y = input_padding_y + output_padding_y;
58 size_t padding_x = input_padding_x + output_padding_x;
60 out_sizes[0] = input.size();
61 out_sizes[1] = input[0].size();
62 out_sizes[2] = input[0][0].size() + 2 * padding_y;
63 out_sizes[3] = input[0][0][0].size() + 2 * padding_x;
67 for (uint16_t i = axis; i > 0; --i)
69 out_sizes[i] = out_sizes[i - 1];
72 VVVVF<T> output(out_sizes[0], VVVF<T>(out_sizes[1], VVF<T>(out_sizes[2], VF<T>(out_sizes[3]))));
76 for (size_t f = 0; f < out_sizes[1]; ++f)
77 for (size_t y = 0; y < out_sizes[2]; ++y)
78 for (size_t x = 0; x < out_sizes[3]; ++x)
80 T res = input[0][f][y][x];
81 size_t orig_b = input.size();
82 for (size_t b = 1; b < orig_b; ++b)
83 res = reduce_execute<T>(mode, res, input[b][f][y][x]);
84 output[0][f][y][x] = res;
88 for (size_t b = 0; b < out_sizes[1]; ++b)
89 for (size_t y = 0; y < out_sizes[2]; ++y)
90 for (size_t x = 0; x < out_sizes[3]; ++x)
92 T res = input[b][0][y][x];
93 size_t orig_f = input[0].size();
94 for (size_t f = 1; f < orig_f; ++f)
95 res = reduce_execute<T>(mode, res, input[b][f][y][x]);
96 output[0][b][y][x] = res;
100 for (size_t b = 0; b < out_sizes[1]; ++b)
101 for (size_t f = 0; f < out_sizes[2]; ++f)
102 for (size_t x = 0; x < out_sizes[3]; ++x)
104 T res = input[b][f][0][x];
105 size_t orig_y = input[0][0].size();
106 for (size_t y = 1; y < orig_y; ++y)
107 res = reduce_execute<T>(mode, res, input[b][f][y][x]);
108 output[0][b][f][x] = res;
112 for (size_t b = 0; b < out_sizes[1]; ++b)
113 for (size_t f = 0; f < out_sizes[2]; ++f)
114 for (size_t y = 0; y < out_sizes[3]; ++y)
116 T res = input[b][f][y][0];
117 size_t orig_x = input[0][0][0].size();
118 for (size_t x = 1; x < orig_x; ++x)
119 res = reduce_execute<T>(mode, res, input[b][f][y][x]);
120 output[0][b][f][y] = res;
128 template <typename T>
129 VVVVF<T> reduce_input(VVVVF<T> &input,
130 cldnn::contract_mode mode, std::vector<uint16_t> reduction_axes,
131 int input_padding_y = 0, int input_padding_x = 0,
132 int output_padding_y = 0, int output_padding_x = 0) {
133 VVVVF<T> output(input);
134 for (size_t i = 0; i < reduction_axes.size(); ++i)
135 output = reduce_dim<T>(output, mode, reduction_axes[i], input_padding_y, input_padding_x, output_padding_y, output_padding_x);
// Formats the reduction axes as "[a, b, c]" for failure diagnostics.
std::string print_axes(std::vector<uint16_t> reduction_axes)
{
    std::stringstream res;
    res << "[";
    for (size_t i = 0; i < reduction_axes.size(); ++i)
    {
        if (i != 0)
            res << ", ";
        res << reduction_axes[i];
    }
    res << "]";
    return res.str();
}
153 template <typename T>
154 void generic_contract_test_float(cldnn::format test_input_fmt, int input_b, int input_f, int input_y, int input_x, cldnn::contract_mode mode,
155 std::vector<uint16_t> reduction_axes, int input_padding_y = 0, int input_padding_x = 0, int output_padding_y = 0, int output_padding_x = 0) {
157 int min_random = -2, max_random = 2;
158 VVVVF<T> input_rnd = generate_random_4d<T>(input_b, input_f, input_y, input_x, min_random, max_random);
159 VF<T> input_rnd_vec = flatten_4d<T>(test_input_fmt, input_rnd);
161 const auto& engine = get_test_engine();
162 tensor input_tensor(input_b, input_f, input_x, input_y);
163 auto input = memory::allocate(engine, { type_to_data_type<T>::value, test_input_fmt, input_tensor });
164 set_values(input, input_rnd_vec);
167 topology.add(input_layout("input", input.get_layout()));
168 topology.add(contract("output", "input", mode, reduction_axes));
170 network network(engine, topology);
171 network.set_input_data("input", input);
172 auto outputs = network.execute();
173 EXPECT_EQ(outputs.size(), size_t(1));
174 EXPECT_EQ(outputs.begin()->first, "output");
176 auto output_memory = outputs.at("output").get_memory();
177 auto output_layout = output_memory.get_layout();
178 auto output_ptr = output_memory.pointer<T>();
180 VVVVF<T> output_cpu = reduce_input<T>(input_rnd, mode, reduction_axes, input_padding_y, input_padding_x, output_padding_y, output_padding_x);
181 EXPECT_EQ(output_layout.format.value, test_input_fmt.value);
182 tensor output_tensor = output_layout.get_buffer_size();
183 int y_size = output_tensor.spatial[1];
184 int x_size = output_tensor.spatial[0];
185 int f_size = output_tensor.feature[0];
186 int b_size = output_tensor.batch[0];
187 EXPECT_EQ(y_size, (int)output_cpu[0][0].size());
188 EXPECT_EQ(x_size, (int)output_cpu[0][0][0].size());
189 EXPECT_EQ(f_size, (int)output_cpu[0].size());
190 EXPECT_EQ(b_size, (int)output_cpu.size());
193 bool test_is_correct = true;
194 VF<T> output_cpu_vec = flatten_4d<T>(test_input_fmt, output_cpu);
195 for (size_t i = 0; i < output_cpu_vec.size(); ++i) {
196 if (!floating_point_equal(output_cpu_vec[i], output_ptr[i]) && !(std::isnan((float)output_cpu_vec[i]) && std::isnan((float)output_ptr[i]))) {
197 test_is_correct = false;
201 EXPECT_EQ(test_is_correct, true) << std::endl
202 << "failing test parameters:" << std::endl
203 << "input_b = " << input_b << std::endl
204 << "input_f = " << input_f << std::endl
205 << "input_y = " << input_y << std::endl
206 << "input_x = " << input_x << std::endl
207 << "contract_mode = " << (int)mode << std::endl
208 << "axes = " << print_axes(reduction_axes) << std::endl
209 << "input_padding_y = " << input_padding_y << std::endl
210 << "input_padding_x = " << input_padding_x << std::endl
211 << "output_padding_y = " << output_padding_y << std::endl
212 << "output_padding_x = " << output_padding_x << std::endl;
215 template <typename T>
216 void generic_contract_test_int(cldnn::format test_input_fmt, int input_b, int input_f, int input_y, int input_x, cldnn::contract_mode mode,
217 std::vector<uint16_t> reduction_axes, int input_padding_y = 0, int input_padding_x = 0, int output_padding_y = 0, int output_padding_x = 0) {
219 int min_random = -2, max_random = 2;
220 VVVVF<T> input_rnd = generate_random_4d<T>(input_b, input_f, input_y, input_x, min_random, max_random);
221 VF<T> input_rnd_vec = flatten_4d<T>(test_input_fmt, input_rnd);
223 const auto& engine = get_test_engine();
224 tensor input_tensor(input_b, input_f, input_x, input_y);
225 auto input = memory::allocate(engine, { type_to_data_type<T>::value, test_input_fmt, input_tensor });
226 set_values(input, input_rnd_vec);
229 topology.add(input_layout("input", input.get_layout()));
230 topology.add(contract("output", "input", mode, reduction_axes));
232 network network(engine, topology);
233 network.set_input_data("input", input);
234 auto outputs = network.execute();
235 EXPECT_EQ(outputs.size(), size_t(1));
236 EXPECT_EQ(outputs.begin()->first, "output");
238 auto output_memory = outputs.at("output").get_memory();
239 auto output_layout = output_memory.get_layout();
240 auto output_ptr = output_memory.pointer<T>();
242 VVVVF<T> output_cpu = reduce_input<T>(input_rnd, mode, reduction_axes, input_padding_y, input_padding_x, output_padding_y, output_padding_x);
243 EXPECT_EQ(output_layout.format.value, test_input_fmt.value);
244 tensor output_tensor = output_layout.get_buffer_size();
245 int y_size = output_tensor.spatial[1];
246 int x_size = output_tensor.spatial[0];
247 int f_size = output_tensor.feature[0];
248 int b_size = output_tensor.batch[0];
249 EXPECT_EQ(y_size, (int)output_cpu[0][0].size());
250 EXPECT_EQ(x_size, (int)output_cpu[0][0][0].size());
251 EXPECT_EQ(f_size, (int)output_cpu[0].size());
252 EXPECT_EQ(b_size, (int)output_cpu.size());
255 bool test_is_correct = true;
256 VF<T> output_cpu_vec = flatten_4d<T>(test_input_fmt, output_cpu);
258 for (size_t i = 0; i < output_cpu_vec.size(); ++i) {
259 if (output_cpu_vec[i] != output_ptr[i]) {
260 test_is_correct = false;
264 EXPECT_EQ(test_is_correct, true) << std::endl
265 << "failing test parameters:" << std::endl
266 << "input_b = " << input_b << std::endl
267 << "input_f = " << input_f << std::endl
268 << "input_y = " << input_y << std::endl
269 << "input_x = " << input_x << std::endl
270 << "contract_mode = " << (int)mode << std::endl
271 << "axes = " << print_axes(reduction_axes) << std::endl
272 << "input_padding_y = " << input_padding_y << std::endl
273 << "input_padding_x = " << input_padding_x << std::endl
274 << "output_padding_y = " << output_padding_y << std::endl
275 << "output_padding_x = " << output_padding_x << std::endl;
278 TEST(contract_gpu_f32, generic_y_sum) {
279 generic_contract_test_float<float>(format::bfyx, 5, 5, 5, 5, contract_mode::sum, { 2 });
282 TEST(contract_gpu_f32, generic_fx_prod) {
283 generic_contract_test_float<float>(format::bfyx, 5, 5, 5, 5, contract_mode::sum, { 1, 3 });
286 TEST(contract_gpu_i32, generic_f_all) {
287 generic_contract_test_int<int32_t>(format::bfyx, 5, 5, 5, 5, contract_mode::all, { 1 });
290 TEST(contract_gpu_i32, generic_bfyx_any) {
291 generic_contract_test_int<int32_t>(format::bfyx, 5, 5, 5, 5, contract_mode::any, { 0, 1, 2, 3 });
294 TEST(contract_gpu_f32, generic_f_max) {
295 generic_contract_test_float<float>(format::bfyx, 5, 5, 5, 5, contract_mode::max, { 1 });
298 TEST(contract_gpu_i32, generic_f_max) {
299 generic_contract_test_int<int32_t>(format::bfyx, 5, 5, 5, 5, contract_mode::max, { 1 });
302 TEST(contract_error, basic_error_empty_r_axes) {
304 const auto& engine = get_test_engine();
305 auto input = memory::allocate(engine, { data_types::f32, format::bfyx,{ 1, 1, 1, 1 } });
308 topology.add(input_layout("input", input.get_layout()));
309 topology.add(contract("output", "input", contract_mode::sum, { }));
311 std::string msg_to_find = "Incorrect parameters configuration: reduction_axes should not be empty.";
312 EXPECT_ANY_THROW(check_exception_massage(engine, topology, msg_to_find));
315 TEST(contract_error, basic_error_wrong_r_axes_size) {
317 const auto& engine = get_test_engine();
318 auto input = memory::allocate(engine, { data_types::f32, format::bfyx, { 1, 1, 1, 1 } });
321 topology.add(input_layout("input", input.get_layout()));
322 topology.add(contract("output", "input", contract_mode::sum, { 0, 1, 2, 3, 4 }));
324 std::string msg_to_find = "Incorrect parameters configuration: reduction_axes size should be less or equal 4.";
325 EXPECT_ANY_THROW(check_exception_massage(engine, topology, msg_to_find));
328 TEST(contract_error, basic_error_wrong_r_axis_value) {
330 const auto& engine = get_test_engine();
331 auto input = memory::allocate(engine, { data_types::f32, format::bfyx, { 1, 1, 1, 1 } });
334 topology.add(input_layout("input", input.get_layout()));
335 topology.add(contract("output", "input", contract_mode::sum, { 0, 4 }));
337 std::string msg_to_find = "Incorrect parameters configuration: reduction_axes index should be within reduction_axes range.";
338 EXPECT_ANY_THROW(check_exception_massage(engine, topology, msg_to_find));
341 TEST(contract_error, basic_error_duplicate_r_axis_values) {
343 const auto& engine = get_test_engine();
344 auto input = memory::allocate(engine, { data_types::f32, format::bfyx, { 1, 1, 1, 1 } });
347 topology.add(input_layout("input", input.get_layout()));
348 topology.add(contract("output", "input", contract_mode::sum, { 0, 1, 1 }));
350 std::string msg_to_find = "Incorrect parameters configuration: Duplicate axes numbers was found in reduction_axes.";
351 EXPECT_ANY_THROW(check_exception_massage(engine, topology, msg_to_find));