//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
17 #include "gtest/gtest.h"
18 #include "ngraph/ngraph.hpp"
19 #include "ngraph/runtime/tensor.hpp"
20 #include "runtime/backend.hpp"
21 #include "util/all_close.hpp"
22 #include "util/all_close_f.hpp"
23 #include "util/known_element_types.hpp"
24 #include "util/ndarray.hpp"
25 #include "util/test_control.hpp"
26 #include "util/test_tools.hpp"
28 NGRAPH_SUPPRESS_DEPRECATED_START
31 using namespace ngraph;
33 static string s_manifest = "${MANIFEST}";
35 // Trivial case with no reduced axes.
36 NGRAPH_TEST(${BACKEND_NAME}, max_trivial)
39 auto A = make_shared<op::Parameter>(element::f32, shape);
40 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{}), ParameterVector{A});
42 auto backend = runtime::Backend::create("${BACKEND_NAME}");
44 // Create some tensors for input/output
45 auto a = backend->create_tensor(element::f32, shape);
46 copy_data(a, vector<float>{1, 2, 3, 4});
47 auto result = backend->create_tensor(element::f32, shape);
49 auto handle = backend->compile(f);
50 handle->call_with_validate({result}, {a});
51 EXPECT_TRUE(test::all_close_f(
52 (vector<float>{1, 2, 3, 4}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
55 NGRAPH_TEST(${BACKEND_NAME}, max_trivial_int8)
58 auto A = make_shared<op::Parameter>(element::i8, shape);
59 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{}), ParameterVector{A});
61 auto backend = runtime::Backend::create("${BACKEND_NAME}");
63 // Create some tensors for input/output
64 auto a = backend->create_tensor(element::i8, shape);
65 copy_data(a, vector<int8_t>{1, 2, 3, 4});
66 auto result = backend->create_tensor(element::i8, shape);
68 auto handle = backend->compile(f);
69 handle->call_with_validate({result}, {a});
70 EXPECT_EQ((vector<int8_t>{1, 2, 3, 4}), read_vector<int8_t>(result));
73 // Failure has been reported at 5D for some reason
74 NGRAPH_TEST(${BACKEND_NAME}, max_trivial_5d)
76 Shape shape{2, 2, 2, 2, 2};
77 auto A = make_shared<op::Parameter>(element::f32, shape);
78 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{}), ParameterVector{A});
80 auto backend = runtime::Backend::create("${BACKEND_NAME}");
82 // Create some tensors for input/output
83 auto a = backend->create_tensor(element::f32, shape);
84 copy_data(a, vector<float>{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
85 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1});
86 auto result = backend->create_tensor(element::f32, shape);
88 auto handle = backend->compile(f);
89 handle->call_with_validate({result}, {a});
90 EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
91 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}),
92 read_vector<float>(result),
93 MIN_FLOAT_TOLERANCE_BITS));
96 NGRAPH_TEST(${BACKEND_NAME}, max_trivial_5d_int32)
98 Shape shape{2, 2, 2, 2, 2};
99 auto A = make_shared<op::Parameter>(element::i32, shape);
100 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{}), ParameterVector{A});
102 auto backend = runtime::Backend::create("${BACKEND_NAME}");
104 // Create some tensors for input/output
105 auto a = backend->create_tensor(element::i32, shape);
106 copy_data(a, vector<int32_t>{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1});
108 auto result = backend->create_tensor(element::i32, shape);
110 auto handle = backend->compile(f);
111 handle->call_with_validate({result}, {a});
112 EXPECT_EQ((vector<int32_t>{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}),
114 read_vector<int32_t>(result));
117 NGRAPH_TEST(${BACKEND_NAME}, max_to_scalar)
120 auto A = make_shared<op::Parameter>(element::f32, shape);
121 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0, 1}), ParameterVector{A});
123 auto backend = runtime::Backend::create("${BACKEND_NAME}");
125 // Create some tensors for input/output
126 auto a = backend->create_tensor(element::f32, shape);
127 copy_data(a, vector<float>{1, 2, 3, 4});
128 auto result = backend->create_tensor(element::f32, Shape{});
130 auto handle = backend->compile(f);
131 handle->call_with_validate({result}, {a});
132 EXPECT_TRUE(test::all_close_f((vector<float>{4}), read_vector<float>(result)));
134 // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
135 // input tensors, so let's do this too.
136 EXPECT_TRUE(test::all_close_f(
137 (vector<float>{1, 2, 3, 4}), read_vector<float>(a), MIN_FLOAT_TOLERANCE_BITS));
140 NGRAPH_TEST(${BACKEND_NAME}, max_to_scalar_int8)
143 auto A = make_shared<op::Parameter>(element::i8, shape);
144 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0, 1}), ParameterVector{A});
146 auto backend = runtime::Backend::create("${BACKEND_NAME}");
148 // Create some tensors for input/output
149 auto a = backend->create_tensor(element::i8, shape);
150 copy_data(a, vector<int8_t>{1, 2, 3, 4});
151 auto result = backend->create_tensor(element::i8, Shape{});
153 auto handle = backend->compile(f);
154 handle->call_with_validate({result}, {a});
155 EXPECT_EQ((vector<int8_t>{4}), read_vector<int8_t>(result));
158 NGRAPH_TEST(${BACKEND_NAME}, max_matrix_columns)
161 auto A = make_shared<op::Parameter>(element::f32, shape_a);
163 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0}), ParameterVector{A});
165 auto backend = runtime::Backend::create("${BACKEND_NAME}");
167 // Create some tensors for input/output
168 auto a = backend->create_tensor(element::f32, shape_a);
169 copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
170 auto result = backend->create_tensor(element::f32, shape_rt);
172 auto handle = backend->compile(f);
173 handle->call_with_validate({result}, {a});
174 EXPECT_TRUE(test::all_close_f((vector<float>{5, 6}), read_vector<float>(result)));
176 // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
177 // input tensors, so let's do this too.
178 EXPECT_TRUE(test::all_close_f(
179 (vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(a), MIN_FLOAT_TOLERANCE_BITS));
182 NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows)
185 auto A = make_shared<op::Parameter>(element::f32, shape_a);
187 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{1}), ParameterVector{A});
189 auto backend = runtime::Backend::create("${BACKEND_NAME}");
191 // Create some tensors for input/output
192 auto a = backend->create_tensor(element::f32, shape_a);
193 copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
194 auto result = backend->create_tensor(element::f32, shape_rt);
196 auto handle = backend->compile(f);
197 handle->call_with_validate({result}, {a});
198 EXPECT_TRUE(test::all_close_f((vector<float>{2, 4, 6}), read_vector<float>(result)));
200 // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
201 // input tensors, so let's do this too.
202 EXPECT_TRUE(test::all_close_f(
203 (vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(a), MIN_FLOAT_TOLERANCE_BITS));
206 NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows_int32)
209 auto A = make_shared<op::Parameter>(element::i32, shape_a);
211 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{1}), ParameterVector{A});
213 auto backend = runtime::Backend::create("${BACKEND_NAME}");
215 // Create some tensors for input/output
216 auto a = backend->create_tensor(element::i32, shape_a);
217 copy_data(a, vector<int32_t>{1, 2, 3, 4, 5, 6});
218 auto result = backend->create_tensor(element::i32, shape_rt);
220 auto handle = backend->compile(f);
221 handle->call_with_validate({result}, {a});
222 EXPECT_EQ((vector<int32_t>{2, 4, 6}), read_vector<int32_t>(result));
224 // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
225 // input tensors, so let's do this too.
226 EXPECT_EQ((vector<int32_t>{1, 2, 3, 4, 5, 6}), read_vector<int32_t>(a));
229 NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows_zero)
232 auto A = make_shared<op::Parameter>(element::f32, shape_a);
234 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{1}), ParameterVector{A});
236 auto backend = runtime::Backend::create("${BACKEND_NAME}");
238 // Create some tensors for input/output
239 auto a = backend->create_tensor(element::f32, shape_a);
240 copy_data(a, vector<float>{});
241 auto result = backend->create_tensor(element::f32, shape_rt);
242 copy_data(result, vector<float>({3, 3, 3}));
244 auto handle = backend->compile(f);
245 handle->call_with_validate({result}, {a});
246 EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity(),
247 -std::numeric_limits<float>::infinity(),
248 -std::numeric_limits<float>::infinity()}),
249 read_vector<float>(result));
251 // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
252 // input tensors, so let's do this too.
254 test::all_close_f((vector<float>{}), read_vector<float>(a), MIN_FLOAT_TOLERANCE_BITS));
257 NGRAPH_TEST(${BACKEND_NAME}, max_matrix_rows_zero_int32)
260 auto A = make_shared<op::Parameter>(element::i32, shape_a);
262 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{1}), ParameterVector{A});
264 auto backend = runtime::Backend::create("${BACKEND_NAME}");
266 // Create some tensors for input/output
267 auto a = backend->create_tensor(element::i32, shape_a);
268 copy_data(a, vector<int32_t>{});
269 auto result = backend->create_tensor(element::i32, shape_rt);
270 copy_data(result, vector<int32_t>({3, 3, 3}));
272 int32_t minval = std::numeric_limits<int32_t>::has_infinity
273 ? -std::numeric_limits<int32_t>::infinity()
274 : std::numeric_limits<int32_t>::min();
276 auto handle = backend->compile(f);
277 handle->call_with_validate({result}, {a});
278 EXPECT_EQ((vector<int32_t>{minval, minval, minval}), read_vector<int32_t>(result));
279 EXPECT_EQ((vector<int32_t>{}), read_vector<int32_t>(a));
282 NGRAPH_TEST(${BACKEND_NAME}, max_matrix_cols_zero)
284 // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
286 auto A = make_shared<op::Parameter>(element::f32, shape_a);
288 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0}), ParameterVector{A});
290 auto backend = runtime::Backend::create("${BACKEND_NAME}");
292 // Create some tensors for input/output
293 auto a = backend->create_tensor(element::f32, shape_a);
294 copy_data(a, vector<float>{});
295 auto result = backend->create_tensor(element::f32, shape_rt);
296 copy_data(result, vector<float>({3, 3}));
298 auto handle = backend->compile(f);
299 handle->call_with_validate({result}, {a});
300 EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity(),
301 -std::numeric_limits<float>::infinity()}),
302 read_vector<float>(result));
304 // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
305 // input tensors, so let's do this too.
307 test::all_close_f((vector<float>{}), read_vector<float>(a), MIN_FLOAT_TOLERANCE_BITS));
310 NGRAPH_TEST(${BACKEND_NAME}, max_vector_zero)
313 auto A = make_shared<op::Parameter>(element::f32, shape_a);
315 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0}), ParameterVector{A});
317 auto backend = runtime::Backend::create("${BACKEND_NAME}");
319 // Create some tensors for input/output
320 auto a = backend->create_tensor(element::f32, shape_a);
321 copy_data(a, vector<float>{});
322 auto result = backend->create_tensor(element::f32, shape_rt);
323 copy_data(result, vector<float>({3}));
325 auto handle = backend->compile(f);
326 handle->call_with_validate({result}, {a});
327 EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity()}), read_vector<float>(result));
329 // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
330 // input tensors, so let's do this too.
332 test::all_close_f((vector<float>{}), read_vector<float>(a), MIN_FLOAT_TOLERANCE_BITS));
335 NGRAPH_TEST(${BACKEND_NAME}, max_matrix_to_scalar_zero_by_zero)
338 auto A = make_shared<op::Parameter>(element::f32, shape_a);
340 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0, 1}), ParameterVector{A});
342 auto backend = runtime::Backend::create("${BACKEND_NAME}");
344 // Create some tensors for input/output
345 auto a = backend->create_tensor(element::f32, shape_a);
346 copy_data(a, vector<float>{});
347 auto result = backend->create_tensor(element::f32, shape_rt);
348 copy_data(result, vector<float>({3}));
350 auto handle = backend->compile(f);
351 handle->call_with_validate({result}, {a});
352 EXPECT_EQ((vector<float>{-std::numeric_limits<float>::infinity()}), read_vector<float>(result));
354 // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
355 // input tensors, so let's do this too.
357 test::all_close_f((vector<float>{}), read_vector<float>(a), MIN_FLOAT_TOLERANCE_BITS));
360 NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_matrix_most_sig)
362 Shape shape_a{3, 3, 3};
363 auto A = make_shared<op::Parameter>(element::f32, shape_a);
364 Shape shape_rt{3, 3};
365 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0}), ParameterVector{A});
367 auto backend = runtime::Backend::create("${BACKEND_NAME}");
369 // Create some tensors for input/output
370 auto a = backend->create_tensor(element::f32, shape_a);
371 copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
372 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
373 auto result = backend->create_tensor(element::f32, shape_rt);
375 auto handle = backend->compile(f);
376 handle->call_with_validate({result}, {a});
377 EXPECT_TRUE(test::all_close_f((vector<float>{19, 20, 21, 22, 23, 24, 25, 26, 27}),
378 read_vector<float>(result),
379 MIN_FLOAT_TOLERANCE_BITS));
382 NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_matrix_least_sig)
384 Shape shape_a{3, 3, 3};
385 auto A = make_shared<op::Parameter>(element::f32, shape_a);
386 Shape shape_rt{3, 3};
387 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{2}), ParameterVector{A});
389 auto backend = runtime::Backend::create("${BACKEND_NAME}");
391 // Create some tensors for input/output
392 auto a = backend->create_tensor(element::f32, shape_a);
393 copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
394 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
395 auto result = backend->create_tensor(element::f32, shape_rt);
397 auto handle = backend->compile(f);
398 handle->call_with_validate({result}, {a});
399 EXPECT_TRUE(test::all_close_f((vector<float>{3, 6, 9, 12, 15, 18, 21, 24, 27}),
400 read_vector<float>(result),
401 MIN_FLOAT_TOLERANCE_BITS));
404 NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_vector)
406 Shape shape_a{3, 3, 3};
407 auto A = make_shared<op::Parameter>(element::f32, shape_a);
409 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0, 1}), ParameterVector{A});
411 auto backend = runtime::Backend::create("${BACKEND_NAME}");
413 // Create some tensors for input/output
414 auto a = backend->create_tensor(element::f32, shape_a);
415 copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
416 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
417 auto result = backend->create_tensor(element::f32, shape_rt);
419 auto handle = backend->compile(f);
420 handle->call_with_validate({result}, {a});
421 EXPECT_TRUE(test::all_close_f((vector<float>{25.0f, 26.0f, 27.0f}),
422 read_vector<float>(result),
423 MIN_FLOAT_TOLERANCE_BITS));
426 NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_scalar)
428 Shape shape_a{3, 3, 3};
429 auto A = make_shared<op::Parameter>(element::f32, shape_a);
431 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0, 1, 2}), ParameterVector{A});
433 auto backend = runtime::Backend::create("${BACKEND_NAME}");
435 // Create some tensors for input/output
436 auto a = backend->create_tensor(element::f32, shape_a);
437 copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
438 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
439 auto result = backend->create_tensor(element::f32, shape_rt);
441 auto handle = backend->compile(f);
442 handle->call_with_validate({result}, {a});
443 EXPECT_TRUE(test::all_close_f(
444 (vector<float>{14.0f}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
447 NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_scalar_int32)
449 Shape shape_a{3, 3, 3};
450 auto A = make_shared<op::Parameter>(element::i32, shape_a);
452 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0, 1, 2}), ParameterVector{A});
454 auto backend = runtime::Backend::create("${BACKEND_NAME}");
456 // Create some tensors for input/output
457 auto a = backend->create_tensor(element::i32, shape_a);
458 copy_data(a, vector<int32_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
459 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
460 auto result = backend->create_tensor(element::i32, shape_rt);
462 auto handle = backend->compile(f);
463 handle->call_with_validate({result}, {a});
464 EXPECT_EQ((vector<int32_t>{14}), read_vector<int32_t>(result));
467 NGRAPH_TEST(${BACKEND_NAME}, max_3d_to_scalar_double)
469 Shape shape_a{3, 3, 3};
470 auto A = make_shared<op::Parameter>(element::f64, shape_a);
472 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0, 1, 2}), ParameterVector{A});
474 auto backend = runtime::Backend::create("${BACKEND_NAME}");
476 // Create some tensors for input/output
477 auto a = backend->create_tensor(element::f64, shape_a);
478 copy_data(a, vector<double>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
479 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
480 auto result = backend->create_tensor(element::f64, shape_rt);
482 auto handle = backend->compile(f);
483 handle->call_with_validate({result}, {a});
484 EXPECT_TRUE(test::all_close_f((vector<double>{14}), read_vector<double>(result)));
487 NGRAPH_TEST(${BACKEND_NAME}, max_3d_eliminate_zero_dim)
489 Shape shape_a{3, 0, 2};
490 auto A = make_shared<op::Parameter>(element::f32, shape_a);
491 Shape shape_rt{3, 2};
492 auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{1}), ParameterVector{A});
494 auto backend = runtime::Backend::create("${BACKEND_NAME}");
496 // Create some tensors for input/output
497 auto a = backend->create_tensor(element::f32, shape_a);
498 copy_data(a, vector<float>{});
499 auto result = backend->create_tensor(element::f32, shape_rt);
501 // Overwrite the initial result vector to make sure we're not just coincidentally getting the
503 copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
505 float mi = -std::numeric_limits<float>::infinity();
507 auto handle = backend->compile(f);
508 handle->call_with_validate({result}, {a});
509 EXPECT_EQ((vector<float>{mi, mi, mi, mi, mi, mi}), read_vector<float>(result));