//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "runtime/backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";
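// NOTE: ${BACKEND_NAME} and ${MANIFEST} are placeholders; the test infrastructure substitutes
// the concrete backend name and its unit-test manifest when these tests are generated for each
// backend.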
//
// from numpy import *
// x = linspace(1,2*3*3*4,2*3*3*4)
// y = linspace(1,3*4*2*3*2,3*4*2*3*2)
// x.shape=(2,3,3,4)
// y.shape=(3,4,2,3,2)
// z = tensordot(x,y,([2,3],[0,1]))
// z.shape = 2*3*2*3*2
// z
//
// array([  6942.,   7020.,   7098.,   7176.,   7254.,   7332.,   7410.,
//          7488.,   7566.,   7644.,   7722.,   7800.,  16590.,  16812.,
//         17034.,  17256.,  17478.,  17700.,  17922.,  18144.,  18366.,
//         18588.,  18810.,  19032.,  26238.,  26604.,  26970.,  27336.,
//         27702.,  28068.,  28434.,  28800.,  29166.,  29532.,  29898.,
//         30264.,  35886.,  36396.,  36906.,  37416.,  37926.,  38436.,
//         38946.,  39456.,  39966.,  40476.,  40986.,  41496.,  45534.,
//         46188.,  46842.,  47496.,  48150.,  48804.,  49458.,  50112.,
//         50766.,  51420.,  52074.,  52728.,  55182.,  55980.,  56778.,
//         57576.,  58374.,  59172.,  59970.,  60768.,  61566.,  62364.,
//         63162.,  63960.])
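//
// op::Dot(A, B, n) contracts the last n axes of A with the first n axes of B, matching the
// numpy.tensordot calls in these comments (here n=2 corresponds to axes ([2,3],[0,1])).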
NGRAPH_TEST(${BACKEND_NAME}, dot_4d_5d_multi_axis)
{
    vector<float> a_data(2 * 3 * 3 * 4);
    for (int i = 0; i < 2 * 3 * 3 * 4; i++)
    {
        a_data[i] = float(i + 1);
    }

    vector<float> b_data(3 * 4 * 2 * 2 * 3);
    for (int i = 0; i < 3 * 4 * 2 * 2 * 3; i++)
    {
        b_data[i] = float(i + 1);
    }

    Shape shape_a{2, 3, 3, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{3, 4, 2, 3, 2};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{2, 3, 2, 3, 2};

    auto r = make_shared<op::Dot>(A, B, 2);
    auto f = make_shared<Function>(r, ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, a_data);
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, b_data);
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{6942.,  7020.,  7098.,  7176.,  7254.,  7332.,  7410.,  7488.,  7566.,
                       7644.,  7722.,  7800.,  16590., 16812., 17034., 17256., 17478., 17700.,
                       17922., 18144., 18366., 18588., 18810., 19032., 26238., 26604., 26970.,
                       27336., 27702., 28068., 28434., 28800., 29166., 29532., 29898., 30264.,
                       35886., 36396., 36906., 37416., 37926., 38436., 38946., 39456., 39966.,
                       40476., 40986., 41496., 45534., 46188., 46842., 47496., 48150., 48804.,
                       49458., 50112., 50766., 51420., 52074., 52728., 55182., 55980., 56778.,
                       57576., 58374., 59172., 59970., 60768., 61566., 62364., 63162., 63960.}),
        read_vector<float>(result)));
}
//
// from numpy import *
// x = linspace(1,2*3*3*4,2*3*3*4)
// y = linspace(1,2*3*3*4*2,2*3*3*4*2)
// x.shape=(2,3,3,4)
// y.shape=(2,3,3,4,2)
// z = tensordot(x,y,([0,1,2,3],[0,1,2,3]))
// z
//
// array([ 251412.,  254040.])
NGRAPH_TEST(${BACKEND_NAME}, dot_4d_5d_multi_axis_more)
{
    vector<float> a_data(2 * 3 * 3 * 4);
    for (int i = 0; i < 2 * 3 * 3 * 4; i++)
    {
        a_data[i] = float(i + 1);
    }

    vector<float> b_data(2 * 3 * 3 * 4 * 2);
    for (int i = 0; i < 2 * 3 * 3 * 4 * 2; i++)
    {
        b_data[i] = float(i + 1);
    }

    Shape shape_a{2, 3, 3, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{2, 3, 3, 4, 2};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{2};

    auto r = make_shared<op::Dot>(A, B, 4);
    auto f = make_shared<Function>(r, ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, a_data);
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, b_data);
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{251412., 254040.}), read_vector<float>(result)));
}
//
// from numpy import *
// x = linspace(1,20*30*30*40,20*30*30*40)
// y = linspace(1,20*30*30*40*20,20*30*30*40*20)
// x.shape=(20,30,30,40)
// y.shape=(20,30,30,40,20)
// z = tensordot(x,y,([0,1,2,3],[0,1,2,3]))
// set_printoptions(precision=20)
// z
//
// array([ 2.48832025919525478400e+18,  2.48832051839533977600e+18,
//         2.48832077759658444800e+18,  2.48832103679413504000e+18,
//         2.48832129599669350400e+18,  2.48832155519793971200e+18,
//         2.48832181439802265600e+18,  2.48832207359808000000e+18,
//         2.48832233279813580800e+18,  2.48832259199822028800e+18,
//         2.48832285119946496000e+18,  2.48832311040043008000e+18,
//         2.48832336959957401600e+18,  2.48832362880081817600e+18,
//         2.48832388800090368000e+18,  2.48832414720096000000e+18,
//         2.48832440640101478400e+18,  2.48832466560109772800e+18,
//         2.48832492480234188800e+18,  2.48832518400031897600e+18])
// Disabled because this test is very slow.
//
NGRAPH_TEST(DISABLED_${BACKEND_NAME}, dot_4d_5d_multi_axis_big_fp64_VERY_SLOW)
{
    vector<double> a_data(20 * 30 * 30 * 40);
    for (int i = 0; i < 20 * 30 * 30 * 40; i++)
    {
        a_data[i] = double(i + 1);
    }

    vector<double> b_data(20 * 30 * 30 * 40 * 20);
    for (int i = 0; i < 20 * 30 * 30 * 40 * 20; i++)
    {
        b_data[i] = double(i + 1);
    }

    Shape shape_a{20, 30, 30, 40};
    auto A = make_shared<op::Parameter>(element::f64, shape_a);
    Shape shape_b{20, 30, 30, 40, 20};
    auto B = make_shared<op::Parameter>(element::f64, shape_b);
    Shape shape_r{20};

    auto r = make_shared<op::Dot>(A, B, 4);
    auto f = make_shared<Function>(r, ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f64, shape_a);
    copy_data(a, a_data);
    auto b = backend->create_tensor(element::f64, shape_b);
    copy_data(b, b_data);
    auto result = backend->create_tensor(element::f64, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f(
        (vector<double>{
            2.48832025919525478400e+18, 2.48832051839533977600e+18, 2.48832077759658444800e+18,
            2.48832103679413504000e+18, 2.48832129599669350400e+18, 2.48832155519793971200e+18,
            2.48832181439802265600e+18, 2.48832207359808000000e+18, 2.48832233279813580800e+18,
            2.48832259199822028800e+18, 2.48832285119946496000e+18, 2.48832311040043008000e+18,
            2.48832336959957401600e+18, 2.48832362880081817600e+18, 2.48832388800090368000e+18,
            2.48832414720096000000e+18, 2.48832440640101478400e+18, 2.48832466560109772800e+18,
            2.48832492480234188800e+18, 2.48832518400031897600e+18}),
        read_vector<double>(result)));
}
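// The tests below cover dot products involving zero-sized axes: contracting over an empty axis
// produces 0 (an empty sum), and any output with a zero-sized axis is itself empty.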
NGRAPH_TEST(${BACKEND_NAME}, dot_0_0)
{
    Shape shape{0};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    Shape shape_r{};
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{});
    auto result = backend->create_tensor(element::f32, shape_r);

    // Overwrite the initial result vector to make sure we're not just coincidentally getting the
    // right value.
    copy_data(result, vector<float>{2112});

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_2x0_0x2)
{
    Shape shape_a{2, 0};
    Shape shape_b{0, 2};
    Shape shape_r{2, 2};

    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{});
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, vector<float>{});
    auto result = backend->create_tensor(element::f32, shape_r);

    // Overwrite the initial result vector to make sure we're not just coincidentally getting the
    // right value.
    copy_data(result, vector<float>{2112, 2112, 2112, 2112});

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{0, 0, 0, 0}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_0x2_2x0)
{
    Shape shape_a{0, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{2, 0};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{0, 0};
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{});
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, vector<float>{});
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_3x2_2x0)
{
    Shape shape_a{3, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{2, 0};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{3, 0};
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, vector<float>{});
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_scalar_0x2)
{
    Shape shape_a{};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{0, 2};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{0, 2};
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1});
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, vector<float>{});
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_2x0_0)
{
    Shape shape_a{2, 0};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{0};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{2};
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{});
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, vector<float>{});
    auto result = backend->create_tensor(element::f32, shape_r);

    // Overwrite the initial result vector to make sure we're not just coincidentally getting the
    // right value.
    copy_data(result, vector<float>{2112, 2112});

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{0, 0}), read_vector<float>(result)));
}
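// 1-D dot product: 2*1 + 4*2 + 8*4 + 16*8 = 170.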
NGRAPH_TEST(${BACKEND_NAME}, dot1d)
{
    Shape shape{4};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    Shape shape_r{};
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{2, 4, 8, 16});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{1, 2, 4, 8});
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{170}), read_vector<float>(result)));
}
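// 2-D matrix product: [[1,2],[3,4]] x [[5,6],[7,8]] = [[19,22],[43,50]].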
NGRAPH_TEST(${BACKEND_NAME}, dot2d)
{
    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    Shape shape_r{2, 2};
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{5, 6, 7, 8});
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{19, 22, 43, 50}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dot2d_non_square)
{
    Shape shape_in1{2, 3};
    Shape shape_in2{3, 3};
    Shape shape_out{2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape_in1);
    auto B = make_shared<op::Parameter>(element::f32, shape_in2);
    auto dot = make_shared<op::Dot>(A, B);
    auto f = make_shared<Function>(dot, ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape_in1);
    shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape_in2);
    shared_ptr<runtime::Tensor> result = backend->create_tensor(element::f32, shape_out);

    copy_data(a, vector<float>{1.f, 2.f, 3.f, 4.f, 5.f, 6.f});
    copy_data(b, vector<float>{1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f});

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f(read_vector<float>(result),
                                  vector<float>{30.f, 36.f, 42.f, 66.f, 81.f, 96.f}));
}
//
// Here is what numpy does:
//
// >>> a = linspace(1,2*2*2,2*2*2)
// >>> b = linspace(1,2*2*2,2*2*2)
//
// >>> a.shape=(2,2,2)
// >>> b.shape=(2,2,2)
//
// >>> tensordot(a,b,axes=([2],[0]))
// array([[[[ 11.,  14.],
//          [ 17.,  20.]],
//
//         [[ 23.,  30.],
//          [ 37.,  44.]]],
//
//
//        [[[ 35.,  46.],
//          [ 57.,  68.]],
//
//         [[ 47.,  62.],
//          [ 77.,  92.]]]])
//
NGRAPH_TEST(${BACKEND_NAME}, dot3d_3d)
{
    Shape shape{2, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    Shape shape_r{2, 2, 2, 2};
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{11, 14, 17, 20, 23, 30, 37, 44, 35, 46, 57, 68, 47, 62, 77, 92}),
        read_vector<float>(result)));
}
//
// Here is what numpy does:
//
// >>> from numpy import *
// >>> a = linspace(0,4*2*3-1,4*2*3)
// >>> b = linspace(0,3*4-1,3*4)
//
// >>> a.shape=(4,2,3)
// >>> b.shape=(3,4)
//
// >>> tensordot(a,b,axes=([2],[0]))
// array([[[  20.,   23.,   26.,   29.],
//         [  56.,   68.,   80.,   92.]],
//
//        [[  92.,  113.,  134.,  155.],
//         [ 128.,  158.,  188.,  218.]],
//
//        [[ 164.,  203.,  242.,  281.],
//         [ 200.,  248.,  296.,  344.]],
//
//        [[ 236.,  293.,  350.,  407.],
//         [ 272.,  338.,  404.,  470.]]])
//
NGRAPH_TEST(${BACKEND_NAME}, dot3d_2d)
{
    Shape shape_a{4, 2, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{3, 4};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{4, 2, 4};
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
                               12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, vector<float>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(
        test::all_close_f((vector<float>{20,  23,  26,  29,  56,  68,  80,  92,  92,  113, 134,
                                         155, 128, 158, 188, 218, 164, 203, 242, 281, 200, 248,
                                         296, 344, 236, 293, 350, 407, 272, 338, 404, 470}),
                          read_vector<float>(result)));
}
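// With a scalar argument, op::Dot contracts no axes, so the result is the tensor argument
// scaled elementwise by the scalar (here 6 * {1,...,8}).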
NGRAPH_TEST(${BACKEND_NAME}, dot_scalar_tensor_arg0)
{
    Shape shape_a{};
    Shape shape_b{2, 2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{6});
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
    auto result = backend->create_tensor(element::f32, shape_b);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}),
                                  read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_scalar_tensor_arg1)
{
    Shape shape_a{2, 2, 2};
    Shape shape_b{};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, vector<float>{6});
    auto result = backend->create_tensor(element::f32, shape_a);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{6, 12, 18, 24, 30, 36, 42, 48}),
                                  read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_scalar_scalar)
{
    Shape shape{};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{8});
    auto b = backend->create_tensor(element::f32, shape);
    copy_data(b, vector<float>{6});
    auto result = backend->create_tensor(element::f32, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{48}), read_vector<float>(result)));
}
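// Matrix-vector products: each output element is one matrix row dotted with the vector,
// e.g. [1,2,3] . [17,18,19] = 110 for the first row of the test below.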
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_vector_4_3)
{
    Shape shape_a{4, 3};
    Shape shape_b{3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});
    Shape shape_r{4};

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, vector<float>{17, 18, 19});
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f((vector<float>{110, 272, 434, 596}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_vector)
{
    Shape shape_a{4, 4};
    Shape shape_b{4};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});
    Shape shape_r{4};

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, vector<float>{17, 18, 19, 20});
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(
        test::all_close_f((vector<float>{190, 486, 782, 1078}), read_vector<float>(result)));
}
NGRAPH_TEST(${BACKEND_NAME}, dot_matrix_vector_int64)
{
    Shape shape_a{4, 4};
    Shape shape_b{4};
    auto A = make_shared<op::Parameter>(element::i64, shape_a);
    auto B = make_shared<op::Parameter>(element::i64, shape_b);
    auto f = make_shared<Function>(make_shared<op::Dot>(A, B), ParameterVector{A, B});
    Shape shape_r{4};

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i64, shape_a);
    copy_data(a, vector<int64_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
    auto b = backend->create_tensor(element::i64, shape_b);
    copy_data(b, vector<int64_t>{17, 18, 19, 20});
    auto result = backend->create_tensor(element::i64, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_EQ((vector<int64_t>{190, 486, 782, 1078}), read_vector<int64_t>(result));
}
//
// > from numpy import *
// > x = linspace(1,2*3*4,2*3*4)
// > y = linspace(1,3*4*5,3*4*5)
// > x.shape=(2,3,4)
// > y.shape=(3,4,5)
// > z = tensordot(x,y,([1,2],[0,1]))
// > z
//
// array([ 2938.,  3016.,  3094.,  3172.,  3250.,  7042.,  7264.,  7486.,
//         7708.,  7930.])
//
NGRAPH_TEST(${BACKEND_NAME}, dot_3d_multi_axis)
{
    vector<float> a_data(2 * 3 * 4);
    for (int i = 0; i < 2 * 3 * 4; i++)
    {
        a_data[i] = float(i + 1);
    }

    vector<float> b_data(3 * 4 * 5);
    for (int i = 0; i < 3 * 4 * 5; i++)
    {
        b_data[i] = float(i + 1);
    }

    Shape shape_a{2, 3, 4};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{3, 4, 5};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{2, 5};

    auto r = make_shared<op::Dot>(A, B, 2);
    auto f = make_shared<Function>(r, ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, a_data);
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, b_data);
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{2938., 3016., 3094., 3172., 3250., 7042., 7264., 7486., 7708., 7930.}),
        read_vector<float>(result)));
}
//
// > from numpy import *
// > x = array([6,61,2,3,5,21,75,23,23,0,23,2,35,67,1,2,9,16,2,3,6,1,8,0])
// > y = array([9,1,4,6,3,5,1,36,7,3,5,0,1,20,35,2,1,0,1,25,3,6,7,8])
// > x.shape=(2,4,3)
// > y.shape=(3,4,2)
// > z = tensordot(x,y,([2],[0]))
// > z.shape = 2*4*4*2
// > z
//
// array([ 483,  189,  331,   86,   85, 1262, 2155,  354,   83,   18,   58,
//         543,   77,  241,  325,  286,  859,  144,  438, 1025,  317,  973,
//        1041, 2930,  163,   69,  117,   50,   29,  472,  819,   62,  785,
//         236,  476,  235,  175, 1521, 2387, 1402,   97,   29,   69,  412,
//          63,  286,  429,  218,   45,   11,   29,  162,   27,  106,  149,
//         126,   65,   25,   44,    6,   11,  165,  281,   52])
//
NGRAPH_TEST(${BACKEND_NAME}, dot_3d_one_axis_arbitrary)
{
    vector<float> a_data{6,  61, 2, 3, 5, 21, 75, 23, 23, 0, 23, 2,
                         35, 67, 1, 2, 9, 16, 2,  3,  6,  1, 8,  0};
    vector<float> b_data{9, 1,  4,  6, 3, 5, 1, 36, 7, 3, 5, 0,
                         1, 20, 35, 2, 1, 0, 1, 25, 3, 6, 7, 8};

    Shape shape_a{2, 4, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape_a);
    Shape shape_b{3, 4, 2};
    auto B = make_shared<op::Parameter>(element::f32, shape_b);
    Shape shape_r{2, 4, 4, 2};

    auto r = make_shared<op::Dot>(A, B);
    auto f = make_shared<Function>(r, ParameterVector{A, B});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape_a);
    copy_data(a, a_data);
    auto b = backend->create_tensor(element::f32, shape_b);
    copy_data(b, b_data);
    auto result = backend->create_tensor(element::f32, shape_r);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a, b});
    EXPECT_TRUE(test::all_close_f(
        (vector<float>{483,  189, 331, 86,  85,  1262, 2155, 354,  83,   18,  58,  543,  77,
                       241,  325, 286, 859, 144, 438,  1025, 317,  973,  1041, 2930, 163, 69,
                       117,  50,  29,  472, 819, 62,   785,  236,  476,  235,  175, 1521, 2387,
                       1402, 97,  29,  69,  412, 63,   286,  429,  218,  45,   11,  29,  162,
                       27,   106, 149, 126, 65,  25,   44,   6,    11,   165,  281, 52}),
        read_vector<float>(result)));
}