// SPDX-License-Identifier: Apache-2.0
/**
 * Copyright (C) 2020 Jijoong Moon <jijoong.moon@samsung.com>
 *
 * @file unittest_nntrainer_tensor.cpp
 * @brief Unit test utility for tensor.
 * @see https://github.com/nnstreamer/nntrainer
 * @author Jijoong Moon <jijoong.moon@samsung.com>
 */
12 #include <gtest/gtest.h>
14 #include "nntrainer_test_util.h"
15 #include "util_func.h"
17 #include <nntrainer_error.h>
19 #include <tensor_dim.h>
22 TEST(nntrainer_Tensor, Tensor_01_fp16_p) {
23 int status = ML_ERROR_NONE;
24 nntrainer::Tensor tensor = nntrainer::Tensor(
25 1, 2, 3, nntrainer::Tformat::NCHW, nntrainer::DataType::FP16);
27 ASSERT_NE(nullptr, tensor.getData<__fp16>());
28 if (tensor.getValue(0, 0, 0, 0) != 0.0)
29 status = ML_ERROR_INVALID_PARAMETER;
30 EXPECT_EQ(status, ML_ERROR_NONE);
33 TEST(nntrainer_Tensor, Tensor_01_nhwc_fp16_p) {
34 int status = ML_ERROR_NONE;
35 nntrainer::Tensor tensor = nntrainer::Tensor(
36 1, 2, 3, nntrainer::Tformat::NHWC, nntrainer::DataType::FP16);
38 ASSERT_NE(nullptr, tensor.getData<__fp16>());
39 if (tensor.getValue<__fp16>(0, 0, 0, 0) != 0.0)
40 status = ML_ERROR_INVALID_PARAMETER;
41 EXPECT_EQ(status, ML_ERROR_NONE);
44 TEST(nntrainer_Tensor, Tensor_02_fp16_p) {
45 int status = ML_ERROR_NONE;
48 std::vector<std::vector<__fp16>> in;
49 for (int i = 0; i < height; ++i) {
50 std::vector<__fp16> tv;
51 for (int j = 0; j < width; ++j) {
52 tv.push_back(i * 2.0 + j);
57 nntrainer::Tensor tensor = nntrainer::Tensor(in);
58 ASSERT_NE(nullptr, tensor.getData<__fp16>());
60 if (tensor.getValue<__fp16>(0, 0, 0, 1) != 1.0)
61 status = ML_ERROR_INVALID_PARAMETER;
62 EXPECT_EQ(status, ML_ERROR_NONE);
65 TEST(nntrainer_Tensor, Tensor_03_fp16_p) {
66 int status = ML_ERROR_NONE;
70 std::vector<std::vector<std::vector<__fp16>>> in;
71 for (int k = 0; k < batch; ++k) {
72 std::vector<std::vector<__fp16>> ttv;
73 for (int i = 0; i < height; ++i) {
74 std::vector<__fp16> tv;
75 for (int j = 0; j < width; ++j) {
76 tv.push_back(k * height * width + i * width + j);
83 nntrainer::Tensor tensor = nntrainer::Tensor(in);
84 ASSERT_NE(nullptr, tensor.getData<__fp16>());
86 if (tensor.getValue<__fp16>(0, 0, 0, 1) != 1.0)
87 status = ML_ERROR_INVALID_PARAMETER;
88 EXPECT_EQ(status, ML_ERROR_NONE);
91 TEST(nntrainer_Tensor, multiply_i_01_fp16_p) {
92 int status = ML_ERROR_NONE;
98 nntrainer::Tensor input(batch, channel, height, width,
99 nntrainer::Tformat::NCHW, nntrainer::DataType::FP16);
100 GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
102 nntrainer::Tensor original;
103 original.copy(input);
105 status = input.multiply_i(2.0);
106 EXPECT_EQ(status, ML_ERROR_NONE);
108 __fp16 *data = original.getData<__fp16>();
109 ASSERT_NE(nullptr, data);
110 __fp16 *indata = input.getData<__fp16>();
111 ASSERT_NE(nullptr, indata);
113 for (int i = 0; i < batch * height * width * channel; ++i) {
114 EXPECT_FLOAT_EQ(data[i] + data[i], indata[i]);
118 TEST(nntrainer_Tensor, multiply_i_02_fp16_p) {
119 int status = ML_ERROR_NONE;
125 nntrainer::Tensor input(batch, channel, height, width,
126 nntrainer::Tformat::NCHW, nntrainer::DataType::FP16);
127 GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
129 nntrainer::Tensor original;
130 original.copy(input);
132 status = input.multiply_i(input);
133 EXPECT_EQ(status, ML_ERROR_NONE);
135 __fp16 *data = original.getData<__fp16>();
136 ASSERT_NE(nullptr, data);
137 __fp16 *indata = input.getData<__fp16>();
138 ASSERT_NE(nullptr, indata);
140 for (int i = 0; i < batch * height * width * channel; ++i) {
141 EXPECT_FLOAT_EQ(data[i] * data[i], indata[i]);
145 TEST(nntrainer_Tensor, multiply_i_03_fp16_n) {
146 int status = ML_ERROR_NONE;
152 nntrainer::Tensor input(batch, channel, height, width,
153 nntrainer::Tformat::NCHW, nntrainer::DataType::FP16);
154 GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
156 nntrainer::Tensor target2(batch, channel, height - 2, width - 1,
157 nntrainer::Tformat::NCHW,
158 nntrainer::DataType::FP16);
159 status = input.multiply_i(target2);
161 EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
164 // TEST(nntrainer_Tensor, multiply_i_broadcast_01_fp16_p) {
166 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
167 // nntrainer::Tensor t =
168 // ranged(3, 2, 4, 5, nntrainer::Tformat::NCHW, nntrainer::DataType::FP16);
169 // nntrainer::Tensor m =
170 // ranged(1, 2, 4, 5, nntrainer::Tformat::NCHW, nntrainer::DataType::FP16);
171 // __fp16 answer_data[] = {
172 // 0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121,
173 // 144, 169, 196, 225, 256, 289, 324, 361, 400, 441, 484, 529,
174 // 576, 625, 676, 729, 784, 841, 900, 961, 1024, 1089, 1156, 1225,
175 // 1296, 1369, 1444, 1521, 0, 41, 84, 129, 176, 225, 276, 329,
176 // 384, 441, 500, 561, 624, 689, 756, 825, 896, 969, 1044, 1121,
177 // 1200, 1281, 1364, 1449, 1536, 1625, 1716, 1809, 1904, 2001, 2100, 2201,
178 // 2304, 2409, 2516, 2625, 2736, 2849, 2964, 3081, 0, 81, 164, 249,
179 // 336, 425, 516, 609, 704, 801, 900, 1001, 1104, 1209, 1316, 1425,
180 // 1536, 1649, 1764, 1881, 2000, 2121, 2244, 2369, 2496, 2625, 2756, 2889,
181 // 3024, 3161, 3300, 3441, 3584, 3729, 3876, 4025, 4176, 4329, 4484, 4641};
182 // nntrainer::Tensor answer(ref_dim, answer_data, nntrainer::DataType::FP16);
183 // int status = t.multiply_i(m);
184 // EXPECT_EQ(status, ML_ERROR_NONE);
185 // EXPECT_EQ(t, answer);
188 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
189 // nntrainer::Tensor t = ranged(3, 2, 4, 5, nntrainer::Tformat::NCHW, nntrainer::DataType::FP16);
190 // nntrainer::Tensor m = ranged(3, 1, 4, 5, nntrainer::Tformat::NCHW, nntrainer::DataType::FP16);
191 // __fp16 answer_data[] = {
192 // 0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121,
193 // 144, 169, 196, 225, 256, 289, 324, 361, 0, 21, 44, 69,
194 // 96, 125, 156, 189, 224, 261, 300, 341, 384, 429, 476, 525,
195 // 576, 629, 684, 741, 800, 861, 924, 989, 1056, 1125, 1196, 1269,
196 // 1344, 1421, 1500, 1581, 1664, 1749, 1836, 1925, 2016, 2109, 2204, 2301,
197 // 1200, 1281, 1364, 1449, 1536, 1625, 1716, 1809, 1904, 2001, 2100, 2201,
198 // 2304, 2409, 2516, 2625, 2736, 2849, 2964, 3081, 3200, 3321, 3444, 3569,
199 // 3696, 3825, 3956, 4089, 4224, 4361, 4500, 4641, 4784, 4929, 5076, 5225,
200 // 5376, 5529, 5684, 5841, 4000, 4141, 4284, 4429, 4576, 4725, 4876, 5029,
201 // 5184, 5341, 5500, 5661, 5824, 5989, 6156, 6325, 6496, 6669, 6844, 7021};
202 // nntrainer::Tensor answer(ref_dim, answer_data, nntrainer::DataType::FP16);
203 // int status = t.multiply_i(m);
204 // EXPECT_EQ(status, ML_ERROR_NONE);
205 // EXPECT_EQ(t, answer);
208 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
209 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
210 // nntrainer::Tensor m = ranged(3, 2, 4, 1);
211 // __fp16 answer_data[] = {
212 // 0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 20, 22,
213 // 24, 26, 28, 45, 48, 51, 54, 57, 80, 84, 88, 92,
214 // 96, 125, 130, 135, 140, 145, 180, 186, 192, 198, 204, 245,
215 // 252, 259, 266, 273, 320, 328, 336, 344, 352, 405, 414, 423,
216 // 432, 441, 500, 510, 520, 530, 540, 605, 616, 627, 638, 649,
217 // 720, 732, 744, 756, 768, 845, 858, 871, 884, 897, 980, 994,
218 // 1008, 1022, 1036, 1125, 1140, 1155, 1170, 1185, 1280, 1296, 1312, 1328,
219 // 1344, 1445, 1462, 1479, 1496, 1513, 1620, 1638, 1656, 1674, 1692, 1805,
220 // 1824, 1843, 1862, 1881, 2000, 2020, 2040, 2060, 2080, 2205, 2226, 2247,
221 // 2268, 2289, 2420, 2442, 2464, 2486, 2508, 2645, 2668, 2691, 2714, 2737};
222 // nntrainer::Tensor answer(ref_dim, answer_data);
223 // int status = t.multiply_i(m);
224 // EXPECT_EQ(status, ML_ERROR_NONE);
225 // EXPECT_EQ(t, answer);
228 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
229 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
230 // nntrainer::Tensor m = ranged(3, 1, 1, 5);
231 // __fp16 answer_data[] = {
232 // 0, 1, 4, 9, 16, 0, 6, 14, 24, 36, 0, 11,
233 // 24, 39, 56, 0, 16, 34, 54, 76, 0, 21, 44, 69,
234 // 96, 0, 26, 54, 84, 116, 0, 31, 64, 99, 136, 0,
235 // 36, 74, 114, 156, 200, 246, 294, 344, 396, 225, 276, 329,
236 // 384, 441, 250, 306, 364, 424, 486, 275, 336, 399, 464, 531,
237 // 300, 366, 434, 504, 576, 325, 396, 469, 544, 621, 350, 426,
238 // 504, 584, 666, 375, 456, 539, 624, 711, 800, 891, 984, 1079,
239 // 1176, 850, 946, 1044, 1144, 1246, 900, 1001, 1104, 1209, 1316, 950,
240 // 1056, 1164, 1274, 1386, 1000, 1111, 1224, 1339, 1456, 1050, 1166, 1284,
241 // 1404, 1526, 1100, 1221, 1344, 1469, 1596, 1150, 1276, 1404, 1534, 1666};
242 // nntrainer::Tensor answer(ref_dim, answer_data);
243 // int status = t.multiply_i(m);
244 // EXPECT_EQ(status, ML_ERROR_NONE);
245 // EXPECT_EQ(t, answer);
248 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
249 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
250 // nntrainer::Tensor m = ranged(1, 2, 1, 5);
251 // __fp16 answer_data[] = {
252 // 0, 1, 4, 9, 16, 0, 6, 14, 24, 36, 0, 11, 24, 39,
253 // 56, 0, 16, 34, 54, 76, 100, 126, 154, 184, 216, 125, 156, 189,
254 // 224, 261, 150, 186, 224, 264, 306, 175, 216, 259, 304, 351, 0, 41,
255 // 84, 129, 176, 0, 46, 94, 144, 196, 0, 51, 104, 159, 216, 0,
256 // 56, 114, 174, 236, 300, 366, 434, 504, 576, 325, 396, 469, 544, 621,
257 // 350, 426, 504, 584, 666, 375, 456, 539, 624, 711, 0, 81, 164, 249,
258 // 336, 0, 86, 174, 264, 356, 0, 91, 184, 279, 376, 0, 96, 194,
259 // 294, 396, 500, 606, 714, 824, 936, 525, 636, 749, 864, 981, 550, 666,
260 // 784, 904, 1026, 575, 696, 819, 944, 1071};
261 // nntrainer::Tensor answer(ref_dim, answer_data);
262 // int status = t.multiply_i(m);
263 // EXPECT_EQ(status, ML_ERROR_NONE);
264 // EXPECT_EQ(t, answer);
267 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
268 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
269 // nntrainer::Tensor m = ranged(3, 1, 4, 1);
270 // __fp16 answer_data[] = {
271 // 0, 0, 0, 0, 0, 5, 6, 7, 8, 9, 20, 22,
272 // 24, 26, 28, 45, 48, 51, 54, 57, 0, 0, 0, 0,
273 // 0, 25, 26, 27, 28, 29, 60, 62, 64, 66, 68, 105,
274 // 108, 111, 114, 117, 160, 164, 168, 172, 176, 225, 230, 235,
275 // 240, 245, 300, 306, 312, 318, 324, 385, 392, 399, 406, 413,
276 // 240, 244, 248, 252, 256, 325, 330, 335, 340, 345, 420, 426,
277 // 432, 438, 444, 525, 532, 539, 546, 553, 640, 648, 656, 664,
278 // 672, 765, 774, 783, 792, 801, 900, 910, 920, 930, 940, 1045,
279 // 1056, 1067, 1078, 1089, 800, 808, 816, 824, 832, 945, 954, 963,
280 // 972, 981, 1100, 1110, 1120, 1130, 1140, 1265, 1276, 1287, 1298, 1309};
281 // nntrainer::Tensor answer(ref_dim, answer_data);
282 // int status = t.multiply_i(m);
283 // EXPECT_EQ(status, ML_ERROR_NONE);
284 // EXPECT_EQ(t, answer);
287 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
288 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
289 // nntrainer::Tensor m = ranged(1, 1, 1, 5);
290 // __fp16 answer_data[] = {
291 // 0, 1, 4, 9, 16, 0, 6, 14, 24, 36, 0, 11, 24, 39, 56,
292 // 0, 16, 34, 54, 76, 0, 21, 44, 69, 96, 0, 26, 54, 84, 116,
293 // 0, 31, 64, 99, 136, 0, 36, 74, 114, 156, 0, 41, 84, 129, 176,
294 // 0, 46, 94, 144, 196, 0, 51, 104, 159, 216, 0, 56, 114, 174, 236,
295 // 0, 61, 124, 189, 256, 0, 66, 134, 204, 276, 0, 71, 144, 219, 296,
296 // 0, 76, 154, 234, 316, 0, 81, 164, 249, 336, 0, 86, 174, 264, 356,
297 // 0, 91, 184, 279, 376, 0, 96, 194, 294, 396, 0, 101, 204, 309, 416,
298 // 0, 106, 214, 324, 436, 0, 111, 224, 339, 456, 0, 116, 234, 354, 476};
299 // nntrainer::Tensor answer(ref_dim, answer_data);
300 // int status = t.multiply_i(m);
301 // EXPECT_EQ(status, ML_ERROR_NONE);
302 // EXPECT_EQ(t, answer);
305 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
306 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
307 // nntrainer::Tensor m = ranged(1, 2, 1, 1);
308 // __fp16 answer_data[] = {
309 // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
310 // 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27,
311 // 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 0, 0,
312 // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
313 // 0, 0, 0, 0, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
314 // 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 0, 0, 0, 0,
315 // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
316 // 0, 0, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
317 // 112, 113, 114, 115, 116, 117, 118, 119};
318 // nntrainer::Tensor answer(ref_dim, answer_data);
319 // int status = t.multiply_i(m);
320 // EXPECT_EQ(status, ML_ERROR_NONE);
321 // EXPECT_EQ(t, answer);
324 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
325 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
326 // nntrainer::Tensor m = ranged(3, 1, 1, 1);
327 // __fp16 answer_data[] = {
328 // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
329 // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
330 // 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 41,
331 // 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
332 // 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
333 // 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 160, 162, 164, 166,
334 // 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194,
335 // 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222,
336 // 224, 226, 228, 230, 232, 234, 236, 238};
337 // nntrainer::Tensor answer(ref_dim, answer_data);
338 // int status = t.multiply_i(m);
339 // EXPECT_EQ(status, ML_ERROR_NONE);
340 // EXPECT_EQ(t, answer);
343 // nntrainer::TensorDim ref_dim(3, 5, 1, 4);
344 // nntrainer::Tensor t = ranged(3, 5, 1, 4);
345 // nntrainer::Tensor m = ranged(3, 1, 1, 4);
346 // __fp16 answer_data[] = {0, 1, 4, 9, 0, 5, 12, 21, 0, 9,
347 // 20, 33, 0, 13, 28, 45, 0, 17, 36, 57,
348 // 80, 105, 132, 161, 96, 125, 156, 189, 112, 145,
349 // 180, 217, 128, 165, 204, 245, 144, 185, 228, 273,
350 // 320, 369, 420, 473, 352, 405, 460, 517, 384, 441,
351 // 500, 561, 416, 477, 540, 605, 448, 513, 580, 649};
352 // nntrainer::Tensor answer(ref_dim, answer_data);
353 // int status = t.multiply_i(m);
354 // EXPECT_EQ(status, ML_ERROR_NONE);
355 // EXPECT_EQ(t, answer);
359 // TEST(nntrainer_Tensor, multiply_i_broadcast_not_supported_01_n) {
360 // nntrainer::Tensor target(3, 1, 3, 1);
361 // nntrainer::Tensor target2(3, 1, 3, 3);
363 // EXPECT_EQ(target.multiply_i(target2), ML_ERROR_INVALID_PARAMETER);
366 // TEST(nntrainer_Tensor, multiply_i_broadcast_not_broadcastable_02_n) {
367 // nntrainer::Tensor target(3, 2, 4, 5);
368 // nntrainer::Tensor target2(3, 2, 3, 1);
370 // EXPECT_EQ(target.multiply_i(target2), ML_ERROR_INVALID_PARAMETER);
373 // TEST(nntrainer_Tensor, multiply_01_p) {
374 // int status = ML_ERROR_NONE;
380 // nntrainer::Tensor input(batch, channel, height, width);
381 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
383 // nntrainer::Tensor result = input.multiply(0.0);
384 // if (result.getValue(0, 0, 1, 1) != 0.0)
385 // status = ML_ERROR_RESULT_OUT_OF_RANGE;
386 // EXPECT_EQ(status, ML_ERROR_NONE);
389 // TEST(nntrainer_Tensor, multiply_02_p) {
390 // int status = ML_ERROR_NONE;
396 // nntrainer::Tensor input(batch, channel, height, width);
397 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
399 // nntrainer::Tensor result = input.multiply(input);
401 // __fp16 *data = result.getData();
402 // ASSERT_NE(nullptr, data);
403 // __fp16 *indata = input.getData();
404 // ASSERT_NE(nullptr, indata);
406 // for (int i = 0; i < batch * height * width; ++i) {
407 // if (data[i] != indata[i] * indata[i]) {
408 // status = ML_ERROR_RESULT_OUT_OF_RANGE;
413 // EXPECT_EQ(status, ML_ERROR_NONE);
416 // TEST(nntrainer_Tensor, multiply_03_n) {
422 // nntrainer::Tensor input(batch, channel, height, width);
423 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
425 // nntrainer::Tensor test(batch - 1, height - 1, width - 1);
427 // EXPECT_THROW({ input.multiply(test); }, std::invalid_argument);
430 // TEST(nntrainer_Tensor, multiply_04_n) {
436 // nntrainer::TensorDim dim(batch, channel, height, width);
438 // nntrainer::Tensor input(batch, channel, height, 2 * width);
439 // nntrainer::Tensor shared_input = input.getSharedDataTensor(dim, 0, false);
440 // nntrainer::Tensor test(dim);
442 // EXPECT_THROW(shared_input.multiply(test), std::invalid_argument);
445 // TEST(nntrainer_Tensor, multiply_05_n) {
451 // nntrainer::TensorDim dim(batch, channel, height, width);
453 // nntrainer::Tensor input(dim);
454 // nntrainer::Tensor test(batch, channel, height, 2 * width);
455 // nntrainer::Tensor shared_test = test.getSharedDataTensor(dim, 0, false);
457 // EXPECT_THROW(input.multiply(shared_test), std::invalid_argument);
460 // TEST(nntrainer_Tensor, multiply_06_n) {
466 // nntrainer::TensorDim dim(batch, channel, height, width);
468 // nntrainer::Tensor input(dim, false);
469 // nntrainer::Tensor test(dim);
470 // GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 1);
472 // EXPECT_THROW(input.multiply(test), std::invalid_argument);
475 // TEST(nntrainer_Tensor, multiply_07_n) {
481 // nntrainer::TensorDim dim(batch, channel, height, width);
483 // nntrainer::Tensor input(dim);
484 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
485 // nntrainer::Tensor test(dim, false);
487 // EXPECT_THROW(input.multiply(test), std::invalid_argument);
490 // TEST(nntrainer_Tensor, multiply_08_n) {
496 // nntrainer::TensorDim dim(batch, channel, height, width);
498 // nntrainer::Tensor input(dim);
499 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
500 // nntrainer::Tensor test(dim);
501 // GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 2);
502 // nntrainer::Tensor output(dim, false);
504 // EXPECT_THROW(input.multiply(test, output), std::invalid_argument);
507 // TEST(nntrainer_Tensor, multiply___fp16_01_p) {
513 // nntrainer::Tensor input(batch, channel, height, width);
514 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
516 // nntrainer::Tensor expected(batch, channel, height, width);
517 // GEN_TEST_INPUT(expected, (i * (batch * height) + j * (width) + k + 1) * 2);
519 // nntrainer::Tensor result = input.multiply(2.0);
521 // EXPECT_EQ(result, expected);
524 // TEST(nntrainer_Tensor, divide_i_01_p) {
525 // int status = ML_ERROR_NONE;
531 // nntrainer::Tensor input(batch, channel, height, width);
532 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
534 // nntrainer::Tensor original;
535 // original.copy(input);
537 // status = input.divide_i((__fp16)2.0);
538 // EXPECT_EQ(status, ML_ERROR_NONE);
540 // __fp16 *data = original.getData();
541 // ASSERT_NE(nullptr, data);
542 // __fp16 *indata = input.getData();
543 // ASSERT_NE(nullptr, indata);
545 // for (int i = 0; i < batch * height * width * channel; ++i) {
546 // EXPECT_FLOAT_EQ(data[i], indata[i] + indata[i]);
550 // TEST(nntrainer_Tensor, divide_i_02_p) {
551 // int status = ML_ERROR_NONE;
557 // nntrainer::Tensor input(batch, channel, height, width);
558 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
560 // status = input.divide_i(input);
561 // EXPECT_EQ(status, ML_ERROR_NONE);
562 // __fp16 *indata = input.getData();
563 // ASSERT_NE(nullptr, indata);
565 // for (int i = 0; i < batch * height * width * channel; ++i) {
566 // EXPECT_FLOAT_EQ(indata[i], __fp16(1.0));
570 // TEST(nntrainer_Tensor, divide_i_01_n) {
571 // int status = ML_ERROR_NONE;
577 // nntrainer::Tensor input(batch, channel, height, width);
578 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
580 // status = input.divide_i((__fp16)0);
581 // EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
584 // TEST(nntrainer_Tensor, divide_i_02_n) {
585 // int status = ML_ERROR_NONE;
591 // nntrainer::Tensor input(batch, channel, height, width);
592 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
594 // nntrainer::Tensor original(batch, channel, height - 2, width - 1);
596 // status = input.divide_i(original);
597 // EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
600 // TEST(nntrainer_Tensor, divide_01_p) {
606 // nntrainer::Tensor input(batch, channel, height, width);
607 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
609 // nntrainer::Tensor result = input.divide(1.0);
611 // __fp16 *previous = input.getData();
612 // ASSERT_NE(nullptr, previous);
613 // __fp16 *data = result.getData();
614 // ASSERT_NE(nullptr, data);
616 // for (int i = 0; i < batch * height * width * channel; ++i) {
617 // EXPECT_FLOAT_EQ(data[i], previous[i]);
621 // TEST(nntrainer_Tensor, divide_02_n) {
627 // nntrainer::Tensor input(batch, channel, height, width);
628 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
630 // EXPECT_THROW({ input.divide(0.0); }, std::invalid_argument);
633 // TEST(nntrainer_Tensor, divide_03_n) {
639 // nntrainer::Tensor input(batch, channel, height, width);
640 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
642 // nntrainer::Tensor test(batch - 1, channel, height - 1, width - 1);
644 // EXPECT_THROW({ input.divide(test); }, std::invalid_argument);
647 // TEST(nntrainer_Tensor, divide_04_n) {
653 // nntrainer::TensorDim dim(batch, channel, height, width);
655 // nntrainer::Tensor input(batch, channel, height, 2 * width);
656 // nntrainer::Tensor shared_input = input.getSharedDataTensor(dim, 0, false);
657 // nntrainer::Tensor test(dim);
659 // EXPECT_THROW(shared_input.divide(test), std::invalid_argument);
662 // TEST(nntrainer_Tensor, divide_05_n) {
668 // nntrainer::TensorDim dim(batch, channel, height, width);
670 // nntrainer::Tensor input(dim);
671 // nntrainer::Tensor test(batch, channel, height, 2 * width);
672 // nntrainer::Tensor shared_test = test.getSharedDataTensor(dim, 0, false);
674 // EXPECT_THROW(input.divide(shared_test), std::invalid_argument);
677 // TEST(nntrainer_Tensor, divide_06_n) {
683 // nntrainer::TensorDim dim(batch, channel, height, width);
685 // nntrainer::Tensor input(dim, false);
686 // nntrainer::Tensor test(dim);
687 // GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 1);
689 // EXPECT_THROW(input.divide(test), std::invalid_argument);
692 // TEST(nntrainer_Tensor, divide_07_n) {
698 // nntrainer::TensorDim dim(batch, channel, height, width);
700 // nntrainer::Tensor input(dim);
701 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
702 // nntrainer::Tensor test(dim, false);
704 // EXPECT_THROW(input.divide(test), std::invalid_argument);
707 // TEST(nntrainer_Tensor, divide_08_n) {
713 // nntrainer::TensorDim dim(batch, channel, height, width);
715 // nntrainer::Tensor input(dim);
716 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
717 // nntrainer::Tensor test(dim);
718 // GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 2);
719 // nntrainer::Tensor output(dim, false);
721 // EXPECT_THROW(input.divide(test, output), std::invalid_argument);
724 // TEST(nntrainer_Tensor, divide_i_broadcast_01_p) {
726 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
727 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
729 // nntrainer::Tensor m = ranged(1, 2, 4, 5);
731 // __fp16 answer_data[] = {
732 // 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
733 // 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
734 // 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
735 // 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
736 // 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
737 // 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
738 // 1.0, 1.0, 1.0, 1.0, 41.0, 21.0,
739 // 14.333333, 11.0, 9.0, 7.6666665, 6.714286, 6.0,
740 // 5.4444447, 5.0, 4.6363635, 4.3333335, 4.076923, 3.857143,
741 // 3.6666667, 3.5, 3.3529413, 3.2222223, 3.1052632, 3.0,
742 // 2.9047618, 2.8181818, 2.7391305, 2.6666667, 2.6, 2.5384614,
743 // 2.4814816, 2.4285715, 2.3793104, 2.3333333, 2.2903225, 2.25,
744 // 2.2121212, 2.1764705, 2.142857, 2.1111112, 2.0810812, 2.0526316,
745 // 2.025641, 2.0, 81.0, 41.0, 27.666666, 21.0,
746 // 17.0, 14.333333, 12.428572, 11.0, 9.888889, 9.0,
747 // 8.272727, 7.6666665, 7.1538463, 6.714286, 6.3333335, 6.0,
748 // 5.7058825, 5.4444447, 5.2105265, 5.0, 4.8095236, 4.6363635,
749 // 4.478261, 4.3333335, 4.2, 4.076923, 3.9629629, 3.857143,
750 // 3.7586207, 3.6666667, 3.580645, 3.5, 3.4242425, 3.3529413,
751 // 3.2857144, 3.2222223, 3.162162, 3.1052632, 3.0512822, 3.0};
752 // nntrainer::Tensor answer(ref_dim, answer_data);
753 // int status = t.divide_i(m);
754 // EXPECT_EQ(status, ML_ERROR_NONE);
755 // EXPECT_EQ(t, answer);
758 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
759 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
761 // nntrainer::Tensor m = ranged(3, 1, 4, 5);
763 // __fp16 answer_data[] = {
764 // 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
765 // 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
766 // 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
767 // 1.0, 1.0, 21.0, 11.0, 7.6666665, 6.0,
768 // 5.0, 4.3333335, 3.857143, 3.5, 3.2222223, 3.0,
769 // 2.8181818, 2.6666667, 2.5384614, 2.4285715, 2.3333333, 2.25,
770 // 2.1764705, 2.1111112, 2.0526316, 2.0, 1.9523809, 1.9090909,
771 // 1.8695652, 1.8333334, 1.8, 1.7692307, 1.7407408, 1.7142857,
772 // 1.6896552, 1.6666666, 1.6451613, 1.625, 1.6060606, 1.5882353,
773 // 1.5714285, 1.5555556, 1.5405406, 1.5263158, 1.5128205, 1.5,
774 // 2.9047618, 2.8181818, 2.7391305, 2.6666667, 2.6, 2.5384614,
775 // 2.4814816, 2.4285715, 2.3793104, 2.3333333, 2.2903225, 2.25,
776 // 2.2121212, 2.1764705, 2.142857, 2.1111112, 2.0810812, 2.0526316,
777 // 2.025641, 2.0, 1.9756098, 1.9523809, 1.9302325, 1.9090909,
778 // 1.8888888, 1.8695652, 1.8510638, 1.8333334, 1.8163265, 1.8,
779 // 1.7843137, 1.7692307, 1.754717, 1.7407408, 1.7272727, 1.7142857,
780 // 1.7017543, 1.6896552, 1.6779661, 1.6666666, 2.4634147, 2.4285715,
781 // 2.3953488, 2.3636363, 2.3333333, 2.3043478, 2.2765958, 2.25,
782 // 2.2244897, 2.2, 2.1764705, 2.1538463, 2.1320755, 2.1111112,
783 // 2.090909, 2.0714285, 2.0526316, 2.0344827, 2.0169492, 2.0};
784 // nntrainer::Tensor answer(ref_dim, answer_data);
785 // int status = t.divide_i(m);
786 // EXPECT_EQ(status, ML_ERROR_NONE);
787 // EXPECT_EQ(t, answer);
790 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
791 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
793 // nntrainer::Tensor m = ranged(3, 2, 4, 1);
795 // __fp16 answer_data[] = {
796 // 1.0, 2.0, 3.0, 4.0, 5.0, 3.0,
797 // 3.5, 4.0, 4.5, 5.0, 3.6666667, 4.0,
798 // 4.3333335, 4.6666665, 5.0, 4.0, 4.25, 4.5,
799 // 4.75, 5.0, 4.2, 4.4, 4.6, 4.8,
800 // 5.0, 4.3333335, 4.5, 4.6666665, 4.8333335, 5.0,
801 // 4.428571, 4.571429, 4.714286, 4.857143, 5.0, 4.5,
802 // 4.625, 4.75, 4.875, 5.0, 4.5555553, 4.6666665,
803 // 4.7777777, 4.888889, 5.0, 4.6, 4.7, 4.8,
804 // 4.9, 5.0, 4.6363635, 4.7272725, 4.818182, 4.909091,
805 // 5.0, 4.6666665, 4.75, 4.8333335, 4.9166665, 5.0,
806 // 4.6923075, 4.769231, 4.8461537, 4.923077, 5.0, 4.714286,
807 // 4.785714, 4.857143, 4.928571, 5.0, 4.733333, 4.8,
808 // 4.866667, 4.9333334, 5.0, 4.75, 4.8125, 4.875,
809 // 4.9375, 5.0, 4.7647057, 4.8235292, 4.882353, 4.9411764,
810 // 5.0, 4.7777777, 4.8333335, 4.888889, 4.9444447, 5.0,
811 // 4.7894735, 4.8421054, 4.894737, 4.9473686, 5.0, 4.8,
812 // 4.85, 4.9, 4.95, 5.0, 4.8095236, 4.857143,
813 // 4.904762, 4.952381, 5.0, 4.818182, 4.8636365, 4.909091,
814 // 4.9545455, 5.0, 4.826087, 4.869565, 4.9130435, 4.9565215,
815 // 5.0, 4.8333335, 4.875, 4.9166665, 4.9583335, 5.0};
816 // nntrainer::Tensor answer(ref_dim, answer_data);
817 // int status = t.divide_i(m);
818 // EXPECT_EQ(status, ML_ERROR_NONE);
819 // EXPECT_EQ(t, answer);
822 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
823 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
825 // nntrainer::Tensor m = ranged(3, 1, 1, 5);
827 // __fp16 answer_data[] = {
828 // 1.0, 1.0, 1.0, 1.0, 1.0, 6.0,
829 // 3.5, 2.6666667, 2.25, 2.0, 11.0, 6.0,
830 // 4.3333335, 3.5, 3.0, 16.0, 8.5, 6.0,
831 // 4.75, 4.0, 21.0, 11.0, 7.6666665, 6.0,
832 // 5.0, 26.0, 13.5, 9.333333, 7.25, 6.0,
833 // 31.0, 16.0, 11.0, 8.5, 7.0, 36.0,
834 // 18.5, 12.666667, 9.75, 8.0, 6.8333335, 6.0,
835 // 5.375, 4.888889, 4.5, 7.6666665, 6.714286, 6.0,
836 // 5.4444447, 5.0, 8.5, 7.428571, 6.625, 6.0,
837 // 5.5, 9.333333, 8.142858, 7.25, 6.5555553, 6.0,
838 // 10.166667, 8.857142, 7.875, 7.111111, 6.5, 11.0,
839 // 9.571428, 8.5, 7.6666665, 7.0, 11.833333, 10.285714,
840 // 9.125, 8.222222, 7.5, 12.666667, 11.0, 9.75,
841 // 8.777778, 8.0, 7.3636365, 6.8333335, 6.3846154, 6.0,
842 // 5.6666665, 7.818182, 7.25, 6.769231, 6.357143, 6.0,
843 // 8.272727, 7.6666665, 7.1538463, 6.714286, 6.3333335, 8.727273,
844 // 8.083333, 7.5384617, 7.071429, 6.6666665, 9.181818, 8.5,
845 // 7.923077, 7.428571, 7.0, 9.636364, 8.916667, 8.307693,
846 // 7.785714, 7.3333335, 10.090909, 9.333333, 8.692307, 8.142858,
847 // 7.6666665, 10.545455, 9.75, 9.076923, 8.5, 8.0};
848 // nntrainer::Tensor answer(ref_dim, answer_data);
849 // int status = t.divide_i(m);
850 // EXPECT_EQ(status, ML_ERROR_NONE);
851 // EXPECT_EQ(t, answer);
854 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
855 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
857 // nntrainer::Tensor m = ranged(1, 2, 1, 5);
859 // __fp16 answer_data[] = {
860 // 1.0, 1.0, 1.0, 1.0, 1.0, 6.0,
861 // 3.5, 2.6666667, 2.25, 2.0, 11.0, 6.0,
862 // 4.3333335, 3.5, 3.0, 16.0, 8.5, 6.0,
863 // 4.75, 4.0, 3.5, 3.142857, 2.875, 2.6666667,
864 // 2.5, 4.3333335, 3.857143, 3.5, 3.2222223, 3.0,
865 // 5.1666665, 4.571429, 4.125, 3.7777777, 3.5, 6.0,
866 // 5.285714, 4.75, 4.3333335, 4.0, 41.0, 21.0,
867 // 14.333333, 11.0, 9.0, 46.0, 23.5, 16.0,
868 // 12.25, 10.0, 51.0, 26.0, 17.666666, 13.5,
869 // 11.0, 56.0, 28.5, 19.333334, 14.75, 12.0,
870 // 10.166667, 8.857142, 7.875, 7.111111, 6.5, 11.0,
871 // 9.571428, 8.5, 7.6666665, 7.0, 11.833333, 10.285714,
872 // 9.125, 8.222222, 7.5, 12.666667, 11.0, 9.75,
873 // 8.777778, 8.0, 81.0, 41.0, 27.666666, 21.0,
874 // 17.0, 86.0, 43.5, 29.333334, 22.25, 18.0,
875 // 91.0, 46.0, 31.0, 23.5, 19.0, 96.0,
876 // 48.5, 32.666668, 24.75, 20.0, 16.833334, 14.571428,
877 // 12.875, 11.555555, 10.5, 17.666666, 15.285714, 13.5,
878 // 12.111111, 11.0, 18.5, 16.0, 14.125, 12.666667,
879 // 11.5, 19.333334, 16.714285, 14.75, 13.222222, 12.0};
880 // nntrainer::Tensor answer(ref_dim, answer_data);
881 // int status = t.divide_i(m);
882 // EXPECT_EQ(status, ML_ERROR_NONE);
883 // EXPECT_EQ(t, answer);
886 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
887 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
889 // nntrainer::Tensor m = ranged(3, 1, 4, 1);
891 // __fp16 answer_data[] = {
892 // 1.0, 2.0, 3.0, 4.0, 5.0, 3.0,
893 // 3.5, 4.0, 4.5, 5.0, 3.6666667, 4.0,
894 // 4.3333335, 4.6666665, 5.0, 4.0, 4.25, 4.5,
895 // 4.75, 5.0, 21.0, 22.0, 23.0, 24.0,
896 // 25.0, 13.0, 13.5, 14.0, 14.5, 15.0,
897 // 10.333333, 10.666667, 11.0, 11.333333, 11.666667, 9.0,
898 // 9.25, 9.5, 9.75, 10.0, 8.2, 8.4,
899 // 8.6, 8.8, 9.0, 7.6666665, 7.8333335, 8.0,
900 // 8.166667, 8.333333, 7.285714, 7.428571, 7.571429, 7.714286,
901 // 7.857143, 7.0, 7.125, 7.25, 7.375, 7.5,
902 // 12.2, 12.4, 12.6, 12.8, 13.0, 11.0,
903 // 11.166667, 11.333333, 11.5, 11.666667, 10.142858, 10.285714,
904 // 10.428572, 10.571428, 10.714286, 9.5, 9.625, 9.75,
905 // 9.875, 10.0, 9.0, 9.111111, 9.222222, 9.333333,
906 // 9.444445, 8.6, 8.7, 8.8, 8.9, 9.0,
907 // 8.272727, 8.363636, 8.454545, 8.545455, 8.636364, 8.0,
908 // 8.083333, 8.166667, 8.25, 8.333333, 11.222222, 11.333333,
909 // 11.444445, 11.555555, 11.666667, 10.6, 10.7, 10.8,
910 // 10.9, 11.0, 10.090909, 10.181818, 10.272727, 10.363636,
911 // 10.454545, 9.666667, 9.75, 9.833333, 9.916667, 10.0};
912 // nntrainer::Tensor answer(ref_dim, answer_data);
913 // int status = t.divide_i(m);
914 // EXPECT_EQ(status, ML_ERROR_NONE);
915 // EXPECT_EQ(t, answer);
918 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
919 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
921 // nntrainer::Tensor m = ranged(1, 1, 1, 5);
923 // __fp16 answer_data[] = {
924 // 1.0, 1.0, 1.0, 1.0, 1.0, 6.0, 3.5, 2.6666667, 2.25, 2.0,
925 // 11.0, 6.0, 4.3333335, 3.5, 3.0, 16.0, 8.5, 6.0, 4.75, 4.0,
926 // 21.0, 11.0, 7.6666665, 6.0, 5.0, 26.0, 13.5, 9.333333, 7.25, 6.0,
927 // 31.0, 16.0, 11.0, 8.5, 7.0, 36.0, 18.5, 12.666667, 9.75, 8.0,
928 // 41.0, 21.0, 14.333333, 11.0, 9.0, 46.0, 23.5, 16.0, 12.25, 10.0,
929 // 51.0, 26.0, 17.666666, 13.5, 11.0, 56.0, 28.5, 19.333334, 14.75, 12.0,
930 // 61.0, 31.0, 21.0, 16.0, 13.0, 66.0, 33.5, 22.666666, 17.25, 14.0,
931 // 71.0, 36.0, 24.333334, 18.5, 15.0, 76.0, 38.5, 26.0, 19.75, 16.0,
932 // 81.0, 41.0, 27.666666, 21.0, 17.0, 86.0, 43.5, 29.333334, 22.25, 18.0,
933 // 91.0, 46.0, 31.0, 23.5, 19.0, 96.0, 48.5, 32.666668, 24.75, 20.0,
934 // 101.0, 51.0, 34.333332, 26.0, 21.0, 106.0, 53.5, 36.0, 27.25, 22.0,
935 // 111.0, 56.0, 37.666668, 28.5, 23.0, 116.0, 58.5, 39.333332, 29.75, 24.0};
936 // nntrainer::Tensor answer(ref_dim, answer_data);
937 // int status = t.divide_i(m);
938 // EXPECT_EQ(status, ML_ERROR_NONE);
939 // EXPECT_EQ(t, answer);
942 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
943 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
945 // nntrainer::Tensor m = ranged(1, 2, 1, 1);
947 // __fp16 answer_data[] = {
948 // 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
949 // 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 10.5, 11.0, 11.5, 12.0,
950 // 12.5, 13.0, 13.5, 14.0, 14.5, 15.0, 15.5, 16.0, 16.5, 17.0, 17.5, 18.0,
951 // 18.5, 19.0, 19.5, 20.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0,
952 // 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0,
953 // 30.5, 31.0, 31.5, 32.0, 32.5, 33.0, 33.5, 34.0, 34.5, 35.0, 35.5, 36.0,
954 // 36.5, 37.0, 37.5, 38.0, 38.5, 39.0, 39.5, 40.0, 81.0, 82.0, 83.0, 84.0,
955 // 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0,
956 // 97.0, 98.0, 99.0, 100.0, 50.5, 51.0, 51.5, 52.0, 52.5, 53.0, 53.5, 54.0,
957 // 54.5, 55.0, 55.5, 56.0, 56.5, 57.0, 57.5, 58.0, 58.5, 59.0, 59.5, 60.0};
958 // nntrainer::Tensor answer(ref_dim, answer_data);
959 // int status = t.divide_i(m);
960 // EXPECT_EQ(status, ML_ERROR_NONE);
961 // EXPECT_EQ(t, answer);
964 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
965 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
967 // nntrainer::Tensor m = ranged(3, 1, 1, 1);
969 // __fp16 answer_data[] = {
970 // 1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
971 // 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
972 // 13.0, 14.0, 15.0, 16.0, 17.0, 18.0,
973 // 19.0, 20.0, 21.0, 22.0, 23.0, 24.0,
974 // 25.0, 26.0, 27.0, 28.0, 29.0, 30.0,
975 // 31.0, 32.0, 33.0, 34.0, 35.0, 36.0,
976 // 37.0, 38.0, 39.0, 40.0, 20.5, 21.0,
977 // 21.5, 22.0, 22.5, 23.0, 23.5, 24.0,
978 // 24.5, 25.0, 25.5, 26.0, 26.5, 27.0,
979 // 27.5, 28.0, 28.5, 29.0, 29.5, 30.0,
980 // 30.5, 31.0, 31.5, 32.0, 32.5, 33.0,
981 // 33.5, 34.0, 34.5, 35.0, 35.5, 36.0,
982 // 36.5, 37.0, 37.5, 38.0, 38.5, 39.0,
983 // 39.5, 40.0, 27.0, 27.333334, 27.666666, 28.0,
984 // 28.333334, 28.666666, 29.0, 29.333334, 29.666666, 30.0,
985 // 30.333334, 30.666666, 31.0, 31.333334, 31.666666, 32.0,
986 // 32.333332, 32.666668, 33.0, 33.333332, 33.666668, 34.0,
987 // 34.333332, 34.666668, 35.0, 35.333332, 35.666668, 36.0,
988 // 36.333332, 36.666668, 37.0, 37.333332, 37.666668, 38.0,
989 // 38.333332, 38.666668, 39.0, 39.333332, 39.666668, 40.0};
990 // nntrainer::Tensor answer(ref_dim, answer_data);
991 // int status = t.divide_i(m);
992 // EXPECT_EQ(status, ML_ERROR_NONE);
993 // EXPECT_EQ(t, answer);
996 // nntrainer::TensorDim ref_dim(3, 5, 1, 4);
997 // nntrainer::Tensor t = ranged(3, 5, 1, 4);
999 // nntrainer::Tensor m = ranged(3, 1, 1, 4);
1001 // __fp16 answer_data[] = {
1002 // 1.0, 1.0, 1.0, 1.0, 5.0, 3.0,
1003 // 2.3333333, 2.0, 9.0, 5.0, 3.6666667, 3.0,
1004 // 13.0, 7.0, 5.0, 4.0, 17.0, 9.0,
1005 // 6.3333335, 5.0, 4.2, 3.6666667, 3.2857144, 3.0,
1006 // 5.0, 4.3333335, 3.857143, 3.5, 5.8, 5.0,
1007 // 4.428571, 4.0, 6.6, 5.6666665, 5.0, 4.5,
1008 // 7.4, 6.3333335, 5.571429, 5.0, 4.5555553, 4.2,
1009 // 3.909091, 3.6666667, 5.0, 4.6, 4.2727275, 4.0,
1010 // 5.4444447, 5.0, 4.6363635, 4.3333335, 5.888889, 5.4,
1011 // 5.0, 4.6666665, 6.3333335, 5.8, 5.3636365, 5.0};
1012 // nntrainer::Tensor answer(ref_dim, answer_data);
1013 // int status = t.divide_i(m);
1014 // EXPECT_EQ(status, ML_ERROR_NONE);
1015 // EXPECT_EQ(t, answer);
1019 // TEST(nntrainer_Tensor, divide_i_broadcast_not_supported_01_n) {
1020 // nntrainer::Tensor target(3, 1, 3, 1);
1021 // nntrainer::Tensor target2(3, 1, 3, 3);
1023 // EXPECT_EQ(target.divide_i(target2), ML_ERROR_INVALID_PARAMETER);
1026 // TEST(nntrainer_Tensor, divide_i_broadcast_not_broadcastable_02_n) {
1027 // nntrainer::Tensor target(3, 2, 4, 5);
1028 // nntrainer::Tensor target2(3, 2, 3, 1);
1030 // EXPECT_EQ(target.divide_i(target2), ML_ERROR_INVALID_PARAMETER);
1033 // TEST(nntrainer_Tensor, add_i_01_p) {
1034 // int status = ML_ERROR_NONE;
1040 // nntrainer::Tensor target(batch, channel, height, width);
1041 // GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1 + channel);
1043 // nntrainer::Tensor original(batch, channel, height, width);
1044 // original.copy(target);
1046 // status = target.add_i(2.1);
1047 // EXPECT_EQ(status, ML_ERROR_NONE);
1049 // __fp16 *previous = original.getData();
1050 // ASSERT_NE(nullptr, previous);
1051 // __fp16 *data = target.getData();
1052 // ASSERT_NE(nullptr, data);
1054 // for (int i = 0; i < batch * height * width; ++i) {
1055 // EXPECT_FLOAT_EQ(data[i], previous[i] + (__fp16)2.1);
1059 // TEST(nntrainer_Tensor, add_i_02_p) {
1060 // int status = ML_ERROR_NONE;
1066 // nntrainer::Tensor target(batch, channel, height, width);
1067 // GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1);
1069 // nntrainer::Tensor original(batch, height, width);
1070 // original.copy(target);
1072 // status = target.add_i(target, 3.0);
1073 // EXPECT_EQ(status, ML_ERROR_NONE);
1075 // __fp16 *previous = original.getData();
1076 // ASSERT_NE(nullptr, previous);
1077 // __fp16 *data = target.getData();
1078 // ASSERT_NE(nullptr, data);
1080 // for (int i = 0; i < batch * height * width; ++i) {
1081 // EXPECT_FLOAT_EQ(data[i], previous[i] * 4.0);
1086 // * @brief operand dimension is not right
1088 // TEST(nntrainer_Tensor, add_i_01_n) {
1089 // int status = ML_ERROR_NONE;
1095 // nntrainer::Tensor target(batch, channel, height, width);
1096 // GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1);
1098 // nntrainer::Tensor target2(batch, height - 2, width - 3);
1100 // status = target.add_i(target2);
1101 // EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
1104 // TEST(nntrainer_Tensor, add_i_broadcast_01_p) {
1105 // nntrainer::TensorDim ref_dim{3, 2, 4, 5};
1107 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
1108 // nntrainer::Tensor m = ranged(1, 2, 4, 5);
1109 // __fp16 answer_data[] = {
1110 // 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26,
1111 // 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54,
1112 // 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 40, 42,
1113 // 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70,
1114 // 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98,
1115 // 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 80, 82, 84, 86,
1116 // 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114,
1117 // 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142,
1118 // 144, 146, 148, 150, 152, 154, 156, 158};
1119 // nntrainer::Tensor answer(ref_dim, answer_data);
1120 // int status = t.add_i(m);
1121 // EXPECT_EQ(status, ML_ERROR_NONE);
1122 // EXPECT_EQ(t, answer);
1125 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
1126 // nntrainer::Tensor m = ranged(3, 1, 4, 5);
1127 // __fp16 answer_data[] = {
1128 // 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26,
1129 // 28, 30, 32, 34, 36, 38, 20, 22, 24, 26, 28, 30, 32, 34,
1130 // 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62,
1131 // 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90,
1132 // 92, 94, 96, 98, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98,
1133 // 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126,
1134 // 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154,
1135 // 156, 158, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162,
1136 // 164, 166, 168, 170, 172, 174, 176, 178};
1137 // nntrainer::Tensor answer(ref_dim, answer_data);
1138 // int status = t.add_i(m);
1139 // EXPECT_EQ(status, ML_ERROR_NONE);
1140 // EXPECT_EQ(t, answer);
1143 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
1144 // nntrainer::Tensor m = ranged(3, 2, 4, 1);
1145 // __fp16 answer_data[] = {
1146 // 0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15,
1147 // 16, 18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 30, 31, 32,
1148 // 33, 34, 36, 37, 38, 39, 40, 42, 43, 44, 45, 46, 48, 49,
1149 // 50, 51, 52, 54, 55, 56, 57, 58, 60, 61, 62, 63, 64, 66,
1150 // 67, 68, 69, 70, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82,
1151 // 84, 85, 86, 87, 88, 90, 91, 92, 93, 94, 96, 97, 98, 99,
1152 // 100, 102, 103, 104, 105, 106, 108, 109, 110, 111, 112, 114, 115, 116,
1153 // 117, 118, 120, 121, 122, 123, 124, 126, 127, 128, 129, 130, 132, 133,
1154 // 134, 135, 136, 138, 139, 140, 141, 142};
1155 // nntrainer::Tensor answer(ref_dim, answer_data);
1156 // int status = t.add_i(m);
1157 // EXPECT_EQ(status, ML_ERROR_NONE);
1158 // EXPECT_EQ(t, answer);
1161 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
1162 // nntrainer::Tensor m = ranged(3, 1, 1, 5);
1163 // __fp16 answer_data[] = {
1164 // 0, 2, 4, 6, 8, 5, 7, 9, 11, 13, 10, 12, 14, 16,
1165 // 18, 15, 17, 19, 21, 23, 20, 22, 24, 26, 28, 25, 27, 29,
1166 // 31, 33, 30, 32, 34, 36, 38, 35, 37, 39, 41, 43, 45, 47,
1167 // 49, 51, 53, 50, 52, 54, 56, 58, 55, 57, 59, 61, 63, 60,
1168 // 62, 64, 66, 68, 65, 67, 69, 71, 73, 70, 72, 74, 76, 78,
1169 // 75, 77, 79, 81, 83, 80, 82, 84, 86, 88, 90, 92, 94, 96,
1170 // 98, 95, 97, 99, 101, 103, 100, 102, 104, 106, 108, 105, 107, 109,
1171 // 111, 113, 110, 112, 114, 116, 118, 115, 117, 119, 121, 123, 120, 122,
1172 // 124, 126, 128, 125, 127, 129, 131, 133};
1173 // nntrainer::Tensor answer(ref_dim, answer_data);
1174 // int status = t.add_i(m);
1175 // EXPECT_EQ(status, ML_ERROR_NONE);
1176 // EXPECT_EQ(t, answer);
1179 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
1180 // nntrainer::Tensor m = ranged(1, 2, 1, 5);
1181 // __fp16 answer_data[] = {
1182 // 0, 2, 4, 6, 8, 5, 7, 9, 11, 13, 10, 12, 14, 16,
1183 // 18, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 30, 32, 34,
1184 // 36, 38, 35, 37, 39, 41, 43, 40, 42, 44, 46, 48, 40, 42,
1185 // 44, 46, 48, 45, 47, 49, 51, 53, 50, 52, 54, 56, 58, 55,
1186 // 57, 59, 61, 63, 65, 67, 69, 71, 73, 70, 72, 74, 76, 78,
1187 // 75, 77, 79, 81, 83, 80, 82, 84, 86, 88, 80, 82, 84, 86,
1188 // 88, 85, 87, 89, 91, 93, 90, 92, 94, 96, 98, 95, 97, 99,
1189 // 101, 103, 105, 107, 109, 111, 113, 110, 112, 114, 116, 118, 115, 117,
1190 // 119, 121, 123, 120, 122, 124, 126, 128};
1191 // nntrainer::Tensor answer(ref_dim, answer_data);
1192 // int status = t.add_i(m);
1193 // EXPECT_EQ(status, ML_ERROR_NONE);
1194 // EXPECT_EQ(t, answer);
1197 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
1198 // nntrainer::Tensor m = ranged(3, 1, 4, 1);
1199 // __fp16 answer_data[] = {
1200 // 0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15,
1201 // 16, 18, 19, 20, 21, 22, 20, 21, 22, 23, 24, 26, 27, 28,
1202 // 29, 30, 32, 33, 34, 35, 36, 38, 39, 40, 41, 42, 44, 45,
1203 // 46, 47, 48, 50, 51, 52, 53, 54, 56, 57, 58, 59, 60, 62,
1204 // 63, 64, 65, 66, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74,
1205 // 76, 77, 78, 79, 80, 82, 83, 84, 85, 86, 88, 89, 90, 91,
1206 // 92, 94, 95, 96, 97, 98, 100, 101, 102, 103, 104, 106, 107, 108,
1207 // 109, 110, 108, 109, 110, 111, 112, 114, 115, 116, 117, 118, 120, 121,
1208 // 122, 123, 124, 126, 127, 128, 129, 130};
1209 // nntrainer::Tensor answer(ref_dim, answer_data);
1210 // int status = t.add_i(m);
1211 // EXPECT_EQ(status, ML_ERROR_NONE);
1212 // EXPECT_EQ(t, answer);
1215 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
1216 // nntrainer::Tensor m = ranged(1, 1, 1, 5);
1217 // __fp16 answer_data[] = {
1218 // 0, 2, 4, 6, 8, 5, 7, 9, 11, 13, 10, 12, 14, 16,
1219 // 18, 15, 17, 19, 21, 23, 20, 22, 24, 26, 28, 25, 27, 29,
1220 // 31, 33, 30, 32, 34, 36, 38, 35, 37, 39, 41, 43, 40, 42,
1221 // 44, 46, 48, 45, 47, 49, 51, 53, 50, 52, 54, 56, 58, 55,
1222 // 57, 59, 61, 63, 60, 62, 64, 66, 68, 65, 67, 69, 71, 73,
1223 // 70, 72, 74, 76, 78, 75, 77, 79, 81, 83, 80, 82, 84, 86,
1224 // 88, 85, 87, 89, 91, 93, 90, 92, 94, 96, 98, 95, 97, 99,
1225 // 101, 103, 100, 102, 104, 106, 108, 105, 107, 109, 111, 113, 110, 112,
1226 // 114, 116, 118, 115, 117, 119, 121, 123};
1227 // nntrainer::Tensor answer(ref_dim, answer_data);
1228 // int status = t.add_i(m);
1229 // EXPECT_EQ(status, ML_ERROR_NONE);
1230 // EXPECT_EQ(t, answer);
1233 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
1234 // nntrainer::Tensor m = ranged(1, 2, 1, 1);
1235 // __fp16 answer_data[] = {
1236 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
1237 // 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27, 28,
1238 // 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 40, 41,
1239 // 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
1240 // 56, 57, 58, 59, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
1241 // 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 80, 81, 82, 83,
1242 // 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
1243 // 98, 99, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
1244 // 113, 114, 115, 116, 117, 118, 119, 120};
1245 // nntrainer::Tensor answer(ref_dim, answer_data);
1246 // int status = t.add_i(m);
1247 // EXPECT_EQ(status, ML_ERROR_NONE);
1248 // EXPECT_EQ(t, answer);
1251 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
1252 // nntrainer::Tensor m = ranged(3, 1, 1, 1);
1253 // __fp16 answer_data[] = {
1254 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
1255 // 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
1256 // 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 41, 42,
1257 // 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
1258 // 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
1259 // 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 82, 83, 84, 85,
1260 // 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
1261 // 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
1262 // 114, 115, 116, 117, 118, 119, 120, 121};
1263 // nntrainer::Tensor answer(ref_dim, answer_data);
1264 // int status = t.add_i(m);
1265 // EXPECT_EQ(status, ML_ERROR_NONE);
1266 // EXPECT_EQ(t, answer);
1269 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
1270 // nntrainer::Tensor m = ranged(1, 1, 1, 1);
1272 // __fp16 answer_data[] = {
1273 // 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
1274 // 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
1275 // 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
1276 // 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
1277 // 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
1278 // 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
1279 // 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
1280 // 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
1281 // 113, 114, 115, 116, 117, 118, 119, 120};
1282 // nntrainer::Tensor answer(ref_dim, answer_data);
1283 // int status = t.add_i(m);
1284 // EXPECT_EQ(status, ML_ERROR_NONE);
1285 // EXPECT_EQ(t, answer);
1288 // nntrainer::TensorDim ref_dim(3, 5, 1, 4);
1289 // nntrainer::Tensor t = ranged(3, 5, 1, 4);
1290 // nntrainer::Tensor m = ranged(3, 1, 1, 4);
1291 // __fp16 answer_data[] = {0, 2, 4, 6, 4, 6, 8, 10, 8, 10, 12, 14,
1292 // 12, 14, 16, 18, 16, 18, 20, 22, 24, 26, 28, 30,
1293 // 28, 30, 32, 34, 32, 34, 36, 38, 36, 38, 40, 42,
1294 // 40, 42, 44, 46, 48, 50, 52, 54, 52, 54, 56, 58,
1295 // 56, 58, 60, 62, 60, 62, 64, 66, 64, 66, 68, 70};
1296 // nntrainer::Tensor answer(ref_dim, answer_data);
1297 // int status = t.add_i(m);
1298 // EXPECT_EQ(status, ML_ERROR_NONE);
1299 // EXPECT_EQ(t, answer);
1302 // nntrainer::TensorDim ref_dim(1, 1, 2, 1);
1303 // nntrainer::Tensor t = ranged(1, 1, 2, 1);
1304 // nntrainer::Tensor m = ranged(1, 1, 2, 1);
1305 // __fp16 answer_data[] = {0.0, 2.0};
1306 // nntrainer::Tensor answer(ref_dim, answer_data);
1307 // int status = t.add_i(m);
1308 // EXPECT_EQ(status, ML_ERROR_NONE);
1309 // EXPECT_EQ(t, answer);
1312 // nntrainer::TensorDim ref_dim(16, 1, 1, 1);
1313 // nntrainer::Tensor t = ranged(16, 1, 1, 1);
1314 // nntrainer::Tensor m = ranged(1, 1, 1, 1);
1315 // __fp16 answer_data[] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
1316 // 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0};
1317 // nntrainer::Tensor answer(ref_dim, answer_data);
1318 // int status = t.add_i(m);
1319 // EXPECT_EQ(status, ML_ERROR_NONE);
1320 // EXPECT_EQ(t, answer);
1324 // TEST(nntrainer_Tensor, add_i_broadcast_not_supported_01_n) {
1325 // nntrainer::Tensor target(3, 1, 3, 1);
1326 // nntrainer::Tensor target2(3, 1, 3, 3);
1328 // EXPECT_EQ(target.add_i(target2), ML_ERROR_INVALID_PARAMETER);
1331 // TEST(nntrainer_Tensor, add_i_broadcast_not_broadcastable_02_n) {
1332 // nntrainer::Tensor target(3, 2, 4, 5);
1333 // nntrainer::Tensor target2(3, 2, 3, 1);
1335 // EXPECT_EQ(target.add_i(target2), ML_ERROR_INVALID_PARAMETER);
1338 // TEST(nntrainer_Tensor, add_01_p) {
1339 // int status = ML_ERROR_NONE;
1345 // nntrainer::Tensor input(batch, channel, height, width);
1346 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
1348 // nntrainer::Tensor result = input.add(1.0);
1350 // __fp16 *data = result.getData();
1351 // ASSERT_NE(nullptr, data);
1352 // __fp16 *indata = input.getData();
1353 // ASSERT_NE(nullptr, indata);
1355 // for (int i = 0; i < batch * height * width; ++i) {
1356 // if (data[i] != indata[i] + (__fp16)1.0) {
1357 // status = ML_ERROR_RESULT_OUT_OF_RANGE;
1362 // EXPECT_EQ(status, ML_ERROR_NONE);
1365 // TEST(nntrainer_Tensor, add_02_p) {
1366 // int status = ML_ERROR_NONE;
1372 // nntrainer::Tensor input(batch, channel, height, width);
1373 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
1375 // nntrainer::Tensor result = input.add(input);
1377 // __fp16 *data = result.getData();
1378 // ASSERT_NE(nullptr, data);
1379 // __fp16 *indata = input.getData();
1380 // ASSERT_NE(nullptr, indata);
1382 // for (int i = 0; i < batch * height * width; ++i) {
1383 // if (data[i] != indata[i] + indata[i]) {
1384 // status = ML_ERROR_RESULT_OUT_OF_RANGE;
1389 // EXPECT_EQ(status, ML_ERROR_NONE);
1392 // TEST(nntrainer_Tensor, add_03_n) {
1398 // nntrainer::Tensor input(batch, channel, height, width);
1399 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
1401 // nntrainer::Tensor test(batch - 1, channel, height - 1, width - 1);
1403 // EXPECT_THROW({ input.add(test); }, std::invalid_argument);
1406 // TEST(nntrainer_Tensor, add_04_n) {
1412 // nntrainer::TensorDim dim(batch, channel, height, width);
1414 // nntrainer::Tensor input(batch, channel, height, 2 * width);
1415 // nntrainer::Tensor shared_input = input.getSharedDataTensor(dim, 0, false);
1416 // nntrainer::Tensor test(dim);
1418 // EXPECT_THROW(shared_input.add(test), std::invalid_argument);
1421 // TEST(nntrainer_Tensor, add_05_n) {
1427 // nntrainer::TensorDim dim(batch, channel, height, width);
1429 // nntrainer::Tensor input(dim);
1430 // nntrainer::Tensor test(batch, channel, height, 2 * width);
1431 // nntrainer::Tensor shared_test = test.getSharedDataTensor(dim, 0, false);
1433 // EXPECT_THROW(input.add(shared_test), std::invalid_argument);
1436 // TEST(nntrainer_Tensor, add_06_n) {
1442 // nntrainer::TensorDim dim(batch, channel, height, width);
1444 // nntrainer::Tensor input(dim, false);
1445 // nntrainer::Tensor test(dim);
1446 // GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 1);
1448 // EXPECT_THROW(input.add(test), std::invalid_argument);
1451 // TEST(nntrainer_Tensor, add_07_n) {
1457 // nntrainer::TensorDim dim(batch, channel, height, width);
1459 // nntrainer::Tensor input(dim);
1460 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
1461 // nntrainer::Tensor test(dim, false);
1463 // EXPECT_THROW(input.add(test), std::invalid_argument);
1466 // TEST(nntrainer_Tensor, add_08_n) {
1472 // nntrainer::TensorDim dim(batch, channel, height, width);
1474 // nntrainer::Tensor input(dim);
1475 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
1476 // nntrainer::Tensor test(dim);
1477 // GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 2);
1478 // nntrainer::Tensor output(dim, false);
1480 // EXPECT_THROW(input.add(test, output), std::invalid_argument);
1483 // TEST(nntrainer_Tensor, pow_01_p) {
1485 // nntrainer::Tensor input = constant(4.0, 3, 2, 4, 5);
1487 // nntrainer::Tensor actual, expected;
1489 // actual = input.pow(0.5f);
1490 // expected = constant(2.0, 3, 2, 4, 5);
1491 // EXPECT_EQ(actual, expected);
1493 // actual = input.pow(2.0f);
1494 // expected = constant(16.0, 3, 2, 4, 5);
1495 // EXPECT_EQ(actual, expected);
1497 // actual = input.pow(-0.5f);
1498 // expected = constant(0.5, 3, 2, 4, 5);
1499 // EXPECT_EQ(actual, expected);
1502 // TEST(nntrainer_Tensor, erf_01_p) {
1508 // nntrainer::TensorDim dim(batch, channel, height, width);
1510 // nntrainer::Tensor input(dim);
1511 // GEN_TEST_INPUT(input, k + l * 0.5 + 0.5);
1512 // nntrainer::Tensor actual = input.erf();
1513 // nntrainer::Tensor expected(
1514 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1515 // {{{{0.5205, 0.8427}, {0.966105, 0.995322}}}}));
1517 // EXPECT_EQ(actual, expected);
1520 // TEST(nntrainer_Tensor, subtract_i_01_p) {
1521 // int status = ML_ERROR_NONE;
1527 // nntrainer::Tensor target(batch, channel, height, width);
1528 // GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1 + channel);
1530 // nntrainer::Tensor original(batch, height, width);
1531 // original.copy(target);
1533 // status = target.subtract_i(2.1);
1534 // EXPECT_EQ(status, ML_ERROR_NONE);
1536 // __fp16 *previous = original.getData();
1537 // ASSERT_NE(nullptr, previous);
1538 // __fp16 *data = target.getData();
1539 // ASSERT_NE(nullptr, data);
1541 // for (int i = 0; i < batch * height * width; ++i) {
1542 // EXPECT_FLOAT_EQ(data[i], previous[i] - (__fp16)2.1);
1546 // TEST(nntrainer_Tensor, subtract_i_02_p) {
1547 // int status = ML_ERROR_NONE;
1553 // nntrainer::Tensor target(batch, channel, height, width);
1554 // GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1 + channel);
1556 // status = target.subtract_i(target);
1557 // EXPECT_EQ(status, ML_ERROR_NONE);
1559 // __fp16 *data = target.getData();
1560 // ASSERT_NE(nullptr, data);
1562 // for (int i = 0; i < batch * height * width; ++i) {
1563 // EXPECT_FLOAT_EQ(data[i], 0);
1567 // TEST(nntrainer_Tensor, subtract_i_03_n) {
1568 // int status = ML_ERROR_NONE;
1574 // nntrainer::Tensor target(batch, channel, height, width);
1575 // GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1 + channel);
1577 // nntrainer::Tensor target2(batch, channel, height - 1, width - 3);
1579 // status = target.subtract_i(target2);
1580 // EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
1583 // TEST(nntrainer_Tensor, subtract_01_p) {
1584 // int status = ML_ERROR_NONE;
1590 // nntrainer::Tensor input(batch, channel, height, width);
1591 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
1593 // nntrainer::Tensor result = input.subtract(1.0);
1595 // __fp16 *data = result.getData();
1596 // ASSERT_NE(nullptr, data);
1597 // __fp16 *indata = input.getData();
1598 // ASSERT_NE(nullptr, indata);
1600 // for (int i = 0; i < batch * height * width; ++i) {
1601 // if (data[i] != indata[i] - 1.0) {
1602 // status = ML_ERROR_RESULT_OUT_OF_RANGE;
1607 // EXPECT_EQ(status, ML_ERROR_NONE);
1610 // TEST(nntrainer_Tensor, subtract_02_p) {
1616 // nntrainer::Tensor input(batch, channel, height, width);
1617 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
1619 // nntrainer::Tensor result = input.subtract(input);
1621 // EXPECT_EQ(constant(0.0, batch, channel, height, width), result);
1624 // TEST(nntrainer_Tensor, subtract_03_n) {
1630 // nntrainer::Tensor input(batch, channel, height, width);
1631 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
1633 // nntrainer::Tensor test(batch - 1, channel, height - 1, width - 1);
1635 // EXPECT_THROW({ input.subtract(test); }, std::invalid_argument);
1638 // TEST(nntrainer_Tensor, subtract_04_n) {
1644 // nntrainer::TensorDim dim(batch, channel, height, width);
1646 // nntrainer::Tensor input(batch, channel, height, 2 * width);
1647 // nntrainer::Tensor shared_input = input.getSharedDataTensor(dim, 0, false);
1648 // nntrainer::Tensor test(dim);
1650 // EXPECT_THROW(shared_input.subtract(test), std::invalid_argument);
1653 // TEST(nntrainer_Tensor, subtract_05_n) {
1659 // nntrainer::TensorDim dim(batch, channel, height, width);
1661 // nntrainer::Tensor input(dim);
1662 // nntrainer::Tensor test(batch, channel, height, 2 * width);
1663 // nntrainer::Tensor shared_test = test.getSharedDataTensor(dim, 0, false);
1665 // EXPECT_THROW(input.subtract(shared_test), std::invalid_argument);
1668 // TEST(nntrainer_Tensor, subtract_06_n) {
1674 // nntrainer::TensorDim dim(batch, channel, height, width);
1676 // nntrainer::Tensor input(dim, false);
1677 // nntrainer::Tensor test(dim);
1678 // GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 1);
1680 // EXPECT_THROW(input.subtract(test), std::invalid_argument);
1683 // TEST(nntrainer_Tensor, subtract_07_n) {
1689 // nntrainer::TensorDim dim(batch, channel, height, width);
1691 // nntrainer::Tensor input(dim);
1692 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
1693 // nntrainer::Tensor test(dim, false);
1695 // EXPECT_THROW(input.subtract(test), std::invalid_argument);
1698 // TEST(nntrainer_Tensor, subtract_08_n) {
1704 // nntrainer::TensorDim dim(batch, channel, height, width);
1706 // nntrainer::Tensor input(dim);
1707 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
1708 // nntrainer::Tensor test(dim);
1709 // GEN_TEST_INPUT(test, i * (batch * height) + j * (width) + k + 2);
1710 // nntrainer::Tensor output(dim, false);
1712 // EXPECT_THROW(input.subtract(test, output), std::invalid_argument);
1715 // TEST(nntrainer_Tensor, subtract___fp16_01_p) {
1721 // nntrainer::Tensor input(batch, channel, height, width);
1722 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
1724 // nntrainer::Tensor expected(batch, channel, height, width);
1725 // GEN_TEST_INPUT(expected, i * (batch * height) + j * (width) + k);
1727 // nntrainer::Tensor result = input.subtract(1.0);
1729 // EXPECT_EQ(result, expected);
1732 // TEST(nntrainer_Tensor, sum_01_n) {
1738 // nntrainer::Tensor input(batch, channel, height, width);
1739 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
1741 // EXPECT_THROW({ input.sum(4); }, std::out_of_range);
1744 // TEST(nntrainer_Tensor, sum_02_n) {
1750 // nntrainer::Tensor input(batch, channel, height, width);
1751 // GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
1753 // EXPECT_THROW({ input.sum(-1); }, std::out_of_range);
1756 // TEST(nntrainer_Tensor, sum_02_p) {
1762 // nntrainer::Tensor ans0(
1763 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1764 // {{{{39, 42, 45, 48, 51, 54, 57, 60, 63, 66},
1765 // {69, 72, 75, 78, 81, 84, 87, 90, 93, 96}},
1766 // {{57, 60, 63, 66, 69, 72, 75, 78, 81, 84},
1767 // {87, 90, 93, 96, 99, 102, 105, 108, 111, 114}}}}));
1769 // nntrainer::Tensor ans1(
1770 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1771 // {{{{8, 10, 12, 14, 16, 18, 20, 22, 24, 26},
1772 // {28, 30, 32, 34, 36, 38, 40, 42, 44, 46}}},
1773 // {{{32, 34, 36, 38, 40, 42, 44, 46, 48, 50},
1774 // {52, 54, 56, 58, 60, 62, 64, 66, 68, 70}}},
1775 // {{{56, 58, 60, 62, 64, 66, 68, 70, 72, 74},
1776 // {76, 78, 80, 82, 84, 86, 88, 90, 92, 94}}}}));
1778 // nntrainer::Tensor ans2(
1779 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1780 // {{{{12, 14, 16, 18, 20, 22, 24, 26, 28, 30}},
1781 // {{24, 26, 28, 30, 32, 34, 36, 38, 40, 42}}},
1782 // {{{36, 38, 40, 42, 44, 46, 48, 50, 52, 54}},
1783 // {{48, 50, 52, 54, 56, 58, 60, 62, 64, 66}}},
1784 // {{{60, 62, 64, 66, 68, 70, 72, 74, 76, 78}},
1785 // {{72, 74, 76, 78, 80, 82, 84, 86, 88, 90}}}}));
1787 // nntrainer::Tensor ans3(
1788 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1789 // {{{{55}, {155}}, {{115}, {215}}},
1790 // {{{175}, {275}}, {{235}, {335}}},
1791 // {{{295}, {395}}, {{355}, {455}}}}));
1793 // nntrainer::Tensor input(batch, channel, height, width);
1794 // GEN_TEST_INPUT(input, i * (batch * height * channel) + j * (batch * height) +
1795 // k * (width) + l + 1);
1797 // nntrainer::Tensor result0 = input.sum(0);
1798 // nntrainer::Tensor result1 = input.sum(1);
1799 // nntrainer::Tensor result2 = input.sum(2);
1800 // nntrainer::Tensor result3 = input.sum(3);
1802 // EXPECT_EQ(ans0, result0);
1803 // EXPECT_EQ(ans1, result1);
1804 // EXPECT_EQ(ans2, result2);
1805 // EXPECT_EQ(ans3, result3);
1808 // TEST(nntrainer_Tensor, sum_03_p) {
1809 // const int batch = 3;
1810 // const int channel = 2;
1811 // const int height = 1;
1812 // const int width = 10;
1814 // nntrainer::Tensor input(batch, channel, height, width);
1815 // GEN_TEST_INPUT(input, i * (height * channel * width) + j * (height * width) +
1816 // k * (width) + l + 1);
1817 // // Test for alpha == 1 and beta == 0 and dimension of reduced axis == 1
1819 // nntrainer::Tensor ans_0_1_0(
1820 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1821 // {{{{63, 66, 69, 72, 75, 78, 81, 84, 87, 90}},
1822 // {{93, 96, 99, 102, 105, 108, 111, 114, 117, 120}}}}));
1824 // nntrainer::Tensor ans_1_1_0(
1825 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1826 // {{{{12, 14, 16, 18, 20, 22, 24, 26, 28, 30}}},
1827 // {{{52, 54, 56, 58, 60, 62, 64, 66, 68, 70}}},
1828 // {{{92, 94, 96, 98, 100, 102, 104, 106, 108, 110}}}}));
1830 // nntrainer::Tensor ans_2_1_0(
1831 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1832 // {{{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
1833 // {{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}}},
1834 // {{{21, 22, 23, 24, 25, 26, 27, 28, 29, 30}},
1835 // {{31, 32, 33, 34, 35, 36, 37, 38, 39, 40}}},
1836 // {{{41, 42, 43, 44, 45, 46, 47, 48, 49, 50}},
1837 // {{51, 52, 53, 54, 55, 56, 57, 58, 59, 60}}}}));
1839 // nntrainer::Tensor ans_3_1_0(
1840 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1841 // {{{{55}}, {{155}}}, {{{255}}, {{355}}}, {{{455}}, {{555}}}}));
1843 // nntrainer::Tensor result_0_1_0 = input.sum(0, 1);
1844 // nntrainer::Tensor result_1_1_0 = input.sum(1, 1);
1845 // nntrainer::Tensor result_2_1_0 = input.sum(2, 1);
1846 // nntrainer::Tensor result_3_1_0 = input.sum(3, 1);
1848 // EXPECT_EQ(ans_0_1_0, result_0_1_0);
1849 // EXPECT_EQ(ans_1_1_0, result_1_1_0);
1850 // EXPECT_EQ(ans_2_1_0, result_2_1_0);
1851 // EXPECT_EQ(ans_3_1_0, result_3_1_0);
1854 // // Test for alpha == 1 and beta == 2 and dimension of reduced axis == 1
1856 // nntrainer::Tensor ans_0_1_2(
1857 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1858 // {{{{65, 70, 75, 80, 85, 90, 95, 100, 105, 110}},
1859 // {{115, 120, 125, 130, 135, 140, 145, 150, 155, 160}}}}));
1861 // nntrainer::Tensor ans_1_1_2(
1862 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1863 // {{{{14, 18, 22, 26, 30, 34, 38, 42, 46, 50}}},
1864 // {{{74, 78, 82, 86, 90, 94, 98, 102, 106, 110}}},
1865 // {{{134, 138, 142, 146, 150, 154, 158, 162, 166, 170}}}}));
1867 // nntrainer::Tensor ans_2_1_2(
1868 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1869 // {{{{3, 6, 9, 12, 15, 18, 21, 24, 27, 30}},
1870 // {{33, 36, 39, 42, 45, 48, 51, 54, 57, 60}}},
1871 // {{{63, 66, 69, 72, 75, 78, 81, 84, 87, 90}},
1872 // {{93, 96, 99, 102, 105, 108, 111, 114, 117, 120}}},
1873 // {{{123, 126, 129, 132, 135, 138, 141, 144, 147, 150}},
1874 // {{153, 156, 159, 162, 165, 168, 171, 174, 177, 180}}}}));
1876 // nntrainer::Tensor ans_3_1_2(
1877 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1878 // {{{{57}}, {{159}}}, {{{261}}, {{363}}}, {{{465}}, {{567}}}}));
1880 // nntrainer::Tensor output_0_1_2(1, channel, height, width);
1882 // const int batch = 1;
1883 // GEN_TEST_INPUT(output_0_1_2, i * (channel * height * width) +
1884 // j * (height * width) + k * (width) + l +
1887 // nntrainer::Tensor output_1_1_2(batch, 1, height, width);
1889 // const int channel = 1;
1890 // GEN_TEST_INPUT(output_1_1_2, i * (channel * height * width) +
1891 // j * (height * width) + k * (width) + l +
1894 // nntrainer::Tensor output_2_1_2(batch, channel, 1, width);
1896 // const int height = 1;
1897 // GEN_TEST_INPUT(output_2_1_2, i * (channel * height * width) +
1898 // j * (height * width) + k * (width) + l +
1901 // nntrainer::Tensor output_3_1_2(batch, channel, height, 1);
1903 // const int width = 1;
1904 // GEN_TEST_INPUT(output_3_1_2, i * (channel * height * width) +
1905 // j * (height * width) + k * (width) + l +
1908 // nntrainer::Tensor result_0_1_2 = input.sum(0, output_0_1_2, 1, 2);
1909 // nntrainer::Tensor result_1_1_2 = input.sum(1, output_1_1_2, 1, 2);
1910 // nntrainer::Tensor result_2_1_2 = input.sum(2, output_2_1_2, 1, 2);
1911 // nntrainer::Tensor result_3_1_2 = input.sum(3, output_3_1_2, 1, 2);
1913 // EXPECT_EQ(ans_0_1_2, result_0_1_2);
1914 // EXPECT_EQ(ans_1_1_2, result_1_1_2);
1915 // EXPECT_EQ(ans_2_1_2, result_2_1_2);
1916 // EXPECT_EQ(ans_3_1_2, result_3_1_2);
1919 // // Test for alpha == 2 and beta == 0
1921 // nntrainer::Tensor ans_0_2_0(
1922 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1923 // {{{{126, 132, 138, 144, 150, 156, 162, 168, 174, 180}},
1924 // {{186, 192, 198, 204, 210, 216, 222, 228, 234, 240}}}}));
1926 // nntrainer::Tensor ans_1_2_0(
1927 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1928 // {{{{24, 28, 32, 36, 40, 44, 48, 52, 56, 60}}},
1929 // {{{104, 108, 112, 116, 120, 124, 128, 132, 136, 140}}},
1930 // {{{184, 188, 192, 196, 200, 204, 208, 212, 216, 220}}}}));
1932 // nntrainer::Tensor ans_2_2_0(
1933 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1934 // {{{{2, 4, 6, 8, 10, 12, 14, 16, 18, 20}},
1935 // {{22, 24, 26, 28, 30, 32, 34, 36, 38, 40}}},
1936 // {{{42, 44, 46, 48, 50, 52, 54, 56, 58, 60}},
1937 // {{62, 64, 66, 68, 70, 72, 74, 76, 78, 80}}},
1938 // {{{82, 84, 86, 88, 90, 92, 94, 96, 98, 100}},
1939 // {{102, 104, 106, 108, 110, 112, 114, 116, 118, 120}}}}));
1941 // nntrainer::Tensor ans_3_2_0(
1942 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1943 // {{{{110}}, {{310}}}, {{{510}}, {{710}}}, {{{910}}, {{1110}}}}));
1945 // nntrainer::Tensor result_0_2_0 = input.sum(0, 2);
1946 // nntrainer::Tensor result_1_2_0 = input.sum(1, 2);
1947 // nntrainer::Tensor result_2_2_0 = input.sum(2, 2);
1948 // nntrainer::Tensor result_3_2_0 = input.sum(3, 2);
1950 // EXPECT_EQ(ans_0_2_0, result_0_2_0);
1951 // EXPECT_EQ(ans_1_2_0, result_1_2_0);
1952 // EXPECT_EQ(ans_2_2_0, result_2_2_0);
1953 // EXPECT_EQ(ans_3_2_0, result_3_2_0);
1956 // // Test for alpha == 2 and beta == 2
1958 // nntrainer::Tensor ans_0_2_2(
1959 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1960 // {{{{128, 136, 144, 152, 160, 168, 176, 184, 192, 200}},
1961 // {{208, 216, 224, 232, 240, 248, 256, 264, 272, 280}}}}));
1963 // nntrainer::Tensor ans_1_2_2(
1964 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1965 // {{{{26, 32, 38, 44, 50, 56, 62, 68, 74, 80}}},
1966 // {{{126, 132, 138, 144, 150, 156, 162, 168, 174, 180}}},
1967 // {{{226, 232, 238, 244, 250, 256, 262, 268, 274, 280}}}}));
1969 // nntrainer::Tensor ans_2_2_2(
1970 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1971 // {{{{4, 8, 12, 16, 20, 24, 28, 32, 36, 40}},
1972 // {{44, 48, 52, 56, 60, 64, 68, 72, 76, 80}}},
1973 // {{{84, 88, 92, 96, 100, 104, 108, 112, 116, 120}},
1974 // {{124, 128, 132, 136, 140, 144, 148, 152, 156, 160}}},
1975 // {{{164, 168, 172, 176, 180, 184, 188, 192, 196, 200}},
1976 // {{204, 208, 212, 216, 220, 224, 228, 232, 236, 240}}}}));
1978 // nntrainer::Tensor ans_3_2_2(
1979 // std::vector<std::vector<std::vector<std::vector<__fp16>>>>(
1980 // {{{{112}}, {{314}}}, {{{516}}, {{718}}}, {{{920}}, {{1122}}}}));
1982 // nntrainer::Tensor output_0_2_2(1, channel, height, width);
1984 // const int batch = 1;
1985 // GEN_TEST_INPUT(output_0_2_2, i * (channel * height * width) +
1986 // j * (height * width) + k * (width) + l +
1989 // nntrainer::Tensor output_1_2_2(batch, 1, height, width);
1991 // const int channel = 1;
1992 // GEN_TEST_INPUT(output_1_2_2, i * (channel * height * width) +
1993 // j * (height * width) + k * (width) + l +
1996 // nntrainer::Tensor output_2_2_2(batch, channel, 1, width);
1998 // const int height = 1;
1999 // GEN_TEST_INPUT(output_2_2_2, i * (channel * height * width) +
2000 // j * (height * width) + k * (width) + l +
2003 // nntrainer::Tensor output_3_2_2(batch, channel, height, 1);
2005 // const int width = 1;
2006 // GEN_TEST_INPUT(output_3_2_2, i * (channel * height * width) +
2007 // j * (height * width) + k * (width) + l +
2010 // nntrainer::Tensor result_0_2_2 = input.sum(0, output_0_2_2, 2, 2);
2011 // nntrainer::Tensor result_1_2_2 = input.sum(1, output_1_2_2, 2, 2);
2012 // nntrainer::Tensor result_2_2_2 = input.sum(2, output_2_2_2, 2, 2);
2013 // nntrainer::Tensor result_3_2_2 = input.sum(3, output_3_2_2, 2, 2);
2015 // EXPECT_EQ(ans_0_2_2, result_0_2_2);
2016 // EXPECT_EQ(ans_1_2_2, result_1_2_2);
2017 // EXPECT_EQ(ans_2_2_2, result_2_2_2);
2018 // EXPECT_EQ(ans_3_2_2, result_3_2_2);
2022 // TEST(nntrainer_Tensor, sum_04_p) {
2023 // int status = ML_ERROR_NONE;
2029 // nntrainer::Tensor input(batch, channel, height, width);
2030 // GEN_TEST_INPUT(input, i * (batch * height * channel) + j * (height * width) +
2031 // k * width + l + 1);
2033 // nntrainer::Tensor result = input.sum_by_batch();
2034 // if (result.getValue(0, 0, 0, 0) != 820 ||
2035 // result.getValue(1, 0, 0, 0) != 1300 ||
2036 // result.getValue(2, 0, 0, 0) != 1780)
2037 // status = ML_ERROR_RESULT_OUT_OF_RANGE;
2039 // EXPECT_EQ(status, ML_ERROR_NONE);
2042 // TEST(nntrainer_Tensor, multiple_sum_invalid_args_01_n) {
2043 // nntrainer::Tensor t = constant(1.0, 1, 1, 1, 1);
2044 // EXPECT_THROW(t.sum(std::vector<unsigned int>()), std::invalid_argument);
2047 // TEST(nntrainer_Tensor, multiple_sum_out_of_range_n) {
2048 // nntrainer::Tensor t = constant(1.0, 1, 1, 1, 1);
2049 // EXPECT_THROW(t.sum({7}), std::out_of_range);
2052 // TEST(nntrainer_Tensor, multiple_sum_p) {
2053 // nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7);
2054 // nntrainer::Tensor actual, expected;
2056 // actual = t.sum({0, 1});
2057 // expected = constant(2 * 3, 1, 1, 5, 7);
2058 // EXPECT_EQ(actual, expected);
2060 // actual = t.sum({1, 2, 3});
2061 // expected = constant(3 * 5 * 7, 2, 1, 1, 1);
2062 // EXPECT_EQ(actual, expected);
2064 // actual = t.sum({3, 1});
2065 // expected = constant(7 * 3, 2, 1, 5, 1);
2066 // EXPECT_EQ(actual, expected);
2068 // actual = t.sum({3, 1}, 0.5);
2069 // expected = constant(7 * 3 * 0.5, 2, 1, 5, 1);
2070 // EXPECT_EQ(actual, expected);
2073 // TEST(nntrainer_Tensor, average_p) {
2074 // nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7);
2076 // nntrainer::Tensor actual, expected;
2078 // actual = t.average();
2079 // expected = constant(1.0, 1, 1, 1, 1);
2080 // EXPECT_EQ(actual, expected);
2083 // t = t.apply([&](__fp16 in) { return idx++ % 2; });
2085 // actual = t.average();
2086 // expected = constant(0.5, 1, 1, 1, 1);
2087 // EXPECT_EQ(actual, expected);
2090 // TEST(nntrainer_Tensor, average_axis_p) {
2091 // nntrainer::Tensor t = constant(1.0, 2, 2, 2, 2);
2093 // std::function<__fp16(__fp16)> f = [&](__fp16 in) { return idx++ % 2; };
2096 // nntrainer::Tensor actual, expected;
2098 // actual = t.average(0);
2099 // expected = constant(0, 1, 2, 2, 2).apply(f);
2100 // EXPECT_EQ(actual, expected);
2102 // actual = t.average(1);
2103 // expected = constant(0, 2, 1, 2, 2).apply(f);
2104 // EXPECT_EQ(actual, expected);
2106 // actual = t.average(2);
2107 // expected = constant(0, 2, 2, 1, 2).apply(f);
2108 // EXPECT_EQ(actual, expected);
2110 // actual = t.average(3);
2111 // expected = constant(0.5, 2, 2, 2, 1);
2112 // EXPECT_EQ(actual, expected);
2115 // TEST(nntrainer_Tensor, average_axis_out_of_range_01_n) {
2116 // nntrainer::Tensor t = constant(1.0, 2, 2, 2, 2);
2117 // EXPECT_THROW(t.average(-1), std::out_of_range);
2120 // TEST(nntrainer_Tensor, average_axis_out_of_range_02_n) {
2121 // nntrainer::Tensor t = constant(1.0, 2, 2, 2, 2);
2122 // EXPECT_THROW(t.average(7), std::out_of_range);
2125 // TEST(nntrainer_Tensor, average_multiple_axes_p) {
2126 // nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7);
2127 // nntrainer::Tensor actual, expected;
2129 // actual = t.average({0, 1, 2});
2130 // expected = constant(1.0, 1, 1, 1, 7);
2131 // EXPECT_EQ(actual, expected);
2133 // actual = t.average({0, 1, 2, 3});
2134 // expected = constant(1.0, 1, 1, 1, 1);
2135 // EXPECT_EQ(actual, expected);
2137 // actual = t.average({3, 1});
2138 // expected = constant(1.0, 2, 1, 5, 1);
2139 // EXPECT_EQ(actual, expected);
2141 // actual = t.average({3, 1, 1, 1, 3});
2142 // expected = constant(1.0, 2, 1, 5, 1);
2143 // EXPECT_EQ(actual, expected);
2146 // TEST(nntrainer_Tensor, average_multiple_axes_01_n) {
2147 // nntrainer::Tensor t = constant(1.0, 2, 3, 5, 7);
2148 // EXPECT_THROW(t.average({5, 7}), std::out_of_range);
2151 // TEST(nntrainer_Tensor, dot_01_n) {
2152 // nntrainer::Tensor input(2, 3, 4, 5);
2153 // nntrainer::Tensor m(1, 3, 4, 5);
2154 // EXPECT_THROW(nntrainer::Tensor result = input.dot(m), std::runtime_error);
2157 // TEST(nntrainer_Tensor, dot_02_n) {
2158 // nntrainer::Tensor input(2, 3, 4, 5);
2159 // nntrainer::Tensor m(1, 3, 4, 5);
2160 // EXPECT_THROW(nntrainer::Tensor result = input.dot(m, true),
2161 // std::runtime_error);
2164 // TEST(nntrainer_Tensor, dot_02_p) {
2165 // nntrainer::Tensor input(2, 3, 4, 5);
2166 // nntrainer::Tensor m(1, 3, 4, 5);
2167 // EXPECT_NO_THROW(nntrainer::Tensor result = input.dot(m, false, true));
2170 // TEST(nntrainer_Tensor, dot_03_p) {
2171 // nntrainer::Tensor input(1, 3, 4, 5);
2172 // nntrainer::Tensor m(1, 3, 4, 5);
2173 // EXPECT_NO_THROW(nntrainer::Tensor result = input.dot(m, true));
2176 // TEST(nntrainer_Tensor, dot_04_n) {
2177 // nntrainer::Tensor input(2, 3, 4, 5);
2178 // nntrainer::Tensor m(1, 1, 4, 5);
2179 // EXPECT_THROW(nntrainer::Tensor result = input.dot(m), std::runtime_error);
2180 // EXPECT_NO_THROW(nntrainer::Tensor result = input.dot(m, false, true));
2183 // TEST(nntrainer_Tensor, dot_05_p) {
2184 // int status = ML_ERROR_NONE;
2189 // __fp16 ans[2][3][4][24] = {0};
2191 // nntrainer::Tensor input(batch, channel, height, width);
2192 // GEN_TEST_INPUT(input, i * (channel * width * height) + j * (height * width) +
2193 // k * (width) + l + 1);
2194 // nntrainer::Tensor weight(batch, channel, height, width);
2195 // GEN_TEST_INPUT(weight, i * (channel * width * height) + j * (height * width) +
2196 // k * (width) + l + 1);
2197 // weight.reshape({1, 1, 24, 5});
2199 // nntrainer::Tensor result = input.dot(weight, false, true);
2201 // for (int b = 0; b < batch; b++) {
2202 // for (int c = 0; c < channel; c++) {
2203 // for (int h = 0; h < height; h++) {
2204 // for (int k = 0; k < batch * channel * height; k++) {
2205 // ans[b][c][h][k] = 0;
2206 // for (int w = 0; w < width; w++) {
2207 // __fp16 val1 = input.getValue(b, c, h, w);
2208 // __fp16 val2 = weight.getValue(0, 0, k, w);
2209 // ans[b][c][h][k] += val1 * val2;
2216 // for (unsigned int i = 0; i < result.batch(); ++i) {
2217 // for (unsigned int c = 0; c < result.channel(); ++c) {
2218 // for (unsigned int j = 0; j < result.height(); ++j) {
2219 // for (unsigned int k = 0; k < result.width(); ++k) {
2220 // __fp16 val1 = ans[i][c][j][k];
2221 // __fp16 val2 = result.getValue(i, c, j, k);
2222 // if (val1 != val2) {
2223 // status = ML_ERROR_RESULT_OUT_OF_RANGE;
2224 // goto end_dot_01_p;
2231 // EXPECT_EQ(status, ML_ERROR_NONE);
2234 // TEST(nntrainer_Tensor, dot_06_p) {
2235 // int status = ML_ERROR_NONE;
2240 // __fp16 ans[3][1][1][3] = {
2241 // {{{30, 36, 42}}}, {{{66, 81, 96}}}, {{{102, 126, 150}}}};
2243 // nntrainer::Tensor input(batch, channel, height, width);
2244 // GEN_TEST_INPUT(input, i * (channel * width * height) + j * (height * width) +
2245 // k * (width) + l + 1);
2247 // nntrainer::Tensor result = input.dot(input);
2249 // for (unsigned int i = 0; i < result.batch(); ++i) {
2250 // for (unsigned int j = 0; j < result.height(); ++j) {
2251 // for (unsigned int k = 0; k < result.width(); ++k) {
2252 // if (ans[i][0][j][k] != result.getValue(i, 0, j, k)) {
2253 // status = ML_ERROR_RESULT_OUT_OF_RANGE;
2254 // goto end_dot_01_p;
2260 // EXPECT_EQ(status, ML_ERROR_NONE);
2263 // TEST(nntrainer_Tensor, dot_transpose_p) {
2265 // __fp16 a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
2266 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 4), a_data);
2267 // __fp16 b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
2268 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 4, 3), b_data);
2269 // __fp16 answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
2270 // 92, 113, 134, 155, 128, 158, 188, 218};
2271 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 4), answer_data);
2272 // nntrainer::Tensor ret = a.dot(b, true, true);
2273 // EXPECT_EQ(ret, answer);
2276 // __fp16 a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
2277 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 4), a_data);
2278 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2279 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 4), b_data);
2280 // __fp16 answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
2281 // 92, 113, 134, 155, 128, 158, 188, 218};
2282 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 4), answer_data);
2283 // nntrainer::Tensor ret = a.dot(b, true, false);
2284 // EXPECT_EQ(ret, answer);
2287 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2288 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 4, 3), a_data);
2289 // __fp16 b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
2290 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 4, 3), b_data);
2291 // __fp16 answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
2292 // 92, 113, 134, 155, 128, 158, 188, 218};
2293 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 4), answer_data);
2294 // nntrainer::Tensor ret = a.dot(b, false, true);
2295 // EXPECT_EQ(ret, answer);
2298 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2299 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 4, 3), a_data);
2300 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2301 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 4), b_data);
2302 // __fp16 answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92,
2303 // 92, 113, 134, 155, 128, 158, 188, 218};
2304 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 4), answer_data);
2305 // nntrainer::Tensor ret = a.dot(b, false, false);
2306 // EXPECT_EQ(ret, answer);
2309 // __fp16 a_data[] = {0, 3, 1, 4, 2, 5};
2310 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 2), a_data);
2311 // __fp16 b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
2312 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 4, 3), b_data);
2313 // __fp16 answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
2314 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 4), answer_data);
2315 // nntrainer::Tensor ret = a.dot(b, true, true);
2316 // EXPECT_EQ(ret, answer);
2319 // __fp16 a_data[] = {0, 3, 1, 4, 2, 5};
2320 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 2), a_data);
2321 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2322 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 4), b_data);
2323 // __fp16 answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
2324 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 4), answer_data);
2325 // nntrainer::Tensor ret = a.dot(b, true, false);
2326 // EXPECT_EQ(ret, answer);
2329 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5};
2330 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 2, 3), a_data);
2331 // __fp16 b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
2332 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 4, 3), b_data);
2333 // __fp16 answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
2334 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 4), answer_data);
2335 // nntrainer::Tensor ret = a.dot(b, false, true);
2336 // EXPECT_EQ(ret, answer);
2339 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5};
2340 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 2, 3), a_data);
2341 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2342 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 4), b_data);
2343 // __fp16 answer_data[] = {20, 23, 26, 29, 56, 68, 80, 92};
2344 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 4), answer_data);
2345 // nntrainer::Tensor ret = a.dot(b, false, false);
2346 // EXPECT_EQ(ret, answer);
2349 // __fp16 a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
2350 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 4), a_data);
2351 // __fp16 b_data[] = {0, 2, 4, 1, 3, 5};
2352 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 2, 3), b_data);
2353 // __fp16 answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
2354 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 2), answer_data);
2355 // nntrainer::Tensor ret = a.dot(b, true, true);
2356 // EXPECT_EQ(ret, answer);
2359 // __fp16 a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
2360 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 4), a_data);
2361 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5};
2362 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 2), b_data);
2363 // __fp16 answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
2364 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 2), answer_data);
2365 // nntrainer::Tensor ret = a.dot(b, true, false);
2366 // EXPECT_EQ(ret, answer);
2369 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2370 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 4, 3), a_data);
2371 // __fp16 b_data[] = {0, 2, 4, 1, 3, 5};
2372 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 2, 3), b_data);
2373 // __fp16 answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
2374 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 2), answer_data);
2375 // nntrainer::Tensor ret = a.dot(b, false, true);
2376 // EXPECT_EQ(ret, answer);
2379 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2380 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 4, 3), a_data);
2381 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5};
2382 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 2), b_data);
2383 // __fp16 answer_data[] = {10, 13, 28, 40, 46, 67, 64, 94};
2384 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 2), answer_data);
2385 // nntrainer::Tensor ret = a.dot(b, false, false);
2386 // EXPECT_EQ(ret, answer);
2389 // __fp16 a_data[] = {0, 3, 1, 4, 2, 5};
2390 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 2), a_data);
2391 // __fp16 b_data[] = {0, 2, 4, 1, 3, 5};
2392 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 2, 3), b_data);
2393 // __fp16 answer_data[] = {10, 13, 28, 40};
2394 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 2), answer_data);
2395 // nntrainer::Tensor ret = a.dot(b, true, true);
2396 // EXPECT_EQ(ret, answer);
2399 // __fp16 a_data[] = {0, 3, 1, 4, 2, 5};
2400 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 2), a_data);
2401 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5};
2402 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 2), b_data);
2403 // __fp16 answer_data[] = {10, 13, 28, 40};
2404 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 2), answer_data);
2405 // nntrainer::Tensor ret = a.dot(b, true, false);
2406 // EXPECT_EQ(ret, answer);
2409 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5};
2410 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 2, 3), a_data);
2411 // __fp16 b_data[] = {0, 2, 4, 1, 3, 5};
2412 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 2, 3), b_data);
2413 // __fp16 answer_data[] = {10, 13, 28, 40};
2414 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 2), answer_data);
2415 // nntrainer::Tensor ret = a.dot(b, false, true);
2416 // EXPECT_EQ(ret, answer);
2419 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5};
2420 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 2, 3), a_data);
2421 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5};
2422 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 2), b_data);
2423 // __fp16 answer_data[] = {10, 13, 28, 40};
2424 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 2), answer_data);
2425 // nntrainer::Tensor ret = a.dot(b, false, false);
2426 // EXPECT_EQ(ret, answer);
2430 // TEST(nntrainer_Tensor, dot_shortcuts_p) {
2432 // __fp16 a_data[] = {0, 1, 2};
2433 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 1, 3), a_data);
2434 // __fp16 b_data[] = {0, 1, 2};
2435 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1), b_data);
2436 // __fp16 answer_data[] = {5};
2437 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1), answer_data);
2438 // nntrainer::Tensor ret = a.dot(b, false, false);
2439 // EXPECT_EQ(ret, answer);
2442 // __fp16 a_data[] = {0, 1, 2};
2443 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1), a_data);
2444 // __fp16 b_data[] = {0, 1, 2};
2445 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1), b_data);
2446 // __fp16 answer_data[] = {5};
2447 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1), answer_data);
2448 // nntrainer::Tensor ret = a.dot(b, true, false);
2449 // EXPECT_EQ(ret, answer);
2452 // __fp16 a_data[] = {0, 1, 2};
2453 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 1, 3), a_data);
2454 // __fp16 b_data[] = {0, 1, 2};
2455 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 1, 3), b_data);
2456 // __fp16 answer_data[] = {5};
2457 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1), answer_data);
2458 // nntrainer::Tensor ret = a.dot(b, false, true);
2459 // EXPECT_EQ(ret, answer);
2462 // __fp16 a_data[] = {0, 1, 2};
2463 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1), a_data);
2464 // __fp16 b_data[] = {0, 1, 2};
2465 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 1, 3), b_data);
2466 // __fp16 answer_data[] = {5};
2467 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 1), answer_data);
2468 // nntrainer::Tensor ret = a.dot(b, true, true);
2469 // EXPECT_EQ(ret, answer);
2472 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5};
2473 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 2, 3), a_data);
2474 // __fp16 b_data[] = {0, 1, 2};
2475 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1), b_data);
2476 // __fp16 answer_data[] = {5, 14};
2477 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1), answer_data);
2478 // nntrainer::Tensor ret = a.dot(b, false, false);
2479 // EXPECT_EQ(ret, answer);
2482 // __fp16 a_data[] = {0, 3, 1, 4, 2, 5};
2483 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 2), a_data);
2484 // __fp16 b_data[] = {0, 1, 2};
2485 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1), b_data);
2486 // __fp16 answer_data[] = {5, 14};
2487 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1), answer_data);
2488 // nntrainer::Tensor ret = a.dot(b, true, false);
2489 // EXPECT_EQ(ret, answer);
2492 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5};
2493 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 2, 3), a_data);
2494 // __fp16 b_data[] = {0, 1, 2};
2495 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 1, 3), b_data);
2496 // __fp16 answer_data[] = {5, 14};
2497 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1), answer_data);
2498 // nntrainer::Tensor ret = a.dot(b, false, true);
2499 // EXPECT_EQ(ret, answer);
2502 // __fp16 a_data[] = {0, 3, 1, 4, 2, 5};
2503 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 2), a_data);
2504 // __fp16 b_data[] = {0, 1, 2};
2505 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 1, 3), b_data);
2506 // __fp16 answer_data[] = {5, 14};
2507 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 2, 1), answer_data);
2508 // nntrainer::Tensor ret = a.dot(b, true, true);
2509 // EXPECT_EQ(ret, answer);
2512 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2513 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 4, 3), a_data);
2514 // __fp16 b_data[] = {0, 1, 2};
2515 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1), b_data);
2516 // __fp16 answer_data[] = {5, 14, 23, 32};
2517 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1), answer_data);
2518 // nntrainer::Tensor ret = a.dot(b, false, false);
2519 // EXPECT_EQ(ret, answer);
2522 // __fp16 a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
2523 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 4), a_data);
2524 // __fp16 b_data[] = {0, 1, 2};
2525 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 1), b_data);
2526 // __fp16 answer_data[] = {5, 14, 23, 32};
2527 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1), answer_data);
2528 // nntrainer::Tensor ret = a.dot(b, true, false);
2529 // EXPECT_EQ(ret, answer);
2532 // __fp16 a_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2533 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 4, 3), a_data);
2534 // __fp16 b_data[] = {0, 1, 2};
2535 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 1, 3), b_data);
2536 // __fp16 answer_data[] = {5, 14, 23, 32};
2537 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1), answer_data);
2538 // nntrainer::Tensor ret = a.dot(b, false, true);
2539 // EXPECT_EQ(ret, answer);
2542 // __fp16 a_data[] = {0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11};
2543 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 4), a_data);
2544 // __fp16 b_data[] = {0, 1, 2};
2545 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 1, 3), b_data);
2546 // __fp16 answer_data[] = {5, 14, 23, 32};
2547 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 4, 1), answer_data);
2548 // nntrainer::Tensor ret = a.dot(b, true, true);
2549 // EXPECT_EQ(ret, answer);
2552 // __fp16 a_data[] = {0, 1, 2};
2553 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 1, 3), a_data);
2554 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2555 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 4), b_data);
2556 // __fp16 answer_data[] = {20, 23, 26, 29};
2557 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 4), answer_data);
2558 // nntrainer::Tensor ret = a.dot(b, false, false);
2559 // EXPECT_EQ(ret, answer);
2562 // __fp16 a_data[] = {0, 1, 2};
2563 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1), a_data);
2564 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2565 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 4), b_data);
2566 // __fp16 answer_data[] = {20, 23, 26, 29};
2567 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 4), answer_data);
2568 // nntrainer::Tensor ret = a.dot(b, true, false);
2569 // EXPECT_EQ(ret, answer);
2572 // __fp16 a_data[] = {0, 1, 2};
2573 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 1, 3), a_data);
2574 // __fp16 b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
2575 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 4, 3), b_data);
2576 // __fp16 answer_data[] = {20, 23, 26, 29};
2577 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 4), answer_data);
2578 // nntrainer::Tensor ret = a.dot(b, false, true);
2579 // EXPECT_EQ(ret, answer);
2582 // __fp16 a_data[] = {0, 1, 2};
2583 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1), a_data);
2584 // __fp16 b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
2585 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 4, 3), b_data);
2586 // __fp16 answer_data[] = {20, 23, 26, 29};
2587 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 4), answer_data);
2588 // nntrainer::Tensor ret = a.dot(b, true, true);
2589 // EXPECT_EQ(ret, answer);
2592 // __fp16 a_data[] = {0, 1, 2};
2593 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 1, 3), a_data);
2594 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2595 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 4), b_data);
2596 // __fp16 answer_data[] = {20, 23, 26, 29};
2597 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 4), answer_data);
2598 // nntrainer::Tensor ret = a.dot(b, false, false);
2599 // EXPECT_EQ(ret, answer);
2602 // __fp16 a_data[] = {0, 1, 2};
2603 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1), a_data);
2604 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
2605 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 4), b_data);
2606 // __fp16 answer_data[] = {20, 23, 26, 29};
2607 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 4), answer_data);
2608 // nntrainer::Tensor ret = a.dot(b, true, false);
2609 // EXPECT_EQ(ret, answer);
2612 // __fp16 a_data[] = {0, 1, 2};
2613 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 1, 3), a_data);
2614 // __fp16 b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
2615 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 4, 3), b_data);
2616 // __fp16 answer_data[] = {20, 23, 26, 29};
2617 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 4), answer_data);
2618 // nntrainer::Tensor ret = a.dot(b, false, true);
2619 // EXPECT_EQ(ret, answer);
2622 // __fp16 a_data[] = {0, 1, 2};
2623 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1), a_data);
2624 // __fp16 b_data[] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
2625 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 4, 3), b_data);
2626 // __fp16 answer_data[] = {20, 23, 26, 29};
2627 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 4), answer_data);
2628 // nntrainer::Tensor ret = a.dot(b, true, true);
2629 // EXPECT_EQ(ret, answer);
2632 // __fp16 a_data[] = {0, 1, 2};
2633 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 1, 3), a_data);
2634 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5};
2635 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 2), b_data);
2636 // __fp16 answer_data[] = {10, 13};
2637 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 2), answer_data);
2638 // nntrainer::Tensor ret = a.dot(b, false, false);
2639 // EXPECT_EQ(ret, answer);
2642 // __fp16 a_data[] = {0, 1, 2};
2643 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1), a_data);
2644 // __fp16 b_data[] = {0, 1, 2, 3, 4, 5};
2645 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 3, 2), b_data);
2646 // __fp16 answer_data[] = {10, 13};
2647 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 2), answer_data);
2648 // nntrainer::Tensor ret = a.dot(b, true, false);
2649 // EXPECT_EQ(ret, answer);
2652 // __fp16 a_data[] = {0, 1, 2};
2653 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 1, 3), a_data);
2654 // __fp16 b_data[] = {0, 2, 4, 1, 3, 5};
2655 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 2, 3), b_data);
2656 // __fp16 answer_data[] = {10, 13};
2657 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 2), answer_data);
2658 // nntrainer::Tensor ret = a.dot(b, false, true);
2659 // EXPECT_EQ(ret, answer);
2662 // __fp16 a_data[] = {0, 1, 2};
2663 // nntrainer::Tensor a(nntrainer::TensorDim(1, 1, 3, 1), a_data);
2664 // __fp16 b_data[] = {0, 2, 4, 1, 3, 5};
2665 // nntrainer::Tensor b(nntrainer::TensorDim(1, 1, 2, 3), b_data);
2666 // __fp16 answer_data[] = {10, 13};
2667 // nntrainer::Tensor answer(nntrainer::TensorDim(1, 1, 1, 2), answer_data);
2668 // nntrainer::Tensor ret = a.dot(b, true, true);
2669 // EXPECT_EQ(ret, answer);
2673 // TEST(nntrainer_Tensor, transpose_p) {
2674 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2676 // /// plain transpose
2678 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2679 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2680 // __fp16 answer_data[] = {
2681 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
2682 // 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
2683 // 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
2684 // 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
2685 // 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
2686 // 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
2687 // 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
2688 // 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
2689 // 112, 113, 114, 115, 116, 117, 118, 119};
2690 // nntrainer::Tensor answer({3, 2, 4, 5}, answer_data);
2691 // nntrainer::Tensor m = t.transpose("0:1:2");
2692 // EXPECT_EQ(answer, m);
2695 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2696 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2697 // __fp16 answer_data[] = {
2698 // 0, 5, 10, 15, 1, 6, 11, 16, 2, 7, 12, 17, 3, 8,
2699 // 13, 18, 4, 9, 14, 19, 20, 25, 30, 35, 21, 26, 31, 36,
2700 // 22, 27, 32, 37, 23, 28, 33, 38, 24, 29, 34, 39, 40, 45,
2701 // 50, 55, 41, 46, 51, 56, 42, 47, 52, 57, 43, 48, 53, 58,
2702 // 44, 49, 54, 59, 60, 65, 70, 75, 61, 66, 71, 76, 62, 67,
2703 // 72, 77, 63, 68, 73, 78, 64, 69, 74, 79, 80, 85, 90, 95,
2704 // 81, 86, 91, 96, 82, 87, 92, 97, 83, 88, 93, 98, 84, 89,
2705 // 94, 99, 100, 105, 110, 115, 101, 106, 111, 116, 102, 107, 112, 117,
2706 // 103, 108, 113, 118, 104, 109, 114, 119};
2707 // nntrainer::Tensor answer({3, 2, 5, 4}, answer_data);
2708 // nntrainer::Tensor m = t.transpose("0:2:1");
2709 // EXPECT_EQ(answer, m);
2712 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2713 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2714 // __fp16 answer_data[] = {
2715 // 0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 5, 6, 7, 8,
2716 // 9, 25, 26, 27, 28, 29, 10, 11, 12, 13, 14, 30, 31, 32,
2717 // 33, 34, 15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 40, 41,
2718 // 42, 43, 44, 60, 61, 62, 63, 64, 45, 46, 47, 48, 49, 65,
2719 // 66, 67, 68, 69, 50, 51, 52, 53, 54, 70, 71, 72, 73, 74,
2720 // 55, 56, 57, 58, 59, 75, 76, 77, 78, 79, 80, 81, 82, 83,
2721 // 84, 100, 101, 102, 103, 104, 85, 86, 87, 88, 89, 105, 106, 107,
2722 // 108, 109, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114, 95, 96,
2723 // 97, 98, 99, 115, 116, 117, 118, 119};
2724 // nntrainer::Tensor answer({3, 4, 2, 5}, answer_data);
2725 // nntrainer::Tensor m = t.transpose("1:0:2");
2726 // EXPECT_EQ(answer, m);
2729 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2730 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2731 // __fp16 answer_data[] = {
2732 // 0, 20, 1, 21, 2, 22, 3, 23, 4, 24, 5, 25, 6, 26, 7, 27,
2733 // 8, 28, 9, 29, 10, 30, 11, 31, 12, 32, 13, 33, 14, 34, 15, 35,
2734 // 16, 36, 17, 37, 18, 38, 19, 39, 40, 60, 41, 61, 42, 62, 43, 63,
2735 // 44, 64, 45, 65, 46, 66, 47, 67, 48, 68, 49, 69, 50, 70, 51, 71,
2736 // 52, 72, 53, 73, 54, 74, 55, 75, 56, 76, 57, 77, 58, 78, 59, 79,
2737 // 80, 100, 81, 101, 82, 102, 83, 103, 84, 104, 85, 105, 86, 106, 87, 107,
2738 // 88, 108, 89, 109, 90, 110, 91, 111, 92, 112, 93, 113, 94, 114, 95, 115,
2739 // 96, 116, 97, 117, 98, 118, 99, 119};
2740 // nntrainer::Tensor answer({3, 4, 5, 2}, answer_data);
2741 // nntrainer::Tensor m = t.transpose("1:2:0");
2742 // EXPECT_EQ(answer, m);
2745 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2746 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2747 // __fp16 answer_data[] = {
2748 // 0, 5, 10, 15, 20, 25, 30, 35, 1, 6, 11, 16, 21, 26, 31,
2749 // 36, 2, 7, 12, 17, 22, 27, 32, 37, 3, 8, 13, 18, 23, 28,
2750 // 33, 38, 4, 9, 14, 19, 24, 29, 34, 39, 40, 45, 50, 55, 60,
2751 // 65, 70, 75, 41, 46, 51, 56, 61, 66, 71, 76, 42, 47, 52, 57,
2752 // 62, 67, 72, 77, 43, 48, 53, 58, 63, 68, 73, 78, 44, 49, 54,
2753 // 59, 64, 69, 74, 79, 80, 85, 90, 95, 100, 105, 110, 115, 81, 86,
2754 // 91, 96, 101, 106, 111, 116, 82, 87, 92, 97, 102, 107, 112, 117, 83,
2755 // 88, 93, 98, 103, 108, 113, 118, 84, 89, 94, 99, 104, 109, 114, 119};
2756 // nntrainer::Tensor answer({3, 5, 2, 4}, answer_data);
2757 // nntrainer::Tensor m = t.transpose("2:0:1");
2758 // EXPECT_EQ(answer, m);
2761 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2762 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2763 // __fp16 answer_data[] = {
2764 // 0, 20, 5, 25, 10, 30, 15, 35, 1, 21, 6, 26, 11, 31, 16, 36,
2765 // 2, 22, 7, 27, 12, 32, 17, 37, 3, 23, 8, 28, 13, 33, 18, 38,
2766 // 4, 24, 9, 29, 14, 34, 19, 39, 40, 60, 45, 65, 50, 70, 55, 75,
2767 // 41, 61, 46, 66, 51, 71, 56, 76, 42, 62, 47, 67, 52, 72, 57, 77,
2768 // 43, 63, 48, 68, 53, 73, 58, 78, 44, 64, 49, 69, 54, 74, 59, 79,
2769 // 80, 100, 85, 105, 90, 110, 95, 115, 81, 101, 86, 106, 91, 111, 96, 116,
2770 // 82, 102, 87, 107, 92, 112, 97, 117, 83, 103, 88, 108, 93, 113, 98, 118,
2771 // 84, 104, 89, 109, 94, 114, 99, 119};
2772 // nntrainer::Tensor answer({3, 5, 4, 2}, answer_data);
2773 // nntrainer::Tensor m = t.transpose("2:1:0");
2774 // EXPECT_EQ(answer, m);
2777 // /// outplace transpose
2779 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2780 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2781 // nntrainer::Tensor m = ranged(3, 2, 4, 5);
2782 // __fp16 answer_data[] = {
2783 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
2784 // 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
2785 // 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
2786 // 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
2787 // 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
2788 // 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
2789 // 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
2790 // 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
2791 // 112, 113, 114, 115, 116, 117, 118, 119};
2792 // nntrainer::Tensor answer({3, 2, 4, 5}, answer_data);
2793 // t.transpose("0:1:2", m);
2794 // EXPECT_EQ(answer, m);
2797 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2798 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2799 // nntrainer::Tensor m = ranged(3, 2, 5, 4);
2800 // __fp16 answer_data[] = {
2801 // 0, 5, 10, 15, 1, 6, 11, 16, 2, 7, 12, 17, 3, 8,
2802 // 13, 18, 4, 9, 14, 19, 20, 25, 30, 35, 21, 26, 31, 36,
2803 // 22, 27, 32, 37, 23, 28, 33, 38, 24, 29, 34, 39, 40, 45,
2804 // 50, 55, 41, 46, 51, 56, 42, 47, 52, 57, 43, 48, 53, 58,
2805 // 44, 49, 54, 59, 60, 65, 70, 75, 61, 66, 71, 76, 62, 67,
2806 // 72, 77, 63, 68, 73, 78, 64, 69, 74, 79, 80, 85, 90, 95,
2807 // 81, 86, 91, 96, 82, 87, 92, 97, 83, 88, 93, 98, 84, 89,
2808 // 94, 99, 100, 105, 110, 115, 101, 106, 111, 116, 102, 107, 112, 117,
2809 // 103, 108, 113, 118, 104, 109, 114, 119};
2810 // nntrainer::Tensor answer({3, 2, 5, 4}, answer_data);
2811 // t.transpose("0:2:1", m);
2812 // EXPECT_EQ(answer, m);
2815 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2816 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2817 // nntrainer::Tensor m = ranged(3, 4, 2, 5);
2818 // __fp16 answer_data[] = {
2819 // 0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 5, 6, 7, 8,
2820 // 9, 25, 26, 27, 28, 29, 10, 11, 12, 13, 14, 30, 31, 32,
2821 // 33, 34, 15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 40, 41,
2822 // 42, 43, 44, 60, 61, 62, 63, 64, 45, 46, 47, 48, 49, 65,
2823 // 66, 67, 68, 69, 50, 51, 52, 53, 54, 70, 71, 72, 73, 74,
2824 // 55, 56, 57, 58, 59, 75, 76, 77, 78, 79, 80, 81, 82, 83,
2825 // 84, 100, 101, 102, 103, 104, 85, 86, 87, 88, 89, 105, 106, 107,
2826 // 108, 109, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114, 95, 96,
2827 // 97, 98, 99, 115, 116, 117, 118, 119};
2828 // nntrainer::Tensor answer({3, 4, 2, 5}, answer_data);
2829 // t.transpose("1:0:2", m);
2830 // EXPECT_EQ(answer, m);
2833 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2834 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2835 // nntrainer::Tensor m = ranged(3, 4, 5, 2);
2836 // __fp16 answer_data[] = {
2837 // 0, 20, 1, 21, 2, 22, 3, 23, 4, 24, 5, 25, 6, 26, 7, 27,
2838 // 8, 28, 9, 29, 10, 30, 11, 31, 12, 32, 13, 33, 14, 34, 15, 35,
2839 // 16, 36, 17, 37, 18, 38, 19, 39, 40, 60, 41, 61, 42, 62, 43, 63,
2840 // 44, 64, 45, 65, 46, 66, 47, 67, 48, 68, 49, 69, 50, 70, 51, 71,
2841 // 52, 72, 53, 73, 54, 74, 55, 75, 56, 76, 57, 77, 58, 78, 59, 79,
2842 // 80, 100, 81, 101, 82, 102, 83, 103, 84, 104, 85, 105, 86, 106, 87, 107,
2843 // 88, 108, 89, 109, 90, 110, 91, 111, 92, 112, 93, 113, 94, 114, 95, 115,
2844 // 96, 116, 97, 117, 98, 118, 99, 119};
2845 // nntrainer::Tensor answer({3, 4, 5, 2}, answer_data);
2846 // t.transpose("1:2:0", m);
2847 // EXPECT_EQ(answer, m);
2850 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2851 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2852 // nntrainer::Tensor m = ranged(3, 5, 2, 4);
2853 // __fp16 answer_data[] = {
2854 // 0, 5, 10, 15, 20, 25, 30, 35, 1, 6, 11, 16, 21, 26, 31,
2855 // 36, 2, 7, 12, 17, 22, 27, 32, 37, 3, 8, 13, 18, 23, 28,
2856 // 33, 38, 4, 9, 14, 19, 24, 29, 34, 39, 40, 45, 50, 55, 60,
2857 // 65, 70, 75, 41, 46, 51, 56, 61, 66, 71, 76, 42, 47, 52, 57,
2858 // 62, 67, 72, 77, 43, 48, 53, 58, 63, 68, 73, 78, 44, 49, 54,
2859 // 59, 64, 69, 74, 79, 80, 85, 90, 95, 100, 105, 110, 115, 81, 86,
2860 // 91, 96, 101, 106, 111, 116, 82, 87, 92, 97, 102, 107, 112, 117, 83,
2861 // 88, 93, 98, 103, 108, 113, 118, 84, 89, 94, 99, 104, 109, 114, 119};
2862 // nntrainer::Tensor answer({3, 5, 2, 4}, answer_data);
2863 // t.transpose("2:0:1", m);
2864 // EXPECT_EQ(answer, m);
2867 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
2868 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
2869 // nntrainer::Tensor m = ranged(3, 5, 4, 2);
2870 // __fp16 answer_data[] = {
2871 // 0, 20, 5, 25, 10, 30, 15, 35, 1, 21, 6, 26, 11, 31, 16, 36,
2872 // 2, 22, 7, 27, 12, 32, 17, 37, 3, 23, 8, 28, 13, 33, 18, 38,
2873 // 4, 24, 9, 29, 14, 34, 19, 39, 40, 60, 45, 65, 50, 70, 55, 75,
2874 // 41, 61, 46, 66, 51, 71, 56, 76, 42, 62, 47, 67, 52, 72, 57, 77,
2875 // 43, 63, 48, 68, 53, 73, 58, 78, 44, 64, 49, 69, 54, 74, 59, 79,
2876 // 80, 100, 85, 105, 90, 110, 95, 115, 81, 101, 86, 106, 91, 111, 96, 116,
2877 // 82, 102, 87, 107, 92, 112, 97, 117, 83, 103, 88, 108, 93, 113, 98, 118,
2878 // 84, 104, 89, 109, 94, 114, 99, 119};
2879 // nntrainer::Tensor answer({3, 5, 4, 2}, answer_data);
2880 // t.transpose("2:1:0", m);
2881 // EXPECT_EQ(answer, m);
2885 // TEST(nntrainer_Tensor, tranpose_dimension_not_match_n) {
2886 // nntrainer::Tensor a(3, 2, 4, 5);
2887 // nntrainer::Tensor b(3, 1, 2, 3);
2889 // EXPECT_THROW(a.transpose("0:1:2", b), std::invalid_argument);
2892 // TEST(nntrainer_Tensor, set_01_p) {
2893 // nntrainer::Tensor tensor = nntrainer::Tensor(1, 1, 1, 1);
2895 // tensor.setZero();
2896 // EXPECT_EQ(tensor.getValue(0, 0, 0, 0), 0.0);
2898 // tensor.setRandUniform(-0.5, 0);
2899 // __fp16 val = tensor.getValue(0, 0, 0, 0);
2900 // EXPECT_TRUE(val >= -0.5 && val < 0);
2903 // TEST(nntrainer_Tensor, save_read_01_p) {
2908 // nntrainer::Tensor target(3, 4, 5, 6);
2909 // nntrainer::Tensor readed(3, 4, 5, 6);
2911 // GEN_TEST_INPUT(target, i * (channel * width * height) + j * (height * width) +
2912 // k * (width) + l + 1);
2914 // std::ofstream save_file("save.bin", std::ios::out | std::ios::binary);
2915 // target.save(save_file);
2916 // save_file.close();
2918 // std::ifstream read_file("save.bin");
2919 // readed.read(read_file);
2920 // read_file.close();
2922 // EXPECT_EQ(target, readed);
2924 // int status = std::remove("save.bin");
2926 // ASSERT_EQ(status, 0);
2929 // TEST(nntrainer_Tensor, save_read_01_n) {
2934 // nntrainer::Tensor target(3, 4, 5, 6);
2935 // nntrainer::Tensor readed(3, 4, 1, 1);
2937 // GEN_TEST_INPUT(target, i * (channel * width * height) + j * (height * width) +
2938 // k * (width) + l + 1);
2940 // std::ofstream save_file("save.bin", std::ios::out | std::ios::binary);
2941 // target.save(save_file);
2942 // save_file.close();
2944 // std::ifstream read_file("save.bin");
2945 // readed.read(read_file);
2946 // read_file.close();
2948 // EXPECT_NE(target, readed);
2950 // int status = std::remove("save.bin");
2952 // ASSERT_EQ(status, 0);
2955 // TEST(nntrainer_Tensor, copy_and_shares_variable_p) {
2956 // nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6);
2957 // nntrainer::Tensor B = A.clone();
2958 // nntrainer::Tensor C = A;
2960 // C.setValue(1, 1, 1, 1, 2.0f);
2965 // C.reshape(nntrainer::TensorDim(3, 4, 6, 5));
2966 // EXPECT_EQ(A.getDim(), B.getDim());
2967 // EXPECT_NE(A.getDim(), C.getDim());
2970 // TEST(nntrainer_Tensor, reshape_n_01) {
2971 // nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6);
2973 // EXPECT_THROW(A.reshape(nntrainer::TensorDim(9, 9, 9, 9)),
2974 // std::invalid_argument);
2977 // TEST(nntrainer_Tensor, reshape_n_02) {
2978 // nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6);
2979 // nntrainer::TensorDim A_dim = A.getDim();
2981 // /** Changing the dim of a tensor only affects local copy of the dim */
2982 // A_dim.setTensorDim(1, 100);
2983 // EXPECT_EQ(A_dim.getTensorDim(1), 100u);
2985 // nntrainer::TensorDim A_dim_2 = A.getDim();
2986 // EXPECT_EQ(A_dim_2.getTensorDim(1), 4u);
2989 // TEST(nntrainer_Tensor, copy_and_reshape_n) {
2990 // nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6);
2991 // nntrainer::Tensor B = A;
2992 // nntrainer::Tensor C = A.clone();
2994 // EXPECT_THROW(B.reshape(nntrainer::TensorDim(9, 9, 9, 9)),
2995 // std::invalid_argument);
2998 // /// @note this test case demonstrates it is dangerous to use sharedConstTensor
2999 // /// to const correct the inner data.
3000 // TEST(nntrainer_Tensor, constructor_from_shared_const_ptr_shares_variable_n) {
3001 // nntrainer::sharedConstTensor A =
3002 // MAKE_SHARED_TENSOR(constant(1.0f, 3, 4, 5, 6));
3004 // nntrainer::Tensor B = *A;
3005 // nntrainer::Tensor C = A->clone();
3007 // B.setValue(2, 3, 4, 5, 2.0f);
3008 // EXPECT_EQ(*A, B);
3009 // EXPECT_NE(*A, C);
3011 // C.reshape(nntrainer::TensorDim(3, 4, 6, 5));
3012 // EXPECT_EQ(A->getDim(), B.getDim());
3013 // EXPECT_NE(A->getDim(), C.getDim());
3016 // TEST(nntrainer_Tensor, print_small_size) {
3017 // nntrainer::Tensor target = constant(1.0, 3, 1, 2, 3);
3019 // std::stringstream ss, expected;
3022 // expected << '<' << typeid(target).name() << " at " << &target << ">\n"
3023 // << "data addr: " << target.getData() << '\n'
3024 // << "Shape: 3:1:2:3\n"
3038 // EXPECT_EQ(ss.str(), expected.str());
3041 // // TEST(nntrainer_Tensor, print_large_size) {
3042 // // nntrainer::Tensor target = constant(1.2, 3, 10, 10, 10);
3044 // // std::stringstream ss, expected;
3046 // // expected << '<' << typeid(target).name() << " at " << &target << ">\n"
3047 // // << "data addr: " << target.getData() << '\n'
3048 // // << "Shape: 3:10:10:10\n"
3049 // // << "[1.2 1.2 1.2 ... 1.2 1.2 1.2]\n";
3052 // // EXPECT_EQ(ss.str(), expected.str());
3055 // TEST(nntrainer_Tensor, DISABLED_equation_test_01_p) {
3056 // nntrainer::Tensor a, b, c;
3057 // nntrainer::Tensor ret1, ret2;
3059 // a = randUniform(4, 6, 7, 3, -100, 100);
3060 // b = randUniform(4, 6, 7, 3, -100, 100);
3061 // c = randUniform(4, 6, 7, 3, -100, 100);
3063 // ret1 = a.subtract(b).multiply(c);
3064 // ret2 = a.multiply(c).subtract(b.multiply(c));
3066 // __fp16 *data1 = ret1.getData();
3067 // __fp16 *data2 = ret2.getData();
3068 // EXPECT_EQ(ret1, ret2);
3070 // for (unsigned int i = 0; i < ret1.size(); ++i) {
3071 // EXPECT_FLOAT_EQ(data1[i], data2[i]);
3075 // TEST(nntrainer_Tensor, fill_p) {
3076 // /// same dimension, buffer size
3078 // nntrainer::Tensor target(3, 2, 4, 5);
3079 // nntrainer::Tensor original = randUniform(3, 2, 4, 5, -1.0f, 1.0f);
3080 // target.fill(original, false);
3082 // EXPECT_EQ(target, original);
3085 // /// same dimension, buffer size is different (not tested)
3087 // /// there is no way to make a non-contiguous tensor publicly yet
3088 // EXPECT_TRUE(true);
3091 // /// uninitialized with initialized flag is true
3093 // nntrainer::Tensor target;
3094 // nntrainer::Tensor original = randUniform(3, 2, 4, 5, -1.0f, 1.0f);
3095 // target.fill(original, true);
3097 // EXPECT_EQ(target, original);
3101 // TEST(nntrainer_Tensor, fill_uninitialized_n) {
3102 // nntrainer::Tensor target;
3103 // nntrainer::Tensor original = randUniform(3, 1, 2, 3, -1.0f, 1.0f);
3104 // EXPECT_THROW(target.fill(original, false), std::invalid_argument);
3107 // TEST(nntrainer_Tensor, fill_different_dimension_n) {
3108 // nntrainer::Tensor target(3, 1, 3, 2);
3109 // nntrainer::Tensor original = randUniform(3, 1, 2, 3, -1.0f, 1.0f);
3110 // EXPECT_THROW(target.fill(original, false), std::invalid_argument);
3113 // TEST(nntrainer_Tensor, DISABLED_fill_non_contiguous_n) {
3114 // /// there is no way to make a non-contiguous tensor publicly yet
3115 // EXPECT_TRUE(false);
3118 // TEST(nntrainer_Tensor, DISABLED_fill_different_buffer_size_n) {
3119 // /// there is no way to make a same-dimension, different buffer-sized tensor
3120 // /// publicly yet
3121 // EXPECT_TRUE(false);
3124 // TEST(nntrainer_Tensor, empty_01) {
3125 // nntrainer::Tensor t;
3127 // EXPECT_TRUE(t.empty());
3130 // TEST(nntrainer_Tensor, empty_02) {
3131 // nntrainer::Tensor t({1, 2, 3, 4}, false);
3133 // EXPECT_FALSE(t.empty());
3136 // TEST(nntrainer_Tensor, empty_03) {
3137 // nntrainer::Tensor t({1, 2, 3, 4}, true);
3139 // EXPECT_FALSE(t.empty());
3142 // TEST(nntrainer_Tensor, allocate_01_n) {
3143 // nntrainer::Tensor t;
3144 // EXPECT_FALSE(t.isAllocated());
3147 // EXPECT_FALSE(t.isAllocated());
3150 // TEST(nntrainer_Tensor, allocate_02_p) {
3151 // nntrainer::Tensor t({1, 2, 3, 4}, false);
3152 // EXPECT_FALSE(t.isAllocated());
3155 // EXPECT_TRUE(t.isAllocated());
3158 // TEST(nntrainer_Tensor, allocate_03_p) {
3159 // nntrainer::Tensor t({1, 2, 3, 4}, true);
3160 // EXPECT_TRUE(t.isAllocated());
3163 // EXPECT_TRUE(t.isAllocated());
3166 // TEST(nntrainer_Tensor, initialize_01_p) {
3167 // nntrainer::Tensor t({1, 2, 3, 4}, true, nntrainer::Tensor::Initializer::ONES);
3169 // nntrainer::Tensor golden(1, 2, 3, 4);
3170 // golden.setValue(1);
3172 // EXPECT_EQ(golden, t);
3175 // TEST(nntrainer_Tensor, initialize_02_p) {
3176 // nntrainer::Tensor t({1, 2, 3, 4}, true);
3178 // nntrainer::Tensor golden(1, 2, 3, 4);
3179 // golden.setValue(1);
3181 // EXPECT_NE(golden, t);
3183 // t.initialize(nntrainer::Tensor::Initializer::ONES);
3184 // EXPECT_EQ(golden, t);
3187 // TEST(nntrainer_Tensor, initialize_03_p) {
3188 // nntrainer::Tensor t({1, 2, 3, 4}, false,
3189 // nntrainer::Tensor::Initializer::ONES);
3192 // nntrainer::Tensor golden(1, 2, 3, 4);
3193 // golden.setValue(1);
3195 // EXPECT_EQ(golden, t);
3198 // TEST(nntrainer_Tensor, initialize_04_p) {
3199 // nntrainer::Tensor t({1, 2, 3, 4}, false);
3200 // t.initialize(nntrainer::Tensor::Initializer::ONES);
3203 // nntrainer::Tensor golden(1, 2, 3, 4);
3204 // golden.setValue(1);
3206 // EXPECT_EQ(golden, t);
3209 // TEST(nntrainer_Tensor, initialize_05_p) {
3210 // nntrainer::Tensor t({1, 2, 3, 4}, false);
3213 // nntrainer::Tensor golden(1, 2, 3, 4);
3214 // golden.setValue(1.f);
3217 // * Ideally, it should be NE, but it can be equal due to no initialization
3218 // * EXPECT_NE(golden, t);
3221 // t.initialize(nntrainer::Tensor::Initializer::ONES);
3222 // EXPECT_EQ(golden, t);
3225 // TEST(nntrainer_Tensor, initialize_06_n) {
3226 // nntrainer::Tensor t({1, 2, 3, 4}, true, nntrainer::Tensor::Initializer::ONES);
3227 // nntrainer::Tensor golden({1, 2, 3, 4}, true,
3228 // nntrainer::Tensor::Initializer::ZEROS);
3230 // EXPECT_NE(golden, t);
3232 // golden.initialize(nntrainer::Tensor::Initializer::ONES);
3233 // EXPECT_EQ(golden, t);
3236 // TEST(nntrainer_Tensor, initialize_07_p) {
3237 // nntrainer::Tensor t({1, 2, 3, 4}, true, nntrainer::Tensor::Initializer::ONES);
3239 // nntrainer::Tensor golden(1, 2, 3, 4);
3240 // golden.setValue(1);
3242 // EXPECT_EQ(golden, t);
3244 // t.setValue(0, 0, 0, 0, 0);
3245 // t.setValue(0, 0, 0, t.size() - 1, 0);
3246 // EXPECT_NE(golden, t);
3249 // EXPECT_EQ(golden, t);
3252 // TEST(nntrainer_Tensor, initialize_08_p) {
3253 // nntrainer::Tensor t({1, 2, 3, 4}, true, nntrainer::Tensor::Initializer::ONES);
3255 // nntrainer::Tensor golden(1, 2, 3, 4, nntrainer::Tformat::NCHW, nntrainer::DataType::FP32);
3256 // golden.setValue(1);
3257 // EXPECT_EQ(golden, t);
3259 // t.initialize(nntrainer::Tensor::Initializer::HE_NORMAL);
3260 // EXPECT_NE(golden, t);
3264 // EXPECT_NE(golden, t);
3266 // t.initialize(nntrainer::Tensor::Initializer::ONES);
3267 // EXPECT_EQ(golden, t);
3270 // EXPECT_EQ(golden, t);
3273 // TEST(nntrainer_Tensor, split_01_p) {
3275 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
3276 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
3277 // std::vector<nntrainer::Tensor> answer;
3278 // answer.reserve(3);
3280 // __fp16 answer_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
3281 // 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
3282 // 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
3283 // 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
3284 // answer.emplace_back(ml::train::TensorDim{1, 2, 4, 5}, answer_data);
3287 // __fp16 answer_data[] = {40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
3288 // 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
3289 // 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
3290 // 70, 71, 72, 73, 74, 75, 76, 77, 78, 79};
3291 // answer.emplace_back(ml::train::TensorDim{1, 2, 4, 5}, answer_data);
3294 // __fp16 answer_data[] = {80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
3295 // 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
3296 // 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
3297 // 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
3298 // answer.emplace_back(ml::train::TensorDim{1, 2, 4, 5}, answer_data);
3300 // EXPECT_EQ(t.split(3, 0), answer);
3303 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
3304 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
3305 // std::vector<nntrainer::Tensor> answer;
3306 // answer.reserve(2);
3308 // __fp16 answer_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
3309 // 12, 13, 14, 15, 16, 17, 18, 19, 40, 41, 42, 43,
3310 // 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
3311 // 56, 57, 58, 59, 80, 81, 82, 83, 84, 85, 86, 87,
3312 // 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99};
3313 // answer.emplace_back(ml::train::TensorDim{3, 1, 4, 5}, answer_data);
3316 // __fp16 answer_data[] = {20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
3317 // 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
3318 // 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
3319 // 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
3320 // 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
3321 // 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
3322 // answer.emplace_back(ml::train::TensorDim{3, 1, 4, 5}, answer_data);
3324 // EXPECT_EQ(t.split(2, 1), answer);
3327 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
3328 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
3329 // std::vector<nntrainer::Tensor> answer;
3330 // answer.reserve(2);
3332 // __fp16 answer_data[] = {
3333 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20, 21, 22, 23, 24,
3334 // 25, 26, 27, 28, 29, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
3335 // 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 80, 81, 82, 83, 84,
3336 // 85, 86, 87, 88, 89, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109};
3337 // answer.emplace_back(ml::train::TensorDim{3, 2, 2, 5}, answer_data);
3340 // __fp16 answer_data[] = {
3341 // 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 30, 31, 32, 33, 34,
3342 // 35, 36, 37, 38, 39, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
3343 // 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 90, 91, 92, 93, 94,
3344 // 95, 96, 97, 98, 99, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
3345 // answer.emplace_back(ml::train::TensorDim{3, 2, 2, 5}, answer_data);
3347 // EXPECT_EQ(t.split(2, 2), answer);
3350 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
3351 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
3352 // std::vector<nntrainer::Tensor> answer;
3353 // answer.reserve(5);
3355 // __fp16 answer_data[] = {0, 5, 10, 15, 20, 25, 30, 35,
3356 // 40, 45, 50, 55, 60, 65, 70, 75,
3357 // 80, 85, 90, 95, 100, 105, 110, 115};
3358 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 1}, answer_data);
3361 // __fp16 answer_data[] = {1, 6, 11, 16, 21, 26, 31, 36,
3362 // 41, 46, 51, 56, 61, 66, 71, 76,
3363 // 81, 86, 91, 96, 101, 106, 111, 116};
3364 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 1}, answer_data);
3367 // __fp16 answer_data[] = {2, 7, 12, 17, 22, 27, 32, 37,
3368 // 42, 47, 52, 57, 62, 67, 72, 77,
3369 // 82, 87, 92, 97, 102, 107, 112, 117};
3370 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 1}, answer_data);
3373 // __fp16 answer_data[] = {3, 8, 13, 18, 23, 28, 33, 38,
3374 // 43, 48, 53, 58, 63, 68, 73, 78,
3375 // 83, 88, 93, 98, 103, 108, 113, 118};
3376 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 1}, answer_data);
3379 // __fp16 answer_data[] = {4, 9, 14, 19, 24, 29, 34, 39,
3380 // 44, 49, 54, 59, 64, 69, 74, 79,
3381 // 84, 89, 94, 99, 104, 109, 114, 119};
3382 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 1}, answer_data);
3384 // EXPECT_EQ(t.split(5, 3), answer);
3387 // nntrainer::TensorDim ref_dim(1, 1, 4, 6);
3388 // nntrainer::Tensor t = ranged(1, 1, 4, 6);
3389 // std::vector<nntrainer::Tensor> answer;
3390 // answer.reserve(2);
3392 // __fp16 answer_data[] = {0, 1, 2, 6, 7, 8, 12, 13, 14, 18, 19, 20};
3393 // answer.emplace_back(ml::train::TensorDim{1, 1, 4, 3}, answer_data);
3396 // __fp16 answer_data[] = {3, 4, 5, 9, 10, 11, 15, 16, 17, 21, 22, 23};
3397 // answer.emplace_back(ml::train::TensorDim{1, 1, 4, 3}, answer_data);
3399 // EXPECT_EQ(t.split(2, 3), answer);
3403 // TEST(nntrainer_Tensor, split_02_n) {
3404 // nntrainer::Tensor t(1, 1, 1, 1);
3405 // EXPECT_THROW(t.split(0, 0), std::invalid_argument);
3408 // TEST(nntrainer_Tensor, split_03_n) {
3409 // nntrainer::Tensor t(3, 1, 1, 1);
3410 // EXPECT_THROW(t.split(2, 0), std::invalid_argument);
3413 // TEST(nntrainer_Tensor, split_04_p) {
3415 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
3416 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
3417 // std::vector<nntrainer::Tensor> answer;
3418 // answer.reserve(2);
3420 // __fp16 answer_data[] = {
3421 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
3422 // 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
3423 // 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
3424 // 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
3425 // 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79};
3426 // answer.emplace_back(ml::train::TensorDim{2, 2, 4, 5}, answer_data);
3429 // __fp16 answer_data[] = {80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
3430 // 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
3431 // 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
3432 // 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
3433 // answer.emplace_back(ml::train::TensorDim{1, 2, 4, 5}, answer_data);
3435 // EXPECT_EQ(t.split({2, 1}, 0), answer);
3438 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
3439 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
3440 // std::vector<nntrainer::Tensor> answer;
3441 // answer.reserve(2);
3443 // __fp16 answer_data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
3444 // 12, 13, 14, 15, 16, 17, 18, 19, 40, 41, 42, 43,
3445 // 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
3446 // 56, 57, 58, 59, 80, 81, 82, 83, 84, 85, 86, 87,
3447 // 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99};
3448 // answer.emplace_back(ml::train::TensorDim{3, 1, 4, 5}, answer_data);
3451 // __fp16 answer_data[] = {20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
3452 // 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
3453 // 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
3454 // 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
3455 // 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
3456 // 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
3457 // answer.emplace_back(ml::train::TensorDim{3, 1, 4, 5}, answer_data);
3459 // EXPECT_EQ(t.split({1, 1}, 1), answer);
3462 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
3463 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
3464 // std::vector<nntrainer::Tensor> answer;
3465 // answer.reserve(2);
3467 // __fp16 answer_data[] = {
3468 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 20, 21, 22, 23, 24,
3469 // 25, 26, 27, 28, 29, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
3470 // 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 80, 81, 82, 83, 84,
3471 // 85, 86, 87, 88, 89, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109};
3472 // answer.emplace_back(ml::train::TensorDim{3, 2, 2, 5}, answer_data);
3475 // __fp16 answer_data[] = {
3476 // 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 30, 31, 32, 33, 34,
3477 // 35, 36, 37, 38, 39, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
3478 // 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 90, 91, 92, 93, 94,
3479 // 95, 96, 97, 98, 99, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119};
3480 // answer.emplace_back(ml::train::TensorDim{3, 2, 2, 5}, answer_data);
3482 // EXPECT_EQ(t.split({2, 2}, 2), answer);
3485 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
3486 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
3487 // std::vector<nntrainer::Tensor> answer;
3488 // answer.reserve(3);
3490 // __fp16 answer_data[] = {0, 5, 10, 15, 20, 25, 30, 35,
3491 // 40, 45, 50, 55, 60, 65, 70, 75,
3492 // 80, 85, 90, 95, 100, 105, 110, 115};
3493 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 1}, answer_data);
3496 // __fp16 answer_data[] = {
3497 // 1, 2, 3, 6, 7, 8, 11, 12, 13, 16, 17, 18, 21, 22, 23,
3498 // 26, 27, 28, 31, 32, 33, 36, 37, 38, 41, 42, 43, 46, 47, 48,
3499 // 51, 52, 53, 56, 57, 58, 61, 62, 63, 66, 67, 68, 71, 72, 73,
3500 // 76, 77, 78, 81, 82, 83, 86, 87, 88, 91, 92, 93, 96, 97, 98,
3501 // 101, 102, 103, 106, 107, 108, 111, 112, 113, 116, 117, 118};
3502 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 3}, answer_data);
3505 // __fp16 answer_data[] = {4, 9, 14, 19, 24, 29, 34, 39,
3506 // 44, 49, 54, 59, 64, 69, 74, 79,
3507 // 84, 89, 94, 99, 104, 109, 114, 119};
3508 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 1}, answer_data);
3510 // EXPECT_EQ(t.split({1, 3, 1}, 3), answer);
3513 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
3514 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
3515 // std::vector<nntrainer::Tensor> answer;
3516 // answer.reserve(3);
3518 // __fp16 answer_data[] = {
3519 // 0, 1, 5, 6, 10, 11, 15, 16, 20, 21, 25, 26, 30, 31, 35, 36,
3520 // 40, 41, 45, 46, 50, 51, 55, 56, 60, 61, 65, 66, 70, 71, 75, 76,
3521 // 80, 81, 85, 86, 90, 91, 95, 96, 100, 101, 105, 106, 110, 111, 115, 116};
3522 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 2}, answer_data);
3525 // __fp16 answer_data[] = {
3526 // 2, 3, 7, 8, 12, 13, 17, 18, 22, 23, 27, 28, 32, 33, 37, 38,
3527 // 42, 43, 47, 48, 52, 53, 57, 58, 62, 63, 67, 68, 72, 73, 77, 78,
3528 // 82, 83, 87, 88, 92, 93, 97, 98, 102, 103, 107, 108, 112, 113, 117, 118};
3529 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 2}, answer_data);
3532 // __fp16 answer_data[] = {4, 9, 14, 19, 24, 29, 34, 39,
3533 // 44, 49, 54, 59, 64, 69, 74, 79,
3534 // 84, 89, 94, 99, 104, 109, 114, 119};
3535 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 1}, answer_data);
3537 // EXPECT_EQ(t.split({2, 2, 1}, 3), answer);
3540 // nntrainer::TensorDim ref_dim(3, 2, 4, 5);
3541 // nntrainer::Tensor t = ranged(3, 2, 4, 5);
3542 // std::vector<nntrainer::Tensor> answer;
3543 // answer.reserve(2);
3545 // __fp16 answer_data[] = {
3546 // 0, 1, 5, 6, 10, 11, 15, 16, 20, 21, 25, 26, 30, 31, 35, 36,
3547 // 40, 41, 45, 46, 50, 51, 55, 56, 60, 61, 65, 66, 70, 71, 75, 76,
3548 // 80, 81, 85, 86, 90, 91, 95, 96, 100, 101, 105, 106, 110, 111, 115, 116};
3549 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 2}, answer_data);
3552 // __fp16 answer_data[] = {
3553 // 2, 3, 4, 7, 8, 9, 12, 13, 14, 17, 18, 19, 22, 23, 24,
3554 // 27, 28, 29, 32, 33, 34, 37, 38, 39, 42, 43, 44, 47, 48, 49,
3555 // 52, 53, 54, 57, 58, 59, 62, 63, 64, 67, 68, 69, 72, 73, 74,
3556 // 77, 78, 79, 82, 83, 84, 87, 88, 89, 92, 93, 94, 97, 98, 99,
3557 // 102, 103, 104, 107, 108, 109, 112, 113, 114, 117, 118, 119};
3558 // answer.emplace_back(ml::train::TensorDim{3, 2, 4, 3}, answer_data);
3560 // EXPECT_EQ(t.split({2, 3}, 3), answer);
3563 // nntrainer::TensorDim ref_dim(1, 1, 4, 6);
3564 // nntrainer::Tensor t = ranged(1, 1, 4, 6);
3565 // std::vector<nntrainer::Tensor> answer;
3566 // answer.reserve(3);
3568 // __fp16 answer_data[] = {0, 6, 12, 18};
3569 // answer.emplace_back(ml::train::TensorDim{1, 1, 4, 1}, answer_data);
3572 // __fp16 answer_data[] = {1, 2, 3, 7, 8, 9, 13, 14, 15, 19, 20, 21};
3573 // answer.emplace_back(ml::train::TensorDim{1, 1, 4, 3}, answer_data);
3576 // __fp16 answer_data[] = {4, 5, 10, 11, 16, 17, 22, 23};
3577 // answer.emplace_back(ml::train::TensorDim{1, 1, 4, 2}, answer_data);
3579 // EXPECT_EQ(t.split({1, 3, 2}, 3), answer);
3583 // TEST(nntrainer_Tensor, split_05_n) {
3584 // nntrainer::Tensor t(3, 1, 1, 1);
3585 // EXPECT_THROW(t.split({1, 1}, 0), std::invalid_argument);
3588 // TEST(nntrainer_Tensor, split_06_n) {
3589 // nntrainer::Tensor t(3, 1, 1, 1);
3590 // EXPECT_THROW(t.split({2, 0, 1}, 0), std::invalid_argument);
3593 // TEST(nntrainer_Tensor, split_07_n) {
3594 // nntrainer::Tensor t(3, 1, 1, 1);
3595 // EXPECT_THROW(t.split({}, 0), std::invalid_argument);
3598 // TEST(nntrainer_Tensor, cat_01_p) {
3600 // std::vector<nntrainer::Tensor> inputs;
3601 // inputs.reserve(2);
3602 // inputs.emplace_back(ranged(2, 1, 1, 2));
3603 // inputs.emplace_back(ranged(2, 2, 1, 2));
3604 // __fp16 answer_data[] = {0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 6, 7};
3605 // nntrainer::Tensor answer(ml::train::TensorDim{2, 3, 1, 2}, answer_data);
3606 // EXPECT_EQ(nntrainer::Tensor::cat(inputs, 1), answer);
3609 // std::vector<nntrainer::Tensor> inputs;
3610 // inputs.reserve(2);
3611 // inputs.emplace_back(ranged(3, 2, 4, 5));
3612 // inputs.emplace_back(ranged(2, 2, 4, 5));
3613 // __fp16 answer_data[] = {
3614 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
3615 // 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
3616 // 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
3617 // 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
3618 // 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
3619 // 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
3620 // 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
3621 // 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
3622 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
3623 // 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
3624 // 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
3625 // 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
3626 // 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
3627 // 75, 76, 77, 78, 79};
3628 // nntrainer::Tensor answer(ml::train::TensorDim{5, 2, 4, 5}, answer_data);
3629 // EXPECT_EQ(nntrainer::Tensor::cat(inputs, 0), answer);
3632 // std::vector<nntrainer::Tensor> inputs;
3633 // inputs.reserve(2);
3634 // inputs.emplace_back(ranged(3, 3, 4, 5));
3635 // inputs.emplace_back(ranged(3, 2, 4, 5));
3636 // __fp16 answer_data[] = {
3637 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
3638 // 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
3639 // 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
3640 // 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
3641 // 56, 57, 58, 59, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
3642 // 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
3643 // 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
3644 // 38, 39, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
3645 // 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,
3646 // 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
3647 // 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
3648 // 114, 115, 116, 117, 118, 119, 40, 41, 42, 43, 44, 45, 46, 47,
3649 // 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
3650 // 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
3651 // 76, 77, 78, 79, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
3652 // 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
3653 // 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
3654 // 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
3655 // 172, 173, 174, 175, 176, 177, 178, 179, 80, 81, 82, 83, 84, 85,
3656 // 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
3657 // 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
3658 // 114, 115, 116, 117, 118, 119};
3659 // nntrainer::Tensor answer(ml::train::TensorDim{3, 5, 4, 5}, answer_data);
3660 // EXPECT_EQ(nntrainer::Tensor::cat(inputs, 1), answer);
3663 // std::vector<nntrainer::Tensor> inputs;
3664 // inputs.reserve(2);
3665 // inputs.emplace_back(ranged(3, 2, 1, 5));
3666 // inputs.emplace_back(ranged(3, 2, 2, 5));
3667 // __fp16 answer_data[] = {
3668 // 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 5, 6, 7,
3669 // 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 11, 12, 13, 14, 20,
3670 // 21, 22, 23, 24, 25, 26, 27, 28, 29, 15, 16, 17, 18, 19, 30, 31, 32, 33,
3671 // 34, 35, 36, 37, 38, 39, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44, 45, 46,
3672 // 47, 48, 49, 25, 26, 27, 28, 29, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59};
3673 // nntrainer::Tensor answer(ml::train::TensorDim{3, 2, 3, 5}, answer_data);
3674 // EXPECT_EQ(nntrainer::Tensor::cat(inputs, 2), answer);
3677 // std::vector<nntrainer::Tensor> inputs;
3678 // inputs.reserve(3);
3679 // inputs.emplace_back(ranged(3, 2, 4, 1));
3680 // inputs.emplace_back(ranged(3, 2, 4, 3));
3681 // inputs.emplace_back(ranged(3, 2, 4, 2));
3682 // __fp16 answer_data[] = {
3683 // 0, 0, 1, 2, 0, 1, 1, 3, 4, 5, 2, 3, 2, 6, 7, 8, 4, 5,
3684 // 3, 9, 10, 11, 6, 7, 4, 12, 13, 14, 8, 9, 5, 15, 16, 17, 10, 11,
3685 // 6, 18, 19, 20, 12, 13, 7, 21, 22, 23, 14, 15, 8, 24, 25, 26, 16, 17,
3686 // 9, 27, 28, 29, 18, 19, 10, 30, 31, 32, 20, 21, 11, 33, 34, 35, 22, 23,
3687 // 12, 36, 37, 38, 24, 25, 13, 39, 40, 41, 26, 27, 14, 42, 43, 44, 28, 29,
3688 // 15, 45, 46, 47, 30, 31, 16, 48, 49, 50, 32, 33, 17, 51, 52, 53, 34, 35,
3689 // 18, 54, 55, 56, 36, 37, 19, 57, 58, 59, 38, 39, 20, 60, 61, 62, 40, 41,
3690 // 21, 63, 64, 65, 42, 43, 22, 66, 67, 68, 44, 45, 23, 69, 70, 71, 46, 47};
3691 // nntrainer::Tensor answer(ml::train::TensorDim{3, 2, 4, 6}, answer_data);
3692 // EXPECT_EQ(nntrainer::Tensor::cat(inputs, 3), answer);
3696 // TEST(nntrainer_Tensor, cat_02_n) {
3698 // std::vector<nntrainer::Tensor> inputs;
3699 // inputs.reserve(2);
3700 // inputs.emplace_back(nntrainer::Tensor(2, 1, 1, 2));
3701 // inputs.emplace_back(nntrainer::Tensor(2, 2, 1, 2));
3702 // EXPECT_THROW(nntrainer::Tensor::cat(inputs, 2), std::invalid_argument);
3706 // TEST(nntrainer_Tensor, zoneout_mask_01_n) {
3707 // const __fp16 zoneout_rate = 0.3f;
3708 // nntrainer::Tensor t(10, 10, 10, 10);
3709 // nntrainer::Tensor opposite(20, 20, 20, 20);
3710 // EXPECT_THROW(t.zoneout_mask(opposite, zoneout_rate), std::invalid_argument);
3713 // TEST(nntrainer_Tensor, zoneout_mask_02_p) {
3714 // const __fp16 zoneout_rate = 0.3f;
3715 // nntrainer::Tensor t(10, 10, 10, 10);
3716 // nntrainer::Tensor opposite = t.zoneout_mask(zoneout_rate);
3717 // constexpr __fp16 epsilon = 1e-3;
3719 // EXPECT_EQ(t.size(), opposite.size());
3721 // auto is_near = [epsilon](__fp16 val1, __fp16 val2) {
3722 // return val2 - epsilon < val1 && val1 < val2 + epsilon;
3725 // for (unsigned int i = 0; i < opposite.size(); ++i) {
3726 // if (is_near(opposite.getValue(i), 0.0f)) {
3727 // EXPECT_NEAR(t.getValue(i), 1.0f, epsilon);
3728 // } else if (is_near(opposite.getValue(i), 1.0f)) {
3729 // EXPECT_NEAR(t.getValue(i), 0.0f, epsilon);
3731 // FAIL() << "This should not happen";
3736 // TEST(nntrainer_Tensor, zoneout_mask_03_p) {
3737 // const __fp16 zoneout_rate = 0.3f;
3738 // nntrainer::Tensor t(10, 10, 100, 100);
3739 // nntrainer::Tensor opposite = t.zoneout_mask(zoneout_rate);
3740 // constexpr __fp16 epsilon = 1e-3;
3742 // auto is_near = [epsilon](__fp16 val1, __fp16 val2) {
3743 // return val2 - epsilon < val1 && val1 < val2 + epsilon;
3745 // auto percentage = [](unsigned int dividend, unsigned int divisor) {
3746 // return (__fp16)dividend / (__fp16)divisor;
3750 // unsigned int zeros = 0;
3751 // unsigned int ones = 0;
3752 // for (unsigned int i = 0; i < opposite.size(); ++i) {
3753 // if (is_near(opposite.getValue(i), 0.0f)) {
3755 // } else if (is_near(opposite.getValue(i), 1.0f)) {
3758 // FAIL() << "This should not happen";
3761 // EXPECT_NEAR(percentage(zeros, opposite.size()), 1.0f - zoneout_rate,
3765 // EXPECT_NEAR(percentage(ones, opposite.size()), zoneout_rate, epsilon);
3769 // unsigned int zeros = 0;
3770 // unsigned int ones = 0;
3771 // for (unsigned int i = 0; i < t.size(); ++i) {
3772 // if (is_near(t.getValue(i), 0.0f)) {
3774 // } else if (is_near(t.getValue(i), 1.0f)) {
3777 // FAIL() << "This should not happen";
3780 // EXPECT_NEAR(percentage(zeros, t.size()), zoneout_rate, epsilon);
3783 // EXPECT_NEAR(percentage(ones, t.size()), 1.0f - zoneout_rate, epsilon);
3787 // TEST(nntrainer_Tensor, zoneout_mask_04_n) {
3788 // const __fp16 zoneout_rate = 0.3f;
3789 // nntrainer::Tensor t(10, 10, 100, 100);
3790 // nntrainer::Tensor opposite = t.zoneout_mask(zoneout_rate);
3791 // constexpr __fp16 epsilon = 1e-3;
3793 // auto is_near = [epsilon](__fp16 val1, __fp16 val2) {
3794 // return val2 - epsilon < val1 && val1 < val2 + epsilon;
3796 // auto percentage = [](unsigned int dividend, unsigned int divisor) {
3797 // return (__fp16)dividend / (__fp16)divisor;
3801 // unsigned int zeros = 0;
3802 // unsigned int ones = 0;
3803 // for (unsigned int i = 0; i < opposite.size(); ++i) {
3804 // if (is_near(opposite.getValue(i), 0.0f)) {
3806 // } else if (is_near(opposite.getValue(i), 1.0f)) {
3809 // FAIL() << "This should not happen";
3813 // is_near(percentage(ones, opposite.size()), 1.0f - zoneout_rate));
3817 // unsigned int zeros = 0;
3818 // unsigned int ones = 0;
3819 // for (unsigned int i = 0; i < t.size(); ++i) {
3820 // if (is_near(t.getValue(i), 0.0f)) {
3822 // } else if (is_near(t.getValue(i), 1.0f)) {
3825 // FAIL() << "This should not happen";
3828 // EXPECT_FALSE(is_near(percentage(ones, t.size()), zoneout_rate));
3832 // TEST(nntrainer_Tensor, TensorMap_p) {
3833 // __fp16 dat[] = {1, 2, 3};
3836 // nntrainer::Tensor a = nntrainer::Tensor::Map(dat, 3 * sizeof(__fp16), {3});
3837 // /// check if a.getData() has same address with dat
3838 // EXPECT_EQ(dat, a.getData());
3840 // /// check if b.getData() has same address with data
3841 // nntrainer::Tensor b = a;
3842 // EXPECT_EQ(dat, b.getData());
3845 // /// check if dat is accessible after destruction of all the tensor
3846 // EXPECT_FLOAT_EQ(dat[2], 3);
3849 // TEST(nntrainer_Tensor, TensorWrap_01_n) {
3850 // __fp16 dat[] = {1, 2, 3};
3851 // EXPECT_THROW(nntrainer::Tensor::Map(dat, 3, nntrainer::TensorDim({})),
3852 // std::invalid_argument);
3855 // TEST(nntrainer_Tensor, TensorWrap_02_n) {
3856 // __fp16 dat[] = {1, 2, 3};
3857 // EXPECT_THROW(nntrainer::Tensor::Map(dat, 3, {4}), std::invalid_argument);
3860 // TEST(nntrainer_Tensor, TensorPaddedValue_p) {
3861 // nntrainer::Tensor a = ranged(1, 1, 3, 3);
3862 // __fp16 default_padded = -1;
3864 // for (int i = 0; i < 5; ++i) {
3865 // for (int j = 0; j < 5; ++j) {
3866 // __fp16 expected = default_padded;
3867 // if (1 <= i && i <= 3 && 1 <= j && j <= 3) {
3868 // expected = (i - 1) * 3 + (j - 1);
3870 // __fp16 actual = a.getValuePaddedVirtual<__fp16>(0, 0, i, j, 1, 1, default_padded);
3871 // EXPECT_FLOAT_EQ(actual, expected);
3876 // GTEST_API_ int main(int argc, char **argv) {
3880 // testing::InitGoogleTest(&argc, argv);
3882 // std::cerr << "Error during InitGoogleTest" << std::endl;
3887 // result = RUN_ALL_TESTS();
3889 // std::cerr << "Error during RUN_ALL_TESTS()" << std::endl;