EXPECT_EQ(status, ML_ERROR_NONE);
}
-// TEST(nntrainer_Tensor, Tensor_02_p) {
-// int status = ML_ERROR_NONE;
-// int height = 3;
-// int width = 10;
-// std::vector<std::vector<float>> in;
-// for (int i = 0; i < height; ++i) {
-// std::vector<float> tv;
-// for (int j = 0; j < width; ++j) {
-// tv.push_back(i * 2.0 + j);
-// }
-// in.push_back(tv);
-// }
-
-// nntrainer::Tensor tensor = nntrainer::Tensor(in);
-// ASSERT_NE(nullptr, tensor.getData());
-
-// if (tensor.getValue(0, 0, 0, 1) != 1.0)
-// status = ML_ERROR_INVALID_PARAMETER;
-// EXPECT_EQ(status, ML_ERROR_NONE);
-// }
-
-// TEST(nntrainer_Tensor, Tensor_02_nhwc_p) {
-// int status = ML_ERROR_NONE;
-// int width = 10;
-// int channel = 3;
-// std::vector<std::vector<float>> in;
-// for (int i = 0; i < width; ++i) {
-// std::vector<float> tv;
-// for (int j = 0; j < channel; ++j) {
-// tv.push_back(i * 2.0 + j);
-// }
-// in.push_back(tv);
-// }
+/**
+ * @brief Construct a FP32 NCHW tensor from a 2D float vector and verify
+ *        element (0,0,0,1) holds the generated value 1.0 (= 0*2.0 + 1).
+ */
+TEST(nntrainer_Tensor, Tensor_02_p) {
+ int status = ML_ERROR_NONE;
+ int height = 3;
+ int width = 10;
+ std::vector<std::vector<float>> in;
+ for (int i = 0; i < height; ++i) {
+ std::vector<float> tv;
+ for (int j = 0; j < width; ++j) {
+ tv.push_back(i * 2.0 + j);
+ }
+ in.push_back(tv);
+ }
-// nntrainer::Tensor tensor = nntrainer::Tensor(in, NHWC_);
-// ASSERT_NE(nullptr, tensor.getData());
+ nntrainer::Tensor tensor = nntrainer::Tensor(
+ in, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP32});
+ ASSERT_NE(nullptr, tensor.getData());
-// if (tensor.getValue(0, 0, 0, 1) != 1.0)
-// status = ML_ERROR_INVALID_PARAMETER;
-// EXPECT_EQ(status, ML_ERROR_NONE);
+ if (tensor.getValue(0, 0, 0, 1) != 1.0)
+ status = ML_ERROR_INVALID_PARAMETER;
+ EXPECT_EQ(status, ML_ERROR_NONE);
+}
TEST(nntrainer_Tensor, Tensor_03_p) {
int status = ML_ERROR_NONE;
nntrainer::Tensor tensor = nntrainer::Tensor(
in, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
- ASSERT_NE(nullptr, tensor.getData<int8_t>());
+ ASSERT_NE(nullptr, tensor.getData<int8_t>(0));
if (tensor.getValue<int8_t>(0, 0, 0, 1) != 1)
status = ML_ERROR_INVALID_PARAMETER;
nntrainer::Tensor tensor = nntrainer::Tensor(
in, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::UINT16});
- ASSERT_NE(nullptr, tensor.getData<uint16_t>());
+ ASSERT_NE(nullptr, tensor.getData<uint16_t>(0));
for (size_t b = 0; b < tensor.batch(); ++b) {
for (size_t c = 0; c < tensor.channel(); ++c) {
// }
// }
+/**
+ * @brief Negative test: constructing a FP32 tensor from a 3D vector whose
+ *        innermost (width) dimension is empty must throw std::out_of_range.
+ */
+TEST(nntrainer_Tensor, Tensor_07_n) {
+ int batch = 3;
+ int height = 3;
+ std::vector<std::vector<std::vector<float>>> in;
+
+ for (int k = 0; k < batch; ++k) {
+ std::vector<std::vector<float>> ttv;
+ for (int i = 0; i < height; ++i) {
+ // rows are left empty on purpose: the width dimension is missing
+ std::vector<float> tv;
+ ttv.push_back(tv);
+ }
+ in.push_back(ttv);
+ }
+
+ EXPECT_THROW(nntrainer::Tensor(
+ in, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP32}),
+ std::out_of_range);
+}
+
+/**
+ * @brief Negative test: constructing a QINT8 tensor from a 3D vector whose
+ *        innermost (width) dimension is empty must throw std::out_of_range.
+ */
+TEST(nntrainer_Tensor, Tensor_08_n) {
+ int batch = 3;
+ int height = 3;
+ std::vector<std::vector<std::vector<int8_t>>> in;
+
+ for (int k = 0; k < batch; ++k) {
+ std::vector<std::vector<int8_t>> ttv;
+ for (int i = 0; i < height; ++i) {
+ // rows are left empty on purpose: the width dimension is missing
+ std::vector<int8_t> tv;
+ ttv.push_back(tv);
+ }
+ in.push_back(ttv);
+ }
+
+ EXPECT_THROW(nntrainer::Tensor(
+ in, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8}),
+ std::out_of_range);
+}
+
+/**
+ * @brief Negative test: constructing a UINT16 tensor from a 3D vector whose
+ *        innermost (width) dimension is empty must throw std::out_of_range.
+ */
+TEST(nntrainer_Tensor, Tensor_09_n) {
+ int batch = 3;
+ int height = 3;
+ std::vector<std::vector<std::vector<uint16_t>>> in;
+
+ for (int k = 0; k < batch; ++k) {
+ std::vector<std::vector<uint16_t>> ttv;
+ for (int i = 0; i < height; ++i) {
+ // rows are left empty on purpose: the width dimension is missing
+ std::vector<uint16_t> tv;
+ ttv.push_back(tv);
+ }
+ in.push_back(ttv);
+ }
+
+ EXPECT_THROW(nntrainer::Tensor(
+ in, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::UINT16}),
+ std::out_of_range);
+}
+
+/**
+ * @brief Negative test: copy() from a non-contiguous shared-data view of a
+ *        FP32 tensor must throw std::runtime_error.
+ */
+TEST(nntrainer_Tensor, copy_01_n) {
+ int batch = 3;
+ int channel = 1;
+ int height = 3;
+ int width = 10;
+
+ nntrainer::Tensor input(batch, channel, height, width);
+ GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
+
+ nntrainer::Tensor output(batch, channel, height, width);
+
+ // use copy() to copy non-contiguous tensor
+ EXPECT_THROW(output.copy(input.getSharedDataTensor({3, 1, 3, 5}, 0, false)),
+ std::runtime_error);
+}
+
+/**
+ * @brief Negative test: copy() from a non-contiguous shared-data view of a
+ *        QINT8 tensor must throw std::runtime_error.
+ */
+TEST(nntrainer_Tensor, copy_02_n) {
+ int batch = 3;
+ int channel = 1;
+ int height = 3;
+ int width = 10;
+
+ nntrainer::Tensor input(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
+ GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
+
+ nntrainer::Tensor output(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
+
+ // use copy() to copy non-contiguous tensor
+ EXPECT_THROW(output.copy(input.getSharedDataTensor({3, 1, 3, 5}, 0, false)),
+ std::runtime_error);
+}
+
+/**
+ * @brief Negative test: copy() from a non-contiguous shared-data view of a
+ *        UINT16 tensor must throw std::runtime_error.
+ */
+TEST(nntrainer_Tensor, copy_03_n) {
+ int batch = 3;
+ int channel = 1;
+ int height = 3;
+ int width = 10;
+
+ nntrainer::Tensor input(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::UINT16});
+ GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
+
+ nntrainer::Tensor output(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::UINT16});
+
+ // use copy() to copy non-contiguous tensor
+ EXPECT_THROW(output.copy(input.getSharedDataTensor({3, 1, 3, 5}, 0, false)),
+ std::runtime_error);
+}
+
+/**
+ * @brief copy() into a tensor created with a different shape; after the
+ *        copy the two tensors must compare equal.
+ */
+TEST(nntrainer_Tensor, copy_04_p) {
+ int batch = 3;
+ int channel = 1;
+ int height = 3;
+ int width = 10;
+
+ nntrainer::Tensor input(batch, channel, height, width);
+ GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
+
+ // NOTE(review): height/width are swapped here on purpose; EXPECT_EQ below
+ // only passes if copy() also takes over the source dimensions -- confirm.
+ nntrainer::Tensor output(batch, channel, width, height);
+ output.copy(input);
+
+ EXPECT_EQ(input, output);
+}
+
+/**
+ * @brief copy() between two contiguous QINT8 tensors of identical shape
+ *        must yield an equal tensor.
+ */
+TEST(nntrainer_Tensor, copy_05_p) {
+ int batch = 3;
+ int channel = 1;
+ int height = 3;
+ int width = 10;
+
+ nntrainer::Tensor input(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
+ GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
+
+ nntrainer::Tensor output(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
+
+ output.copy(input);
+
+ ASSERT_EQ(input, output);
+}
+
+/**
+ * @brief copy() between two contiguous UINT16 tensors of identical shape
+ *        must yield an equal tensor.
+ */
+TEST(nntrainer_Tensor, copy_06_p) {
+ int batch = 3;
+ int channel = 1;
+ int height = 3;
+ int width = 10;
+
+ nntrainer::Tensor input(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::UINT16});
+ GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
+
+ nntrainer::Tensor output(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::UINT16});
+
+ output.copy(input);
+
+ ASSERT_EQ(input, output);
+}
+
+/**
+ * @brief copyData() across dtypes (FP32 tensor, QINT8 tensor): the tensors
+ *        compare unequal, but element values must agree after casting the
+ *        float to int. (Presumably copyData(src) copies from the argument;
+ *        confirm against the Tensor API.)
+ */
+TEST(nntrainer_Tensor, copy_07_p) {
+ int status = ML_ERROR_NONE;
+ int batch = 3;
+ int channel = 1;
+ int height = 3;
+ int width = 10;
+
+ nntrainer::Tensor input(batch, channel, height, width);
+ GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
+
+ nntrainer::Tensor output(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
+
+ input.copyData(output);
+
+ // different dtypes, so the tensors themselves are not equal
+ ASSERT_NE(input, output);
+
+ // element-wise values must agree once the float is cast to int
+ for (unsigned int idx = 0; idx < input.size(); ++idx) {
+ if ((int)input.getValue<float>(idx) != output.getValue<int8_t>(idx))
+ status = ML_ERROR_RESULT_OUT_OF_RANGE;
+ }
+
+ EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief Negative test: copyData() on a QINT8 tensor with a tensor of a
+ *        different data type must throw std::invalid_argument.
+ */
+TEST(nntrainer_Tensor, copy_08_n) {
+ int batch = 3;
+ int channel = 1;
+ int height = 3;
+ int width = 10;
+
+ nntrainer::Tensor input(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
+ GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
+
+ nntrainer::Tensor output(batch, channel, height, width);
+
+ // Currently, CharTensor does not support copyData of a different data type
+ EXPECT_THROW(input.copyData(output), std::invalid_argument);
+}
+
+/**
+ * @brief Negative test: copyData() on a UINT16 tensor with a tensor of a
+ *        different data type must throw std::invalid_argument.
+ */
+TEST(nntrainer_Tensor, copy_09_n) {
+ int batch = 3;
+ int channel = 1;
+ int height = 3;
+ int width = 10;
+
+ nntrainer::Tensor input(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::UINT16});
+ GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
+
+ nntrainer::Tensor output(batch, channel, height, width);
+
+ // Currently, UINT Tensor does not support copyData of a different data type
+ EXPECT_THROW(input.copyData(output), std::invalid_argument);
+}
+
+/**
+ * @brief copy_with_stride() from a non-contiguous QINT8 view (3x1x3x5) into
+ *        a tensor of the same size; every element must equal the source
+ *        region of the original tensor.
+ */
+TEST(nntrainer_Tensor, copy_10_p) {
+ int status = ML_ERROR_NONE;
+ int batch = 3;
+ int channel = 1;
+ int height = 3;
+ int width = 10;
+
+ nntrainer::Tensor input(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
+ GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
+
+ nntrainer::Tensor output(
+ batch, channel, height, width / 2,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8});
+
+ EXPECT_NO_THROW(output.copy_with_stride(input.getSharedDataTensor(
+ {3, 1, 3, 5, nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8}, 0,
+ false)));
+
+ // iterate over output's dims (w < 5), so indices stay inside the view
+ for (unsigned int b = 0; b < output.batch(); ++b) {
+ for (unsigned int c = 0; c < output.channel(); ++c) {
+ for (unsigned int h = 0; h < output.height(); ++h) {
+ for (unsigned int w = 0; w < output.width(); ++w) {
+ if (input.getValue<int8_t>(b, c, h, w) !=
+ output.getValue<int8_t>(b, c, h, w)) {
+ status = ML_ERROR_RESULT_OUT_OF_RANGE;
+ }
+ }
+ }
+ }
+ }
+
+ EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief copy_with_stride() from a non-contiguous UINT16 view (3x1x3x5) into
+ *        a tensor of the same size; every element must equal the source
+ *        region of the original tensor.
+ */
+TEST(nntrainer_Tensor, copy_11_p) {
+ int status = ML_ERROR_NONE;
+ int batch = 3;
+ int channel = 1;
+ int height = 3;
+ int width = 10;
+
+ nntrainer::Tensor input(
+ batch, channel, height, width,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::UINT16});
+ GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
+
+ nntrainer::Tensor output(
+ batch, channel, height, width / 2,
+ {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::UINT16});
+
+ EXPECT_NO_THROW(output.copy_with_stride(input.getSharedDataTensor(
+ {3, 1, 3, 5, nntrainer::Tformat::NCHW, nntrainer::Tdatatype::UINT16}, 0,
+ false)));
+
+ // iterate over output's dims (w < 5), so indices stay inside the view
+ for (unsigned int b = 0; b < output.batch(); ++b) {
+ for (unsigned int c = 0; c < output.channel(); ++c) {
+ for (unsigned int h = 0; h < output.height(); ++h) {
+ for (unsigned int w = 0; w < output.width(); ++w) {
+ if (input.getValue<uint16_t>(b, c, h, w) !=
+ output.getValue<uint16_t>(b, c, h, w)) {
+ status = ML_ERROR_RESULT_OUT_OF_RANGE;
+ }
+ }
+ }
+ }
+ }
+
+ EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
TEST(nntrainer_Tensor, multiply_i_01_p) {
int status = ML_ERROR_NONE;
int batch = 3;
}
TEST(nntrainer_Tensor, pow_01_p) {
-
nntrainer::Tensor input = constant(4.0, 3, 2, 4, 5);
nntrainer::Tensor actual, expected;
ASSERT_EQ(status, 0);
}
+/**
+ * @brief argmax() on a QINT8 tensor returns, per batch, the flat index of
+ *        the first maximum element.
+ */
+TEST(nntrainer_Tensor, max_element_01_p) {
+ int batch = 3;
+ int channel = 1;
+ int height = 5;
+ int width = 6;
+ // NOTE(review): format/dtype passed as separate trailing args here, unlike
+ // the {format, dtype} initializer used elsewhere -- relies on that overload.
+ nntrainer::Tensor target(3, 1, 5, 6, nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::QINT8);
+
+ GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1 - k * i);
+
+ EXPECT_EQ(target.argmax(), std::vector<unsigned int>({24, 0, 0}));
+}
+
+/**
+ * @brief argmax() on a UINT16 tensor returns, per batch, the flat index of
+ *        the first maximum element.
+ */
+TEST(nntrainer_Tensor, max_element_02_p) {
+ int batch = 3;
+ int channel = 1;
+ int height = 5;
+ int width = 6;
+ nntrainer::Tensor target(3, 1, 5, 6, nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::UINT16);
+
+ GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1 - k * i);
+
+ EXPECT_EQ(target.argmax(), std::vector<unsigned int>({24, 0, 0}));
+}
+
+/**
+ * @brief max_abs() on a QINT8 tensor; the generator's largest magnitude is
+ *        31 (assuming GEN_TEST_INPUT loops i=batch, j=channel, k=height,
+ *        l=width -- confirm against the macro definition).
+ */
+TEST(nntrainer_Tensor, max_element_03_p) {
+ int batch = 3;
+ int channel = 1;
+ int height = 5;
+ int width = 6;
+ nntrainer::Tensor target(3, 1, 5, 6, nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::QINT8);
+
+ GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1 - k * i);
+
+ EXPECT_EQ(target.max_abs(), 31);
+}
+
+/**
+ * @brief max_abs() on a UINT16 tensor; same generator as the QINT8 case,
+ *        largest magnitude 31.
+ */
+TEST(nntrainer_Tensor, max_element_04_p) {
+ int batch = 3;
+ int channel = 1;
+ int height = 5;
+ int width = 6;
+ nntrainer::Tensor target(3, 1, 5, 6, nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::UINT16);
+
+ GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1 - k * i);
+
+ EXPECT_EQ(target.max_abs(), 31);
+}
+
+/**
+ * @brief minValue() on a QINT8 tensor, before and after addValue() raises
+ *        the smallest elements.
+ */
+TEST(nntrainer_Tensor, min_element_01_p) {
+ int batch = 3;
+ int channel = 1;
+ int height = 5;
+ int width = 1;
+ nntrainer::Tensor target(3, 1, 5, 1, nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::QINT8);
+
+ GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1 - k * i);
+
+ EXPECT_EQ(target.minValue(), 1);
+
+ // add 15.5 to each element of the first batch (beta = 1 keeps the old
+ // value); the smallest remaining value in the tensor is then 16
+ for (int idx = 0; idx < height; ++idx) {
+ target.addValue(0, 0, idx, 0, 15.5f, 1.0f);
+ }
+
+ EXPECT_EQ(target.minValue(), 16);
+}
+
+/**
+ * @brief minValue() on a UINT16 tensor, before and after addValue() raises
+ *        the smallest elements.
+ */
+TEST(nntrainer_Tensor, min_element_02_p) {
+ int batch = 3;
+ int channel = 1;
+ int height = 5;
+ int width = 1;
+ nntrainer::Tensor target(3, 1, 5, 1, nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::UINT16);
+
+ GEN_TEST_INPUT(target, i * (batch * height) + j * (width) + k + 1);
+
+ EXPECT_EQ(target.minValue(), 1);
+
+ // add 15.5 to each element of the first batch (beta = 1 keeps the old
+ // value); the smallest remaining value in the tensor is then 16
+ for (int idx = 0; idx < height; ++idx) {
+ target.addValue(0, 0, idx, 0, 15.5f, 1.0f);
+ }
+
+ EXPECT_EQ(target.minValue(), 16);
+}
+
TEST(nntrainer_Tensor, copy_and_shares_variable_01_p) {
nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6);
nntrainer::Tensor B = A.clone();
}
TEST(nntrainer_Tensor, copy_and_shares_variable_02_p) {
+ // QINT8 tensor: clone() makes an independent copy, assignment shares data
+ nntrainer::Tensor A = constant(10, 3, 4, 5, 6, nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::QINT8);
+ nntrainer::Tensor B = A.clone();
+ nntrainer::Tensor C = A;
+
+ C.setValue(1, 1, 1, 1, 9);
+
+ // C shares A's buffer, so A sees the write; cloned B is unaffected
+ EXPECT_EQ(A, C);
+ EXPECT_NE(B, C);
+
+ // reshape() changes only C's view; A and B keep their original dims
+ C.reshape(nntrainer::TensorDim(3, 4, 6, 5, nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::QINT8));
+ EXPECT_EQ(A.getDim(), B.getDim());
+ EXPECT_NE(A.getDim(), C.getDim());
+}
+
+TEST(nntrainer_Tensor, copy_and_shares_variable_03_p) {
nntrainer::Tensor A = constant(10, 3, 4, 5, 6, nntrainer::Tformat::NCHW,
nntrainer::Tdatatype::UINT16);
nntrainer::Tensor B = A.clone();
EXPECT_NE(A->getDim(), C.getDim());
}
-TEST(nntrainer_Tensor, print_small_size) {
+TEST(nntrainer_Tensor, print_small_size_01) {
nntrainer::Tensor target = constant(1.0, 3, 1, 2, 3);
std::stringstream ss, expected;
EXPECT_EQ(ss.str(), expected.str());
}
-TEST(nntrainer_Tensor, print_large_size) {
+/**
+ * @brief operator<< on a small QINT8 tensor prints the full contents;
+ *        expected output mirrors the header (typeid name, data address,
+ *        shape line) followed by each 3x2 channel slice.
+ */
+TEST(nntrainer_Tensor, print_small_size_02) {
+ nntrainer::Tensor target = constant(1.0, 4, 1, 3, 2, nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::QINT8);
+
+ std::stringstream ss, expected;
+ ss << target;
+
+ expected << '<' << typeid(target).name() << " at " << &target << ">\n"
+ << "data addr: " << target.getData() << '\n'
+ << "Shape: 4:1:3:2 [ QINT8 : NCHW ]\n"
+ << " 1 1 \n"
+ << " 1 1 \n"
+ << " 1 1 \n"
+ << "\n"
+ << "-------\n"
+ << " 1 1 \n"
+ << " 1 1 \n"
+ << " 1 1 \n"
+ << "\n"
+ << "-------\n"
+ << " 1 1 \n"
+ << " 1 1 \n"
+ << " 1 1 \n"
+ << "\n"
+ << "-------\n"
+ << " 1 1 \n"
+ << " 1 1 \n"
+ << " 1 1 \n"
+ << "\n"
+ << "-------\n";
+
+ EXPECT_EQ(ss.str(), expected.str());
+}
+
+/**
+ * @brief operator<< on a small UINT16 tensor prints the full contents;
+ *        expected output mirrors the header (typeid name, data address,
+ *        shape line) followed by each 3x3 channel slice.
+ */
+TEST(nntrainer_Tensor, print_small_size_03) {
+ nntrainer::Tensor target = constant(1.0, 2, 1, 3, 3, nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::UINT16);
+
+ std::stringstream ss, expected;
+ ss << target;
+
+ expected << '<' << typeid(target).name() << " at " << &target << ">\n"
+ << "data addr: " << target.getData() << '\n'
+ << "Shape: 2:1:3:3 [ UINT16 : NCHW ]\n"
+ << " 1 1 1 \n"
+ << " 1 1 1 \n"
+ << " 1 1 1 \n"
+ << "\n"
+ << "-------\n"
+ << " 1 1 1 \n"
+ << " 1 1 1 \n"
+ << " 1 1 1 \n"
+ << "\n"
+ << "-------\n";
+
+ EXPECT_EQ(ss.str(), expected.str());
+}
+
+TEST(nntrainer_Tensor, print_large_size_01) {
nntrainer::Tensor target = constant(1.2, 3, 10, 10, 10);
std::stringstream ss, expected;
EXPECT_EQ(ss.str(), expected.str());
}
+/**
+ * @brief operator<< on a large QINT8 tensor falls back to the abbreviated
+ *        "[first ... last]" form instead of printing every element.
+ */
+TEST(nntrainer_Tensor, print_large_size_02) {
+ nntrainer::Tensor target = constant(
+ 7, 3, 3, 256, 256, nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8);
+
+ std::stringstream ss, expected;
+
+ expected << '<' << typeid(target).name() << " at " << &target << ">\n"
+ << "data addr: " << target.getData() << '\n'
+ << "Shape: 3:3:256:256 [ QINT8 : NCHW ]\n"
+ << "[7 7 7 ... 7 7 7]\n";
+ ss << target;
+
+ EXPECT_EQ(ss.str(), expected.str());
+}
+
+/**
+ * @brief operator<< on a large UINT16 tensor falls back to the abbreviated
+ *        "[first ... last]" form instead of printing every element.
+ */
+TEST(nntrainer_Tensor, print_large_size_03) {
+ nntrainer::Tensor target =
+ constant(165, 1, 3, 128, 512, nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::UINT16);
+
+ std::stringstream ss, expected;
+
+ expected << '<' << typeid(target).name() << " at " << &target << ">\n"
+ << "data addr: " << target.getData() << '\n'
+ << "Shape: 1:3:128:512 [ UINT16 : NCHW ]\n"
+ << "[165 165 165 ... 165 165 165]\n";
+ ss << target;
+
+ EXPECT_EQ(ss.str(), expected.str());
+}
+
TEST(nntrainer_Tensor, DISABLED_equation_test_01_p) {
nntrainer::Tensor a, b, c;
nntrainer::Tensor ret1, ret2;
t.allocate();
EXPECT_TRUE(t.isAllocated());
+
+ t.deallocate();
+ EXPECT_FALSE(t.isAllocated());
}
TEST(nntrainer_Tensor, allocate_05_p) {
t.allocate();
EXPECT_TRUE(t.isAllocated());
+
+ t.deallocate();
+ EXPECT_FALSE(t.isAllocated());
}
TEST(nntrainer_Tensor, initialize_01_p) {
}
TEST(nntrainer_Tensor, initialize_11_n) {
+ // negative test: requesting an unsupported initializer on QINT8 must throw
+ nntrainer::Tensor t(
+ {1, 2, 3, 4, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::QINT8}},
+ true);
+
+ /// @note CharTensor does not support HE_NORMAL initialization
+ EXPECT_THROW(t.initialize(nntrainer::Initializer::HE_NORMAL),
+ std::invalid_argument);
+}
+
+TEST(nntrainer_Tensor, initialize_12_n) {
nntrainer::Tensor t(
{1, 2, 3, 4, {nntrainer::Tformat::NCHW, nntrainer::Tdatatype::UINT16}},
true);